/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"
#include "builtins.h"
/* Return nonzero if there is a bypass for the output of 
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
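
/* For example, an SFmode result feeding an SFmode fpstore has matching
   mode sizes and so qualifies for the (shorter) bypass latency, while a
   DFmode result feeding an SFmode store does not.  */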
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
			     rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
			    const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					enum machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
						   enum machine_mode, int *,
						   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
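
/* For example, -mfixed-range=r5-r6,fr4-fr31 is processed one
   comma-separated range per iteration of the loop above; a range that
   ends up fixing every FP register (as the kernel-mode fr4-fr31 example
   in the comment does) additionally sets MASK_DISABLE_FPREGS via the
   final check.  */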
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
	  && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}
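
/* For example, 2047 fits in 14 signed bits and can be loaded with ldo,
   0x12345800 has its low 11 bits clear and can be loaded with ldil, and
   0x1f0 can be generated with zdepi.  0x12345678 satisfies none of the
   three tests and would need a two-instruction sequence, so it is
   rejected.  */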
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
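
/* Worked example: for ival = 0x12345800, bit 31, the bits above it, and
   the low 11 bits are all clear, so the masked value x is zero and ldil
   suffices.  For ival = 0x80000800 the mask leaves x = 0x80000000, which
   matches neither 0 nor (HOST_WIDE_INT) -1 << 31; the value would change
   sign when widened from 32 to 64 bits, so it is rejected.  */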
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
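
/* Worked example: x = 0x1f0 (binary 1_1111_0000, the 5-bit value -1
   deposited at bit 4) gives lsb_mask = 0x10 and t = 0x20, a power of two,
   so it is accepted.  x = 0x210 (binary 10_0001_0000) gives t = 0x30,
   which is not a power of two; its set bits cannot come from the sign
   extension of a single 5-bit immediate, so it is rejected.  */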
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
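
/* For example, a sign-extended mask of 0...01...1 form such as 0xff is
   accepted: ~mask is a single run of ones from bit 8 upward, so adding
   its lowest set bit carries all the way out and leaves zero.  0xf0f is
   rejected because ~mask contains two separate runs of ones and the sum
   still has more than one bit set.  */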
/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
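
/* This accepts any single contiguous run of ones: e.g. mask = 0x3c
   becomes 0x40 after the addition (a single bit, so accepted), whereas
   mask = 0x5 becomes 0x6, which still has two bits set and is
   rejected.  */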
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
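
/* For a plain SYMBOL_REF sym, the sequence emitted above has roughly
   the shape

     (set tmp_reg (plus pic_offset_table_rtx (high sym)))
     (set reg (mem (lo_sum tmp_reg (unspec [sym] UNSPEC_DLTIND14R))))

   i.e. the DLT entry for sym is addressed position-independently via
   the PIC offset table register and the word is loaded from it.  */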
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr)) 
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
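
/* In short: the two dynamic models above compute the address by calling
   __tls_get_addr (for local-dynamic, UNSPEC_TLSLDBASE lets one call be
   shared between the module's symbols), while the two exec models read
   the thread pointer via gen_tp_load and add the symbol's offset,
   obtained by gen_tie_load or gen_tle_load, to it.  */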
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= (mask + 1) / 2
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
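
/* Worked example for an integer-mode reference memory (X + 0x5432):
   with mask = 0x3fff, 0x5432 & 0x3fff = 0x1432, which is below the
   halfway point 0x2000, so we round down to Y = 0x4000 and emit
   Z = X + 0x4000; the reference becomes memory (Z + 0x1432), whose
   displacement fits in 14 bits, and nearby offsets from X can share
   the same Z.  */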
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }
  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
/* For the HPPA, REG, REG+CONST and LO_SUM addresses are cost 1,
   HIGH (used when building symbolic addresses) is cost 2, and
   everything else is cost 4.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && (MEM_P (operand1)
	  || (GET_CODE (operand1) == SUBREG
	      && MEM_P (XEXP (operand1, 0))))
      && !floating_point_store_memory_operand (operand1, mode))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (reg_plus_base_memory_operand (operand1, mode)
	  && !(TARGET_PA_20
	       && !TARGET_ELF32
	       && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && (MEM_P (operand0)
	       || (GET_CODE (operand0) == SUBREG
		   && MEM_P (XEXP (operand0, 0))))
	   && !floating_point_store_memory_operand (operand0, mode))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (reg_plus_base_memory_operand (operand0, mode)
	  && !(TARGET_PA_20
	       && !TARGET_ELF32
	       && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.  For the most part,
     this is only necessary for SImode and DImode.

     Use scratch_reg to hold the address of the memory location.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      if (operand1 == CONST0_RTX (mode))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      pa_emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
1801 /* Handle the most common case: storing into a register. */
1802 else if (register_operand (operand0, mode))
1804 /* Legitimize TLS symbol references. This happens for references
1805 that aren't a legitimate constant. */
1806 if (PA_SYMBOL_REF_TLS_P (operand1))
1807 operand1 = legitimize_tls_address (operand1);
1809 if (register_operand (operand1, mode)
1810 || (GET_CODE (operand1) == CONST_INT
1811 && pa_cint_ok_for_move (INTVAL (operand1)))
1812 || (operand1 == CONST0_RTX (mode))
1813 || (GET_CODE (operand1) == HIGH
1814 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1815 /* Only `general_operands' can come here, so MEM is ok. */
1816 || GET_CODE (operand1) == MEM)
1818 /* Various sets are created during RTL generation which don't
1819 have the REG_POINTER flag correctly set. After the CSE pass,
1820 instruction recognition can fail if we don't consistently
1821 set this flag when performing register copies. This should
1822 also improve the opportunities for creating insns that use
1823 unscaled indexing. */
1824 if (REG_P (operand0) && REG_P (operand1))
1826 if (REG_POINTER (operand1)
1827 && !REG_POINTER (operand0)
1828 && !HARD_REGISTER_P (operand0))
1829 copy_reg_pointer (operand0, operand1);
1832 /* When MEMs are broken out, the REG_POINTER flag doesn't
1833 get set. In some cases, we can set the REG_POINTER flag
1834 from the declaration for the MEM. */
1835 if (REG_P (operand0)
1836 && GET_CODE (operand1) == MEM
1837 && !REG_POINTER (operand0))
1839 tree decl = MEM_EXPR (operand1);
1841 /* Set the register pointer flag and register alignment
1842 if the declaration for this memory reference is a
1843 pointer type. */
1844 if (decl)
1846 tree type;
1848 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1849 tree operand 1. */
1850 if (TREE_CODE (decl) == COMPONENT_REF)
1851 decl = TREE_OPERAND (decl, 1);
1853 type = TREE_TYPE (decl);
1854 type = strip_array_types (type);
1856 if (POINTER_TYPE_P (type))
1858 int align;
1860 type = TREE_TYPE (type);
1861 /* Using TYPE_ALIGN_OK is rather conservative as
1862 only the Ada frontend actually sets it. */
1863 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1864 : BITS_PER_UNIT);
1865 mark_reg_pointer (operand0, align);
1870 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1871 return 1;
1874 else if (GET_CODE (operand0) == MEM)
1876 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1877 && !(reload_in_progress || reload_completed))
1879 rtx temp = gen_reg_rtx (DFmode);
1881 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1882 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1883 return 1;
1885 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1887 /* Run this case quickly. */
1888 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1889 return 1;
1891 if (! (reload_in_progress || reload_completed))
1893 operands[0] = validize_mem (operand0);
1894 operands[1] = operand1 = force_reg (mode, operand1);
1898 /* Simplify the source if we need to.
1899 Note we do have to handle function labels here, even though we do
1900 not consider them legitimate constants. Loop optimizations can
1901 call the emit_move_xxx routines with one as a source. */
1902 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1903 || (GET_CODE (operand1) == HIGH
1904 && symbolic_operand (XEXP (operand1, 0), mode))
1905 || function_label_operand (operand1, VOIDmode)
1906 || tls_referenced_p (operand1))
1908 int ishighonly = 0;
1910 if (GET_CODE (operand1) == HIGH)
1912 ishighonly = 1;
1913 operand1 = XEXP (operand1, 0);
1915 if (symbolic_operand (operand1, mode))
1917 /* Argh. The assembler and linker can't handle arithmetic
1918 involving plabels.
1920 So we force the plabel into memory, load operand0 from
1921 the memory location, then add in the constant part. */
1922 if ((GET_CODE (operand1) == CONST
1923 && GET_CODE (XEXP (operand1, 0)) == PLUS
1924 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1925 VOIDmode))
1926 || function_label_operand (operand1, VOIDmode))
1928 rtx temp, const_part;
1930 /* Figure out what (if any) scratch register to use. */
1931 if (reload_in_progress || reload_completed)
1933 scratch_reg = scratch_reg ? scratch_reg : operand0;
1934 /* SCRATCH_REG will hold an address and maybe the actual
1935 data. We want it in WORD_MODE regardless of what mode it
1936 was originally given to us. */
1937 scratch_reg = force_mode (word_mode, scratch_reg);
1939 else if (flag_pic)
1940 scratch_reg = gen_reg_rtx (Pmode);
1942 if (GET_CODE (operand1) == CONST)
1944 /* Save away the constant part of the expression. */
1945 const_part = XEXP (XEXP (operand1, 0), 1);
1946 gcc_assert (GET_CODE (const_part) == CONST_INT);
1948 /* Force the function label into memory. */
1949 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1951 else
1953 /* No constant part. */
1954 const_part = NULL_RTX;
1956 /* Force the function label into memory. */
1957 temp = force_const_mem (mode, operand1);
1961 /* Get the address of the memory location. PIC-ify it if
1962 necessary. */
1963 temp = XEXP (temp, 0);
1964 if (flag_pic)
1965 temp = legitimize_pic_address (temp, mode, scratch_reg);
1967 /* Put the address of the memory location into our destination
1968 register. */
1969 operands[1] = temp;
1970 pa_emit_move_sequence (operands, mode, scratch_reg);
1972 /* Now load from the memory location into our destination
1973 register. */
1974 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1975 pa_emit_move_sequence (operands, mode, scratch_reg);
1977 /* And add back in the constant part. */
1978 if (const_part != NULL_RTX)
1979 expand_inc (operand0, const_part);
1981 return 1;
1984 if (flag_pic)
1986 rtx temp;
1988 if (reload_in_progress || reload_completed)
1990 temp = scratch_reg ? scratch_reg : operand0;
1991 /* TEMP will hold an address and maybe the actual
1992 data. We want it in WORD_MODE regardless of what mode it
1993 was originally given to us. */
1994 temp = force_mode (word_mode, temp);
1996 else
1997 temp = gen_reg_rtx (Pmode);
1999 /* (const (plus (symbol) (const_int))) must be forced to
2000 memory during/after reload if the const_int will not fit
2001 in 14 bits. */
2002 if (GET_CODE (operand1) == CONST
2003 && GET_CODE (XEXP (operand1, 0)) == PLUS
2004 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2005 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2006 && (reload_completed || reload_in_progress)
2007 && flag_pic)
2009 rtx const_mem = force_const_mem (mode, operand1);
2010 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2011 mode, temp);
2012 operands[1] = replace_equiv_address (const_mem, operands[1]);
2013 pa_emit_move_sequence (operands, mode, temp);
2015 else
2017 operands[1] = legitimize_pic_address (operand1, mode, temp);
2018 if (REG_P (operand0) && REG_P (operands[1]))
2019 copy_reg_pointer (operand0, operands[1]);
2020 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2023 /* On the HPPA, references to data space are supposed to use dp,
2024 register 27, but showing it in the RTL inhibits various cse
2025 and loop optimizations. */
2026 else
2028 rtx temp, set;
2030 if (reload_in_progress || reload_completed)
2032 temp = scratch_reg ? scratch_reg : operand0;
2033 /* TEMP will hold an address and maybe the actual
2034 data. We want it in WORD_MODE regardless of what mode it
2035 was originally given to us. */
2036 temp = force_mode (word_mode, temp);
2038 else
2039 temp = gen_reg_rtx (mode);
2041 /* Loading a SYMBOL_REF into a register makes that register
2042 safe to be used as the base in an indexed address.
2044 Don't mark hard registers though. That loses. */
2045 if (GET_CODE (operand0) == REG
2046 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2047 mark_reg_pointer (operand0, BITS_PER_UNIT);
2048 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2049 mark_reg_pointer (temp, BITS_PER_UNIT);
2051 if (ishighonly)
2052 set = gen_rtx_SET (mode, operand0, temp);
2053 else
2054 set = gen_rtx_SET (VOIDmode,
2055 operand0,
2056 gen_rtx_LO_SUM (mode, temp, operand1));
2058 emit_insn (gen_rtx_SET (VOIDmode,
2059 temp,
2060 gen_rtx_HIGH (mode, operand1)));
2061 emit_insn (set);
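/* Illustrative only (not in the original source): for a non-PIC
   symbolic load the two sets above typically assemble to
       ldil L'sym,%rT
       ldo R'sym(%rT),%rD
   i.e. a HIGH followed by the LO_SUM, or just the HIGH copy when
   ISHIGHONLY is set. */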
2064 return 1;
2066 else if (tls_referenced_p (operand1))
2068 rtx tmp = operand1;
2069 rtx addend = NULL;
2071 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2073 addend = XEXP (XEXP (tmp, 0), 1);
2074 tmp = XEXP (XEXP (tmp, 0), 0);
2077 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2078 tmp = legitimize_tls_address (tmp);
2079 if (addend)
2081 tmp = gen_rtx_PLUS (mode, tmp, addend);
2082 tmp = force_operand (tmp, operands[0]);
2084 operands[1] = tmp;
2086 else if (GET_CODE (operand1) != CONST_INT
2087 || !pa_cint_ok_for_move (INTVAL (operand1)))
2089 rtx insn, temp;
2090 rtx op1 = operand1;
2091 HOST_WIDE_INT value = 0;
2092 HOST_WIDE_INT insv = 0;
2093 int insert = 0;
2095 if (GET_CODE (operand1) == CONST_INT)
2096 value = INTVAL (operand1);
2098 if (TARGET_64BIT
2099 && GET_CODE (operand1) == CONST_INT
2100 && HOST_BITS_PER_WIDE_INT > 32
2101 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2103 HOST_WIDE_INT nval;
2105 /* Extract the low order 32 bits of the value and sign extend.
2106 If the new value is the same as the original value, we can
2107 use the original value as-is. If the new value is
2108 different, we use it and insert the most-significant 32 bits
2109 of the original value into the final result. */
2110 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2111 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2112 if (value != nval)
2114 #if HOST_BITS_PER_WIDE_INT > 32
2115 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2116 #endif
2117 insert = 1;
2118 value = nval;
2119 operand1 = GEN_INT (nval);
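/* Worked example (illustrative, not from the original source):
   value == 0x1ffffffff sign-extends its low 32 bits to nval == -1,
   which differs from value, so we load -1 here and later insert
   insv == 1 (the upper 32 bits) with an insv pattern. */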
2123 if (reload_in_progress || reload_completed)
2124 temp = scratch_reg ? scratch_reg : operand0;
2125 else
2126 temp = gen_reg_rtx (mode);
2128 /* We don't directly split DImode constants on 32-bit targets
2129 because PLUS uses an 11-bit immediate and the insn sequence
2130 generated is not as efficient as the one using HIGH/LO_SUM. */
2131 if (GET_CODE (operand1) == CONST_INT
2132 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2133 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2134 && !insert)
2136 /* Directly break constant into high and low parts. This
2137 provides better optimization opportunities because various
2138 passes recognize constants split with PLUS but not LO_SUM.
2139 We use a 14-bit signed low part except when the addition
2140 of 0x4000 to the high part might change the sign of the
2141 high part. */
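/* Illustrative example (not in the original source): for an SImode
   value of 0x12347003, low == 0x3003 >= 0x2000, so high becomes
   0x12348000 and low becomes -0xffd; the pair is then materialized
   by an ldil plus a 14-bit signed ldo. */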
2142 HOST_WIDE_INT low = value & 0x3fff;
2143 HOST_WIDE_INT high = value & ~ 0x3fff;
2145 if (low >= 0x2000)
2147 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2148 high += 0x2000;
2149 else
2150 high += 0x4000;
2153 low = value - high;
2155 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2156 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2158 else
2160 emit_insn (gen_rtx_SET (VOIDmode, temp,
2161 gen_rtx_HIGH (mode, operand1)));
2162 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2165 insn = emit_move_insn (operands[0], operands[1]);
2167 /* Now insert the most significant 32 bits of the value
2168 into the register. When we don't have a second register
2169 available, it could take up to nine instructions to load
2170 a 64-bit integer constant. Prior to reload, we force
2171 constants that would take more than three instructions
2172 to load to the constant pool. During and after reload,
2173 we have to handle all possible values. */
2174 if (insert)
2176 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2177 register and the value to be inserted is outside the
2178 range that can be loaded with three depdi instructions. */
2179 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2181 operand1 = GEN_INT (insv);
2183 emit_insn (gen_rtx_SET (VOIDmode, temp,
2184 gen_rtx_HIGH (mode, operand1)));
2185 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2186 if (mode == DImode)
2187 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2188 const0_rtx, temp));
2189 else
2190 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2191 const0_rtx, temp));
2193 else
2195 int len = 5, pos = 27;
2197 /* Insert the bits using the depdi instruction. */
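/* Sketch of the intended behavior (not verified output): each
   depdi deposits a sign-extended 5-bit field that is widened
   leftwards while the following bits match its sign, so e.g.
   insv == -1 collapses to a single depdi -1,0,32. */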
2198 while (pos >= 0)
2200 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2201 HOST_WIDE_INT sign = v5 < 0;
2203 /* Left extend the insertion. */
2204 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2205 while (pos > 0 && (insv & 1) == sign)
2207 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2208 len += 1;
2209 pos -= 1;
2212 if (mode == DImode)
2213 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2214 GEN_INT (pos), GEN_INT (v5)));
2215 else
2216 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2217 GEN_INT (pos), GEN_INT (v5)));
2219 len = pos > 0 && pos < 5 ? pos : 5;
2220 pos -= len;
2225 set_unique_reg_note (insn, REG_EQUAL, op1);
2227 return 1;
2230 /* Now have insn-emit do whatever it normally does. */
2231 return 0;
2234 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2235 it will need a link/runtime reloc). */
2237 int
2238 pa_reloc_needed (tree exp)
2240 int reloc = 0;
2242 switch (TREE_CODE (exp))
2244 case ADDR_EXPR:
2245 return 1;
2247 case POINTER_PLUS_EXPR:
2248 case PLUS_EXPR:
2249 case MINUS_EXPR:
2250 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2251 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2252 break;
2254 CASE_CONVERT:
2255 case NON_LVALUE_EXPR:
2256 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2257 break;
2259 case CONSTRUCTOR:
2261 tree value;
2262 unsigned HOST_WIDE_INT ix;
2264 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2265 if (value)
2266 reloc |= pa_reloc_needed (value);
2268 break;
2270 case ERROR_MARK:
2271 break;
2273 default:
2274 break;
2276 return reloc;
2280 /* Return the best assembler insn template
2281 for moving operands[1] into operands[0] as a fullword. */
2282 const char *
2283 pa_singlemove_string (rtx *operands)
2285 HOST_WIDE_INT intval;
2287 if (GET_CODE (operands[0]) == MEM)
2288 return "stw %r1,%0";
2289 if (GET_CODE (operands[1]) == MEM)
2290 return "ldw %1,%0";
2291 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2293 long i;
2294 REAL_VALUE_TYPE d;
2296 gcc_assert (GET_MODE (operands[1]) == SFmode);
2298 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2299 bit pattern. */
2300 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2301 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2303 operands[1] = GEN_INT (i);
2304 /* Fall through to CONST_INT case. */
2306 if (GET_CODE (operands[1]) == CONST_INT)
2308 intval = INTVAL (operands[1]);
2310 if (VAL_14_BITS_P (intval))
2311 return "ldi %1,%0";
2312 else if ((intval & 0x7ff) == 0)
2313 return "ldil L'%1,%0";
2314 else if (pa_zdepi_cint_p (intval))
2315 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2316 else
2317 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2319 return "copy %1,%0";
2323 /* Compute position (in OP[1]) and width (in OP[2])
2324 useful for copying IMM to a register using the zdepi
2325 instructions. Store the immediate value to insert in OP[0]. */
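/* Worked example (illustrative, not from the original source):
   IMM == 0x00ff0000 gives lsb == 16, len == 8 and a sign-extended
   field of -1, so OP[] == { -1, 15, 8 } and zdepi -1,15,8
   reconstructs 0x00ff0000. */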
2326 static void
2327 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2329 int lsb, len;
2331 /* Find the least significant set bit in IMM. */
2332 for (lsb = 0; lsb < 32; lsb++)
2334 if ((imm & 1) != 0)
2335 break;
2336 imm >>= 1;
2339 /* Choose variants based on *sign* of the 5-bit field. */
2340 if ((imm & 0x10) == 0)
2341 len = (lsb <= 28) ? 4 : 32 - lsb;
2342 else
2344 /* Find the width of the bitstring in IMM. */
2345 for (len = 5; len < 32 - lsb; len++)
2347 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2348 break;
2351 /* Sign extend IMM as a 5-bit value. */
2352 imm = (imm & 0xf) - 0x10;
2355 op[0] = imm;
2356 op[1] = 31 - lsb;
2357 op[2] = len;
2360 /* Compute position (in OP[1]) and width (in OP[2])
2361 useful for copying IMM to a register using the depdi,z
2362 instructions. Store the immediate value to insert in OP[0]. */
2364 static void
2365 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2367 int lsb, len, maxlen;
2369 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2371 /* Find the least significant set bit in IMM. */
2372 for (lsb = 0; lsb < maxlen; lsb++)
2374 if ((imm & 1) != 0)
2375 break;
2376 imm >>= 1;
2379 /* Choose variants based on *sign* of the 5-bit field. */
2380 if ((imm & 0x10) == 0)
2381 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2382 else
2384 /* Find the width of the bitstring in IMM. */
2385 for (len = 5; len < maxlen - lsb; len++)
2387 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2388 break;
2391 /* Extend length if host is narrow and IMM is negative. */
2392 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2393 len += 32;
2395 /* Sign extend IMM as a 5-bit value. */
2396 imm = (imm & 0xf) - 0x10;
2399 op[0] = imm;
2400 op[1] = 63 - lsb;
2401 op[2] = len;
2404 /* Output assembler code to perform a doubleword move insn
2405 with operands OPERANDS. */
2407 const char *
2408 pa_output_move_double (rtx *operands)
2410 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2411 rtx latehalf[2];
2412 rtx addreg0 = 0, addreg1 = 0;
2414 /* First classify both operands. */
2416 if (REG_P (operands[0]))
2417 optype0 = REGOP;
2418 else if (offsettable_memref_p (operands[0]))
2419 optype0 = OFFSOP;
2420 else if (GET_CODE (operands[0]) == MEM)
2421 optype0 = MEMOP;
2422 else
2423 optype0 = RNDOP;
2425 if (REG_P (operands[1]))
2426 optype1 = REGOP;
2427 else if (CONSTANT_P (operands[1]))
2428 optype1 = CNSTOP;
2429 else if (offsettable_memref_p (operands[1]))
2430 optype1 = OFFSOP;
2431 else if (GET_CODE (operands[1]) == MEM)
2432 optype1 = MEMOP;
2433 else
2434 optype1 = RNDOP;
2436 /* Check for the cases that the operand constraints are not
2437 supposed to allow to happen. */
2438 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2440 /* Handle copies between general and floating registers. */
2442 if (optype0 == REGOP && optype1 == REGOP
2443 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2445 if (FP_REG_P (operands[0]))
2447 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2448 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2449 return "{fldds|fldd} -16(%%sp),%0";
2451 else
2453 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2454 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2455 return "{ldws|ldw} -12(%%sp),%R0";
2459 /* Handle auto decrementing and incrementing loads and stores
2460 specifically, since the structure of the function doesn't work
2461 for them without major modification. Do it better when we teach
2462 this port about the general inc/dec addressing of the PA.
2463 (This was written by tege. Chide him if it doesn't work.) */
2465 if (optype0 == MEMOP)
2467 /* We have to output the address syntax ourselves, since print_operand
2468 doesn't deal with the addresses we want to use. Fix this later. */
2470 rtx addr = XEXP (operands[0], 0);
2471 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2473 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2475 operands[0] = XEXP (addr, 0);
2476 gcc_assert (GET_CODE (operands[1]) == REG
2477 && GET_CODE (operands[0]) == REG);
2479 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2481 /* No overlap between high target register and address
2482 register. (We do this in a non-obvious way to
2483 save a register file writeback) */
2484 if (GET_CODE (addr) == POST_INC)
2485 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2486 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2488 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2490 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2492 operands[0] = XEXP (addr, 0);
2493 gcc_assert (GET_CODE (operands[1]) == REG
2494 && GET_CODE (operands[0]) == REG);
2496 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2497 /* No overlap between high target register and address
2498 register. (We do this in a non-obvious way to save a
2499 register file writeback) */
2500 if (GET_CODE (addr) == PRE_INC)
2501 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2502 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2505 if (optype1 == MEMOP)
2507 /* We have to output the address syntax ourselves, since print_operand
2508 doesn't deal with the addresses we want to use. Fix this later. */
2510 rtx addr = XEXP (operands[1], 0);
2511 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2513 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2515 operands[1] = XEXP (addr, 0);
2516 gcc_assert (GET_CODE (operands[0]) == REG
2517 && GET_CODE (operands[1]) == REG);
2519 if (!reg_overlap_mentioned_p (high_reg, addr))
2521 /* No overlap between high target register and address
2522 register. (We do this in a non-obvious way to
2523 save a register file writeback) */
2524 if (GET_CODE (addr) == POST_INC)
2525 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2526 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2528 else
2530 /* This is an undefined situation. We should load into the
2531 address register *and* update that register. Probably
2532 we don't need to handle this at all. */
2533 if (GET_CODE (addr) == POST_INC)
2534 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2535 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2538 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2540 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2542 operands[1] = XEXP (addr, 0);
2543 gcc_assert (GET_CODE (operands[0]) == REG
2544 && GET_CODE (operands[1]) == REG);
2546 if (!reg_overlap_mentioned_p (high_reg, addr))
2548 /* No overlap between high target register and address
2549 register. (We do this in a non-obvious way to
2550 save a register file writeback) */
2551 if (GET_CODE (addr) == PRE_INC)
2552 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2553 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2555 else
2557 /* This is an undefined situation. We should load into the
2558 address register *and* update that register. Probably
2559 we don't need to handle this at all. */
2560 if (GET_CODE (addr) == PRE_INC)
2561 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2562 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2565 else if (GET_CODE (addr) == PLUS
2566 && GET_CODE (XEXP (addr, 0)) == MULT)
2568 rtx xoperands[4];
2569 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2571 if (!reg_overlap_mentioned_p (high_reg, addr))
2573 xoperands[0] = high_reg;
2574 xoperands[1] = XEXP (addr, 1);
2575 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2576 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2577 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2578 xoperands);
2579 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2581 else
2583 xoperands[0] = high_reg;
2584 xoperands[1] = XEXP (addr, 1);
2585 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2586 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2587 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2588 xoperands);
2589 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2594 /* If an operand is an unoffsettable memory ref, find a register
2595 we can increment temporarily to make it refer to the second word. */
2597 if (optype0 == MEMOP)
2598 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2600 if (optype1 == MEMOP)
2601 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2603 /* Ok, we can do one word at a time.
2604 Normally we do the low-numbered word first.
2606 In either case, set up in LATEHALF the operands to use
2607 for the high-numbered word and in some cases alter the
2608 operands in OPERANDS to be suitable for the low-numbered word. */
2610 if (optype0 == REGOP)
2611 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2612 else if (optype0 == OFFSOP)
2613 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2614 else
2615 latehalf[0] = operands[0];
2617 if (optype1 == REGOP)
2618 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2619 else if (optype1 == OFFSOP)
2620 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2621 else if (optype1 == CNSTOP)
2622 split_double (operands[1], &operands[1], &latehalf[1]);
2623 else
2624 latehalf[1] = operands[1];
2626 /* If the first move would clobber the source of the second one,
2627 do them in the other order.
2629 This can happen in two cases:
2631 mem -> register where the first half of the destination register
2632 is the same register used in the memory's address. Reload
2633 can create such insns.
2635 mem in this case will be either register indirect or register
2636 indirect plus a valid offset.
2638 register -> register move where REGNO(dst) == REGNO(src + 1)
2639 someone (Tim/Tege?) claimed this can happen for parameter loads.
2641 Handle mem -> register case first. */
2642 if (optype0 == REGOP
2643 && (optype1 == MEMOP || optype1 == OFFSOP)
2644 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2645 operands[1], 0))
2647 /* Do the late half first. */
2648 if (addreg1)
2649 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2650 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2652 /* Then clobber. */
2653 if (addreg1)
2654 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2655 return pa_singlemove_string (operands);
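/* Illustrative only (not in the original source): loading the pair
   %r4/%r5 from 0(%r4) emits ldw 4(%r4),%r5 before ldw 0(%r4),%r4,
   since doing the low word first would clobber the address
   register. */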
2658 /* Now handle register -> register case. */
2659 if (optype0 == REGOP && optype1 == REGOP
2660 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2662 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2663 return pa_singlemove_string (operands);
2666 /* Normal case: do the two words, low-numbered first. */
2668 output_asm_insn (pa_singlemove_string (operands), operands);
2670 /* Make any unoffsettable addresses point at high-numbered word. */
2671 if (addreg0)
2672 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2673 if (addreg1)
2674 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2676 /* Do that word. */
2677 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2679 /* Undo the adds we just did. */
2680 if (addreg0)
2681 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2682 if (addreg1)
2683 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2685 return "";
2688 const char *
2689 pa_output_fp_move_double (rtx *operands)
2691 if (FP_REG_P (operands[0]))
2693 if (FP_REG_P (operands[1])
2694 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2695 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2696 else
2697 output_asm_insn ("fldd%F1 %1,%0", operands);
2699 else if (FP_REG_P (operands[1]))
2701 output_asm_insn ("fstd%F0 %1,%0", operands);
2703 else
2705 rtx xoperands[2];
2707 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2709 /* This is a pain. You have to be prepared to deal with an
2710 arbitrary address here, including pre/post increment/decrement,
2712 so we avoid this in the MD. */
2713 gcc_assert (GET_CODE (operands[0]) == REG);
2715 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2716 xoperands[0] = operands[0];
2717 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2719 return "";
2722 /* Return a REG that occurs in ADDR with coefficient 1.
2723 ADDR can be effectively incremented by incrementing REG. */
2725 static rtx
2726 find_addr_reg (rtx addr)
2728 while (GET_CODE (addr) == PLUS)
2730 if (GET_CODE (XEXP (addr, 0)) == REG)
2731 addr = XEXP (addr, 0);
2732 else if (GET_CODE (XEXP (addr, 1)) == REG)
2733 addr = XEXP (addr, 1);
2734 else if (CONSTANT_P (XEXP (addr, 0)))
2735 addr = XEXP (addr, 1);
2736 else if (CONSTANT_P (XEXP (addr, 1)))
2737 addr = XEXP (addr, 0);
2738 else
2739 gcc_unreachable ();
2741 gcc_assert (GET_CODE (addr) == REG);
2742 return addr;
2745 /* Emit code to perform a block move.
2747 OPERANDS[0] is the destination pointer as a REG, clobbered.
2748 OPERANDS[1] is the source pointer as a REG, clobbered.
2749 OPERANDS[2] is a register for temporary storage.
2750 OPERANDS[3] is a register for temporary storage.
2751 OPERANDS[4] is the size as a CONST_INT
2752 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2753 OPERANDS[6] is another temporary register. */
2755 const char *
2756 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2758 int align = INTVAL (operands[5]);
2759 unsigned long n_bytes = INTVAL (operands[4]);
2761 /* We can't move more than a word at a time because the PA
2762 has no integer move insns longer than a word. (Could use fp mem ops?) */
2763 if (align > (TARGET_64BIT ? 8 : 4))
2764 align = (TARGET_64BIT ? 8 : 4);
2766 /* Note that we know each loop below will execute at least twice
2767 (else we would have open-coded the copy). */
2768 switch (align)
2770 case 8:
2771 /* Pre-adjust the loop counter. */
2772 operands[4] = GEN_INT (n_bytes - 16);
2773 output_asm_insn ("ldi %4,%2", operands);
2775 /* Copying loop. */
2776 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2777 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2778 output_asm_insn ("std,ma %3,8(%0)", operands);
2779 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2780 output_asm_insn ("std,ma %6,8(%0)", operands);
2782 /* Handle the residual. There could be up to 15 bytes of
2783 residual to copy! */
2784 if (n_bytes % 16 != 0)
2786 operands[4] = GEN_INT (n_bytes % 8);
2787 if (n_bytes % 16 >= 8)
2788 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2789 if (n_bytes % 8 != 0)
2790 output_asm_insn ("ldd 0(%1),%6", operands);
2791 if (n_bytes % 16 >= 8)
2792 output_asm_insn ("std,ma %3,8(%0)", operands);
2793 if (n_bytes % 8 != 0)
2794 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2796 return "";
2798 case 4:
2799 /* Pre-adjust the loop counter. */
2800 operands[4] = GEN_INT (n_bytes - 8);
2801 output_asm_insn ("ldi %4,%2", operands);
2803 /* Copying loop. */
2804 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2805 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2806 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2807 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2808 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2810 /* Handle the residual. There could be up to 7 bytes of
2811 residual to copy! */
2812 if (n_bytes % 8 != 0)
2814 operands[4] = GEN_INT (n_bytes % 4);
2815 if (n_bytes % 8 >= 4)
2816 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2817 if (n_bytes % 4 != 0)
2818 output_asm_insn ("ldw 0(%1),%6", operands);
2819 if (n_bytes % 8 >= 4)
2820 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2821 if (n_bytes % 4 != 0)
2822 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2824 return "";
2826 case 2:
2827 /* Pre-adjust the loop counter. */
2828 operands[4] = GEN_INT (n_bytes - 4);
2829 output_asm_insn ("ldi %4,%2", operands);
2831 /* Copying loop. */
2832 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2833 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2834 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2835 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2836 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2838 /* Handle the residual. */
2839 if (n_bytes % 4 != 0)
2841 if (n_bytes % 4 >= 2)
2842 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2843 if (n_bytes % 2 != 0)
2844 output_asm_insn ("ldb 0(%1),%6", operands);
2845 if (n_bytes % 4 >= 2)
2846 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2847 if (n_bytes % 2 != 0)
2848 output_asm_insn ("stb %6,0(%0)", operands);
2850 return "";
2852 case 1:
2853 /* Pre-adjust the loop counter. */
2854 operands[4] = GEN_INT (n_bytes - 2);
2855 output_asm_insn ("ldi %4,%2", operands);
2857 /* Copying loop. */
2858 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2859 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2860 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2861 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2862 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2864 /* Handle the residual. */
2865 if (n_bytes % 2 != 0)
2867 output_asm_insn ("ldb 0(%1),%3", operands);
2868 output_asm_insn ("stb %3,0(%0)", operands);
2870 return "";
2872 default:
2873 gcc_unreachable ();
2877 /* Count the number of insns necessary to handle this block move.
2879 Basic structure is the same as pa_output_block_move, except that we
2880 count insns rather than emit them. */
2882 static int
2883 compute_movmem_length (rtx insn)
2885 rtx pat = PATTERN (insn);
2886 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2887 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2888 unsigned int n_insns = 0;
2890 /* We can't move more than a word at a time because the PA
2891 has no integer move insns longer than a word. (Could use fp mem ops?) */
2892 if (align > (TARGET_64BIT ? 8 : 4))
2893 align = (TARGET_64BIT ? 8 : 4);
2895 /* The basic copying loop. */
2896 n_insns = 6;
2898 /* Residuals. */
2899 if (n_bytes % (2 * align) != 0)
2901 if ((n_bytes % (2 * align)) >= align)
2902 n_insns += 2;
2904 if ((n_bytes % align) != 0)
2905 n_insns += 2;
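/* Illustrative check (not from the original source): align == 4 and
   n_bytes == 23 gives the 6 loop insns, +2 for the 23 % 8 >= 4 word
   residual and +2 for the 23 % 4 != 0 byte residual, i.e. 10 insns
   or a length of 40 bytes. */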
2908 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2909 return n_insns * 4;
2912 /* Emit code to perform a block clear.
2914 OPERANDS[0] is the destination pointer as a REG, clobbered.
2915 OPERANDS[1] is a register for temporary storage.
2916 OPERANDS[2] is the size as a CONST_INT
2917 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2919 const char *
2920 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2922 int align = INTVAL (operands[3]);
2923 unsigned long n_bytes = INTVAL (operands[2]);
2925 /* We can't clear more than a word at a time because the PA
2926 has no integer move insns longer than a word. */
2927 if (align > (TARGET_64BIT ? 8 : 4))
2928 align = (TARGET_64BIT ? 8 : 4);
2930 /* Note that we know each loop below will execute at least twice
2931 (else we would have open-coded the clear). */
2932 switch (align)
2934 case 8:
2935 /* Pre-adjust the loop counter. */
2936 operands[2] = GEN_INT (n_bytes - 16);
2937 output_asm_insn ("ldi %2,%1", operands);
2939 /* Loop. */
2940 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2941 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2942 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2944 /* Handle the residual. There could be up to 15 bytes of
2945 residual to clear! */
2946 if (n_bytes % 16 != 0)
2948 operands[2] = GEN_INT (n_bytes % 8);
2949 if (n_bytes % 16 >= 8)
2950 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2951 if (n_bytes % 8 != 0)
2952 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2954 return "";
2956 case 4:
2957 /* Pre-adjust the loop counter. */
2958 operands[2] = GEN_INT (n_bytes - 8);
2959 output_asm_insn ("ldi %2,%1", operands);
2961 /* Loop. */
2962 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2963 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2964 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2966 /* Handle the residual. There could be up to 7 bytes of
2967 residual to clear! */
2968 if (n_bytes % 8 != 0)
2970 operands[2] = GEN_INT (n_bytes % 4);
2971 if (n_bytes % 8 >= 4)
2972 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2973 if (n_bytes % 4 != 0)
2974 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2976 return "";
2978 case 2:
2979 /* Pre-adjust the loop counter. */
2980 operands[2] = GEN_INT (n_bytes - 4);
2981 output_asm_insn ("ldi %2,%1", operands);
2983 /* Loop. */
2984 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2985 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2986 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2988 /* Handle the residual. */
2989 if (n_bytes % 4 != 0)
2991 if (n_bytes % 4 >= 2)
2992 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2993 if (n_bytes % 2 != 0)
2994 output_asm_insn ("stb %%r0,0(%0)", operands);
2996 return "";
2998 case 1:
2999 /* Pre-adjust the loop counter. */
3000 operands[2] = GEN_INT (n_bytes - 2);
3001 output_asm_insn ("ldi %2,%1", operands);
3003 /* Loop. */
3004 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3005 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3006 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3008 /* Handle the residual. */
3009 if (n_bytes % 2 != 0)
3010 output_asm_insn ("stb %%r0,0(%0)", operands);
3012 return "";
3014 default:
3015 gcc_unreachable ();
3019 /* Count the number of insns necessary to handle this block clear.
3021 Basic structure is the same as pa_output_block_clear, except that we
3022 count insns rather than emit them. */
3024 static int
3025 compute_clrmem_length (rtx insn)
3027 rtx pat = PATTERN (insn);
3028 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3029 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3030 unsigned int n_insns = 0;
3032 /* We can't clear more than a word at a time because the PA
3033 has no integer move insns longer than a word. */
3034 if (align > (TARGET_64BIT ? 8 : 4))
3035 align = (TARGET_64BIT ? 8 : 4);
3037 /* The basic loop. */
3038 n_insns = 4;
3040 /* Residuals. */
3041 if (n_bytes % (2 * align) != 0)
3043 if ((n_bytes % (2 * align)) >= align)
3044 n_insns++;
3046 if ((n_bytes % align) != 0)
3047 n_insns++;
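/* Illustrative check (not from the original source): align == 4 and
   n_bytes == 10 gives the 4 loop insns plus one byte-store residual
   for 10 % 4 != 0, i.e. 5 insns or a length of 20 bytes. */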
3050 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3051 return n_insns * 4;
3055 const char *
3056 pa_output_and (rtx *operands)
3058 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3060 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3061 int ls0, ls1, ms0, p, len;
3063 for (ls0 = 0; ls0 < 32; ls0++)
3064 if ((mask & (1 << ls0)) == 0)
3065 break;
3067 for (ls1 = ls0; ls1 < 32; ls1++)
3068 if ((mask & (1 << ls1)) != 0)
3069 break;
3071 for (ms0 = ls1; ms0 < 32; ms0++)
3072 if ((mask & (1 << ms0)) == 0)
3073 break;
3075 gcc_assert (ms0 == 32);
3077 if (ls1 == 32)
3079 len = ls0;
3081 gcc_assert (len);
3083 operands[2] = GEN_INT (len);
3084 return "{extru|extrw,u} %1,31,%2,%0";
3086 else
3088 /* We could use this `depi' for the case above as well, but `depi'
3089 requires one more register file access than an `extru'. */
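/* Illustrative examples (not in the original source): a mask of
   0x7f has ls1 == 32 and yields extru %1,31,7,%0, while 0xffffff00
   yields depi 0,31,8,%0 to clear the low eight bits. */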
3091 p = 31 - ls0;
3092 len = ls1 - ls0;
3094 operands[2] = GEN_INT (p);
3095 operands[3] = GEN_INT (len);
3096 return "{depi|depwi} 0,%2,%3,%0";
3099 else
3100 return "and %1,%2,%0";
3103 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3104 storing the result in operands[0]. */
3105 const char *
3106 pa_output_64bit_and (rtx *operands)
3108 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3110 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3111 int ls0, ls1, ms0, p, len;
3113 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3114 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3115 break;
3117 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3118 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3119 break;
3121 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3122 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3123 break;
3125 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3127 if (ls1 == HOST_BITS_PER_WIDE_INT)
3129 len = ls0;
3131 gcc_assert (len);
3133 operands[2] = GEN_INT (len);
3134 return "extrd,u %1,63,%2,%0";
3136 else
3138 /* We could use this `depdi' for the case above as well, but `depdi'
3139 requires one more register file access than an `extrd,u'. */
3141 p = 63 - ls0;
3142 len = ls1 - ls0;
3144 operands[2] = GEN_INT (p);
3145 operands[3] = GEN_INT (len);
3146 return "depdi 0,%2,%3,%0";
3149 else
3150 return "and %1,%2,%0";
3153 const char *
3154 pa_output_ior (rtx *operands)
3156 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3157 int bs0, bs1, p, len;
3159 if (INTVAL (operands[2]) == 0)
3160 return "copy %1,%0";
3162 for (bs0 = 0; bs0 < 32; bs0++)
3163 if ((mask & (1 << bs0)) != 0)
3164 break;
3166 for (bs1 = bs0; bs1 < 32; bs1++)
3167 if ((mask & (1 << bs1)) == 0)
3168 break;
3170 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3172 p = 31 - bs0;
3173 len = bs1 - bs0;
3175 operands[2] = GEN_INT (p);
3176 operands[3] = GEN_INT (len);
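/* Illustrative example (not in the original source): a mask of
   0x00ff0000 gives bs0 == 16 and bs1 == 24, so p == 15, len == 8
   and the returned template assembles to depi -1,15,8,%0. */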
3177 return "{depi|depwi} -1,%2,%3,%0";
3180 /* Return a string to perform a bitwise-ior of operands[1] with operands[2]
3181 storing the result in operands[0]. */
3182 const char *
3183 pa_output_64bit_ior (rtx *operands)
3185 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3186 int bs0, bs1, p, len;
3188 if (INTVAL (operands[2]) == 0)
3189 return "copy %1,%0";
3191 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3192 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3193 break;
3195 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3196 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3197 break;
3199 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3200 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3202 p = 63 - bs0;
3203 len = bs1 - bs0;
3205 operands[2] = GEN_INT (p);
3206 operands[3] = GEN_INT (len);
3207 return "depdi -1,%2,%3,%0";
3210 /* Target hook for assembling integer objects. This code handles
3211 aligned SI and DI integers specially since function references
3212 must be preceded by P%. */
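/* Illustrative (not in the original source): an aligned word-size
   reference to a function foo is emitted as ".word P%foo" (or
   ".dword P%foo" on 64-bit targets), marking a plabel reference. */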
3214 static bool
3215 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3217 if (size == UNITS_PER_WORD
3218 && aligned_p
3219 && function_label_operand (x, VOIDmode))
3221 fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);
3223 /* We don't want an OPD when generating fast indirect calls. */
3224 if (!TARGET_FAST_INDIRECT_CALLS)
3225 fputs ("P%", asm_out_file);
3227 output_addr_const (asm_out_file, x);
3228 fputc ('\n', asm_out_file);
3229 return true;
3231 return default_assemble_integer (x, size, aligned_p);
3234 /* Output an ascii string. */
3235 void
3236 pa_output_ascii (FILE *file, const char *p, int size)
3238 int i;
3239 int chars_output;
3240 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3242 /* The HP assembler can only take strings of 256 characters at one
3243 time. This is a limitation on input line length, *not* the
3244 length of the string. Sigh. Even worse, it seems that the
3245 restriction is on the number of input characters (see \xnn &
3246 \whatever). So we have to do this very carefully. */
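/* Illustrative (not in the original source): a quote or backslash
   gets a backslash escape, an unprintable byte such as 0x02 comes
   out as \x02, and a fresh .STRING directive is started once about
   243 output characters have accumulated. */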
3248 fputs ("\t.STRING \"", file);
3250 chars_output = 0;
3251 for (i = 0; i < size; i += 4)
3253 int co = 0;
3254 int io = 0;
3255 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3257 register unsigned int c = (unsigned char) p[i + io];
3259 if (c == '\"' || c == '\\')
3260 partial_output[co++] = '\\';
3261 if (c >= ' ' && c < 0177)
3262 partial_output[co++] = c;
3263 else
3265 unsigned int hexd;
3266 partial_output[co++] = '\\';
3267 partial_output[co++] = 'x';
3268 hexd = c / 16 - 0 + '0';
3269 if (hexd > '9')
3270 hexd -= '9' - 'a' + 1;
3271 partial_output[co++] = hexd;
3272 hexd = c % 16 - 0 + '0';
3273 if (hexd > '9')
3274 hexd -= '9' - 'a' + 1;
3275 partial_output[co++] = hexd;
3278 if (chars_output + co > 243)
3280 fputs ("\"\n\t.STRING \"", file);
3281 chars_output = 0;
3283 fwrite (partial_output, 1, (size_t) co, file);
3284 chars_output += co;
3285 co = 0;
3287 fputs ("\"\n", file);
3290 /* Try to rewrite floating point comparisons & branches to avoid
3291 useless add,tr insns.
3293 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3294 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3295 first attempt to remove useless add,tr insns. It is zero
3296 for the second pass as reorg sometimes leaves bogus REG_DEAD
3297 notes lying around.
3299 When CHECK_NOTES is zero we can only eliminate add,tr insns
3300 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3301 instructions. */
3302 static void
3303 remove_useless_addtr_insns (int check_notes)
3305 rtx_insn *insn;
3306 static int pass = 0;
3308 /* This is fairly cheap, so always run it when optimizing. */
3309 if (optimize > 0)
3311 int fcmp_count = 0;
3312 int fbranch_count = 0;
3314 /* Walk all the insns in this function looking for fcmp & fbranch
3315 instructions. Keep track of how many of each we find. */
3316 for (insn = get_insns (); insn; insn = next_insn (insn))
3318 rtx tmp;
3320 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3321 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3322 continue;
3324 tmp = PATTERN (insn);
3326 /* It must be a set. */
3327 if (GET_CODE (tmp) != SET)
3328 continue;
3330 /* If the destination is CCFP, then we've found an fcmp insn. */
3331 tmp = SET_DEST (tmp);
3332 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3334 fcmp_count++;
3335 continue;
3338 tmp = PATTERN (insn);
3339 /* If this is an fbranch instruction, bump the fbranch counter. */
3340 if (GET_CODE (tmp) == SET
3341 && SET_DEST (tmp) == pc_rtx
3342 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3343 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3344 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3345 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3347 fbranch_count++;
3348 continue;
3353 /* Find all floating point compare + branch insns. If possible,
3354 reverse the comparison & the branch to avoid add,tr insns. */
3355 for (insn = get_insns (); insn; insn = next_insn (insn))
3357 rtx tmp;
3358 rtx_insn *next;
3360 /* Ignore anything that isn't an INSN. */
3361 if (! NONJUMP_INSN_P (insn))
3362 continue;
3364 tmp = PATTERN (insn);
3366 /* It must be a set. */
3367 if (GET_CODE (tmp) != SET)
3368 continue;
3370 /* The destination must be CCFP, which is register zero. */
3371 tmp = SET_DEST (tmp);
3372 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3373 continue;
3375 /* INSN should be a set of CCFP.
3377 See if the result of this insn is used in a reversed FP
3378 conditional branch. If so, reverse our condition and
3379 the branch. Doing so avoids useless add,tr insns. */
3380 next = next_insn (insn);
3381 while (next)
3383 /* Jumps, calls and labels stop our search. */
3384 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3385 break;
3387 /* As does another fcmp insn. */
3388 if (NONJUMP_INSN_P (next)
3389 && GET_CODE (PATTERN (next)) == SET
3390 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3391 && REGNO (SET_DEST (PATTERN (next))) == 0)
3392 break;
3394 next = next_insn (next);
3397 /* Is NEXT_INSN a branch? */
3398 if (next && JUMP_P (next))
3400 rtx pattern = PATTERN (next);
3402 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3403 and CCFP dies, then reverse our conditional and the branch
3404 to avoid the add,tr. */
3405 if (GET_CODE (pattern) == SET
3406 && SET_DEST (pattern) == pc_rtx
3407 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3408 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3409 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3410 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3411 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3412 && (fcmp_count == fbranch_count
3413 || (check_notes
3414 && find_regno_note (next, REG_DEAD, 0))))
3416 /* Reverse the branch. */
3417 tmp = XEXP (SET_SRC (pattern), 1);
3418 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3419 XEXP (SET_SRC (pattern), 2) = tmp;
3420 INSN_CODE (next) = -1;
3422 /* Reverse our condition. */
3423 tmp = PATTERN (insn);
3424 PUT_CODE (XEXP (tmp, 1),
3425 (reverse_condition_maybe_unordered
3426 (GET_CODE (XEXP (tmp, 1)))));
3432 pass = !pass;
3436 /* You may have trouble believing this, but this is the 32 bit HP-PA
3437 stack layout. Wow.
3439 Offset Contents
3441 Variable arguments (optional; any number may be allocated)
3443 SP-(4*(N+9)) arg word N
3445 SP-56 arg word 5
3446 SP-52 arg word 4
3448 Fixed arguments (must be allocated; may remain unused)
3450 SP-48 arg word 3
3451 SP-44 arg word 2
3452 SP-40 arg word 1
3453 SP-36 arg word 0
3455 Frame Marker
3457 SP-32 External Data Pointer (DP)
3458 SP-28 External sr4
3459 SP-24 External/stub RP (RP')
3460 SP-20 Current RP
3461 SP-16 Static Link
3462 SP-12 Clean up
3463 SP-8 Calling Stub RP (RP'')
3464 SP-4 Previous SP
3466 Top of Frame
3468 SP-0 Stack Pointer (points to next available address)
3472 /* This function saves registers as follows. Registers marked with ' are
3473 this function's registers (as opposed to the previous function's).
3474 If a frame_pointer isn't needed, r4 is saved as a general register;
3475 the space for the frame pointer is still allocated, though, to keep
3476 things simple.
3479 Top of Frame
3481 SP (FP') Previous FP
3482 SP + 4 Alignment filler (sigh)
3483 SP + 8 Space for locals reserved here.
3487 SP + n All call saved registers used.
3491 SP + o All call saved fp registers used.
3495 SP + p (SP') points to next available address.
3499 /* Global variables set by output_function_prologue(). */
3500 /* Size of frame. Need to know this to emit return insns from
3501 leaf procedures. */
3502 static HOST_WIDE_INT actual_fsize, local_fsize;
3503 static int save_fregs;
3505 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3506 Handle case where DISP > 8k by using the add_high_const patterns.
3508 Note in DISP > 8k case, we will leave the high part of the address
3509 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
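/* Illustrative (not in the original source): with BASE the stack
   pointer (%r30) and a DISP such as 0x12340 that fails
   VAL_14_BITS_P on a 32-bit target, this emits roughly
       addil L'0x12340,%r30
       stw %src,R'0x12340(%r1)
   leaving the high part in %r1 as noted above. */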
3511 static void
3512 store_reg (int reg, HOST_WIDE_INT disp, int base)
3514 rtx insn, dest, src, basereg;
3516 src = gen_rtx_REG (word_mode, reg);
3517 basereg = gen_rtx_REG (Pmode, base);
3518 if (VAL_14_BITS_P (disp))
3520 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3521 insn = emit_move_insn (dest, src);
3523 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3525 rtx delta = GEN_INT (disp);
3526 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3528 emit_move_insn (tmpreg, delta);
3529 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3530 if (DO_FRAME_NOTES)
3532 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3533 gen_rtx_SET (VOIDmode, tmpreg,
3534 gen_rtx_PLUS (Pmode, basereg, delta)));
3535 RTX_FRAME_RELATED_P (insn) = 1;
3537 dest = gen_rtx_MEM (word_mode, tmpreg);
3538 insn = emit_move_insn (dest, src);
3540 else
3542 rtx delta = GEN_INT (disp);
3543 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3544 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3546 emit_move_insn (tmpreg, high);
3547 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3548 insn = emit_move_insn (dest, src);
3549 if (DO_FRAME_NOTES)
3550 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3551 gen_rtx_SET (VOIDmode,
3552 gen_rtx_MEM (word_mode,
3553 gen_rtx_PLUS (word_mode,
3554 basereg,
3555 delta)),
3556 src));
3559 if (DO_FRAME_NOTES)
3560 RTX_FRAME_RELATED_P (insn) = 1;
3563 /* Emit RTL to store REG at the memory location specified by BASE and then
3564 add MOD to BASE. MOD must be <= 8k. */
3566 static void
3567 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3569 rtx insn, basereg, srcreg, delta;
3571 gcc_assert (VAL_14_BITS_P (mod));
3573 basereg = gen_rtx_REG (Pmode, base);
3574 srcreg = gen_rtx_REG (word_mode, reg);
3575 delta = GEN_INT (mod);
3577 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3578 if (DO_FRAME_NOTES)
3580 RTX_FRAME_RELATED_P (insn) = 1;
3582 /* RTX_FRAME_RELATED_P must be set on each frame related set
3583 in a parallel with more than one element. */
3584 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3585 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3589 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3590 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3591 whether to add a frame note or not.
3593 In the DISP > 8k case, we leave the high part of the address in %r1.
3594 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3596 static void
3597 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3599 rtx insn;
3601 if (VAL_14_BITS_P (disp))
3603 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3604 plus_constant (Pmode,
3605 gen_rtx_REG (Pmode, base), disp));
3607 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3609 rtx basereg = gen_rtx_REG (Pmode, base);
3610 rtx delta = GEN_INT (disp);
3611 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3613 emit_move_insn (tmpreg, delta);
3614 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3615 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3616 if (DO_FRAME_NOTES)
3617 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3618 gen_rtx_SET (VOIDmode, tmpreg,
3619 gen_rtx_PLUS (Pmode, basereg, delta)));
3621 else
3623 rtx basereg = gen_rtx_REG (Pmode, base);
3624 rtx delta = GEN_INT (disp);
3625 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3627 emit_move_insn (tmpreg,
3628 gen_rtx_PLUS (Pmode, basereg,
3629 gen_rtx_HIGH (Pmode, delta)));
3630 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3631 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3634 if (DO_FRAME_NOTES && note)
3635 RTX_FRAME_RELATED_P (insn) = 1;
3638 HOST_WIDE_INT
3639 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3641 int freg_saved = 0;
3642 int i, j;
3644 /* The code in pa_expand_prologue and pa_expand_epilogue must
3645 be consistent with the rounding and size calculation done here.
3646 Change them at the same time. */
3648 /* We do our own stack alignment. First, round the size of the
3649 stack locals up to a word boundary. */
3650 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3652 /* Space for previous frame pointer + filler. If any frame is
3653 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3654 waste some space here for the sake of HP compatibility. The
3655 first slot is only used when the frame pointer is needed. */
3656 if (size || frame_pointer_needed)
3657 size += STARTING_FRAME_OFFSET;
3659 /* If the current function calls __builtin_eh_return, then we need
3660 to allocate stack space for registers that will hold data for
3661 the exception handler. */
3662 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3664 unsigned int i;
3666 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3667 continue;
3668 size += i * UNITS_PER_WORD;
3671 /* Account for space used by the callee general register saves. */
3672 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3673 if (df_regs_ever_live_p (i))
3674 size += UNITS_PER_WORD;
3676 /* Account for space used by the callee floating point register saves. */
3677 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3678 if (df_regs_ever_live_p (i)
3679 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3681 freg_saved = 1;
3683 /* We always save both halves of the FP register, so always
3684 increment the frame size by 8 bytes. */
3685 size += 8;
3688 /* If any of the floating registers are saved, account for the
3689 alignment needed for the floating point register save block. */
3690 if (freg_saved)
3692 size = (size + 7) & ~7;
3693 if (fregs_live)
3694 *fregs_live = 1;
3697 /* The various ABIs include space for the outgoing parameters in the
3698 size of the current function's stack frame. We don't need to align
3699 for the outgoing arguments as their alignment is set by the final
3700 rounding for the frame as a whole. */
3701 size += crtl->outgoing_args_size;
3703 /* Allocate space for the fixed frame marker. This space must be
3704 allocated for any function that makes calls or allocates
3705 stack space. */
3706 if (!crtl->is_leaf || size)
3707 size += TARGET_64BIT ? 48 : 32;
3709 /* Finally, round to the preferred stack boundary. */
3710 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3711 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3714 /* Generate the assembly code for function entry. FILE is a stdio
3715 stream to output the code to. SIZE is an int: how many units of
3716 temporary storage to allocate.
3718 Refer to the array `regs_ever_live' to determine which registers to
3719 save; `regs_ever_live[I]' is nonzero if register number I is ever
3720 used in the function. This function is responsible for knowing
3721 which registers should not be saved even if used. */
3723 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3724 of memory. If any fpu reg is used in the function, we allocate
3725 such a block here, at the bottom of the frame, just in case it's needed.
3727 If this function is a leaf procedure, then we may choose not
3728 to do a "save" insn. The decision about whether or not
3729 to do this is made in regclass.c. */
3731 static void
3732 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3734 /* The function's label and associated .PROC must never be
3735 separated and must be output *after* any profiling declarations
3736 to avoid changing spaces/subspaces within a procedure. */
3737 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3738 fputs ("\t.PROC\n", file);
3740 /* pa_expand_prologue does the dirty work now. We just need
3741 to output the assembler directives which denote the start
3742 of a function. */
3743 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3744 if (crtl->is_leaf)
3745 fputs (",NO_CALLS", file);
3746 else
3747 fputs (",CALLS", file);
3748 if (rp_saved)
3749 fputs (",SAVE_RP", file);
3751 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3752 at the beginning of the frame and that it is used as the frame
3753 pointer for the frame. We do this because our current frame
3754 layout doesn't conform to that specified in the HP runtime
3755 documentation and we need a way to indicate to programs such as
3756 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3757 isn't used by HP compilers but is supported by the assembler.
3758 However, SAVE_SP is supposed to indicate that the previous stack
3759 pointer has been saved in the frame marker. */
3760 if (frame_pointer_needed)
3761 fputs (",SAVE_SP", file);
3763 /* Pass on information about the number of callee register saves
3764 performed in the prologue.
3766 The compiler is supposed to pass the highest register number
3767 saved, the assembler then has to adjust that number before
3768 entering it into the unwind descriptor (to account for any
3769 caller saved registers with lower register numbers than the
3770 first callee saved register). */
3771 if (gr_saved)
3772 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3774 if (fr_saved)
3775 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3777 fputs ("\n\t.ENTRY\n", file);
3779 remove_useless_addtr_insns (0);
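/* Illustrative output of the directives above for a hypothetical
   non-leaf function with a 128-byte frame, a saved return pointer,
   a frame pointer, and one general register save (gr_saved == 1,
   so ENTRY_GR = 1 + 2 = 3):

     .PROC
     .CALLINFO FRAME=128,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3
     .ENTRY  */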
3782 void
3783 pa_expand_prologue (void)
3785 int merge_sp_adjust_with_store = 0;
3786 HOST_WIDE_INT size = get_frame_size ();
3787 HOST_WIDE_INT offset;
3788 int i;
3789 rtx insn, tmpreg;
3791 gr_saved = 0;
3792 fr_saved = 0;
3793 save_fregs = 0;
3795 /* Compute total size for frame pointer, filler, locals and rounding to
3796 the next word boundary. Similar code appears in pa_compute_frame_size
3797 and must be changed in tandem with this code. */
3798 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3799 if (local_fsize || frame_pointer_needed)
3800 local_fsize += STARTING_FRAME_OFFSET;
3802 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3803 if (flag_stack_usage_info)
3804 current_function_static_stack_size = actual_fsize;
3806 /* Compute a few things we will use often. */
3807 tmpreg = gen_rtx_REG (word_mode, 1);
3809 /* Save RP first. The calling conventions manual states RP will
3810 always be stored into the caller's frame at sp - 20 or sp - 16
3811 depending on which ABI is in use. */
3812 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3814 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3815 rp_saved = true;
3817 else
3818 rp_saved = false;
3820 /* Allocate the local frame and set up the frame pointer if needed. */
3821 if (actual_fsize != 0)
3823 if (frame_pointer_needed)
3825 /* Copy the old frame pointer temporarily into %r1. Set up the
3826 new stack pointer, then store away the saved old frame pointer
3827 into the stack at sp and at the same time update the stack
3828 pointer by actual_fsize bytes. Two versions: the first
3829 handles small (<8k) frames; the second handles large (>=8k)
3830 frames. */
3831 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3832 if (DO_FRAME_NOTES)
3833 RTX_FRAME_RELATED_P (insn) = 1;
3835 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3836 if (DO_FRAME_NOTES)
3837 RTX_FRAME_RELATED_P (insn) = 1;
3839 if (VAL_14_BITS_P (actual_fsize))
3840 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3841 else
3843 /* It is incorrect to store the saved frame pointer at *sp,
3844 then increment sp (writes beyond the current stack boundary).
3846 So instead use stwm to store at *sp and post-increment the
3847 stack pointer as an atomic operation. Then increment sp to
3848 finish allocating the new frame. */
3849 HOST_WIDE_INT adjust1 = 8192 - 64;
3850 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3852 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3853 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3854 adjust2, 1);
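/* Worked example of the two-step adjustment (illustrative): for
   actual_fsize == 16384, adjust1 = 8192 - 64 = 8128, which still
   fits the 14-bit displacement of the stwm, and adjust2 =
   16384 - 8128 = 8256 is added afterwards to finish allocating
   the frame. */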
3857 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3858 we need to store the previous stack pointer (frame pointer)
3859 into the frame marker on targets that use the HP unwind
3860 library. This allows the HP unwind library to be used to
3861 unwind GCC frames. However, we are not fully compatible
3862 with the HP library because our frame layout differs from
3863 that specified in the HP runtime specification.
3865 We don't want a frame note on this instruction as the frame
3866 marker moves during dynamic stack allocation.
3868 This instruction also serves as a blockage to prevent
3869 register spills from being scheduled before the stack
3870 pointer is raised. This is necessary as we store
3871 registers using the frame pointer as a base register,
3872 and the frame pointer is set before sp is raised. */
3873 if (TARGET_HPUX_UNWIND_LIBRARY)
3875 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3876 GEN_INT (TARGET_64BIT ? -8 : -4));
3878 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3879 hard_frame_pointer_rtx);
3881 else
3882 emit_insn (gen_blockage ());
3884 /* No frame pointer needed. */
3885 else
3887 /* In some cases we can perform the first callee register save
3888 and allocating the stack frame at the same time. If so, just
3889 make a note of it and defer allocating the frame until saving
3890 the callee registers. */
3891 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3892 merge_sp_adjust_with_store = 1;
3893 /* Cannot optimize. Adjust the stack frame by actual_fsize
3894 bytes. */
3895 else
3896 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3897 actual_fsize, 1);
3901 /* Normal register save.
3903 Do not save the frame pointer in the frame_pointer_needed case. It
3904 was done earlier. */
3905 if (frame_pointer_needed)
3907 offset = local_fsize;
3909 /* Saving the EH return data registers in the frame is the simplest
3910 way to get the frame unwind information emitted. We put them
3911 just before the general registers. */
3912 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3914 unsigned int i, regno;
3916 for (i = 0; ; ++i)
3918 regno = EH_RETURN_DATA_REGNO (i);
3919 if (regno == INVALID_REGNUM)
3920 break;
3922 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3923 offset += UNITS_PER_WORD;
3927 for (i = 18; i >= 4; i--)
3928 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3930 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3931 offset += UNITS_PER_WORD;
3932 gr_saved++;
3934 /* Account for %r3 which is saved in a special place. */
3935 gr_saved++;
3937 /* No frame pointer needed. */
3938 else
3940 offset = local_fsize - actual_fsize;
3942 /* Saving the EH return data registers in the frame is the simplest
3943 way to get the frame unwind information emitted. */
3944 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3946 unsigned int i, regno;
3948 for (i = 0; ; ++i)
3950 regno = EH_RETURN_DATA_REGNO (i);
3951 if (regno == INVALID_REGNUM)
3952 break;
3954 /* If merge_sp_adjust_with_store is nonzero, then we can
3955 optimize the first save. */
3956 if (merge_sp_adjust_with_store)
3958 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3959 merge_sp_adjust_with_store = 0;
3961 else
3962 store_reg (regno, offset, STACK_POINTER_REGNUM);
3963 offset += UNITS_PER_WORD;
3967 for (i = 18; i >= 3; i--)
3968 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3970 /* If merge_sp_adjust_with_store is nonzero, then we can
3971 optimize the first GR save. */
3972 if (merge_sp_adjust_with_store)
3974 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3975 merge_sp_adjust_with_store = 0;
3977 else
3978 store_reg (i, offset, STACK_POINTER_REGNUM);
3979 offset += UNITS_PER_WORD;
3980 gr_saved++;
3983 /* If we wanted to merge the SP adjustment with a GR save, but we never
3984 did any GR saves, then just emit the adjustment here. */
3985 if (merge_sp_adjust_with_store)
3986 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3987 actual_fsize, 1);
3990 /* The hppa calling conventions say that %r19, the pic offset
3991 register, is saved at sp - 32 (in this function's frame)
3992 when generating PIC code. FIXME: What is the correct thing
3993 to do for functions which make no calls and allocate no
3994 frame? Do we need to allocate a frame, or can we just omit
3995 the save? For now we'll just omit the save.
3997 We don't want a note on this insn as the frame marker can
3998 move if there is a dynamic stack allocation. */
3999 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4001 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4003 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4007 /* Align pointer properly (doubleword boundary). */
4008 offset = (offset + 7) & ~7;
4010 /* Floating point register store. */
4011 if (save_fregs)
4013 rtx base;
4015 /* First get the frame or stack pointer to the start of the FP register
4016 save area. */
4017 if (frame_pointer_needed)
4019 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4020 base = hard_frame_pointer_rtx;
4022 else
4024 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4025 base = stack_pointer_rtx;
4028 /* Now actually save the FP registers. */
4029 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4031 if (df_regs_ever_live_p (i)
4032 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4034 rtx addr, insn, reg;
4035 addr = gen_rtx_MEM (DFmode,
4036 gen_rtx_POST_INC (word_mode, tmpreg));
4037 reg = gen_rtx_REG (DFmode, i);
4038 insn = emit_move_insn (addr, reg);
4039 if (DO_FRAME_NOTES)
4041 RTX_FRAME_RELATED_P (insn) = 1;
4042 if (TARGET_64BIT)
4044 rtx mem = gen_rtx_MEM (DFmode,
4045 plus_constant (Pmode, base,
4046 offset));
4047 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4048 gen_rtx_SET (VOIDmode, mem, reg));
4050 else
4052 rtx meml = gen_rtx_MEM (SFmode,
4053 plus_constant (Pmode, base,
4054 offset));
4055 rtx memr = gen_rtx_MEM (SFmode,
4056 plus_constant (Pmode, base,
4057 offset + 4));
4058 rtx regl = gen_rtx_REG (SFmode, i);
4059 rtx regr = gen_rtx_REG (SFmode, i + 1);
4060 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4061 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4062 rtvec vec;
4064 RTX_FRAME_RELATED_P (setl) = 1;
4065 RTX_FRAME_RELATED_P (setr) = 1;
4066 vec = gen_rtvec (2, setl, setr);
4067 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4068 gen_rtx_SEQUENCE (VOIDmode, vec));
4071 offset += GET_MODE_SIZE (DFmode);
4072 fr_saved++;
4078 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4079 Handle case where DISP > 8k by using the add_high_const patterns. */
4081 static void
4082 load_reg (int reg, HOST_WIDE_INT disp, int base)
4084 rtx dest = gen_rtx_REG (word_mode, reg);
4085 rtx basereg = gen_rtx_REG (Pmode, base);
4086 rtx src;
4088 if (VAL_14_BITS_P (disp))
4089 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4090 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4092 rtx delta = GEN_INT (disp);
4093 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4095 emit_move_insn (tmpreg, delta);
4096 if (TARGET_DISABLE_INDEXING)
4098 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4099 src = gen_rtx_MEM (word_mode, tmpreg);
4101 else
4102 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4104 else
4106 rtx delta = GEN_INT (disp);
4107 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4108 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4110 emit_move_insn (tmpreg, high);
4111 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4114 emit_move_insn (dest, src);
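/* Illustrative expansions of load_reg above; the register numbers
   and displacements are hypothetical:

     disp = 64 (fits in 14 bits):
       ldw 64(%r3),%r4

     disp = 0x12345 (needs the HIGH/LO_SUM split through %r1):
       addil L'0x12345,%r3
       ldw R'0x12345(%r1),%r4

   The 64-bit !VAL_32_BITS_P case instead materializes the whole
   displacement in %r1 and uses it as index or base. */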
4117 /* Update the total code bytes output to the text section. */
4119 static void
4120 update_total_code_bytes (unsigned int nbytes)
4122 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4123 && !IN_NAMED_SECTION_P (cfun->decl))
4125 unsigned int old_total = total_code_bytes;
4127 total_code_bytes += nbytes;
4129 /* Be prepared to handle overflows. */
4130 if (old_total > total_code_bytes)
4131 total_code_bytes = UINT_MAX;
4135 /* This function generates the assembly code for function exit.
4136 Args are as for output_function_prologue ().
4138 The function epilogue should not depend on the current stack
4139 pointer! It should use the frame pointer only. This is mandatory
4140 because of alloca; we also take advantage of it to omit stack
4141 adjustments before returning. */
4143 static void
4144 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4146 rtx_insn *insn = get_last_insn ();
4147 bool extra_nop;
4149 /* pa_expand_epilogue does the dirty work now. We just need
4150 to output the assembler directives which denote the end
4151 of a function.
4153 To make debuggers happy, emit a nop if the epilogue was completely
4154 eliminated due to a volatile call as the last insn in the
4155 current function. That way the return address (in %r2) will
4156 always point to a valid instruction in the current function. */
4158 /* Get the last real insn. */
4159 if (NOTE_P (insn))
4160 insn = prev_real_insn (insn);
4162 /* If it is a sequence, then look inside. */
4163 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4164 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4166 /* If insn is a CALL_INSN, then it must be a call to a volatile
4167 function (otherwise there would be epilogue insns). */
4168 if (insn && CALL_P (insn))
4170 fputs ("\tnop\n", file);
4171 extra_nop = true;
4173 else
4174 extra_nop = false;
4176 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4178 if (TARGET_SOM && TARGET_GAS)
4180 /* We are done with this subspace except possibly for some additional
4181 debug information. Forget that we are in this subspace to ensure
4182 that the next function is output in its own subspace. */
4183 in_section = NULL;
4184 cfun->machine->in_nsubspa = 2;
4187 /* Thunks do their own insn accounting. */
4188 if (cfun->is_thunk)
4189 return;
4191 if (INSN_ADDRESSES_SET_P ())
4193 last_address = extra_nop ? 4 : 0;
4194 insn = get_last_nonnote_insn ();
4195 last_address += INSN_ADDRESSES (INSN_UID (insn));
4196 if (INSN_P (insn))
4197 last_address += insn_default_length (insn);
4198 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4199 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4201 else
4202 last_address = UINT_MAX;
4204 /* Finally, update the total number of code bytes output so far. */
4205 update_total_code_bytes (last_address);
4208 void
4209 pa_expand_epilogue (void)
4211 rtx tmpreg;
4212 HOST_WIDE_INT offset;
4213 HOST_WIDE_INT ret_off = 0;
4214 int i;
4215 int merge_sp_adjust_with_load = 0;
4217 /* We will use this often. */
4218 tmpreg = gen_rtx_REG (word_mode, 1);
4220 /* Try to restore RP early to avoid load/use interlocks when
4221 RP gets used in the return (bv) instruction. This appears to still
4222 be necessary even when we schedule the prologue and epilogue. */
4223 if (rp_saved)
4225 ret_off = TARGET_64BIT ? -16 : -20;
4226 if (frame_pointer_needed)
4228 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4229 ret_off = 0;
4231 else
4233 /* No frame pointer, and stack is smaller than 8k. */
4234 if (VAL_14_BITS_P (ret_off - actual_fsize))
4236 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4237 ret_off = 0;
4242 /* General register restores. */
4243 if (frame_pointer_needed)
4245 offset = local_fsize;
4247 /* If the current function calls __builtin_eh_return, then we need
4248 to restore the saved EH data registers. */
4249 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4251 unsigned int i, regno;
4253 for (i = 0; ; ++i)
4255 regno = EH_RETURN_DATA_REGNO (i);
4256 if (regno == INVALID_REGNUM)
4257 break;
4259 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4260 offset += UNITS_PER_WORD;
4264 for (i = 18; i >= 4; i--)
4265 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4267 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4268 offset += UNITS_PER_WORD;
4271 else
4273 offset = local_fsize - actual_fsize;
4275 /* If the current function calls __builtin_eh_return, then we need
4276 to restore the saved EH data registers. */
4277 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4279 unsigned int i, regno;
4281 for (i = 0; ; ++i)
4283 regno = EH_RETURN_DATA_REGNO (i);
4284 if (regno == INVALID_REGNUM)
4285 break;
4287 /* Only for the first load.
4288 merge_sp_adjust_with_load holds the register load
4289 with which we will merge the sp adjustment. */
4290 if (merge_sp_adjust_with_load == 0
4291 && local_fsize == 0
4292 && VAL_14_BITS_P (-actual_fsize))
4293 merge_sp_adjust_with_load = regno;
4294 else
4295 load_reg (regno, offset, STACK_POINTER_REGNUM);
4296 offset += UNITS_PER_WORD;
4300 for (i = 18; i >= 3; i--)
4302 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4304 /* Only for the first load.
4305 merge_sp_adjust_with_load holds the register load
4306 with which we will merge the sp adjustment. */
4307 if (merge_sp_adjust_with_load == 0
4308 && local_fsize == 0
4309 && VAL_14_BITS_P (-actual_fsize))
4310 merge_sp_adjust_with_load = i;
4311 else
4312 load_reg (i, offset, STACK_POINTER_REGNUM);
4313 offset += UNITS_PER_WORD;
4318 /* Align pointer properly (doubleword boundary). */
4319 offset = (offset + 7) & ~7;
4321 /* FP register restores. */
4322 if (save_fregs)
4324 /* Adjust the register to index off of. */
4325 if (frame_pointer_needed)
4326 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4327 else
4328 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4330 /* Actually do the restores now. */
4331 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4332 if (df_regs_ever_live_p (i)
4333 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4335 rtx src = gen_rtx_MEM (DFmode,
4336 gen_rtx_POST_INC (word_mode, tmpreg));
4337 rtx dest = gen_rtx_REG (DFmode, i);
4338 emit_move_insn (dest, src);
4342 /* Emit a blockage insn here to keep these insns from being moved to
4343 an earlier spot in the epilogue, or into the main instruction stream.
4345 This is necessary as we must not cut the stack back before all the
4346 restores are finished. */
4347 emit_insn (gen_blockage ());
4349 /* Reset stack pointer (and possibly frame pointer). The stack
4350 pointer is initially set to fp + 64 to avoid a race condition. */
4351 if (frame_pointer_needed)
4353 rtx delta = GEN_INT (-64);
4355 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4356 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4357 stack_pointer_rtx, delta));
4359 /* If we were deferring a callee register restore, do it now. */
4360 else if (merge_sp_adjust_with_load)
4362 rtx delta = GEN_INT (-actual_fsize);
4363 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4365 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4367 else if (actual_fsize != 0)
4368 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4369 - actual_fsize, 0);
4371 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4372 frame greater than 8k), do so now. */
4373 if (ret_off != 0)
4374 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4376 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4378 rtx sa = EH_RETURN_STACKADJ_RTX;
4380 emit_insn (gen_blockage ());
4381 emit_insn (TARGET_64BIT
4382 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4383 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4387 bool
4388 pa_can_use_return_insn (void)
4390 if (!reload_completed)
4391 return false;
4393 if (frame_pointer_needed)
4394 return false;
4396 if (df_regs_ever_live_p (2))
4397 return false;
4399 if (crtl->profile)
4400 return false;
4402 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4405 rtx
4406 hppa_pic_save_rtx (void)
4408 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4411 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4412 #define NO_DEFERRED_PROFILE_COUNTERS 0
4413 #endif
4416 /* Vector of funcdef numbers. */
4417 static vec<int> funcdef_nos;
4419 /* Output deferred profile counters. */
4420 static void
4421 output_deferred_profile_counters (void)
4423 unsigned int i;
4424 int align, n;
4426 if (funcdef_nos.is_empty ())
4427 return;
4429 switch_to_section (data_section);
4430 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4431 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4433 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4435 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4436 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4439 funcdef_nos.release ();
4442 void
4443 hppa_profile_hook (int label_no)
4445 /* We use SImode for the address of the function in both 32 and
4446 64-bit code to avoid having to provide DImode versions of the
4447 lcla2 and load_offset_label_address insn patterns. */
4448 rtx reg = gen_reg_rtx (SImode);
4449 rtx_code_label *label_rtx = gen_label_rtx ();
4450 rtx begin_label_rtx, call_insn;
4451 char begin_label_name[16];
4453 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4454 label_no);
4455 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4457 if (TARGET_64BIT)
4458 emit_move_insn (arg_pointer_rtx,
4459 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4460 GEN_INT (64)));
4462 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4464 /* The address of the function is loaded into %r25 with an instruction-
4465 relative sequence that avoids the use of relocations. The sequence
4466 is split so that the load_offset_label_address instruction can
4467 occupy the delay slot of the call to _mcount. */
4468 if (TARGET_PA_20)
4469 emit_insn (gen_lcla2 (reg, label_rtx));
4470 else
4471 emit_insn (gen_lcla1 (reg, label_rtx));
4473 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4474 reg, begin_label_rtx, label_rtx));
4476 #if !NO_DEFERRED_PROFILE_COUNTERS
4478 rtx count_label_rtx, addr, r24;
4479 char count_label_name[16];
4481 funcdef_nos.safe_push (label_no);
4482 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4483 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4485 addr = force_reg (Pmode, count_label_rtx);
4486 r24 = gen_rtx_REG (Pmode, 24);
4487 emit_move_insn (r24, addr);
4489 call_insn =
4490 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4491 gen_rtx_SYMBOL_REF (Pmode,
4492 "_mcount")),
4493 GEN_INT (TARGET_64BIT ? 24 : 12)));
4495 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4497 #else
4499 call_insn =
4500 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4501 gen_rtx_SYMBOL_REF (Pmode,
4502 "_mcount")),
4503 GEN_INT (TARGET_64BIT ? 16 : 8)));
4505 #endif
4507 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4508 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4510 /* Indicate the _mcount call cannot throw, nor will it execute a
4511 non-local goto. */
4512 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4515 /* Fetch the return address for the frame COUNT steps up from
4516 the current frame, after the prologue. FRAMEADDR is the
4517 frame pointer of the COUNT frame.
4519 We want to ignore any export stub remnants here. To handle this,
4520 we examine the code at the return address, and if it is an export
4521 stub, we return a memory rtx for the stub return address stored
4522 at frame-24.
4524 The value returned is used in two different ways:
4526 1. To find a function's caller.
4528 2. To change the return address for a function.
4530 This function handles most instances of case 1; however, it will
4531 fail if there are two levels of stubs to execute on the return
4532 path. The only way I believe that can happen is if the return value
4533 needs a parameter relocation, which never happens for C code.
4535 This function handles most instances of case 2; however, it will
4536 fail if we did not originally have stub code on the return path
4537 but will need stub code on the new return path. This can happen if
4538 the caller & callee are both in the main program, but the new
4539 return location is in a shared library. */
4541 rtx
4542 pa_return_addr_rtx (int count, rtx frameaddr)
4544 rtx label;
4545 rtx rp;
4546 rtx saved_rp;
4547 rtx ins;
4549 /* The instruction stream at the return address of a PA1.X export stub is:
4551 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4552 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4553 0x00011820 | stub+16: mtsp r1,sr0
4554 0xe0400002 | stub+20: be,n 0(sr0,rp)
4556 0xe0400002 must be specified as -532676606 so that it won't be
4557 rejected as an invalid immediate operand on 64-bit hosts.
4559 The instruction stream at the return address of a PA2.0 export stub is:
4561 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4562 0xe840d002 | stub+12: bve,n (rp)
4565 HOST_WIDE_INT insns[4];
4566 int i, len;
4568 if (count != 0)
4569 return NULL_RTX;
4571 rp = get_hard_reg_initial_val (Pmode, 2);
4573 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4574 return rp;
4576 /* If there is no export stub then just use the value saved from
4577 the return pointer register. */
4579 saved_rp = gen_reg_rtx (Pmode);
4580 emit_move_insn (saved_rp, rp);
4582 /* Get pointer to the instruction stream. We have to mask out the
4583 privilege level from the two low order bits of the return address
4584 pointer here so that ins will point to the start of the first
4585 instruction that would have been executed if we returned. */
4586 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4587 label = gen_label_rtx ();
4589 if (TARGET_PA_20)
4591 insns[0] = 0x4bc23fd1;
4592 insns[1] = -398405630;
4593 len = 2;
4595 else
4597 insns[0] = 0x4bc23fd1;
4598 insns[1] = 0x004010a1;
4599 insns[2] = 0x00011820;
4600 insns[3] = -532676606;
4601 len = 4;
4604 /* Check the instruction stream at the normal return address for the
4605 export stub. If it is an export stub, then our return address is
4606 really in -24[frameaddr]. */
4608 for (i = 0; i < len; i++)
4610 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4611 rtx op1 = GEN_INT (insns[i]);
4612 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4615 /* Here we know that our return address points to an export
4616 stub. We don't want to return the address of the export stub,
4617 but rather the return address of the export stub. That return
4618 address is stored at -24[frameaddr]. */
4620 emit_move_insn (saved_rp,
4621 gen_rtx_MEM (Pmode,
4622 memory_address (Pmode,
4623 plus_constant (Pmode, frameaddr,
4624 -24))));
4626 emit_label (label);
4628 return saved_rp;
4631 void
4632 pa_emit_bcond_fp (rtx operands[])
4634 enum rtx_code code = GET_CODE (operands[0]);
4635 rtx operand0 = operands[1];
4636 rtx operand1 = operands[2];
4637 rtx label = operands[3];
4639 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4640 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4642 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4643 gen_rtx_IF_THEN_ELSE (VOIDmode,
4644 gen_rtx_fmt_ee (NE,
4645 VOIDmode,
4646 gen_rtx_REG (CCFPmode, 0),
4647 const0_rtx),
4648 gen_rtx_LABEL_REF (VOIDmode, label),
4649 pc_rtx)));
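/* Sketch of the two insns emitted above (for code == GT):

     (set (reg:CCFP 0) (gt:CCFP (reg op0) (reg op1)))
     (set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
                             (label_ref label)
                             (pc)))

   The comparison result is latched in the dummy CCFP register 0 and
   the conditional jump tests it for nonzero. */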
4653 /* Adjust the cost of a scheduling dependency. Return the new cost of
4654 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4656 static int
4657 pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4659 enum attr_type attr_type;
4661 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4662 true dependencies as they are described with bypasses now. */
4663 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4664 return cost;
4666 if (! recog_memoized (insn))
4667 return 0;
4669 attr_type = get_attr_type (insn);
4671 switch (REG_NOTE_KIND (link))
4673 case REG_DEP_ANTI:
4674 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4675 cycles later. */
4677 if (attr_type == TYPE_FPLOAD)
4679 rtx pat = PATTERN (insn);
4680 rtx dep_pat = PATTERN (dep_insn);
4681 if (GET_CODE (pat) == PARALLEL)
4683 /* This happens for the fldXs,mb patterns. */
4684 pat = XVECEXP (pat, 0, 0);
4686 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4687 /* If this happens, we have to extend this to schedule
4688 optimally. Return 0 for now. */
4689 return 0;
4691 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4693 if (! recog_memoized (dep_insn))
4694 return 0;
4695 switch (get_attr_type (dep_insn))
4697 case TYPE_FPALU:
4698 case TYPE_FPMULSGL:
4699 case TYPE_FPMULDBL:
4700 case TYPE_FPDIVSGL:
4701 case TYPE_FPDIVDBL:
4702 case TYPE_FPSQRTSGL:
4703 case TYPE_FPSQRTDBL:
4704 /* A fpload can't be issued until one cycle before a
4705 preceding arithmetic operation has finished if
4706 the target of the fpload is any of the sources
4707 (or destination) of the arithmetic operation. */
4708 return insn_default_latency (dep_insn) - 1;
4710 default:
4711 return 0;
4715 else if (attr_type == TYPE_FPALU)
4717 rtx pat = PATTERN (insn);
4718 rtx dep_pat = PATTERN (dep_insn);
4719 if (GET_CODE (pat) == PARALLEL)
4721 /* This happens for the fldXs,mb patterns. */
4722 pat = XVECEXP (pat, 0, 0);
4724 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4725 /* If this happens, we have to extend this to schedule
4726 optimally. Return 0 for now. */
4727 return 0;
4729 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4731 if (! recog_memoized (dep_insn))
4732 return 0;
4733 switch (get_attr_type (dep_insn))
4735 case TYPE_FPDIVSGL:
4736 case TYPE_FPDIVDBL:
4737 case TYPE_FPSQRTSGL:
4738 case TYPE_FPSQRTDBL:
4739 /* An ALU flop can't be issued until two cycles before a
4740 preceding divide or sqrt operation has finished if
4741 the target of the ALU flop is any of the sources
4742 (or destination) of the divide or sqrt operation. */
4743 return insn_default_latency (dep_insn) - 2;
4745 default:
4746 return 0;
4751 /* For other anti dependencies, the cost is 0. */
4752 return 0;
4754 case REG_DEP_OUTPUT:
4755 /* Output dependency; DEP_INSN writes a register that INSN writes some
4756 cycles later. */
4757 if (attr_type == TYPE_FPLOAD)
4759 rtx pat = PATTERN (insn);
4760 rtx dep_pat = PATTERN (dep_insn);
4761 if (GET_CODE (pat) == PARALLEL)
4763 /* This happens for the fldXs,mb patterns. */
4764 pat = XVECEXP (pat, 0, 0);
4766 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4767 /* If this happens, we have to extend this to schedule
4768 optimally. Return 0 for now. */
4769 return 0;
4771 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4773 if (! recog_memoized (dep_insn))
4774 return 0;
4775 switch (get_attr_type (dep_insn))
4777 case TYPE_FPALU:
4778 case TYPE_FPMULSGL:
4779 case TYPE_FPMULDBL:
4780 case TYPE_FPDIVSGL:
4781 case TYPE_FPDIVDBL:
4782 case TYPE_FPSQRTSGL:
4783 case TYPE_FPSQRTDBL:
4784 /* A fpload can't be issued until one cycle before a
4785 preceding arithmetic operation has finished if
4786 the target of the fpload is the destination of the
4787 arithmetic operation.
4789 Exception: For PA7100LC, PA7200 and PA7300, the cost
4790 is 3 cycles, unless they bundle together. We also
4791 pay the penalty if the second insn is a fpload. */
4792 return insn_default_latency (dep_insn) - 1;
4794 default:
4795 return 0;
4799 else if (attr_type == TYPE_FPALU)
4801 rtx pat = PATTERN (insn);
4802 rtx dep_pat = PATTERN (dep_insn);
4803 if (GET_CODE (pat) == PARALLEL)
4805 /* This happens for the fldXs,mb patterns. */
4806 pat = XVECEXP (pat, 0, 0);
4808 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4809 /* If this happens, we have to extend this to schedule
4810 optimally. Return 0 for now. */
4811 return 0;
4813 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4815 if (! recog_memoized (dep_insn))
4816 return 0;
4817 switch (get_attr_type (dep_insn))
4819 case TYPE_FPDIVSGL:
4820 case TYPE_FPDIVDBL:
4821 case TYPE_FPSQRTSGL:
4822 case TYPE_FPSQRTDBL:
4823 /* An ALU flop can't be issued until two cycles before a
4824 preceding divide or sqrt operation has finished if
4825 the target of the ALU flop is also the target of
4826 the divide or sqrt operation. */
4827 return insn_default_latency (dep_insn) - 2;
4829 default:
4830 return 0;
4835 /* For other output dependencies, the cost is 0. */
4836 return 0;
4838 default:
4839 gcc_unreachable ();
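/* Worked example (illustrative): if INSN is an fpload with an anti
   dependency on a preceding fpmul whose default latency is 3 cycles,
   the code above returns 3 - 1 = 2, modelling that the load may issue
   one cycle before the multiply finishes. */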
4843 /* Adjust scheduling priorities. We use this to try and keep addil
4844 and the next use of %r1 close together. */
4845 static int
4846 pa_adjust_priority (rtx_insn *insn, int priority)
4848 rtx set = single_set (insn);
4849 rtx src, dest;
4850 if (set)
4852 src = SET_SRC (set);
4853 dest = SET_DEST (set);
4854 if (GET_CODE (src) == LO_SUM
4855 && symbolic_operand (XEXP (src, 1), VOIDmode)
4856 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4857 priority >>= 3;
4859 else if (GET_CODE (src) == MEM
4860 && GET_CODE (XEXP (src, 0)) == LO_SUM
4861 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4862 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4863 priority >>= 1;
4865 else if (GET_CODE (dest) == MEM
4866 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4867 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4868 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4869 priority >>= 3;
4871 return priority;
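/* Worked example (illustrative): a store whose address is a LO_SUM
   of a writable symbol and whose scheduling priority is 40 is demoted
   to 40 >> 3 == 5, which tends to keep the addil that computes %r1
   and this use of %r1 adjacent in the schedule. */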
4874 /* The 700 can only issue a single insn at a time.
4875 The 7XXX processors can issue two insns at a time.
4876 The 8000 can issue 4 insns at a time. */
4877 static int
4878 pa_issue_rate (void)
4880 switch (pa_cpu)
4882 case PROCESSOR_700: return 1;
4883 case PROCESSOR_7100: return 2;
4884 case PROCESSOR_7100LC: return 2;
4885 case PROCESSOR_7200: return 2;
4886 case PROCESSOR_7300: return 2;
4887 case PROCESSOR_8000: return 4;
4889 default:
4890 gcc_unreachable ();
4896 /* Return any length plus adjustment needed by INSN which already has
4897 its length computed as LENGTH. Return LENGTH if no adjustment is
4898 necessary.
4900 Also compute the length of an inline block move here as it is too
4901 complicated to express as a length attribute in pa.md. */
4902 int
4903 pa_adjust_insn_length (rtx_insn *insn, int length)
4905 rtx pat = PATTERN (insn);
4907 /* If length is negative or undefined, provide initial length. */
4908 if ((unsigned int) length >= INT_MAX)
4910 if (GET_CODE (pat) == SEQUENCE)
4911 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
4913 switch (get_attr_type (insn))
4915 case TYPE_MILLI:
4916 length = pa_attr_length_millicode_call (insn);
4917 break;
4918 case TYPE_CALL:
4919 length = pa_attr_length_call (insn, 0);
4920 break;
4921 case TYPE_SIBCALL:
4922 length = pa_attr_length_call (insn, 1);
4923 break;
4924 case TYPE_DYNCALL:
4925 length = pa_attr_length_indirect_call (insn);
4926 break;
4927 case TYPE_SH_FUNC_ADRS:
4928 length = pa_attr_length_millicode_call (insn) + 20;
4929 break;
4930 default:
4931 gcc_unreachable ();
4935 /* Block move pattern. */
4936 if (NONJUMP_INSN_P (insn)
4937 && GET_CODE (pat) == PARALLEL
4938 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4939 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4940 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4941 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4942 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4943 length += compute_movmem_length (insn) - 4;
4944 /* Block clear pattern. */
4945 else if (NONJUMP_INSN_P (insn)
4946 && GET_CODE (pat) == PARALLEL
4947 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4948 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4949 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4950 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4951 length += compute_clrmem_length (insn) - 4;
4952 /* Conditional branch with an unfilled delay slot. */
4953 else if (JUMP_P (insn) && ! simplejump_p (insn))
4955 /* Adjust a short backwards conditional with an unfilled delay slot. */
4956 if (GET_CODE (pat) == SET
4957 && length == 4
4958 && JUMP_LABEL (insn) != NULL_RTX
4959 && ! forward_branch_p (insn))
4960 length += 4;
4961 else if (GET_CODE (pat) == PARALLEL
4962 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4963 && length == 4)
4964 length += 4;
4965 /* Adjust dbra insn with short backwards conditional branch with
4966 unfilled delay slot -- only for case where counter is in a
4967 general register. */
4968 else if (GET_CODE (pat) == PARALLEL
4969 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4970 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4971 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4972 && length == 4
4973 && ! forward_branch_p (insn))
4974 length += 4;
4976 return length;
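/* Worked example (illustrative): a short backwards conditional
   branch with an unfilled delay slot comes in with length == 4;
   the adjustment above adds 4 bytes for it, so the function
   returns 8. */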
4979 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4981 static bool
4982 pa_print_operand_punct_valid_p (unsigned char code)
4984 if (code == '@'
4985 || code == '#'
4986 || code == '*'
4987 || code == '^')
4988 return true;
4990 return false;
4993 /* Print operand X (an rtx) in assembler syntax to file FILE.
4994 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4995 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4997 void
4998 pa_print_operand (FILE *file, rtx x, int code)
5000 switch (code)
5002 case '#':
5003 /* Output a 'nop' if there's nothing for the delay slot. */
5004 if (dbr_sequence_length () == 0)
5005 fputs ("\n\tnop", file);
5006 return;
5007 case '*':
5008 /* Output a nullification completer if there's nothing for the
5009 delay slot or nullification is requested. */
5010 if (dbr_sequence_length () == 0 ||
5011 (final_sequence &&
5012 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5013 fputs (",n", file);
5014 return;
5015 case 'R':
5016 /* Print out the second register name of a register pair.
5017 I.e., R (6) => 7. */
5018 fputs (reg_names[REGNO (x) + 1], file);
5019 return;
5020 case 'r':
5021 /* A register or zero. */
5022 if (x == const0_rtx
5023 || (x == CONST0_RTX (DFmode))
5024 || (x == CONST0_RTX (SFmode)))
5026 fputs ("%r0", file);
5027 return;
5029 else
5030 break;
5031 case 'f':
5032 /* A register or zero (floating point). */
5033 if (x == const0_rtx
5034 || (x == CONST0_RTX (DFmode))
5035 || (x == CONST0_RTX (SFmode)))
5037 fputs ("%fr0", file);
5038 return;
5040 else
5041 break;
5042 case 'A':
5044 rtx xoperands[2];
5046 xoperands[0] = XEXP (XEXP (x, 0), 0);
5047 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5048 pa_output_global_address (file, xoperands[1], 0);
5049 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5050 return;
5053 case 'C': /* Plain (C)ondition */
5054 case 'X':
5055 switch (GET_CODE (x))
5057 case EQ:
5058 fputs ("=", file); break;
5059 case NE:
5060 fputs ("<>", file); break;
5061 case GT:
5062 fputs (">", file); break;
5063 case GE:
5064 fputs (">=", file); break;
5065 case GEU:
5066 fputs (">>=", file); break;
5067 case GTU:
5068 fputs (">>", file); break;
5069 case LT:
5070 fputs ("<", file); break;
5071 case LE:
5072 fputs ("<=", file); break;
5073 case LEU:
5074 fputs ("<<=", file); break;
5075 case LTU:
5076 fputs ("<<", file); break;
5077 default:
5078 gcc_unreachable ();
5080 return;
5081 case 'N': /* Condition, (N)egated */
5082 switch (GET_CODE (x))
5084 case EQ:
5085 fputs ("<>", file); break;
5086 case NE:
5087 fputs ("=", file); break;
5088 case GT:
5089 fputs ("<=", file); break;
5090 case GE:
5091 fputs ("<", file); break;
5092 case GEU:
5093 fputs ("<<", file); break;
5094 case GTU:
5095 fputs ("<<=", file); break;
5096 case LT:
5097 fputs (">=", file); break;
5098 case LE:
5099 fputs (">", file); break;
5100 case LEU:
5101 fputs (">>", file); break;
5102 case LTU:
5103 fputs (">>=", file); break;
5104 default:
5105 gcc_unreachable ();
5107 return;
5108 /* For floating point comparisons. Note that the output
5109 predicates are the complement of the desired mode. The
5110 conditions for GT, GE, LT, LE and LTGT cause an invalid
5111 operation exception if the result is unordered and this
5112 exception is enabled in the floating-point status register. */
5113 case 'Y':
5114 switch (GET_CODE (x))
5116 case EQ:
5117 fputs ("!=", file); break;
5118 case NE:
5119 fputs ("=", file); break;
5120 case GT:
5121 fputs ("!>", file); break;
5122 case GE:
5123 fputs ("!>=", file); break;
5124 case LT:
5125 fputs ("!<", file); break;
5126 case LE:
5127 fputs ("!<=", file); break;
5128 case LTGT:
5129 fputs ("!<>", file); break;
5130 case UNLE:
5131 fputs ("!?<=", file); break;
5132 case UNLT:
5133 fputs ("!?<", file); break;
5134 case UNGE:
5135 fputs ("!?>=", file); break;
5136 case UNGT:
5137 fputs ("!?>", file); break;
5138 case UNEQ:
5139 fputs ("!?=", file); break;
5140 case UNORDERED:
5141 fputs ("!?", file); break;
5142 case ORDERED:
5143 fputs ("?", file); break;
5144 default:
5145 gcc_unreachable ();
5147 return;
5148 case 'S': /* Condition, operands are (S)wapped. */
5149 switch (GET_CODE (x))
5151 case EQ:
5152 fputs ("=", file); break;
5153 case NE:
5154 fputs ("<>", file); break;
5155 case GT:
5156 fputs ("<", file); break;
5157 case GE:
5158 fputs ("<=", file); break;
5159 case GEU:
5160 fputs ("<<=", file); break;
5161 case GTU:
5162 fputs ("<<", file); break;
5163 case LT:
5164 fputs (">", file); break;
5165 case LE:
5166 fputs (">=", file); break;
5167 case LEU:
5168 fputs (">>=", file); break;
5169 case LTU:
5170 fputs (">>", file); break;
5171 default:
5172 gcc_unreachable ();
5174 return;
5175 case 'B': /* Condition, (B)oth swapped and negate. */
5176 switch (GET_CODE (x))
5178 case EQ:
5179 fputs ("<>", file); break;
5180 case NE:
5181 fputs ("=", file); break;
5182 case GT:
5183 fputs (">=", file); break;
5184 case GE:
5185 fputs (">", file); break;
5186 case GEU:
5187 fputs (">>", file); break;
5188 case GTU:
5189 fputs (">>=", file); break;
5190 case LT:
5191 fputs ("<=", file); break;
5192 case LE:
5193 fputs ("<", file); break;
5194 case LEU:
5195 fputs ("<<", file); break;
5196 case LTU:
5197 fputs ("<<=", file); break;
5198 default:
5199 gcc_unreachable ();
5201 return;
5202 case 'k':
5203 gcc_assert (GET_CODE (x) == CONST_INT);
5204 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5205 return;
5206 case 'Q':
5207 gcc_assert (GET_CODE (x) == CONST_INT);
5208 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5209 return;
5210 case 'L':
5211 gcc_assert (GET_CODE (x) == CONST_INT);
5212 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5213 return;
5214 case 'O':
5215 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5216 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5217 return;
5218 case 'p':
5219 gcc_assert (GET_CODE (x) == CONST_INT);
5220 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5221 return;
5222 case 'P':
5223 gcc_assert (GET_CODE (x) == CONST_INT);
5224 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5225 return;
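/* Worked examples for the CONST_INT codes above (illustrative):
   with INTVAL (x) == 5, 'k' prints ~5 = -6, 'Q' prints 64 - 5 = 59,
   'L' prints 32 - 5 = 27, 'p' prints 63 - 5 = 58 and 'P' prints
   31 - 5 = 26; with INTVAL (x) == 8, 'O' prints exact_log2 (8) = 3. */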
5226 case 'I':
5227 if (GET_CODE (x) == CONST_INT)
5228 fputs ("i", file);
5229 return;
5230 case 'M':
5231 case 'F':
5232 switch (GET_CODE (XEXP (x, 0)))
5234 case PRE_DEC:
5235 case PRE_INC:
5236 if (ASSEMBLER_DIALECT == 0)
5237 fputs ("s,mb", file);
5238 else
5239 fputs (",mb", file);
5240 break;
5241 case POST_DEC:
5242 case POST_INC:
5243 if (ASSEMBLER_DIALECT == 0)
5244 fputs ("s,ma", file);
5245 else
5246 fputs (",ma", file);
5247 break;
5248 case PLUS:
5249 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5250 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5252 if (ASSEMBLER_DIALECT == 0)
5253 fputs ("x", file);
5255 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5256 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5258 if (ASSEMBLER_DIALECT == 0)
5259 fputs ("x,s", file);
5260 else
5261 fputs (",s", file);
5263 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5264 fputs ("s", file);
5265 break;
5266 default:
5267 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5268 fputs ("s", file);
5269 break;
5271 return;
5272 case 'G':
5273 pa_output_global_address (file, x, 0);
5274 return;
5275 case 'H':
5276 pa_output_global_address (file, x, 1);
5277 return;
5278 case 0: /* Don't do anything special */
5279 break;
5280 case 'Z':
5282 unsigned op[3];
5283 compute_zdepwi_operands (INTVAL (x), op);
5284 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5285 return;
5287 case 'z':
5289 unsigned op[3];
5290 compute_zdepdi_operands (INTVAL (x), op);
5291 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5292 return;
5294 case 'c':
5295 /* We can get here from a .vtable_inherit due to our
5296 CONSTANT_ADDRESS_P rejecting perfectly good constant
5297 addresses. */
5298 break;
5299 default:
5300 gcc_unreachable ();
5302 if (GET_CODE (x) == REG)
5304 fputs (reg_names [REGNO (x)], file);
5305 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5307 fputs ("R", file);
5308 return;
5310 if (FP_REG_P (x)
5311 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5312 && (REGNO (x) & 1) == 0)
5313 fputs ("L", file);
5315 else if (GET_CODE (x) == MEM)
5317 int size = GET_MODE_SIZE (GET_MODE (x));
5318 rtx base = NULL_RTX;
5319 switch (GET_CODE (XEXP (x, 0)))
5321 case PRE_DEC:
5322 case POST_DEC:
5323 base = XEXP (XEXP (x, 0), 0);
5324 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5325 break;
5326 case PRE_INC:
5327 case POST_INC:
5328 base = XEXP (XEXP (x, 0), 0);
5329 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5330 break;
5331 case PLUS:
5332 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5333 fprintf (file, "%s(%s)",
5334 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5335 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5336 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5337 fprintf (file, "%s(%s)",
5338 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5339 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5340 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5341 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5343 /* Because the REG_POINTER flag can get lost during reload,
5344 pa_legitimate_address_p canonicalizes the order of the
5345 index and base registers in the combined move patterns. */
5346 rtx base = XEXP (XEXP (x, 0), 1);
5347 rtx index = XEXP (XEXP (x, 0), 0);
5349 fprintf (file, "%s(%s)",
5350 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5352 else
5353 output_address (XEXP (x, 0));
5354 break;
5355 default:
5356 output_address (XEXP (x, 0));
5357 break;
5360 else
5361 output_addr_const (file, x);
5364 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5366 void
5367 pa_output_global_address (FILE *file, rtx x, int round_constant)
5370 /* Imagine (high (const (plus ...))). */
5371 if (GET_CODE (x) == HIGH)
5372 x = XEXP (x, 0);
5374 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5375 output_addr_const (file, x);
5376 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5378 output_addr_const (file, x);
5379 fputs ("-$global$", file);
5381 else if (GET_CODE (x) == CONST)
5383 const char *sep = "";
5384 int offset = 0; /* assembler wants -$global$ at end */
5385 rtx base = NULL_RTX;
5387 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5389 case SYMBOL_REF:
5390 base = XEXP (XEXP (x, 0), 0);
5391 output_addr_const (file, base);
5392 break;
5393 case CONST_INT:
5394 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5395 break;
5396 default:
5397 gcc_unreachable ();
5400 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5402 case SYMBOL_REF:
5403 base = XEXP (XEXP (x, 0), 1);
5404 output_addr_const (file, base);
5405 break;
5406 case CONST_INT:
5407 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5408 break;
5409 default:
5410 gcc_unreachable ();
5413 /* How bogus. The compiler is apparently responsible for
5414 rounding the constant if it uses an LR field selector.
5416 The linker and/or assembler seem a better place since
5417 they have to do this kind of thing already.
5419 If we fail to do this, HP's optimizing linker may eliminate
5420 an addil, but not update the ldw/stw/ldo instruction that
5421 uses the result of the addil. */
5422 if (round_constant)
5423 offset = ((offset + 0x1000) & ~0x1fff);
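/* Worked example (illustrative): offset == 0x1234 rounds to
   (0x1234 + 0x1000) & ~0x1fff = 0x2234 & ~0x1fff = 0x2000,
   i.e. to the nearest 8k boundary, matching the rounding the
   LR field selector applies. */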
5425 switch (GET_CODE (XEXP (x, 0)))
5427 case PLUS:
5428 if (offset < 0)
5430 offset = -offset;
5431 sep = "-";
5433 else
5434 sep = "+";
5435 break;
5437 case MINUS:
5438 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5439 sep = "-";
5440 break;
5442 default:
5443 gcc_unreachable ();
5446 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5447 fputs ("-$global$", file);
5448 if (offset)
5449 fprintf (file, "%s%d", sep, offset);
5451 else
5452 output_addr_const (file, x);
5455 /* Output boilerplate text to appear at the beginning of the file.
5456 There are several possible versions. */
5457 #define aputs(x) fputs(x, asm_out_file)
5458 static inline void
5459 pa_file_start_level (void)
5461 if (TARGET_64BIT)
5462 aputs ("\t.LEVEL 2.0w\n");
5463 else if (TARGET_PA_20)
5464 aputs ("\t.LEVEL 2.0\n");
5465 else if (TARGET_PA_11)
5466 aputs ("\t.LEVEL 1.1\n");
5467 else
5468 aputs ("\t.LEVEL 1.0\n");
5471 static inline void
5472 pa_file_start_space (int sortspace)
5474 aputs ("\t.SPACE $PRIVATE$");
5475 if (sortspace)
5476 aputs (",SORT=16");
5477 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5478 if (flag_tm)
5479 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5480 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5481 "\n\t.SPACE $TEXT$");
5482 if (sortspace)
5483 aputs (",SORT=8");
5484 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5485 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5488 static inline void
5489 pa_file_start_file (int want_version)
5491 if (write_symbols != NO_DEBUG)
5493 output_file_directive (asm_out_file, main_input_filename);
5494 if (want_version)
5495 aputs ("\t.version\t\"01.01\"\n");
5499 static inline void
5500 pa_file_start_mcount (const char *aswhat)
5502 if (profile_flag)
5503 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5506 static void
5507 pa_elf_file_start (void)
5509 pa_file_start_level ();
5510 pa_file_start_mcount ("ENTRY");
5511 pa_file_start_file (0);
5514 static void
5515 pa_som_file_start (void)
5517 pa_file_start_level ();
5518 pa_file_start_space (0);
5519 aputs ("\t.IMPORT $global$,DATA\n"
5520 "\t.IMPORT $$dyncall,MILLICODE\n");
5521 pa_file_start_mcount ("CODE");
5522 pa_file_start_file (0);
5525 static void
5526 pa_linux_file_start (void)
5528 pa_file_start_file (1);
5529 pa_file_start_level ();
5530 pa_file_start_mcount ("CODE");
5533 static void
5534 pa_hpux64_gas_file_start (void)
5536 pa_file_start_level ();
5537 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5538 if (profile_flag)
5539 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5540 #endif
5541 pa_file_start_file (1);
5544 static void
5545 pa_hpux64_hpas_file_start (void)
5547 pa_file_start_level ();
5548 pa_file_start_space (1);
5549 pa_file_start_mcount ("CODE");
5550 pa_file_start_file (0);
5552 #undef aputs
5554 /* Search the deferred plabel list for SYMBOL and return its internal
5555 label. If an entry for SYMBOL is not found, a new entry is created. */
5557 rtx
5558 pa_get_deferred_plabel (rtx symbol)
5560 const char *fname = XSTR (symbol, 0);
5561 size_t i;
5563 /* See if we have already put this function on the list of deferred
5564 plabels. This list is generally small, so a linear search is not
5565 too ugly. If it proves too slow, replace it with something faster. */
5566 for (i = 0; i < n_deferred_plabels; i++)
5567 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5568 break;
5570 /* If the deferred plabel list is empty, or this entry was not found
5571 on the list, create a new entry on the list. */
5572 if (deferred_plabels == NULL || i == n_deferred_plabels)
5574 tree id;
5576 if (deferred_plabels == 0)
5577 deferred_plabels = ggc_alloc<deferred_plabel> ();
5578 else
5579 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5580 deferred_plabels,
5581 n_deferred_plabels + 1);
5583 i = n_deferred_plabels++;
5584 deferred_plabels[i].internal_label = gen_label_rtx ();
5585 deferred_plabels[i].symbol = symbol;
5587 /* Gross. We have just implicitly taken the address of this
5588 function. Mark it in the same manner as assemble_name. */
5589 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5590 if (id)
5591 mark_referenced (id);
5594 return deferred_plabels[i].internal_label;
5597 static void
5598 output_deferred_plabels (void)
5600 size_t i;
5602 /* If we have some deferred plabels, then we need to switch into the
5603 data or readonly data section, and align it to a 4 byte boundary
5604 before outputting the deferred plabels. */
5605 if (n_deferred_plabels)
5607 switch_to_section (flag_pic ? data_section : readonly_data_section);
5608 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5611 /* Now output the deferred plabels. */
5612 for (i = 0; i < n_deferred_plabels; i++)
5614 targetm.asm_out.internal_label (asm_out_file, "L",
5615 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5616 assemble_integer (deferred_plabels[i].symbol,
5617 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5621 /* Initialize optabs to point to emulation routines. */
5623 static void
5624 pa_init_libfuncs (void)
5626 if (HPUX_LONG_DOUBLE_LIBRARY)
5628 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5629 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5630 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5631 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5632 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5633 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5634 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5635 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5636 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5638 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5639 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5640 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5641 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5642 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5643 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5644 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5646 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5647 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5648 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5649 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5651 set_conv_libfunc (sfix_optab, SImode, TFmode,
5652 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5653 : "_U_Qfcnvfxt_quad_to_sgl");
5654 set_conv_libfunc (sfix_optab, DImode, TFmode,
5655 "_U_Qfcnvfxt_quad_to_dbl");
5656 set_conv_libfunc (ufix_optab, SImode, TFmode,
5657 "_U_Qfcnvfxt_quad_to_usgl");
5658 set_conv_libfunc (ufix_optab, DImode, TFmode,
5659 "_U_Qfcnvfxt_quad_to_udbl");
5661 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5662 "_U_Qfcnvxf_sgl_to_quad");
5663 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5664 "_U_Qfcnvxf_dbl_to_quad");
5665 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5666 "_U_Qfcnvxf_usgl_to_quad");
5667 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5668 "_U_Qfcnvxf_udbl_to_quad");
5671 if (TARGET_SYNC_LIBCALL)
5672 init_sync_libfuncs (UNITS_PER_WORD);
5675 /* HP's millicode routines mean something special to the assembler.
5676 Keep track of which ones we have used. */
5678 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5679 static void import_milli (enum millicodes);
5680 static char imported[(int) end1000];
5681 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5682 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5683 #define MILLI_START 10
5685 static void
5686 import_milli (enum millicodes code)
5688 char str[sizeof (import_string)];
5690 if (!imported[(int) code])
5692 imported[(int) code] = 1;
5693 strcpy (str, import_string);
5694 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5695 output_asm_insn (str, 0);
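/* Worked example (illustrative): import_milli (mulI) copies "mulI"
   over the four dots at offset MILLI_START in the template, so the
   first use emits

     .IMPORT $$mulI,MILLICODE

   and later uses emit nothing because imported[mulI] is then set. */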
5699 /* The register constraints have put the operands and return value in
5700 the proper registers. */
5702 const char *
5703 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5705 import_milli (mulI);
5706 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5709 /* Emit the rtl for doing a division by a constant. */
5711 /* Do magic division millicodes exist for this value? */
5712 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
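/* Reading of the table above (illustrative): magic millicode
   routines exist for divisors 3, 5, 6, 7, 9, 10, 12, 14 and 15;
   for these, pa_output_div_insn below calls $$divI_<n> or
   $$divU_<n> instead of the generic $$divI / $$divU. */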
5714 /* We'll use an array to keep track of the magic millicodes and
5715 whether or not we've used them already. [n][0] is signed, [n][1] is
5716 unsigned. */
5718 static int div_milli[16][2];
5720 int
5721 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5723 if (GET_CODE (operands[2]) == CONST_INT
5724 && INTVAL (operands[2]) > 0
5725 && INTVAL (operands[2]) < 16
5726 && pa_magic_milli[INTVAL (operands[2])])
5728 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5730 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5731 emit
5732 (gen_rtx_PARALLEL
5733 (VOIDmode,
5734 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5735 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5736 SImode,
5737 gen_rtx_REG (SImode, 26),
5738 operands[2])),
5739 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5740 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5741 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5742 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5743 gen_rtx_CLOBBER (VOIDmode, ret))));
5744 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5745 return 1;
5747 return 0;
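/* To summarize the millicode division convention encoded above: the
   dividend goes in %r26, the quotient comes back in %r29, and the
   millicode return pointer is %r31 (%r2 in the 64-bit runtime), which
   is why the PARALLEL clobbers %r25, %r26 and the return register.  */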
5750 const char *
5751 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5753 int divisor;
5755 /* If the divisor is a constant, try to use one of the special
5756 opcodes.  */
5757 if (GET_CODE (operands[0]) == CONST_INT)
5759 static char buf[100];
5760 divisor = INTVAL (operands[0]);
5761 if (!div_milli[divisor][unsignedp])
5763 div_milli[divisor][unsignedp] = 1;
5764 if (unsignedp)
5765 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5766 else
5767 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5769 if (unsignedp)
5771 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5772 INTVAL (operands[0]));
5773 return pa_output_millicode_call (insn,
5774 gen_rtx_SYMBOL_REF (SImode, buf));
5776 else
5778 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5779 INTVAL (operands[0]));
5780 return pa_output_millicode_call (insn,
5781 gen_rtx_SYMBOL_REF (SImode, buf));
5784 /* Divisor isn't a special constant. */
5785 else
5787 if (unsignedp)
5789 import_milli (divU);
5790 return pa_output_millicode_call (insn,
5791 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5793 else
5795 import_milli (divI);
5796 return pa_output_millicode_call (insn,
5797 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5802 /* Output a $$rem millicode to do mod. */
5804 const char *
5805 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5807 if (unsignedp)
5809 import_milli (remU);
5810 return pa_output_millicode_call (insn,
5811 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5813 else
5815 import_milli (remI);
5816 return pa_output_millicode_call (insn,
5817 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5821 void
5822 pa_output_arg_descriptor (rtx call_insn)
5824 const char *arg_regs[4];
5825 enum machine_mode arg_mode;
5826 rtx link;
5827 int i, output_flag = 0;
5828 int regno;
5830 /* We neither need nor want argument location descriptors for the
5831 64-bit runtime environment or the ELF32 environment. */
5832 if (TARGET_64BIT || TARGET_ELF32)
5833 return;
5835 for (i = 0; i < 4; i++)
5836 arg_regs[i] = 0;
5838 /* Specify explicitly that no argument relocations should take place
5839 if using the portable runtime calling conventions. */
5840 if (TARGET_PORTABLE_RUNTIME)
5842 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5843 asm_out_file);
5844 return;
5847 gcc_assert (CALL_P (call_insn));
5848 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5849 link; link = XEXP (link, 1))
5851 rtx use = XEXP (link, 0);
5853 if (! (GET_CODE (use) == USE
5854 && GET_CODE (XEXP (use, 0)) == REG
5855 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5856 continue;
5858 arg_mode = GET_MODE (XEXP (use, 0));
5859 regno = REGNO (XEXP (use, 0));
5860 if (regno >= 23 && regno <= 26)
5862 arg_regs[26 - regno] = "GR";
5863 if (arg_mode == DImode)
5864 arg_regs[25 - regno] = "GR";
5866 else if (regno >= 32 && regno <= 39)
5868 if (arg_mode == SFmode)
5869 arg_regs[(regno - 32) / 2] = "FR";
5870 else
5872 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5873 arg_regs[(regno - 34) / 2] = "FR";
5874 arg_regs[(regno - 34) / 2 + 1] = "FU";
5875 #else
5876 arg_regs[(regno - 34) / 2] = "FU";
5877 arg_regs[(regno - 34) / 2 + 1] = "FR";
5878 #endif
5882 fputs ("\t.CALL ", asm_out_file);
5883 for (i = 0; i < 4; i++)
5885 if (arg_regs[i])
5887 if (output_flag++)
5888 fputc (',', asm_out_file);
5889 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5892 fputc ('\n', asm_out_file);
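/* Illustrative output (not taken from the HP documentation): a 32-bit
   SOM call passing two ints and a double would be annotated
   ".CALL ARGW0=GR,ARGW1=GR,ARGW2=FR,ARGW3=FU" by the loop above,
   assuming HP_FP_ARG_DESCRIPTOR_REVERSED is not defined.  */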
5895 /* Inform reload about cases where moving X with a mode MODE to or from
5896 a register in RCLASS requires an extra scratch or immediate register.
5897 Return the class needed for the immediate register. */
5899 static reg_class_t
5900 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5901 enum machine_mode mode, secondary_reload_info *sri)
5903 int regno;
5904 enum reg_class rclass = (enum reg_class) rclass_i;
5906 /* Handle the easy stuff first. */
5907 if (rclass == R1_REGS)
5908 return NO_REGS;
5910 if (REG_P (x))
5912 regno = REGNO (x);
5913 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5914 return NO_REGS;
5916 else
5917 regno = -1;
5919 /* If we have something like (mem (mem (...))), we can safely assume the
5920 inner MEM will end up in a general register after reloading, so there's
5921 no need for a secondary reload. */
5922 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5923 return NO_REGS;
5925 /* Trying to load a constant into a FP register during PIC code
5926 generation requires %r1 as a scratch register. For float modes,
5927 the only legitimate constant is CONST0_RTX. However, there are
5928 a few patterns that accept constant double operands. */
5929 if (flag_pic
5930 && FP_REG_CLASS_P (rclass)
5931 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5933 switch (mode)
5935 case SImode:
5936 sri->icode = CODE_FOR_reload_insi_r1;
5937 break;
5939 case DImode:
5940 sri->icode = CODE_FOR_reload_indi_r1;
5941 break;
5943 case SFmode:
5944 sri->icode = CODE_FOR_reload_insf_r1;
5945 break;
5947 case DFmode:
5948 sri->icode = CODE_FOR_reload_indf_r1;
5949 break;
5951 default:
5952 gcc_unreachable ();
5954 return NO_REGS;
5957 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5958 register when we're generating PIC code or when the operand isn't
5959 readonly. */
5960 if (pa_symbolic_expression_p (x))
5962 if (GET_CODE (x) == HIGH)
5963 x = XEXP (x, 0);
5965 if (flag_pic || !read_only_operand (x, VOIDmode))
5967 switch (mode)
5969 case SImode:
5970 sri->icode = CODE_FOR_reload_insi_r1;
5971 break;
5973 case DImode:
5974 sri->icode = CODE_FOR_reload_indi_r1;
5975 break;
5977 default:
5978 gcc_unreachable ();
5980 return NO_REGS;
5984 /* Profiling showed the PA port spends about 1.3% of its compilation
5985 time in true_regnum from calls inside pa_secondary_reload_class. */
5986 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5987 regno = true_regnum (x);
5989 /* Handle reloads for floating point loads and stores. */
5990 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5991 && FP_REG_CLASS_P (rclass))
5993 if (MEM_P (x))
5995 x = XEXP (x, 0);
5997 /* We don't need an intermediate for indexed and LO_SUM DLT
5998 memory addresses. When INT14_OK_STRICT is true, it might
5999 appear that we could directly allow register indirect
6000 memory addresses. However, this doesn't work because we
6001 don't support SUBREGs in floating-point register copies
6002 and reload doesn't tell us when it's going to use a SUBREG. */
6003 if (IS_INDEX_ADDR_P (x)
6004 || IS_LO_SUM_DLT_ADDR_P (x))
6005 return NO_REGS;
6007 /* Request intermediate general register. */
6008 return GENERAL_REGS;
6011 /* Request a secondary reload with a general scratch register
6012 for everything else. ??? Could symbolic operands be handled
6013 directly when generating non-pic PA 2.0 code? */
6014 sri->icode = (in_p
6015 ? direct_optab_handler (reload_in_optab, mode)
6016 : direct_optab_handler (reload_out_optab, mode));
6017 return NO_REGS;
6020 /* A SAR<->FP register copy requires an intermediate general register
6021 and secondary memory. We need a secondary reload with a general
6022 scratch register for spills. */
6023 if (rclass == SHIFT_REGS)
6025 /* Handle spill. */
6026 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6028 sri->icode = (in_p
6029 ? direct_optab_handler (reload_in_optab, mode)
6030 : direct_optab_handler (reload_out_optab, mode));
6031 return NO_REGS;
6034 /* Handle FP copy. */
6035 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6036 return GENERAL_REGS;
6039 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6040 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6041 && FP_REG_CLASS_P (rclass))
6042 return GENERAL_REGS;
6044 return NO_REGS;
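/* Two illustrative walks through the logic above: reloading the
   constant 5 in SImode into an FP register under -fpic returns NO_REGS
   but sets sri->icode to CODE_FOR_reload_insi_r1, so reload emits the
   special pattern that uses %r1 as a scratch; a SAR<->FP copy instead
   returns GENERAL_REGS to request an intermediate general register.  */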
6047 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6048 is only marked as live on entry by df-scan when it is a fixed
6049 register. It isn't a fixed register in the 64-bit runtime,
6050 so we need to mark it here. */
6052 static void
6053 pa_extra_live_on_entry (bitmap regs)
6055 if (TARGET_64BIT)
6056 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6059 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6060 to prevent it from being deleted. */
6062 rtx
6063 pa_eh_return_handler_rtx (void)
6065 rtx tmp;
6067 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6068 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6069 tmp = gen_rtx_MEM (word_mode, tmp);
6070 tmp->volatil = 1;
6071 return tmp;
6074 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6075 by invisible reference. As a GCC extension, we also pass anything
6076 with a zero or variable size by reference.
6078 The 64-bit runtime does not describe passing any types by invisible
6079 reference. The internals of GCC can't currently handle passing
6080 empty structures, or zero- and variable-length arrays, when they are
6081 not passed entirely on the stack or by reference. Thus, as a GCC
6082 extension, we pass these types by reference. The HP compiler doesn't
6083 support these types, so hopefully there shouldn't be any compatibility
6084 issues. This may have to be revisited when HP releases a C99 compiler
6085 or updates the ABI. */
6087 static bool
6088 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6089 enum machine_mode mode, const_tree type,
6090 bool named ATTRIBUTE_UNUSED)
6092 HOST_WIDE_INT size;
6094 if (type)
6095 size = int_size_in_bytes (type);
6096 else
6097 size = GET_MODE_SIZE (mode);
6099 if (TARGET_64BIT)
6100 return size <= 0;
6101 else
6102 return size <= 0 || size > 8;
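/* Worked examples of the rule above: in the 32-bit runtime a 16-byte
   struct (size > 8) and a zero-sized struct both go by reference,
   while an 8-byte double is passed by value; in the 64-bit runtime
   only zero-sized or variable-sized objects go by reference.  */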
6105 enum direction
6106 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6108 if (mode == BLKmode
6109 || (TARGET_64BIT
6110 && type
6111 && (AGGREGATE_TYPE_P (type)
6112 || TREE_CODE (type) == COMPLEX_TYPE
6113 || TREE_CODE (type) == VECTOR_TYPE)))
6115 /* Return none if justification is not required. */
6116 if (type
6117 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6118 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6119 return none;
6121 /* The directions set here are ignored when a BLKmode argument larger
6122 than a word is placed in a register. Different code is used for
6123 the stack and registers. This makes it difficult to have a
6124 consistent data representation for both the stack and registers.
6125 For both runtimes, the justification and padding for arguments on
6126 the stack and in registers should be identical. */
6127 if (TARGET_64BIT)
6128 /* The 64-bit runtime specifies left justification for aggregates. */
6129 return upward;
6130 else
6131 /* The 32-bit runtime architecture specifies right justification.
6132 When the argument is passed on the stack, the argument is padded
6133 with garbage on the left. The HP compiler pads with zeros. */
6134 return downward;
6137 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6138 return downward;
6139 else
6140 return none;
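/* For example, a 2-byte short passed on the 32-bit stack is padded
   downward (right justified), so it occupies the low-order bytes of
   its parameter word, while a word-sized or larger scalar needs no
   padding at all.  */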
6144 /* Do what is necessary for `va_start'. We look at the current function
6145 to determine if stdargs or varargs is used and fill in an initial
6146 va_list. A pointer to this constructor is returned. */
6148 static rtx
6149 hppa_builtin_saveregs (void)
6151 rtx offset, dest;
6152 tree fntype = TREE_TYPE (current_function_decl);
6153 int argadj = ((!stdarg_p (fntype))
6154 ? UNITS_PER_WORD : 0);
6156 if (argadj)
6157 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6158 else
6159 offset = crtl->args.arg_offset_rtx;
6161 if (TARGET_64BIT)
6163 int i, off;
6165 /* Adjust for varargs/stdarg differences. */
6166 if (argadj)
6167 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6168 else
6169 offset = crtl->args.arg_offset_rtx;
6171 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6172 from the incoming arg pointer and growing to larger addresses. */
6173 for (i = 26, off = -64; i >= 19; i--, off += 8)
6174 emit_move_insn (gen_rtx_MEM (word_mode,
6175 plus_constant (Pmode,
6176 arg_pointer_rtx, off)),
6177 gen_rtx_REG (word_mode, i));
6179 /* The incoming args pointer points just beyond the flushback area;
6180 normally this is not a serious concern. However, when we are doing
6181 varargs/stdargs we want to make the arg pointer point to the start
6182 of the incoming argument area. */
6183 emit_move_insn (virtual_incoming_args_rtx,
6184 plus_constant (Pmode, arg_pointer_rtx, -64));
6186 /* Now return a pointer to the first anonymous argument. */
6187 return copy_to_reg (expand_binop (Pmode, add_optab,
6188 virtual_incoming_args_rtx,
6189 offset, 0, 0, OPTAB_LIB_WIDEN));
6192 /* Store general registers on the stack. */
6193 dest = gen_rtx_MEM (BLKmode,
6194 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6195 -16));
6196 set_mem_alias_set (dest, get_varargs_alias_set ());
6197 set_mem_align (dest, BITS_PER_WORD);
6198 move_block_from_reg (23, dest, 4);
6200 /* move_block_from_reg will emit code to store the argument registers
6201 individually as scalar stores.
6203 However, other insns may later load from the same addresses for
6204 a structure load (passing a struct to a varargs routine).
6206 The alias code assumes that such aliasing can never happen, so we
6207 have to keep memory referencing insns from moving up beyond the
6208 last argument register store. So we emit a blockage insn here. */
6209 emit_insn (gen_blockage ());
6211 return copy_to_reg (expand_binop (Pmode, add_optab,
6212 crtl->args.internal_arg_pointer,
6213 offset, 0, 0, OPTAB_LIB_WIDEN));
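/* Recapping the 32-bit path above: %r23..%r26 land at offsets -16,
   -12, -8 and -4 from the internal arg pointer, matching their stack
   slots since argument words grow toward lower addresses, so va_arg
   can walk every anonymous argument in memory.  */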
6216 static void
6217 hppa_va_start (tree valist, rtx nextarg)
6219 nextarg = expand_builtin_saveregs ();
6220 std_expand_builtin_va_start (valist, nextarg);
6223 static tree
6224 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6225 gimple_seq *post_p)
6227 if (TARGET_64BIT)
6229 /* Args grow upward. We can use the generic routines. */
6230 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6232 else /* !TARGET_64BIT */
6234 tree ptr = build_pointer_type (type);
6235 tree valist_type;
6236 tree t, u;
6237 unsigned int size, ofs;
6238 bool indirect;
6240 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6241 if (indirect)
6243 type = ptr;
6244 ptr = build_pointer_type (type);
6246 size = int_size_in_bytes (type);
6247 valist_type = TREE_TYPE (valist);
6249 /* Args grow down. Not handled by generic routines. */
6251 u = fold_convert (sizetype, size_in_bytes (type));
6252 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6253 t = fold_build_pointer_plus (valist, u);
6255 /* Align to 4 or 8 byte boundary depending on argument size. */
6257 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6258 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6259 t = fold_convert (valist_type, t);
6261 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6263 ofs = (8 - size) % 4;
6264 if (ofs != 0)
6265 t = fold_build_pointer_plus_hwi (t, ofs);
6267 t = fold_convert (ptr, t);
6268 t = build_va_arg_indirect_ref (t);
6270 if (indirect)
6271 t = build_va_arg_indirect_ref (t);
6273 return t;
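/* A worked example of the 32-bit pointer arithmetic above: for a
   4-byte int, valist is decremented by 4 and masked with -4, and
   ofs = (8 - 4) % 4 = 0; for a 2-byte short, valist is decremented
   by 2 and masked with -4, and ofs = (8 - 2) % 4 = 2 skips the left
   padding of the right-justified value.  */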
6277 /* True if MODE is valid for the target. By "valid", we mean able to
6278 be manipulated in non-trivial ways. In particular, this means all
6279 the arithmetic is supported.
6281 Currently, TImode is not valid as the HP 64-bit runtime documentation
6282 doesn't document the alignment and calling conventions for this type.
6283 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6284 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6286 static bool
6287 pa_scalar_mode_supported_p (enum machine_mode mode)
6289 int precision = GET_MODE_PRECISION (mode);
6291 switch (GET_MODE_CLASS (mode))
6293 case MODE_PARTIAL_INT:
6294 case MODE_INT:
6295 if (precision == CHAR_TYPE_SIZE)
6296 return true;
6297 if (precision == SHORT_TYPE_SIZE)
6298 return true;
6299 if (precision == INT_TYPE_SIZE)
6300 return true;
6301 if (precision == LONG_TYPE_SIZE)
6302 return true;
6303 if (precision == LONG_LONG_TYPE_SIZE)
6304 return true;
6305 return false;
6307 case MODE_FLOAT:
6308 if (precision == FLOAT_TYPE_SIZE)
6309 return true;
6310 if (precision == DOUBLE_TYPE_SIZE)
6311 return true;
6312 if (precision == LONG_DOUBLE_TYPE_SIZE)
6313 return true;
6314 return false;
6316 case MODE_DECIMAL_FLOAT:
6317 return false;
6319 default:
6320 gcc_unreachable ();
6324 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6325 it branches into the delay slot. Otherwise, return FALSE. */
6327 static bool
6328 branch_to_delay_slot_p (rtx_insn *insn)
6330 rtx jump_insn;
6332 if (dbr_sequence_length ())
6333 return FALSE;
6335 jump_insn = next_active_insn (JUMP_LABEL (insn));
6336 while (insn)
6338 insn = next_active_insn (insn);
6339 if (jump_insn == insn)
6340 return TRUE;
6342 /* We can't rely on the length of asms. So, we return FALSE when
6343 the branch is followed by an asm. */
6344 if (!insn
6345 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6346 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6347 || get_attr_length (insn) > 0)
6348 break;
6351 return FALSE;
6354 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6356 This occurs when INSN has an unfilled delay slot and is followed
6357 by an asm. Disaster can occur if the asm is empty and the jump
6358 branches into the delay slot. So, we add a nop in the delay slot
6359 when this occurs. */
6361 static bool
6362 branch_needs_nop_p (rtx_insn *insn)
6364 rtx jump_insn;
6366 if (dbr_sequence_length ())
6367 return FALSE;
6369 jump_insn = next_active_insn (JUMP_LABEL (insn));
6370 while (insn)
6372 insn = next_active_insn (insn);
6373 if (!insn || jump_insn == insn)
6374 return TRUE;
6376 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6377 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6378 && get_attr_length (insn) > 0)
6379 break;
6382 return FALSE;
6385 /* Return TRUE if INSN, a forward jump insn, can use nullification
6386 to skip the following instruction. This avoids an extra cycle due
6387 to a mis-predicted branch when we fall through. */
6389 static bool
6390 use_skip_p (rtx_insn *insn)
6392 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6394 while (insn)
6396 insn = next_active_insn (insn);
6398 /* We can't rely on the length of asms, so we can't skip asms. */
6399 if (!insn
6400 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6401 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6402 break;
6403 if (get_attr_length (insn) == 4
6404 && jump_insn == next_active_insn (insn))
6405 return TRUE;
6406 if (get_attr_length (insn) > 0)
6407 break;
6410 return FALSE;
6413 /* This routine handles all the normal conditional branch sequences we
6414 might need to generate. It handles compare immediate vs compare
6415 register, nullification of delay slots, varying length branches,
6416 negated branches, and all combinations of the above. It returns the
6417 output appropriate to emit the branch corresponding to all given
6418 parameters. */
6420 const char *
6421 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6423 static char buf[100];
6424 bool useskip;
6425 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6426 int length = get_attr_length (insn);
6427 int xdelay;
6429 /* A conditional branch to the following instruction (e.g. the delay slot)
6430 is asking for a disaster. This can happen when not optimizing and
6431 when jump optimization fails.
6433 While it is usually safe to emit nothing, this can fail if the
6434 preceding instruction is a nullified branch with an empty delay
6435 slot and the same branch target as this branch. We could check
6436 for this but jump optimization should eliminate nop jumps. It
6437 is always safe to emit a nop. */
6438 if (branch_to_delay_slot_p (insn))
6439 return "nop";
6441 /* The doubleword form of the cmpib instruction doesn't have the LEU
6442 and GTU conditions while the cmpb instruction does. Since we accept
6443 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6444 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6445 operands[2] = gen_rtx_REG (DImode, 0);
6446 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6447 operands[1] = gen_rtx_REG (DImode, 0);
6449 /* If this is a long branch with its delay slot unfilled, set `nullify'
6450 as it can nullify the delay slot and save a nop. */
6451 if (length == 8 && dbr_sequence_length () == 0)
6452 nullify = 1;
6454 /* If this is a short forward conditional branch which did not get
6455 its delay slot filled, the delay slot can still be nullified. */
6456 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6457 nullify = forward_branch_p (insn);
6459 /* A forward branch over a single nullified insn can be done with a
6460 comclr instruction. This avoids a single cycle penalty due to a
6461 mis-predicted branch if we fall through (branch not taken). */
6462 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6464 switch (length)
6466 /* All short conditional branches except backwards with an unfilled
6467 delay slot. */
6468 case 4:
6469 if (useskip)
6470 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6471 else
6472 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6473 if (GET_MODE (operands[1]) == DImode)
6474 strcat (buf, "*");
6475 if (negated)
6476 strcat (buf, "%B3");
6477 else
6478 strcat (buf, "%S3");
6479 if (useskip)
6480 strcat (buf, " %2,%r1,%%r0");
6481 else if (nullify)
6483 if (branch_needs_nop_p (insn))
6484 strcat (buf, ",n %2,%r1,%0%#");
6485 else
6486 strcat (buf, ",n %2,%r1,%0");
6488 else
6489 strcat (buf, " %2,%r1,%0");
6490 break;
6492 /* All long conditionals. Note a short backward branch with an
6493 unfilled delay slot is treated just like a long backward branch
6494 with an unfilled delay slot. */
6495 case 8:
6496 /* Handle weird backwards branch with a filled delay slot
6497 which is nullified. */
6498 if (dbr_sequence_length () != 0
6499 && ! forward_branch_p (insn)
6500 && nullify)
6502 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6503 if (GET_MODE (operands[1]) == DImode)
6504 strcat (buf, "*");
6505 if (negated)
6506 strcat (buf, "%S3");
6507 else
6508 strcat (buf, "%B3");
6509 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6511 /* Handle short backwards branch with an unfilled delay slot.
6512 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6513 taken and untaken branches. */
6514 else if (dbr_sequence_length () == 0
6515 && ! forward_branch_p (insn)
6516 && INSN_ADDRESSES_SET_P ()
6517 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6518 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6520 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6521 if (GET_MODE (operands[1]) == DImode)
6522 strcat (buf, "*");
6523 if (negated)
6524 strcat (buf, "%B3 %2,%r1,%0%#");
6525 else
6526 strcat (buf, "%S3 %2,%r1,%0%#");
6528 else
6530 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6531 if (GET_MODE (operands[1]) == DImode)
6532 strcat (buf, "*");
6533 if (negated)
6534 strcat (buf, "%S3");
6535 else
6536 strcat (buf, "%B3");
6537 if (nullify)
6538 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6539 else
6540 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6542 break;
6544 default:
6545 /* The reversed conditional branch must branch over one additional
6546 instruction if the delay slot is filled and needs to be extracted
6547 by pa_output_lbranch. If the delay slot is empty or this is a
6548 nullified forward branch, the instruction after the reversed
6549 condition branch must be nullified. */
6550 if (dbr_sequence_length () == 0
6551 || (nullify && forward_branch_p (insn)))
6553 nullify = 1;
6554 xdelay = 0;
6555 operands[4] = GEN_INT (length);
6557 else
6559 xdelay = 1;
6560 operands[4] = GEN_INT (length + 4);
6563 /* Create a reversed conditional branch which branches around
6564 the following insns. */
6565 if (GET_MODE (operands[1]) != DImode)
6567 if (nullify)
6569 if (negated)
6570 strcpy (buf,
6571 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6572 else
6573 strcpy (buf,
6574 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6576 else
6578 if (negated)
6579 strcpy (buf,
6580 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6581 else
6582 strcpy (buf,
6583 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6586 else
6588 if (nullify)
6590 if (negated)
6591 strcpy (buf,
6592 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6593 else
6594 strcpy (buf,
6595 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6597 else
6599 if (negated)
6600 strcpy (buf,
6601 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6602 else
6603 strcpy (buf,
6604 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6608 output_asm_insn (buf, operands);
6609 return pa_output_lbranch (operands[0], insn, xdelay);
6611 return buf;
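/* An illustrative instantiation of the length-4 case above: for an EQ
   comparison of two SImode registers with no nullification and a
   hypothetical label L$0123, the template expands to something like
   "comb,= %r25,%r26,L$0123" ("cmpb,=" under the PA 2.0 assembler
   dialect).  */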
6614 /* This routine handles output of long unconditional branches that
6615 exceed the maximum range of a simple branch instruction. Since
6616 we don't have a register available for the branch, we save register
6617 %r1 in the frame marker, load the branch destination DEST into %r1,
6618 execute the branch, and restore %r1 in the delay slot of the branch.
6620 Since long branches may have an insn in the delay slot and the
6621 delay slot is used to restore %r1, we in general need to extract
6622 this insn and execute it before the branch. However, to facilitate
6623 use of this function by conditional branches, we also provide an
6624 option to not extract the delay insn so that it will be emitted
6625 after the long branch. So, if there is an insn in the delay slot,
6626 it is extracted if XDELAY is nonzero.
6628 The lengths of the various long-branch sequences are 20, 16 and 24
6629 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
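/* For illustration, the 16-byte non-PIC sequence when the return
   pointer slot in the frame marker is free looks like

	stw %r1,-20(%r30)
	ldil L'target,%r1
	be R'target(%sr4,%r1)
	ldw -20(%r30),%r1

   where the final ldw restores %r1 from the delay slot of the be.  */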
6631 const char *
6632 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6634 rtx xoperands[2];
6636 xoperands[0] = dest;
6638 /* First, free up the delay slot. */
6639 if (xdelay && dbr_sequence_length () != 0)
6641 /* We can't handle a jump in the delay slot. */
6642 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6644 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6645 optimize, 0, NULL);
6647 /* Now delete the delay insn. */
6648 SET_INSN_DELETED (NEXT_INSN (insn));
6651 /* Output an insn to save %r1. The runtime documentation doesn't
6652 specify whether the "Clean Up" slot in the caller's frame can
6653 be clobbered by the callee. It isn't copied by HP's builtin
6654 alloca, so this suggests that it can be clobbered if necessary.
6655 The "Static Link" location is copied by HP builtin alloca, so
6656 we avoid using it. Using the cleanup slot might be a problem
6657 if we have to interoperate with languages that pass cleanup
6658 information. However, it should be possible to handle these
6659 situations with GCC's asm feature.
6661 The "Current RP" slot is reserved for the called procedure, so
6662 we try to use it when we don't have a frame of our own. It's
6663 rather unlikely that we won't have a frame when we need to emit
6664 a very long branch.
6666 Really the way to go long term is a register scavenger; go to
6667 the target of the jump and find a register which we can use
6668 as a scratch to hold the value in %r1. Then, we wouldn't have
6669 to free up the delay slot or clobber a slot that may be needed
6670 for other purposes. */
6671 if (TARGET_64BIT)
6673 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6674 /* Use the return pointer slot in the frame marker. */
6675 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6676 else
6677 /* Use the slot at -40 in the frame marker since HP builtin
6678 alloca doesn't copy it. */
6679 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6681 else
6683 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6684 /* Use the return pointer slot in the frame marker. */
6685 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6686 else
6687 /* Use the "Clean Up" slot in the frame marker. In GCC,
6688 the only other use of this location is for copying a
6689 floating point double argument from a floating-point
6690 register to two general registers. The copy is done
6691 as an "atomic" operation when outputting a call, so it
6692 won't interfere with our using the location here. */
6693 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6696 if (TARGET_PORTABLE_RUNTIME)
6698 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6699 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6700 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6702 else if (flag_pic)
6704 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6705 if (TARGET_SOM || !TARGET_GAS)
6707 xoperands[1] = gen_label_rtx ();
6708 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6709 targetm.asm_out.internal_label (asm_out_file, "L",
6710 CODE_LABEL_NUMBER (xoperands[1]));
6711 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6713 else
6715 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6716 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6718 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6720 else
6721 /* Now output a very long branch to the original target. */
6722 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6724 /* Now restore the value of %r1 in the delay slot. */
6725 if (TARGET_64BIT)
6727 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6728 return "ldd -16(%%r30),%%r1";
6729 else
6730 return "ldd -40(%%r30),%%r1";
6732 else
6734 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6735 return "ldw -20(%%r30),%%r1";
6736 else
6737 return "ldw -12(%%r30),%%r1";
6741 /* This routine handles all the branch-on-bit conditional branch sequences we
6742 might need to generate. It handles nullification of delay slots,
6743 varying length branches, negated branches and all combinations of the
6744 above.  It returns the appropriate output template to emit the branch. */
6746 const char *
6747 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6749 static char buf[100];
6750 bool useskip;
6751 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6752 int length = get_attr_length (insn);
6753 int xdelay;
6755 /* A conditional branch to the following instruction (e.g. the delay slot) is
6756 asking for a disaster. I do not think this can happen as this pattern
6757 is only used when optimizing; jump optimization should eliminate the
6758 jump. But be prepared just in case. */
6760 if (branch_to_delay_slot_p (insn))
6761 return "nop";
6763 /* If this is a long branch with its delay slot unfilled, set `nullify'
6764 as it can nullify the delay slot and save a nop. */
6765 if (length == 8 && dbr_sequence_length () == 0)
6766 nullify = 1;
6768 /* If this is a short forward conditional branch which did not get
6769 its delay slot filled, the delay slot can still be nullified. */
6770 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6771 nullify = forward_branch_p (insn);
6773 /* A forward branch over a single nullified insn can be done with an
6774 extrs instruction. This avoids a single cycle penalty due to a
6775 mis-predicted branch if we fall through (branch not taken). */
6776 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6778 switch (length)
6781 /* All short conditional branches except backwards with an unfilled
6782 delay slot. */
6783 case 4:
6784 if (useskip)
6785 strcpy (buf, "{extrs,|extrw,s,}");
6786 else
6787 strcpy (buf, "bb,");
6788 if (useskip && GET_MODE (operands[0]) == DImode)
6789 strcpy (buf, "extrd,s,*");
6790 else if (GET_MODE (operands[0]) == DImode)
6791 strcpy (buf, "bb,*");
6792 if ((which == 0 && negated)
6793 || (which == 1 && ! negated))
6794 strcat (buf, ">=");
6795 else
6796 strcat (buf, "<");
6797 if (useskip)
6798 strcat (buf, " %0,%1,1,%%r0");
6799 else if (nullify && negated)
6801 if (branch_needs_nop_p (insn))
6802 strcat (buf, ",n %0,%1,%3%#");
6803 else
6804 strcat (buf, ",n %0,%1,%3");
6806 else if (nullify && ! negated)
6808 if (branch_needs_nop_p (insn))
6809 strcat (buf, ",n %0,%1,%2%#");
6810 else
6811 strcat (buf, ",n %0,%1,%2");
6813 else if (! nullify && negated)
6814 strcat (buf, " %0,%1,%3");
6815 else if (! nullify && ! negated)
6816 strcat (buf, " %0,%1,%2");
6817 break;
6819 /* All long conditionals. Note a short backward branch with an
6820 unfilled delay slot is treated just like a long backward branch
6821 with an unfilled delay slot. */
6822 case 8:
6823 /* Handle weird backwards branch with a filled delay slot
6824 which is nullified. */
6825 if (dbr_sequence_length () != 0
6826 && ! forward_branch_p (insn)
6827 && nullify)
6829 strcpy (buf, "bb,");
6830 if (GET_MODE (operands[0]) == DImode)
6831 strcat (buf, "*");
6832 if ((which == 0 && negated)
6833 || (which == 1 && ! negated))
6834 strcat (buf, "<");
6835 else
6836 strcat (buf, ">=");
6837 if (negated)
6838 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6839 else
6840 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6842 /* Handle short backwards branch with an unfilled delay slot.
6843 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6844 taken and untaken branches. */
6845 else if (dbr_sequence_length () == 0
6846 && ! forward_branch_p (insn)
6847 && INSN_ADDRESSES_SET_P ()
6848 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6849 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6851 strcpy (buf, "bb,");
6852 if (GET_MODE (operands[0]) == DImode)
6853 strcat (buf, "*");
6854 if ((which == 0 && negated)
6855 || (which == 1 && ! negated))
6856 strcat (buf, ">=");
6857 else
6858 strcat (buf, "<");
6859 if (negated)
6860 strcat (buf, " %0,%1,%3%#");
6861 else
6862 strcat (buf, " %0,%1,%2%#");
6864 else
6866 if (GET_MODE (operands[0]) == DImode)
6867 strcpy (buf, "extrd,s,*");
6868 else
6869 strcpy (buf, "{extrs,|extrw,s,}");
6870 if ((which == 0 && negated)
6871 || (which == 1 && ! negated))
6872 strcat (buf, "<");
6873 else
6874 strcat (buf, ">=");
6875 if (nullify && negated)
6876 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6877 else if (nullify && ! negated)
6878 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6879 else if (negated)
6880 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6881 else
6882 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6884 break;
6886 default:
6887 /* The reversed conditional branch must branch over one additional
6888 instruction if the delay slot is filled and needs to be extracted
6889 by pa_output_lbranch. If the delay slot is empty or this is a
6890 nullified forward branch, the instruction after the reversed
6891 condition branch must be nullified. */
6892 if (dbr_sequence_length () == 0
6893 || (nullify && forward_branch_p (insn)))
6895 nullify = 1;
6896 xdelay = 0;
6897 operands[4] = GEN_INT (length);
6899 else
6901 xdelay = 1;
6902 operands[4] = GEN_INT (length + 4);
6905 if (GET_MODE (operands[0]) == DImode)
6906 strcpy (buf, "bb,*");
6907 else
6908 strcpy (buf, "bb,");
6909 if ((which == 0 && negated)
6910 || (which == 1 && !negated))
6911 strcat (buf, "<");
6912 else
6913 strcat (buf, ">=");
6914 if (nullify)
6915 strcat (buf, ",n %0,%1,.+%4");
6916 else
6917 strcat (buf, " %0,%1,.+%4");
6918 output_asm_insn (buf, operands);
6919 return pa_output_lbranch (negated ? operands[3] : operands[2],
6920 insn, xdelay);
6922 return buf;
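/* An illustrative expansion of the short form above: "bb,< %0,%1,%2"
   might come out as "bb,< %r26,5,L$0042", branching when bit 5 of %r26
   is 1 (PA numbers bits from the most significant end); the ">="
   condition branches when the bit is 0.  */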
6925 /* This routine handles all the branch-on-variable-bit conditional branch
6926 sequences we might need to generate. It handles nullification of delay
6927 slots, varying length branches, negated branches and all combinations
6928 of the above. it returns the appropriate output template to emit the
6929 branch. */
6931 const char *
6932 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
6933 int which)
6935 static char buf[100];
6936 bool useskip;
6937 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6938 int length = get_attr_length (insn);
6939 int xdelay;
6941 /* A conditional branch to the following instruction (e.g. the delay slot) is
6942 asking for a disaster. I do not think this can happen as this pattern
6943 is only used when optimizing; jump optimization should eliminate the
6944 jump. But be prepared just in case. */
6946 if (branch_to_delay_slot_p (insn))
6947 return "nop";
6949 /* If this is a long branch with its delay slot unfilled, set `nullify'
6950 as it can nullify the delay slot and save a nop. */
6951 if (length == 8 && dbr_sequence_length () == 0)
6952 nullify = 1;
6954 /* If this is a short forward conditional branch which did not get
6955 its delay slot filled, the delay slot can still be nullified. */
6956 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6957 nullify = forward_branch_p (insn);
6959 /* A forward branch over a single nullified insn can be done with an
6960 extrs instruction. This avoids a single cycle penalty due to a
6961 mis-predicted branch if we fall through (branch not taken). */
6962 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6964 switch (length)
6967 /* All short conditional branches except backwards with an unfilled
6968 delay slot. */
6969 case 4:
6970 if (useskip)
6971 strcpy (buf, "{vextrs,|extrw,s,}");
6972 else
6973 strcpy (buf, "{bvb,|bb,}");
6974 if (useskip && GET_MODE (operands[0]) == DImode)
6975 strcpy (buf, "extrd,s,*");
6976 else if (GET_MODE (operands[0]) == DImode)
6977 strcpy (buf, "bb,*");
6978 if ((which == 0 && negated)
6979 || (which == 1 && ! negated))
6980 strcat (buf, ">=");
6981 else
6982 strcat (buf, "<");
6983 if (useskip)
6984 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6985 else if (nullify && negated)
6987 if (branch_needs_nop_p (insn))
6988 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6989 else
6990 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6992 else if (nullify && ! negated)
6994 if (branch_needs_nop_p (insn))
6995 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
6996 else
6997 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6999 else if (! nullify && negated)
7000 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7001 else if (! nullify && ! negated)
7002 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7003 break;
7005 /* All long conditionals. Note a short backward branch with an
7006 unfilled delay slot is treated just like a long backward branch
7007 with an unfilled delay slot. */
7008 case 8:
7009 /* Handle weird backwards branch with a filled delay slot
7010 which is nullified. */
7011 if (dbr_sequence_length () != 0
7012 && ! forward_branch_p (insn)
7013 && nullify)
7015 strcpy (buf, "{bvb,|bb,}");
7016 if (GET_MODE (operands[0]) == DImode)
7017 strcat (buf, "*");
7018 if ((which == 0 && negated)
7019 || (which == 1 && ! negated))
7020 strcat (buf, "<");
7021 else
7022 strcat (buf, ">=");
7023 if (negated)
7024 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7025 else
7026 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7028 /* Handle short backwards branch with an unfilled delay slot.
7029 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7030 taken and untaken branches. */
7031 else if (dbr_sequence_length () == 0
7032 && ! forward_branch_p (insn)
7033 && INSN_ADDRESSES_SET_P ()
7034 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7035 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7037 strcpy (buf, "{bvb,|bb,}");
7038 if (GET_MODE (operands[0]) == DImode)
7039 strcat (buf, "*");
7040 if ((which == 0 && negated)
7041 || (which == 1 && ! negated))
7042 strcat (buf, ">=");
7043 else
7044 strcat (buf, "<");
7045 if (negated)
7046 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7047 else
7048 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7050 else
7052 strcpy (buf, "{vextrs,|extrw,s,}");
7053 if (GET_MODE (operands[0]) == DImode)
7054 strcpy (buf, "extrd,s,*");
7055 if ((which == 0 && negated)
7056 || (which == 1 && ! negated))
7057 strcat (buf, "<");
7058 else
7059 strcat (buf, ">=");
7060 if (nullify && negated)
7061 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7062 else if (nullify && ! negated)
7063 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7064 else if (negated)
7065 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7066 else
7067 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7069 break;
7071 default:
7072 /* The reversed conditional branch must branch over one additional
7073 instruction if the delay slot is filled and needs to be extracted
7074 by pa_output_lbranch. If the delay slot is empty or this is a
7075 nullified forward branch, the instruction after the reversed
7076 condition branch must be nullified. */
7077 if (dbr_sequence_length () == 0
7078 || (nullify && forward_branch_p (insn)))
7080 nullify = 1;
7081 xdelay = 0;
7082 operands[4] = GEN_INT (length);
7084 else
7086 xdelay = 1;
7087 operands[4] = GEN_INT (length + 4);
7090 if (GET_MODE (operands[0]) == DImode)
7091 strcpy (buf, "bb,*");
7092 else
7093 strcpy (buf, "{bvb,|bb,}");
7094 if ((which == 0 && negated)
7095 || (which == 1 && !negated))
7096 strcat (buf, "<");
7097 else
7098 strcat (buf, ">=");
7099 if (nullify)
7100 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7101 else
7102 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7103 output_asm_insn (buf, operands);
7104 return pa_output_lbranch (negated ? operands[3] : operands[2],
7105 insn, xdelay);
7107 return buf;
7110 /* Return the output template for emitting a dbra type insn.
7112 Note it may perform some output operations on its own before
7113 returning the final output string. */
7114 const char *
7115 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7117 int length = get_attr_length (insn);
7119 /* A conditional branch to the following instruction (e.g. the delay slot) is
7120 asking for a disaster. Be prepared! */
7122 if (branch_to_delay_slot_p (insn))
7124 if (which_alternative == 0)
7125 return "ldo %1(%0),%0";
7126 else if (which_alternative == 1)
7128 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7129 output_asm_insn ("ldw -16(%%r30),%4", operands);
7130 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7131 return "{fldws|fldw} -16(%%r30),%0";
7133 else
7135 output_asm_insn ("ldw %0,%4", operands);
7136 return "ldo %1(%4),%4\n\tstw %4,%0";
7140 if (which_alternative == 0)
7142 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7143 int xdelay;
7145 /* If this is a long branch with its delay slot unfilled, set `nullify'
7146 as it can nullify the delay slot and save a nop. */
7147 if (length == 8 && dbr_sequence_length () == 0)
7148 nullify = 1;
7150 /* If this is a short forward conditional branch which did not get
7151 its delay slot filled, the delay slot can still be nullified. */
7152 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7153 nullify = forward_branch_p (insn);
7155 switch (length)
7157 case 4:
7158 if (nullify)
7160 if (branch_needs_nop_p (insn))
7161 return "addib,%C2,n %1,%0,%3%#";
7162 else
7163 return "addib,%C2,n %1,%0,%3";
7165 else
7166 return "addib,%C2 %1,%0,%3";
7168 case 8:
7169 /* Handle weird backwards branch with a filled delay slot
7170 which is nullified. */
7171 if (dbr_sequence_length () != 0
7172 && ! forward_branch_p (insn)
7173 && nullify)
7174 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7175 /* Handle short backwards branch with an unfilled delay slot.
7176 Using a addb;nop rather than addi;bl saves 1 cycle for both
7177 taken and untaken branches. */
7178 else if (dbr_sequence_length () == 0
7179 && ! forward_branch_p (insn)
7180 && INSN_ADDRESSES_SET_P ()
7181 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7182 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7183 return "addib,%C2 %1,%0,%3%#";
7185 /* Handle normal cases. */
7186 if (nullify)
7187 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7188 else
7189 return "addi,%N2 %1,%0,%0\n\tb %3";
7191 default:
7192 /* The reversed conditional branch must branch over one additional
7193 instruction if the delay slot is filled and needs to be extracted
7194 by pa_output_lbranch. If the delay slot is empty or this is a
7195 nullified forward branch, the instruction after the reversed
7196 condition branch must be nullified. */
7197 if (dbr_sequence_length () == 0
7198 || (nullify && forward_branch_p (insn)))
7200 nullify = 1;
7201 xdelay = 0;
7202 operands[4] = GEN_INT (length);
7204 else
7206 xdelay = 1;
7207 operands[4] = GEN_INT (length + 4);
7210 if (nullify)
7211 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7212 else
7213 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7215 return pa_output_lbranch (operands[3], insn, xdelay);
7219 /* Deal with gross reload from FP register case. */
7220 else if (which_alternative == 1)
7222 /* Move the loop counter from the FP register to MEM, then into a GR,
7223 increment the GR, store the GR into MEM, and finally reload
7224 the FP register from MEM from within the branch's delay slot. */
7225 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7226 operands);
7227 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7228 if (length == 24)
7229 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7230 else if (length == 28)
7231 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7232 else
7234 operands[5] = GEN_INT (length - 16);
7235 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7236 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7237 return pa_output_lbranch (operands[3], insn, 0);
7240 /* Deal with gross reload from memory case. */
7241 else
7243 /* Reload the loop counter from memory; the store back to memory
7244 happens in the branch's delay slot. */
7245 output_asm_insn ("ldw %0,%4", operands);
7246 if (length == 12)
7247 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7248 else if (length == 16)
7249 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7250 else
7252 operands[5] = GEN_INT (length - 4);
7253 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7254 return pa_output_lbranch (operands[3], insn, 0);
7259 /* Return the output template for emitting a movb type insn.
7261 Note it may perform some output operations on its own before
7262 returning the final output string. */
7263 const char *
7264 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7265 int reverse_comparison)
7267 int length = get_attr_length (insn);
7269 /* A conditional branch to the following instruction (e.g. the delay slot) is
7270 asking for a disaster. Be prepared! */
7272 if (branch_to_delay_slot_p (insn))
7274 if (which_alternative == 0)
7275 return "copy %1,%0";
7276 else if (which_alternative == 1)
7278 output_asm_insn ("stw %1,-16(%%r30)", operands);
7279 return "{fldws|fldw} -16(%%r30),%0";
7281 else if (which_alternative == 2)
7282 return "stw %1,%0";
7283 else
7284 return "mtsar %r1";
7287 /* Support the second variant. */
7288 if (reverse_comparison)
7289 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7291 if (which_alternative == 0)
7293 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7294 int xdelay;
7296 /* If this is a long branch with its delay slot unfilled, set `nullify'
7297 as it can nullify the delay slot and save a nop. */
7298 if (length == 8 && dbr_sequence_length () == 0)
7299 nullify = 1;
7301 /* If this is a short forward conditional branch which did not get
7302 its delay slot filled, the delay slot can still be nullified. */
7303 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7304 nullify = forward_branch_p (insn);
7306 switch (length)
7308 case 4:
7309 if (nullify)
7311 if (branch_needs_nop_p (insn))
7312 return "movb,%C2,n %1,%0,%3%#";
7313 else
7314 return "movb,%C2,n %1,%0,%3";
7316 else
7317 return "movb,%C2 %1,%0,%3";
7319 case 8:
7320 /* Handle weird backwards branch with a filled delay slot
7321 which is nullified. */
7322 if (dbr_sequence_length () != 0
7323 && ! forward_branch_p (insn)
7324 && nullify)
7325 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7327 /* Handle short backwards branch with an unfilled delay slot.
7328 Using a movb;nop rather than or;bl saves 1 cycle for both
7329 taken and untaken branches. */
7330 else if (dbr_sequence_length () == 0
7331 && ! forward_branch_p (insn)
7332 && INSN_ADDRESSES_SET_P ()
7333 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7334 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7335 return "movb,%C2 %1,%0,%3%#";
7336 /* Handle normal cases. */
7337 if (nullify)
7338 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7339 else
7340 return "or,%N2 %1,%%r0,%0\n\tb %3";
7342 default:
7343 /* The reversed conditional branch must branch over one additional
7344 instruction if the delay slot is filled and needs to be extracted
7345 by pa_output_lbranch. If the delay slot is empty or this is a
7346 nullified forward branch, the instruction after the reversed
7347 condition branch must be nullified. */
7348 if (dbr_sequence_length () == 0
7349 || (nullify && forward_branch_p (insn)))
7351 nullify = 1;
7352 xdelay = 0;
7353 operands[4] = GEN_INT (length);
7355 else
7357 xdelay = 1;
7358 operands[4] = GEN_INT (length + 4);
7361 if (nullify)
7362 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7363 else
7364 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7366 return pa_output_lbranch (operands[3], insn, xdelay);
7369 /* Deal with gross reload for FP destination register case. */
7370 else if (which_alternative == 1)
7372 /* Move source register to MEM, perform the branch test, then
7373 finally load the FP register from MEM from within the branch's
7374 delay slot. */
7375 output_asm_insn ("stw %1,-16(%%r30)", operands);
7376 if (length == 12)
7377 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7378 else if (length == 16)
7379 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7380 else
7382 operands[4] = GEN_INT (length - 4);
7383 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7384 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7385 return pa_output_lbranch (operands[3], insn, 0);
7388 /* Deal with gross reload from memory case. */
7389 else if (which_alternative == 2)
7391 /* Reload the loop counter from memory; the store back to memory
7392 happens in the branch's delay slot. */
7393 if (length == 8)
7394 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7395 else if (length == 12)
7396 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7397 else
7399 operands[4] = GEN_INT (length);
7400 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7401 operands);
7402 return pa_output_lbranch (operands[3], insn, 0);
7405 /* Handle SAR as a destination. */
7406 else
7408 if (length == 8)
7409 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7410 else if (length == 12)
7411 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7412 else
7414 operands[4] = GEN_INT (length);
7415 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7416 operands);
7417 return pa_output_lbranch (operands[3], insn, 0);
7422 /* Copy any FP arguments in INSN into integer registers. */
7423 static void
7424 copy_fp_args (rtx insn)
7426 rtx link;
7427 rtx xoperands[2];
7429 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7431 int arg_mode, regno;
7432 rtx use = XEXP (link, 0);
7434 if (! (GET_CODE (use) == USE
7435 && GET_CODE (XEXP (use, 0)) == REG
7436 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7437 continue;
7439 arg_mode = GET_MODE (XEXP (use, 0));
7440 regno = REGNO (XEXP (use, 0));
7442 /* Is it a floating point register? */
7443 if (regno >= 32 && regno <= 39)
7445 /* Copy the FP register into an integer register via memory. */
7446 if (arg_mode == SFmode)
7448 xoperands[0] = XEXP (use, 0);
7449 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7450 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7451 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7453 else
7455 xoperands[0] = XEXP (use, 0);
7456 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7457 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7458 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7459 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7465 /* Compute length of the FP argument copy sequence for INSN. */
7466 static int
7467 length_fp_args (rtx insn)
7469 int length = 0;
7470 rtx link;
7472 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7474 int arg_mode, regno;
7475 rtx use = XEXP (link, 0);
7477 if (! (GET_CODE (use) == USE
7478 && GET_CODE (XEXP (use, 0)) == REG
7479 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7480 continue;
7482 arg_mode = GET_MODE (XEXP (use, 0));
7483 regno = REGNO (XEXP (use, 0));
7485 /* Is it a floating point register? */
7486 if (regno >= 32 && regno <= 39)
7488 if (arg_mode == SFmode)
7489 length += 8;
7490 else
7491 length += 12;
7495 return length;
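/* For example, a call passing one SFmode and one DFmode argument in FP
   registers adds 8 + 12 = 20 bytes: fstw/ldw for the single and fstd
   plus two ldw's for the double, matching copy_fp_args above.  */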
7498 /* Return the attribute length for the millicode call instruction INSN.
7499 The length must match the code generated by pa_output_millicode_call.
7500 We include the delay slot in the returned length as it is better to
7501 overestimate the length than to underestimate it. */
7503 int
7504 pa_attr_length_millicode_call (rtx_insn *insn)
7506 unsigned long distance = -1;
7507 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7509 if (INSN_ADDRESSES_SET_P ())
7511 distance = (total + insn_current_reference_address (insn));
7512 if (distance < total)
7513 distance = -1;
7516 if (TARGET_64BIT)
7518 if (!TARGET_LONG_CALLS && distance < 7600000)
7519 return 8;
7521 return 20;
7523 else if (TARGET_PORTABLE_RUNTIME)
7524 return 24;
7525 else
7527 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7528 return 8;
7530 if (!flag_pic)
7531 return 12;
7533 return 24;
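/* Cross-checking the lengths above against pa_output_millicode_call:
   8 bytes covers the short "{bl|b,l}" call plus its delay slot, 12
   covers the non-PIC ldil/ble pair plus the slot, and 20/24 cover the
   64-bit, PIC and portable-runtime long sequences plus the slot.  */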
7537 /* INSN is a function call. It may have an unconditional jump
7538 in its delay slot.
7540 CALL_DEST is the routine we are calling. */
7542 const char *
7543 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7545 int attr_length = get_attr_length (insn);
7546 int seq_length = dbr_sequence_length ();
7547 int distance;
7548 rtx seq_insn;
7549 rtx xoperands[3];
7551 xoperands[0] = call_dest;
7552 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7554 /* Handle the common case where we are sure that the branch will
7555 reach the beginning of the $CODE$ subspace. The within reach
7556 form of the $$sh_func_adrs call has a length of 28. Because it
7557 has an attribute type of sh_func_adrs, it never has a nonzero
7558 sequence length (i.e., the delay slot is never filled). */
7559 if (!TARGET_LONG_CALLS
7560 && (attr_length == 8
7561 || (attr_length == 28
7562 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7564 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7566 else
7568 if (TARGET_64BIT)
7570 /* It might seem that one insn could be saved by accessing
7571 the millicode function using the linkage table. However,
7572 this doesn't work in shared libraries and other dynamically
7573 loaded objects. Using a pc-relative sequence also avoids
7574 problems related to the implicit use of the gp register. */
7575 output_asm_insn ("b,l .+8,%%r1", xoperands);
7577 if (TARGET_GAS)
7579 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7580 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7582 else
7584 xoperands[1] = gen_label_rtx ();
7585 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7586 targetm.asm_out.internal_label (asm_out_file, "L",
7587 CODE_LABEL_NUMBER (xoperands[1]));
7588 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7591 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7593 else if (TARGET_PORTABLE_RUNTIME)
7595 /* Pure portable runtime doesn't allow be/ble; we also don't
7596 have PIC support in the assembler/linker, so this sequence
7597 is needed. */
7599 /* Get the address of our target into %r1. */
7600 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7601 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7603 /* Get our return address into %r31. */
7604 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7605 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7607 /* Jump to our target address in %r1. */
7608 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7610 else if (!flag_pic)
7612 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7613 if (TARGET_PA_20)
7614 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7615 else
7616 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7618 else
7620 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7621 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7623 if (TARGET_SOM || !TARGET_GAS)
7625 /* The HP assembler can generate relocations for the
7626 difference of two symbols. GAS can do this for a
7627 millicode symbol but not an arbitrary external
7628 symbol when generating SOM output. */
7629 xoperands[1] = gen_label_rtx ();
7630 targetm.asm_out.internal_label (asm_out_file, "L",
7631 CODE_LABEL_NUMBER (xoperands[1]));
7632 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7633 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7635 else
7637 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7638 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7639 xoperands);
7642 /* Jump to our target address in %r1. */
7643 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7647 if (seq_length == 0)
7648 output_asm_insn ("nop", xoperands);
7650 /* We are done if there isn't a jump in the delay slot. */
7651 if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
7652 return "";
7654 /* This call has an unconditional jump in its delay slot. */
7655 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7657 /* See if the return address can be adjusted. Use the containing
7658 sequence insn's address. */
7659 if (INSN_ADDRESSES_SET_P ())
7661 seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
7662 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7663 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7665 if (VAL_14_BITS_P (distance))
7667 xoperands[1] = gen_label_rtx ();
7668 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7669 targetm.asm_out.internal_label (asm_out_file, "L",
7670 CODE_LABEL_NUMBER (xoperands[1]));
7672 else
7673 /* ??? This branch may not reach its target. */
7674 output_asm_insn ("nop\n\tb,n %0", xoperands);
7676 else
7677 /* ??? This branch may not reach its target. */
7678 output_asm_insn ("nop\n\tb,n %0", xoperands);
7680 /* Delete the jump. */
7681 SET_INSN_DELETED (NEXT_INSN (insn));
7683 return "";
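/* Worked example of the return-address adjustment above (illustrative;
   the label and millicode name are hypothetical).  After the call, the
   link register points at the internal label, so adding the displacement
   to the jump target makes the millicode routine return directly there:

	bl $$mulI,%r31
	ldo L$target-L$0042(%r31),%r31
   L$0042:

   When the 14-bit displacement test fails, a "nop" plus an explicit
   "b,n L$target" is emitted instead.  */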
7686 /* Return the attribute length of the call instruction INSN. The SIBCALL
7687 flag indicates whether INSN is a regular call or a sibling call. The
7688 length returned must be longer than the code actually generated by
7689 pa_output_call. Since branch shortening is done before delay branch
7690 sequencing, there is no way to determine whether or not the delay
7691 slot will be filled during branch shortening. Even when the delay
7692 slot is filled, we may have to add a nop if the delay slot contains
7693 a branch that can't reach its target. Thus, we always have to include
7694 the delay slot in the length estimate. This used to be done in
7695 pa_adjust_insn_length but we do it here now as some sequences always
7696 fill the delay slot and we can save four bytes in the estimate for
7697 these sequences. */
7700 pa_attr_length_call (rtx_insn *insn, int sibcall)
7702 int local_call;
7703 rtx call, call_dest;
7704 tree call_decl;
7705 int length = 0;
7706 rtx pat = PATTERN (insn);
7707 unsigned long distance = -1;
7709 gcc_assert (CALL_P (insn));
7711 if (INSN_ADDRESSES_SET_P ())
7713 unsigned long total;
7715 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7716 distance = (total + insn_current_reference_address (insn));
7717 if (distance < total)
7718 distance = -1;
7721 gcc_assert (GET_CODE (pat) == PARALLEL);
7723 /* Get the call rtx. */
7724 call = XVECEXP (pat, 0, 0);
7725 if (GET_CODE (call) == SET)
7726 call = SET_SRC (call);
7728 gcc_assert (GET_CODE (call) == CALL);
7730 /* Determine if this is a local call. */
7731 call_dest = XEXP (XEXP (call, 0), 0);
7732 call_decl = SYMBOL_REF_DECL (call_dest);
7733 local_call = call_decl && targetm.binds_local_p (call_decl);
7735 /* pc-relative branch. */
7736 if (!TARGET_LONG_CALLS
7737 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7738 || distance < MAX_PCREL17F_OFFSET))
7739 length += 8;
7741 /* 64-bit plabel sequence. */
7742 else if (TARGET_64BIT && !local_call)
7743 length += sibcall ? 28 : 24;
7745 /* non-pic long absolute branch sequence. */
7746 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7747 length += 12;
7749 /* long pc-relative branch sequence. */
7750 else if (TARGET_LONG_PIC_SDIFF_CALL
7751 || (TARGET_GAS && !TARGET_SOM
7752 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7754 length += 20;
7756 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7757 length += 8;
7760 /* 32-bit plabel sequence. */
7761 else
7763 length += 32;
7765 if (TARGET_SOM)
7766 length += length_fp_args (insn);
7768 if (flag_pic)
7769 length += 4;
7771 if (!TARGET_PA_20)
7773 if (!sibcall)
7774 length += 8;
7776 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7777 length += 8;
7781 return length;
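/* Worked example (illustrative): a 32-bit PIC sibcall that needs the
   plabel sequence on a PA 1.x target with space registers starts at 32
   bytes, adds 4 for the PIC register load and 8 for the space-register
   setup, for an estimate of 32 + 4 + 8 = 44 bytes.  As noted above, this
   only has to be a safe upper bound, not the exact emitted size.  */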
7784 /* INSN is a function call. It may have an unconditional jump
7785 in its delay slot.
7787 CALL_DEST is the routine we are calling. */
7789 const char *
7790 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7792 int delay_insn_deleted = 0;
7793 int delay_slot_filled = 0;
7794 int seq_length = dbr_sequence_length ();
7795 tree call_decl = SYMBOL_REF_DECL (call_dest);
7796 int local_call = call_decl && targetm.binds_local_p (call_decl);
7797 rtx xoperands[2];
7799 xoperands[0] = call_dest;
7801 /* Handle the common case where we're sure that the branch will reach
7802 the beginning of the "$CODE$" subspace. This is the beginning of
7803 the current function if we are in a named section. */
7804 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7806 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7807 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7809 else
7811 if (TARGET_64BIT && !local_call)
7813 /* ??? As far as I can tell, the HP linker doesn't support the
7814 long pc-relative sequence described in the 64-bit runtime
7815 architecture. So, we use a slightly longer indirect call. */
7816 xoperands[0] = pa_get_deferred_plabel (call_dest);
7817 xoperands[1] = gen_label_rtx ();
7819 /* If this isn't a sibcall, we put the load of %r27 into the
7820 delay slot. We can't do this in a sibcall as we don't
7821 have a second call-clobbered scratch register available. */
7822 if (seq_length != 0
7823 && ! JUMP_P (NEXT_INSN (insn))
7824 && !sibcall)
7826 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7827 optimize, 0, NULL);
7829 /* Now delete the delay insn. */
7830 SET_INSN_DELETED (NEXT_INSN (insn));
7831 delay_insn_deleted = 1;
7834 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7835 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7836 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7838 if (sibcall)
7840 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7841 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7842 output_asm_insn ("bve (%%r1)", xoperands);
7844 else
7846 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7847 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7848 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7849 delay_slot_filled = 1;
7852 else
7854 int indirect_call = 0;
7856 /* Emit a long call. There are several different sequences
7857 of increasing length and complexity. In most cases,
7858 they don't allow an instruction in the delay slot. */
7859 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7860 && !TARGET_LONG_PIC_SDIFF_CALL
7861 && !(TARGET_GAS && !TARGET_SOM
7862 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7863 && !TARGET_64BIT)
7864 indirect_call = 1;
7866 if (seq_length != 0
7867 && ! JUMP_P (NEXT_INSN (insn))
7868 && !sibcall
7869 && (!TARGET_PA_20
7870 || indirect_call
7871 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7873 /* A non-jump insn in the delay slot. By definition we can
7874 emit this insn before the call (and in fact before argument
7875 relocating). */
7876 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7877 NULL);
7879 /* Now delete the delay insn. */
7880 SET_INSN_DELETED (NEXT_INSN (insn));
7881 delay_insn_deleted = 1;
7884 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7886 /* This is the best sequence for making long calls in
7887 non-pic code. Unfortunately, GNU ld doesn't provide
7888 the stub needed for external calls, and GAS's support
7889 for this with the SOM linker is buggy. It is safe
7890 to use this for local calls. */
7891 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7892 if (sibcall)
7893 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7894 else
7896 if (TARGET_PA_20)
7897 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7898 xoperands);
7899 else
7900 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7902 output_asm_insn ("copy %%r31,%%r2", xoperands);
7903 delay_slot_filled = 1;
7906 else
7908 if (TARGET_LONG_PIC_SDIFF_CALL)
7910 /* The HP assembler and linker can handle relocations
7911 for the difference of two symbols. The HP assembler
7912 recognizes the sequence as a pc-relative call and
7913 the linker provides stubs when needed. */
7914 xoperands[1] = gen_label_rtx ();
7915 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7916 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7917 targetm.asm_out.internal_label (asm_out_file, "L",
7918 CODE_LABEL_NUMBER (xoperands[1]));
7919 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7921 else if (TARGET_GAS && !TARGET_SOM
7922 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7924 /* GAS currently can't generate the relocations that
7925 are needed for the SOM linker under HP-UX using this
7926 sequence. The GNU linker doesn't generate the stubs
7927 that are needed for external calls on TARGET_ELF32
7928 with this sequence. For now, we have to use a
7929 longer plabel sequence when using GAS. */
7930 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7931 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7932 xoperands);
7933 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7934 xoperands);
7936 else
7938 /* Emit a long plabel-based call sequence. This is
7939 essentially an inline implementation of $$dyncall.
7940 We don't actually try to call $$dyncall as this is
7941 as difficult as calling the function itself. */
7942 xoperands[0] = pa_get_deferred_plabel (call_dest);
7943 xoperands[1] = gen_label_rtx ();
7945 /* Since the call is indirect, FP arguments in registers
7946 need to be copied to the general registers. Then, the
7947 argument relocation stub will copy them back. */
7948 if (TARGET_SOM)
7949 copy_fp_args (insn);
7951 if (flag_pic)
7953 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7954 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7955 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7957 else
7959 output_asm_insn ("addil LR'%0-$global$,%%r27",
7960 xoperands);
7961 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7962 xoperands);
7965 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7966 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7967 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7968 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7970 if (!sibcall && !TARGET_PA_20)
7972 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7973 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7974 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7975 else
7976 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7980 if (TARGET_PA_20)
7982 if (sibcall)
7983 output_asm_insn ("bve (%%r1)", xoperands);
7984 else
7986 if (indirect_call)
7988 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7989 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7990 delay_slot_filled = 1;
7992 else
7993 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7996 else
7998 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7999 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8000 xoperands);
8002 if (sibcall)
8004 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8005 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8006 else
8007 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8009 else
8011 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8012 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8013 else
8014 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8016 if (indirect_call)
8017 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8018 else
8019 output_asm_insn ("copy %%r31,%%r2", xoperands);
8020 delay_slot_filled = 1;
8027 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8028 output_asm_insn ("nop", xoperands);
8030 /* We are done if there isn't a jump in the delay slot. */
8031 if (seq_length == 0
8032 || delay_insn_deleted
8033 || ! JUMP_P (NEXT_INSN (insn)))
8034 return "";
8036 /* A sibcall should never have a branch in the delay slot. */
8037 gcc_assert (!sibcall);
8039 /* This call has an unconditional jump in its delay slot. */
8040 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8042 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8044 /* See if the return address can be adjusted. Use the containing
8045 sequence insn's address. This would break the regular call/return
8046 relationship assumed by the table-based EH unwinder, so only do that
8047 if the call is not possibly throwing. */
8048 rtx seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
8049 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8050 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8052 if (VAL_14_BITS_P (distance)
8053 && !(can_throw_internal (insn) || can_throw_external (insn)))
8055 xoperands[1] = gen_label_rtx ();
8056 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8057 targetm.asm_out.internal_label (asm_out_file, "L",
8058 CODE_LABEL_NUMBER (xoperands[1]));
8060 else
8061 output_asm_insn ("nop\n\tb,n %0", xoperands);
8063 else
8064 output_asm_insn ("b,n %0", xoperands);
8066 /* Delete the jump. */
8067 SET_INSN_DELETED (NEXT_INSN (insn));
8069 return "";
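/* Note on the plabel test in the sequence above (illustrative summary):
   "bb,>=,n %r1,30,.+16" skips the next three instructions when bit 30
   of the function pointer is clear, i.e., when it is a plain code
   address.  When the bit is set, the pointer designates a function
   descriptor: "depi 0,31,2,%r1" clears the low two bits, then the loads
   fetch the new global pointer (%r19) and the real entry point from the
   descriptor, mirroring what $$dyncall does.  */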
8072 /* Return the attribute length of the indirect call instruction INSN.
8073 The length must match the code generated by pa_output_indirect_call.
8074 The returned length includes the delay slot. Currently, the delay
8075 slot of an indirect call sequence is not exposed and it is used by
8076 the sequence itself. */
8079 pa_attr_length_indirect_call (rtx_insn *insn)
8081 unsigned long distance = -1;
8082 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8084 if (INSN_ADDRESSES_SET_P ())
8086 distance = (total + insn_current_reference_address (insn));
8087 if (distance < total)
8088 distance = -1;
8091 if (TARGET_64BIT)
8092 return 12;
8094 if (TARGET_FAST_INDIRECT_CALLS
8095 || (!TARGET_LONG_CALLS
8096 && !TARGET_PORTABLE_RUNTIME
8097 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8098 || distance < MAX_PCREL17F_OFFSET)))
8099 return 8;
8101 if (flag_pic)
8102 return 20;
8104 if (TARGET_PORTABLE_RUNTIME)
8105 return 16;
8107 /* Out of reach, can use ble. */
8108 return 12;
8111 const char *
8112 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8114 rtx xoperands[1];
8116 if (TARGET_64BIT)
8118 xoperands[0] = call_dest;
8119 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8120 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8121 return "";
8124 /* First the special case for kernels, level 0 systems, etc. */
8125 if (TARGET_FAST_INDIRECT_CALLS)
8126 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8128 /* Now the normal case -- we can reach $$dyncall directly or
8129 we're sure that we can get there via a long-branch stub.
8131 No need to check target flags as the length uniquely identifies
8132 the remaining cases. */
8133 if (pa_attr_length_indirect_call (insn) == 8)
8135 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8136 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8137 variant of the B,L instruction can't be used on the SOM target. */
8138 if (TARGET_PA_20 && !TARGET_SOM)
8139 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8140 else
8141 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8144 /* Long millicode call, but we are not generating PIC or portable runtime
8145 code. */
8146 if (pa_attr_length_indirect_call (insn) == 12)
8147 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8149 /* Long millicode call for portable runtime. */
8150 if (pa_attr_length_indirect_call (insn) == 16)
8151 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8153 /* We need a long PIC call to $$dyncall. */
8154 xoperands[0] = NULL_RTX;
8155 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8156 if (TARGET_SOM || !TARGET_GAS)
8158 xoperands[0] = gen_label_rtx ();
8159 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8160 targetm.asm_out.internal_label (asm_out_file, "L",
8161 CODE_LABEL_NUMBER (xoperands[0]));
8162 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8164 else
8166 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8167 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8168 xoperands);
8170 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8171 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8172 return "";
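/* For reference (summary, not emitted code): every sequence above uses
   the $$dyncall millicode convention -- the function pointer or plabel
   is passed in %r22, $$dyncall undoes any descriptor indirection, and
   the sequences arrange for the return address to end up in %r2.  */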
8175 /* In HPUX 8.0's shared library scheme, special relocations are needed
8176 for function labels if they might be passed to a function
8177 in a shared library (because shared libraries don't live in code
8178 space), and special magic is needed to construct their address. */
8180 void
8181 pa_encode_label (rtx sym)
8183 const char *str = XSTR (sym, 0);
8184 int len = strlen (str) + 1;
8185 char *newstr, *p;
8187 p = newstr = XALLOCAVEC (char, len + 1);
8188 *p++ = '@';
8189 strcpy (p, str);
8191 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
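/* Example (hypothetical symbol): for a function "foo" this rewrites the
   assembler name so that

     XSTR (sym, 0) == "foo"

   becomes

     XSTR (sym, 0) == "@foo"

   The '@' marks the symbol as a function label needing the special
   HPUX shared-library treatment described above.  */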
8194 static void
8195 pa_encode_section_info (tree decl, rtx rtl, int first)
8197 int old_referenced = 0;
8199 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8200 old_referenced
8201 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8203 default_encode_section_info (decl, rtl, first);
8205 if (first && TEXT_SPACE_P (decl))
8207 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8208 if (TREE_CODE (decl) == FUNCTION_DECL)
8209 pa_encode_label (XEXP (rtl, 0));
8211 else if (old_referenced)
8212 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8215 /* This is sort of inverse to pa_encode_section_info. */
8217 static const char *
8218 pa_strip_name_encoding (const char *str)
8220 str += (*str == '@');
8221 str += (*str == '*');
8222 return str;
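/* Usage sketch (hypothetical input): pa_strip_name_encoding ("@foo")
   returns "foo", and "@*foo" also yields "foo", since at most one '@'
   and one '*' prefix are skipped.  */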
8225 /* Returns 1 if OP is a function label involved in a simple addition
8226 with a constant. Used to keep certain patterns from matching
8227 during instruction combination. */
8229 pa_is_function_label_plus_const (rtx op)
8231 /* Strip off any CONST. */
8232 if (GET_CODE (op) == CONST)
8233 op = XEXP (op, 0);
8235 return (GET_CODE (op) == PLUS
8236 && function_label_operand (XEXP (op, 0), VOIDmode)
8237 && GET_CODE (XEXP (op, 1)) == CONST_INT);
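/* Illustrative RTL (hypothetical symbol) accepted by this predicate:

     (const (plus (symbol_ref "@foo") (const_int 4)))

   After the CONST wrapper is stripped, the PLUS combines a function
   label with a CONST_INT, so the function returns 1.  */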
8240 /* Output assembly code for a thunk to FUNCTION. */
8242 static void
8243 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8244 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8245 tree function)
8247 static unsigned int current_thunk_number;
8248 int val_14 = VAL_14_BITS_P (delta);
8249 unsigned int old_last_address = last_address, nbytes = 0;
8250 char label[16];
8251 rtx xoperands[4];
8253 xoperands[0] = XEXP (DECL_RTL (function), 0);
8254 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8255 xoperands[2] = GEN_INT (delta);
8257 final_start_function (emit_barrier (), file, 1);
8259 /* Output the thunk. We know that the function is in the same
8260 translation unit (i.e., the same space) as the thunk, and that
8261 thunks are output after their method. Thus, we don't need an
8262 external branch to reach the function. With SOM and GAS,
8263 functions and thunks are effectively in different sections.
8264 Thus, we can always use an IA-relative branch and the linker
8265 will add a long branch stub if necessary.
8267 However, we have to be careful when generating PIC code on the
8268 SOM port to ensure that the sequence does not transfer to an
8269 import stub for the target function as this could clobber the
8270 return value saved at SP-24. This would also apply to the
8271 32-bit linux port if the multi-space model is implemented. */
8272 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8273 && !(flag_pic && TREE_PUBLIC (function))
8274 && (TARGET_GAS || last_address < 262132))
8275 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8276 && ((targetm_common.have_named_sections
8277 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8278 /* The GNU 64-bit linker has rather poor stub management.
8279 So, we use a long branch from thunks that aren't in
8280 the same section as the target function. */
8281 && ((!TARGET_64BIT
8282 && (DECL_SECTION_NAME (thunk_fndecl)
8283 != DECL_SECTION_NAME (function)))
8284 || ((DECL_SECTION_NAME (thunk_fndecl)
8285 == DECL_SECTION_NAME (function))
8286 && last_address < 262132)))
8287 /* In this case, we need to be able to reach the start of
8288 the stub table even though the function is likely closer
8289 and can be jumped to directly. */
8290 || (targetm_common.have_named_sections
8291 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8292 && DECL_SECTION_NAME (function) == NULL
8293 && total_code_bytes < MAX_PCREL17F_OFFSET)
8294 /* Likewise. */
8295 || (!targetm_common.have_named_sections
8296 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8298 if (!val_14)
8299 output_asm_insn ("addil L'%2,%%r26", xoperands);
8301 output_asm_insn ("b %0", xoperands);
8303 if (val_14)
8305 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8306 nbytes += 8;
8308 else
8310 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8311 nbytes += 12;
8314 else if (TARGET_64BIT)
8316 /* We only have one call-clobbered scratch register, so we can't
8317 make use of the delay slot if delta doesn't fit in 14 bits. */
8318 if (!val_14)
8320 output_asm_insn ("addil L'%2,%%r26", xoperands);
8321 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8324 output_asm_insn ("b,l .+8,%%r1", xoperands);
8326 if (TARGET_GAS)
8328 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8329 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8331 else
8333 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8334 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8335 output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
8337 if (val_14)
8339 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8340 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8341 nbytes += 20;
8343 else
8345 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8346 nbytes += 24;
8349 else if (TARGET_PORTABLE_RUNTIME)
8351 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8352 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8354 if (!val_14)
8355 output_asm_insn ("addil L'%2,%%r26", xoperands);
8357 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8359 if (val_14)
8361 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8362 nbytes += 16;
8364 else
8366 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8367 nbytes += 20;
8370 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8372 /* The function is accessible from outside this module. The only
8373 way to avoid an import stub between the thunk and function is to
8374 call the function directly with an indirect sequence similar to
8375 that used by $$dyncall. This is possible because $$dyncall acts
8376 as the import stub in an indirect call. */
8377 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8378 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8379 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8380 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8381 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8382 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8383 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8384 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8385 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8387 if (!val_14)
8389 output_asm_insn ("addil L'%2,%%r26", xoperands);
8390 nbytes += 4;
8393 if (TARGET_PA_20)
8395 output_asm_insn ("bve (%%r22)", xoperands);
8396 nbytes += 36;
8398 else if (TARGET_NO_SPACE_REGS)
8400 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8401 nbytes += 36;
8403 else
8405 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8406 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8407 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8408 nbytes += 44;
8411 if (val_14)
8412 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8413 else
8414 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8416 else if (flag_pic)
8418 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8420 if (TARGET_SOM || !TARGET_GAS)
8422 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8423 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8425 else
8427 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8428 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8431 if (!val_14)
8432 output_asm_insn ("addil L'%2,%%r26", xoperands);
8434 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8436 if (val_14)
8438 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8439 nbytes += 20;
8441 else
8443 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8444 nbytes += 24;
8447 else
8449 if (!val_14)
8450 output_asm_insn ("addil L'%2,%%r26", xoperands);
8452 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8453 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8455 if (val_14)
8457 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8458 nbytes += 12;
8460 else
8462 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8463 nbytes += 16;
8467 final_end_function ();
8469 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8471 switch_to_section (data_section);
8472 output_asm_insn (".align 4", xoperands);
8473 ASM_OUTPUT_LABEL (file, label);
8474 output_asm_insn (".word P'%0", xoperands);
8477 current_thunk_number++;
8478 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8479 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8480 last_address += nbytes;
8481 if (old_last_address > last_address)
8482 last_address = UINT_MAX;
8483 update_total_code_bytes (nbytes);
8486 /* Only direct calls to static functions are allowed to be sibling (tail)
8487 call optimized.
8489 This restriction is necessary because some linker generated stubs will
8490 store return pointers into rp' in some cases which might clobber a
8491 live value already in rp'.
8493 In a sibcall the current function and the target function share stack
8494 space. Thus if the path to the current function and the path to the
8495 target function save a value in rp', they save the value into the
8496 same stack slot, which has undesirable consequences.
8498 Because of the deferred binding nature of shared libraries any function
8499 with external scope could be in a different load module and thus require
8500 rp' to be saved when calling that function. So sibcall optimizations
8501 can only be safe for static functions.
8503 Note that GCC never needs return value relocations, so we don't have to
8504 worry about static calls with return value relocations (which require
8505 saving rp').
8507 It is safe to perform a sibcall optimization when the target function
8508 will never return. */
8509 static bool
8510 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8512 if (TARGET_PORTABLE_RUNTIME)
8513 return false;
8515 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8516 single subspace mode and the call is not indirect. As far as I know,
8517 there is no operating system support for the multiple subspace mode.
8518 It might be possible to support indirect calls if we didn't use
8519 $$dyncall (see the indirect sequence generated in pa_output_call). */
8520 if (TARGET_ELF32)
8521 return (decl != NULL_TREE);
8523 /* Sibcalls are not ok because the arg pointer register is not a fixed
8524 register. This prevents the sibcall optimization from occurring. In
8525 addition, there are problems with stub placement using GNU ld. This
8526 is because a normal sibcall branch uses a 17-bit relocation while
8527 a regular call branch uses a 22-bit relocation. As a result, more
8528 care needs to be taken in the placement of long-branch stubs. */
8529 if (TARGET_64BIT)
8530 return false;
8532 /* Sibcalls are only ok within a translation unit. */
8533 return (decl && !TREE_PUBLIC (decl));
8536 /* ??? Addition is not commutative on the PA due to the weird implicit
8537 space register selection rules for memory addresses. Therefore, we
8538 don't consider a + b == b + a, as this might be inside a MEM. */
8539 static bool
8540 pa_commutative_p (const_rtx x, int outer_code)
8542 return (COMMUTATIVE_P (x)
8543 && (TARGET_NO_SPACE_REGS
8544 || (outer_code != UNKNOWN && outer_code != MEM)
8545 || GET_CODE (x) != PLUS));
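/* Sketch of the hazard (illustrative): inside a MEM the implicit space
   register is selected from the base term of the address, so

     (mem (plus (reg %r26) (reg %r25)))

   and its operand-swapped form may access different spaces when space
   registers are live.  Outside a MEM, or with TARGET_NO_SPACE_REGS,
   PLUS is treated as commutative as usual.  */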
8548 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8549 use in fmpyadd instructions. */
8551 pa_fmpyaddoperands (rtx *operands)
8553 enum machine_mode mode = GET_MODE (operands[0]);
8555 /* Must be a floating point mode. */
8556 if (mode != SFmode && mode != DFmode)
8557 return 0;
8559 /* All modes must be the same. */
8560 if (! (mode == GET_MODE (operands[1])
8561 && mode == GET_MODE (operands[2])
8562 && mode == GET_MODE (operands[3])
8563 && mode == GET_MODE (operands[4])
8564 && mode == GET_MODE (operands[5])))
8565 return 0;
8567 /* All operands must be registers. */
8568 if (! (GET_CODE (operands[1]) == REG
8569 && GET_CODE (operands[2]) == REG
8570 && GET_CODE (operands[3]) == REG
8571 && GET_CODE (operands[4]) == REG
8572 && GET_CODE (operands[5]) == REG))
8573 return 0;
8575 /* Only 2 real operands to the addition. One of the input operands must
8576 be the same as the output operand. */
8577 if (! rtx_equal_p (operands[3], operands[4])
8578 && ! rtx_equal_p (operands[3], operands[5]))
8579 return 0;
8581 /* Inout operand of add cannot conflict with any operands from multiply. */
8582 if (rtx_equal_p (operands[3], operands[0])
8583 || rtx_equal_p (operands[3], operands[1])
8584 || rtx_equal_p (operands[3], operands[2]))
8585 return 0;
8587 /* multiply cannot feed into addition operands. */
8588 if (rtx_equal_p (operands[4], operands[0])
8589 || rtx_equal_p (operands[5], operands[0]))
8590 return 0;
8592 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8593 if (mode == SFmode
8594 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8595 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8596 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8597 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8598 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8599 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8600 return 0;
8602 /* Passed. Operands are suitable for fmpyadd. */
8603 return 1;
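/* Operand layout accepted above (sketch; hypothetical DFmode hard
   registers):

     fr22 = fr24 * fr26		operands[0], operands[1], operands[2]
     fr28 = fr28 + fr30		operands[3], operands[4], operands[5]

   This passes: the add reuses its own output (operands[3] equals
   operands[4]) and neither addition operand overlaps the multiply's
   registers, so both operations can issue as one fmpyadd.  */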
8606 #if !defined(USE_COLLECT2)
8607 static void
8608 pa_asm_out_constructor (rtx symbol, int priority)
8610 if (!function_label_operand (symbol, VOIDmode))
8611 pa_encode_label (symbol);
8613 #ifdef CTORS_SECTION_ASM_OP
8614 default_ctor_section_asm_out_constructor (symbol, priority);
8615 #else
8616 # ifdef TARGET_ASM_NAMED_SECTION
8617 default_named_section_asm_out_constructor (symbol, priority);
8618 # else
8619 default_stabs_asm_out_constructor (symbol, priority);
8620 # endif
8621 #endif
8624 static void
8625 pa_asm_out_destructor (rtx symbol, int priority)
8627 if (!function_label_operand (symbol, VOIDmode))
8628 pa_encode_label (symbol);
8630 #ifdef DTORS_SECTION_ASM_OP
8631 default_dtor_section_asm_out_destructor (symbol, priority);
8632 #else
8633 # ifdef TARGET_ASM_NAMED_SECTION
8634 default_named_section_asm_out_destructor (symbol, priority);
8635 # else
8636 default_stabs_asm_out_destructor (symbol, priority);
8637 # endif
8638 #endif
8640 #endif
8642 /* This function places uninitialized global data in the bss section.
8643 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8644 function on the SOM port to prevent uninitialized global data from
8645 being placed in the data section. */
8647 void
8648 pa_asm_output_aligned_bss (FILE *stream,
8649 const char *name,
8650 unsigned HOST_WIDE_INT size,
8651 unsigned int align)
8653 switch_to_section (bss_section);
8654 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8656 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8657 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8658 #endif
8660 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8661 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8662 #endif
8664 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8665 ASM_OUTPUT_LABEL (stream, name);
8666 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8669 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8670 that doesn't allow the alignment of global common storage to be directly
8671 specified. The SOM linker aligns common storage based on the rounded
8672 value of the NUM_BYTES parameter in the .comm directive. It's not
8673 possible to use the .align directive as it doesn't affect the alignment
8674 of the label associated with a .comm directive. */
8676 void
8677 pa_asm_output_aligned_common (FILE *stream,
8678 const char *name,
8679 unsigned HOST_WIDE_INT size,
8680 unsigned int align)
8682 unsigned int max_common_align;
8684 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8685 if (align > max_common_align)
8687 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8688 "for global common data. Using %u",
8689 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8690 align = max_common_align;
8693 switch_to_section (bss_section);
8695 assemble_name (stream, name);
8696 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8697 MAX (size, align / BITS_PER_UNIT));
8700 /* We can't use .comm for local common storage as the SOM linker effectively
8701 treats the symbol as universal and uses the same storage for local symbols
8702 with the same name in different object files. The .block directive
8703 reserves an uninitialized block of storage. However, it's not common
8704 storage. Fortunately, GCC never requests common storage with the same
8705 name in any given translation unit. */
8707 void
8708 pa_asm_output_aligned_local (FILE *stream,
8709 const char *name,
8710 unsigned HOST_WIDE_INT size,
8711 unsigned int align)
8713 switch_to_section (bss_section);
8714 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8716 #ifdef LOCAL_ASM_OP
8717 fprintf (stream, "%s", LOCAL_ASM_OP);
8718 assemble_name (stream, name);
8719 fprintf (stream, "\n");
8720 #endif
8722 ASM_OUTPUT_LABEL (stream, name);
8723 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8726 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8727 use in fmpysub instructions. */
8729 pa_fmpysuboperands (rtx *operands)
8731 enum machine_mode mode = GET_MODE (operands[0]);
8733 /* Must be a floating point mode. */
8734 if (mode != SFmode && mode != DFmode)
8735 return 0;
8737 /* All modes must be the same. */
8738 if (! (mode == GET_MODE (operands[1])
8739 && mode == GET_MODE (operands[2])
8740 && mode == GET_MODE (operands[3])
8741 && mode == GET_MODE (operands[4])
8742 && mode == GET_MODE (operands[5])))
8743 return 0;
8745 /* All operands must be registers. */
8746 if (! (GET_CODE (operands[1]) == REG
8747 && GET_CODE (operands[2]) == REG
8748 && GET_CODE (operands[3]) == REG
8749 && GET_CODE (operands[4]) == REG
8750 && GET_CODE (operands[5]) == REG))
8751 return 0;
8753 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8754 operation, so operands[4] must be the same as operands[3]. */
8755 if (! rtx_equal_p (operands[3], operands[4]))
8756 return 0;
8758 /* multiply cannot feed into subtraction. */
8759 if (rtx_equal_p (operands[5], operands[0]))
8760 return 0;
8762 /* Inout operand of sub cannot conflict with any operands from multiply. */
8763 if (rtx_equal_p (operands[3], operands[0])
8764 || rtx_equal_p (operands[3], operands[1])
8765 || rtx_equal_p (operands[3], operands[2]))
8766 return 0;
8768 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8769 if (mode == SFmode
8770 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8771 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8772 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8773 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8774 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8775 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8776 return 0;
8778 /* Passed. Operands are suitable for fmpysub. */
8779 return 1;
8782 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8783 constants for shadd instructions. */
8785 pa_shadd_constant_p (int val)
8787 if (val == 2 || val == 4 || val == 8)
8788 return 1;
8789 else
8790 return 0;
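/* These are exactly the scale factors of the PA shift-and-add
   instructions sh1add, sh2add and sh3add, which multiply one register
   by 2, 4 or 8 and add another in a single insn, e.g. (illustrative):

	sh2add %r25,%r26,%r28	; %r28 = %r25 * 4 + %r26
*/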
8793 /* Return TRUE if INSN branches forward. */
8795 static bool
8796 forward_branch_p (rtx_insn *insn)
8798 rtx lab = JUMP_LABEL (insn);
8800 /* The INSN must have a jump label. */
8801 gcc_assert (lab != NULL_RTX);
8803 if (INSN_ADDRESSES_SET_P ())
8804 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8806 while (insn)
8808 if (insn == lab)
8809 return true;
8810 else
8811 insn = NEXT_INSN (insn);
8814 return false;
8817 /* Return 1 if INSN is in the delay slot of a call instruction. */
8819 pa_jump_in_call_delay (rtx_insn *insn)
8822 if (! JUMP_P (insn))
8823 return 0;
8825 if (PREV_INSN (insn)
8826 && PREV_INSN (PREV_INSN (insn))
8827 && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
8829 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8831 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8832 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8835 else
8836 return 0;
8839 /* Output an unconditional move and branch insn. */
8841 const char *
8842 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8844 int length = get_attr_length (insn);
8846 /* These are the cases in which we win. */
8847 if (length == 4)
8848 return "mov%I1b,tr %1,%0,%2";
8850 /* None of the following cases win, but they don't lose either. */
8851 if (length == 8)
8853 if (dbr_sequence_length () == 0)
8855 /* Nothing in the delay slot, fake it by putting the combined
8856 insn (the copy or add) in the delay slot of a bl. */
8857 if (GET_CODE (operands[1]) == CONST_INT)
8858 return "b %2\n\tldi %1,%0";
8859 else
8860 return "b %2\n\tcopy %1,%0";
8862 else
8864 /* Something in the delay slot, but we've got a long branch. */
8865 if (GET_CODE (operands[1]) == CONST_INT)
8866 return "ldi %1,%0\n\tb %2";
8867 else
8868 return "copy %1,%0\n\tb %2";
8872 if (GET_CODE (operands[1]) == CONST_INT)
8873 output_asm_insn ("ldi %1,%0", operands);
8874 else
8875 output_asm_insn ("copy %1,%0", operands);
8876 return pa_output_lbranch (operands[2], insn, 1);
8879 /* Output an unconditional add and branch insn. */
8881 const char *
8882 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8884 int length = get_attr_length (insn);
8886 /* To make life easy we want operand0 to be the shared input/output
8887 operand and operand1 to be the readonly operand. */
8888 if (operands[0] == operands[1])
8889 operands[1] = operands[2];
8891 /* These are the cases in which we win. */
8892 if (length == 4)
8893 return "add%I1b,tr %1,%0,%3";
8895 /* None of the following cases win, but they don't lose either. */
8896 if (length == 8)
8898 if (dbr_sequence_length () == 0)
8899 /* Nothing in the delay slot, fake it by putting the combined
8900 insn (the copy or add) in the delay slot of a bl. */
8901 return "b %3\n\tadd%I1 %1,%0,%0";
8902 else
8903 /* Something in the delay slot, but we've got a long branch. */
8904 return "add%I1 %1,%0,%0\n\tb %3";
8907 output_asm_insn ("add%I1 %1,%0,%0", operands);
8908 return pa_output_lbranch (operands[3], insn, 1);
8911 /* Return nonzero if INSN (a jump insn) immediately follows a call
8912 to a named function. This is used to avoid filling the delay slot
8913 of the jump since it can usually be eliminated by modifying RP in
8914 the delay slot of the call. */
8917 pa_following_call (rtx_insn *insn)
8919 if (! TARGET_JUMP_IN_DELAY)
8920 return 0;
8922 /* Find the previous real insn, skipping NOTEs. */
8923 insn = PREV_INSN (insn);
8924 while (insn && NOTE_P (insn))
8925 insn = PREV_INSN (insn);
8927 /* Check for CALL_INSNs and millicode calls. */
8928 if (insn
8929 && ((CALL_P (insn)
8930 && get_attr_type (insn) != TYPE_DYNCALL)
8931 || (NONJUMP_INSN_P (insn)
8932 && GET_CODE (PATTERN (insn)) != SEQUENCE
8933 && GET_CODE (PATTERN (insn)) != USE
8934 && GET_CODE (PATTERN (insn)) != CLOBBER
8935 && get_attr_type (insn) == TYPE_MILLI)))
8936 return 1;
8938 return 0;
8941 /* We use this hook to perform a PA specific optimization which is difficult
8942 to do in earlier passes. */
8944 static void
8945 pa_reorg (void)
8947 remove_useless_addtr_insns (1);
8949 if (pa_cpu < PROCESSOR_8000)
8950 pa_combine_instructions ();
8953 /* The PA has a number of odd instructions which can perform multiple
8954 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8955 it may be profitable to combine two instructions into one instruction
8956 with two outputs. It's not profitable on PA2.0 machines because the
8957 two outputs would take two slots in the reorder buffers.
8959 This routine finds instructions which can be combined and combines
8960 them. We only support some of the potential combinations, and we
8961 only try common ways to find suitable instructions.
8963 * addb can add two registers or a register and a small integer
8964 and jump to a nearby (+-8k) location. Normally the jump to the
8965 nearby location is conditional on the result of the add, but by
8966 using the "true" condition we can make the jump unconditional.
8967 Thus addb can perform two independent operations in one insn.
8969 * movb is similar to addb in that it can perform a reg->reg
8970 or small immediate->reg copy and jump to a nearby (+-8k) location.
8972 * fmpyadd and fmpysub can perform a FP multiply and either an
8973 FP add or FP sub if the operands of the multiply and add/sub are
8974 independent (there are other minor restrictions). Note both
8975 the fmpy and fadd/fsub can in theory move to better spots according
8976 to data dependencies, but for now we require the fmpy stay at a
8977 fixed location.
8979 * Many of the memory operations can perform pre & post updates
8980 of index registers. GCC's pre/post increment/decrement addressing
8981 is far too simple to take advantage of all the possibilities. This
8982 pass may not be suitable since those insns may not be independent.
8984 * comclr can compare two ints or an int and a register, nullify
8985 the following instruction and zero some other register. This
8986 is more difficult to use as it's harder to find an insn which
8987 will generate a comclr than to find something like an unconditional
8988 branch. (conditional moves & long branches create comclr insns).
8990 * Most arithmetic operations can conditionally skip the next
8991 instruction. They can be viewed as "perform this operation
8992 and conditionally jump to this nearby location" (where nearby
8993 is a few insns away). These are difficult to use due to the
8994 branch length restrictions. */
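/* For example (illustrative; registers and label hypothetical), the
   separate insns

	add %r25,%r26,%r26
	b,n L$0042

   can be combined into the single insn

	addb,tr %r25,%r26,L$0042

   where the always-true completer keeps the branch unconditional while
   the addition still takes effect.  */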
8996 static void
8997 pa_combine_instructions (void)
8999 rtx_insn *anchor;
9001 /* This can get expensive since the basic algorithm is on the
9002 order of O(n^2) (or worse). Only do it for -O2 or higher
9003 levels of optimization. */
9004 if (optimize < 2)
9005 return;
9007 /* Walk down the list of insns looking for "anchor" insns which
9008 may be combined with "floating" insns. As the name implies,
9009 "anchor" instructions don't move, while "floating" insns may
9010 move around. */
9011 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9012 rtx_insn *new_rtx = make_insn_raw (par);
9014 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9016 enum attr_pa_combine_type anchor_attr;
9017 enum attr_pa_combine_type floater_attr;
9019 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9020 Also ignore any special USE insns. */
9021 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9022 || GET_CODE (PATTERN (anchor)) == USE
9023 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9024 continue;
9026 anchor_attr = get_attr_pa_combine_type (anchor);
9027 /* See if anchor is an insn suitable for combination. */
9028 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9029 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9030 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9031 && ! forward_branch_p (anchor)))
9033 rtx_insn *floater;
9035 for (floater = PREV_INSN (anchor);
9036 floater;
9037 floater = PREV_INSN (floater))
9039 if (NOTE_P (floater)
9040 || (NONJUMP_INSN_P (floater)
9041 && (GET_CODE (PATTERN (floater)) == USE
9042 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9043 continue;
9045 /* Anything except a regular INSN will stop our search. */
9046 if (! NONJUMP_INSN_P (floater))
9048 floater = NULL;
9049 break;
9052 /* See if FLOATER is suitable for combination with the
9053 anchor. */
9054 floater_attr = get_attr_pa_combine_type (floater);
9055 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9056 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9057 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9058 && floater_attr == PA_COMBINE_TYPE_FMPY))
9060 /* If ANCHOR and FLOATER can be combined, then we're
9061 done with this pass. */
9062 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9063 SET_DEST (PATTERN (floater)),
9064 XEXP (SET_SRC (PATTERN (floater)), 0),
9065 XEXP (SET_SRC (PATTERN (floater)), 1)))
9066 break;
9069 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9070 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9072 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9074 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9075 SET_DEST (PATTERN (floater)),
9076 XEXP (SET_SRC (PATTERN (floater)), 0),
9077 XEXP (SET_SRC (PATTERN (floater)), 1)))
9078 break;
9080 else
9082 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9083 SET_DEST (PATTERN (floater)),
9084 SET_SRC (PATTERN (floater)),
9085 SET_SRC (PATTERN (floater))))
9086 break;
9091 /* If we didn't find anything on the backwards scan try forwards. */
9092 if (!floater
9093 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9094 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9096 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9098 if (NOTE_P (floater)
9099 || (NONJUMP_INSN_P (floater)
9100 && (GET_CODE (PATTERN (floater)) == USE
9101 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9103 continue;
9105 /* Anything except a regular INSN will stop our search. */
9106 if (! NONJUMP_INSN_P (floater))
9108 floater = NULL;
9109 break;
9112 /* See if FLOATER is suitable for combination with the
9113 anchor. */
9114 floater_attr = get_attr_pa_combine_type (floater);
9115 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9116 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9117 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9118 && floater_attr == PA_COMBINE_TYPE_FMPY))
9120 /* If ANCHOR and FLOATER can be combined, then we're
9121 done with this pass. */
9122 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9123 SET_DEST (PATTERN (floater)),
9124 XEXP (SET_SRC (PATTERN (floater)),
9125 0),
9126 XEXP (SET_SRC (PATTERN (floater)),
9127 1)))
9128 break;
9133 /* FLOATER will be nonzero if we found a suitable floating
9134 insn for combination with ANCHOR. */
9135 if (floater
9136 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9137 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9139 /* Emit the new instruction and delete the old anchor. */
9140 emit_insn_before (gen_rtx_PARALLEL
9141 (VOIDmode,
9142 gen_rtvec (2, PATTERN (anchor),
9143 PATTERN (floater))),
9144 anchor);
9146 SET_INSN_DELETED (anchor);
9148 /* Emit a special USE insn for FLOATER, then delete
9149 the floating insn. */
9150 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9151 delete_insn (floater);
9153 continue;
9155 else if (floater
9156 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9158 rtx temp;
9159 /* Emit the new_jump instruction and delete the old anchor. */
9160 temp
9161 = emit_jump_insn_before (gen_rtx_PARALLEL
9162 (VOIDmode,
9163 gen_rtvec (2, PATTERN (anchor),
9164 PATTERN (floater))),
9165 anchor);
9167 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9168 SET_INSN_DELETED (anchor);
9170 /* Emit a special USE insn for FLOATER, then delete
9171 the floating insn. */
9172 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9173 delete_insn (floater);
9174 continue;
9180 static int
9181 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9182 int reversed, rtx dest,
9183 rtx src1, rtx src2)
9185 int insn_code_number;
9186 rtx_insn *start, *end;
9188 /* Create a PARALLEL with the patterns of ANCHOR and
9189 FLOATER, try to recognize it, then test constraints
9190 for the resulting pattern.
9192 If the pattern doesn't match or the constraints
9193 aren't met keep searching for a suitable floater
9194 insn. */
9195 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9196 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9197 INSN_CODE (new_rtx) = -1;
9198 insn_code_number = recog_memoized (new_rtx);
9199 if (insn_code_number < 0
9200 || (extract_insn (new_rtx), ! constrain_operands (1)))
9201 return 0;
9203 if (reversed)
9205 start = anchor;
9206 end = floater;
9208 else
9210 start = floater;
9211 end = anchor;
9214 /* There's up to three operands to consider. One
9215 output and two inputs.
9217 The output must not be used between FLOATER & ANCHOR
9218 exclusive. The inputs must not be set between
9219 FLOATER and ANCHOR exclusive. */
9221 if (reg_used_between_p (dest, start, end))
9222 return 0;
9224 if (reg_set_between_p (src1, start, end))
9225 return 0;
9227 if (reg_set_between_p (src2, start, end))
9228 return 0;
9230 /* If we get here, then everything is good. */
9231 return 1;
9234 /* Return nonzero if references for INSN are delayed.
9236 Millicode insns are actually function calls with some special
9237 constraints on arguments and register usage.
9239 Millicode calls always expect their arguments in the integer argument
9240 registers, and always return their result in %r29 (ret1). They
9241 are expected to clobber their arguments, %r1, %r29, and the return
9242 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9244 This function tells reorg that the references to the arguments of a
9245 millicode call do not appear to happen until after the millicode call.
9246 This allows reorg to put insns which set the argument registers into the
9247 delay slot of the millicode call -- thus they act more like traditional
9248 CALL_INSNs.
9250 Note we cannot consider side effects of the insn to be delayed because
9251 the branch and link insn will clobber the return pointer. If we happened
9252 to use the return pointer in the delay slot of the call, then we lose.
9254 get_attr_type will try to recognize the given insn, so make sure to
9255 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9256 in particular. */
9258 pa_insn_refs_are_delayed (rtx_insn *insn)
9260 return ((NONJUMP_INSN_P (insn)
9261 && GET_CODE (PATTERN (insn)) != SEQUENCE
9262 && GET_CODE (PATTERN (insn)) != USE
9263 && GET_CODE (PATTERN (insn)) != CLOBBER
9264 && get_attr_type (insn) == TYPE_MILLI));
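/* Illustrative effect (hypothetical code): given

	ldi 7,%r25
	bl $$mulI,%r31

   reorg may move the argument setup into the delay slot,

	bl $$mulI,%r31
	ldi 7,%r25

   because this hook reports that the millicode call's references to its
   argument registers are delayed past the branch itself.  */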
9267 /* Promote the return value, but not the arguments. */
9269 static enum machine_mode
9270 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9271 enum machine_mode mode,
9272 int *punsignedp ATTRIBUTE_UNUSED,
9273 const_tree fntype ATTRIBUTE_UNUSED,
9274 int for_return)
9276 if (for_return == 0)
9277 return mode;
9278 return promote_mode (type, mode, punsignedp);
9281 /* On the HP-PA the value is found in register(s) 28(-29), unless
9282 the mode is SF or DF. Then the value is returned in fr4 (32).
9284 This must perform the same promotions as PROMOTE_MODE, else promoting
9285 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9287 Small structures must be returned in a PARALLEL on PA64 in order
9288 to match the HP Compiler ABI. */
9290 static rtx
9291 pa_function_value (const_tree valtype,
9292 const_tree func ATTRIBUTE_UNUSED,
9293 bool outgoing ATTRIBUTE_UNUSED)
9295 enum machine_mode valmode;
9297 if (AGGREGATE_TYPE_P (valtype)
9298 || TREE_CODE (valtype) == COMPLEX_TYPE
9299 || TREE_CODE (valtype) == VECTOR_TYPE)
9301 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9303 /* Handle aggregates that fit exactly in a word or double word. */
9304 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9305 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9307 if (TARGET_64BIT)
9309 /* Aggregates with a size less than or equal to 128 bits are
9310 returned in GR 28(-29). They are left justified. The pad
9311 bits are undefined. Larger aggregates are returned in
9312 memory. */
9313 rtx loc[2];
9314 int i, offset = 0;
9315 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9317 for (i = 0; i < ub; i++)
9319 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9320 gen_rtx_REG (DImode, 28 + i),
9321 GEN_INT (offset));
9322 offset += 8;
9325 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9327 else if (valsize > UNITS_PER_WORD)
9329 /* Aggregates 5 to 8 bytes in size are returned in general
9330 registers r28-r29 in the same manner as other non
9331 floating-point objects. The data is right-justified and
9332 zero-extended to 64 bits. This is opposite to the normal
9333 justification used on big endian targets and requires
9334 special treatment. */
9335 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9336 gen_rtx_REG (DImode, 28), const0_rtx);
9337 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9341 if ((INTEGRAL_TYPE_P (valtype)
9342 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9343 || POINTER_TYPE_P (valtype))
9344 valmode = word_mode;
9345 else
9346 valmode = TYPE_MODE (valtype);
9348 if (TREE_CODE (valtype) == REAL_TYPE
9349 && !AGGREGATE_TYPE_P (valtype)
9350 && TYPE_MODE (valtype) != TFmode
9351 && !TARGET_SOFT_FLOAT)
9352 return gen_rtx_REG (valmode, 32);
9354 return gen_rtx_REG (valmode, 28);
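/* Examples (illustrative): on the 32-bit port an "int" is returned in
   %r28, a "double" in fr4 (register 32 here), and a 6-byte struct right
   justified in the pair %r28-%r29 via the single-entry PARALLEL above.
   On the 64-bit port the same struct comes back left justified in %r28
   through the BLKmode PARALLEL built for small aggregates.  */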
9357 /* Implement the TARGET_LIBCALL_VALUE hook. */
9359 static rtx
9360 pa_libcall_value (enum machine_mode mode,
9361 const_rtx fun ATTRIBUTE_UNUSED)
9363 if (! TARGET_SOFT_FLOAT
9364 && (mode == SFmode || mode == DFmode))
9365 return gen_rtx_REG (mode, 32);
9366 else
9367 return gen_rtx_REG (mode, 28);
9370 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9372 static bool
9373 pa_function_value_regno_p (const unsigned int regno)
9375 if (regno == 28
9376 || (! TARGET_SOFT_FLOAT && regno == 32))
9377 return true;
9379 return false;
9382 /* Update the data in CUM to advance over an argument
9383 of mode MODE and data type TYPE.
9384 (TYPE is null for libcalls where that information may not be available.) */
9386 static void
9387 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9388 const_tree type, bool named ATTRIBUTE_UNUSED)
9390 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9391 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9393 cum->nargs_prototype--;
9394 cum->words += (arg_size
9395 + ((cum->words & 01)
9396 && type != NULL_TREE
9397 && arg_size > 1));
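/* Worked example (hypothetical state): with cum->words == 1 and a
   prototyped "double" argument (arg_size == 2), the expression adds a
   padding word to align the argument, so cum->words becomes
   1 + 2 + 1 == 4; a following one-word "int" needs no padding and
   leaves it at 5.  */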
9400 /* Return the location of a parameter that is passed in a register or NULL
9401 if the parameter has any component that is passed in memory.
9403 This is new code and will be pushed into the net sources after
9404 further testing.
9406 ??? We might want to restructure this so that it looks more like other
9407 ports. */
9408 static rtx
9409 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9410 const_tree type, bool named ATTRIBUTE_UNUSED)
9412 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9413 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9414 int alignment = 0;
9415 int arg_size;
9416 int fpr_reg_base;
9417 int gpr_reg_base;
9418 rtx retval;
9420 if (mode == VOIDmode)
9421 return NULL_RTX;
9423 arg_size = FUNCTION_ARG_SIZE (mode, type);
9425 /* If this arg would be passed partially or totally on the stack, then
9426 this routine should return zero. pa_arg_partial_bytes will
9427 handle arguments which are split between regs and stack slots if
9428 the ABI mandates split arguments. */
9429 if (!TARGET_64BIT)
9431 /* The 32-bit ABI does not split arguments. */
9432 if (cum->words + arg_size > max_arg_words)
9433 return NULL_RTX;
9435 else
9437 if (arg_size > 1)
9438 alignment = cum->words & 1;
9439 if (cum->words + alignment >= max_arg_words)
9440 return NULL_RTX;
9443 /* The 32bit ABIs and the 64bit ABIs are rather different,
9444 particularly in their handling of FP registers. We might
9445 be able to cleverly share code between them, but I'm not
9446 going to bother in the hope that splitting them up results
9447 in code that is more easily understood. */
9449 if (TARGET_64BIT)
9451 /* Advance the base registers to their current locations.
9453 Remember, gprs grow towards smaller register numbers while
9454 fprs grow to higher register numbers. Also remember that
9455 although FP regs are 32-bit addressable, we pretend that
9456 the registers are 64 bits wide. */
9457 gpr_reg_base = 26 - cum->words;
9458 fpr_reg_base = 32 + cum->words;
9460 /* Arguments wider than one word and small aggregates need special
9461 treatment. */
9462 if (arg_size > 1
9463 || mode == BLKmode
9464 || (type && (AGGREGATE_TYPE_P (type)
9465 || TREE_CODE (type) == COMPLEX_TYPE
9466 || TREE_CODE (type) == VECTOR_TYPE)))
9468 /* Double-extended precision (80-bit), quad-precision (128-bit)
9469 and aggregates including complex numbers are aligned on
9470 128-bit boundaries. The first eight 64-bit argument slots
9471 are associated one-to-one, with general registers r26
9472 through r19, and also with floating-point registers fr4
9473 through fr11. Arguments larger than one word are always
9474 passed in general registers.
9476 Using a PARALLEL with a word mode register results in left
9477 justified data on a big-endian target. */
9479 rtx loc[8];
9480 int i, offset = 0, ub = arg_size;
9482 /* Align the base register. */
9483 gpr_reg_base -= alignment;
9485 ub = MIN (ub, max_arg_words - cum->words - alignment);
9486 for (i = 0; i < ub; i++)
9488 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9489 gen_rtx_REG (DImode, gpr_reg_base),
9490 GEN_INT (offset));
9491 gpr_reg_base -= 1;
9492 offset += 8;
9495 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9498 else
9500 /* If the argument is larger than a word, then we know precisely
9501 which registers we must use. */
9502 if (arg_size > 1)
9504 if (cum->words)
9506 gpr_reg_base = 23;
9507 fpr_reg_base = 38;
9509 else
9511 gpr_reg_base = 25;
9512 fpr_reg_base = 34;
9515 /* Structures 5 to 8 bytes in size are passed in the general
9516 registers in the same manner as other non-floating-point
9517 objects. The data is right-justified and zero-extended
9518 to 64 bits. This is opposite to the normal justification
9519 used on big-endian targets and requires special treatment.
9520 We now define BLOCK_REG_PADDING to pad these objects.
9521 Aggregates, complex and vector types are passed in the same
9522 manner as structures. */
9523 if (mode == BLKmode
9524 || (type && (AGGREGATE_TYPE_P (type)
9525 || TREE_CODE (type) == COMPLEX_TYPE
9526 || TREE_CODE (type) == VECTOR_TYPE)))
9528 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9529 gen_rtx_REG (DImode, gpr_reg_base),
9530 const0_rtx);
9531 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9534 else
9536 /* We have a single word (32 bits). A simple computation
9537 will get us the register #s we need. */
9538 gpr_reg_base = 26 - cum->words;
9539 fpr_reg_base = 32 + 2 * cum->words;
9543 /* Determine if the argument needs to be passed in both general and
9544 floating point registers. */
9545 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9546 /* If we are doing soft-float with portable runtime, then there
9547 is no need to worry about FP regs. */
9548 && !TARGET_SOFT_FLOAT
9549 /* The parameter must be some kind of scalar float, else we just
9550 pass it in integer registers. */
9551 && GET_MODE_CLASS (mode) == MODE_FLOAT
9552 /* The target function must not have a prototype. */
9553 && cum->nargs_prototype <= 0
9554 /* libcalls do not need to pass items in both FP and general
9555 registers. */
9556 && type != NULL_TREE
9557 /* All this hair applies to "outgoing" args only. This includes
9558 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9559 && !cum->incoming)
9560 /* Also pass outgoing floating arguments in both registers in indirect
9561 calls with the 32-bit ABI and the HP assembler since there is no
9562 way to specify argument locations in static functions. */
9563 || (!TARGET_64BIT
9564 && !TARGET_GAS
9565 && !cum->incoming
9566 && cum->indirect
9567 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9569 retval
9570 = gen_rtx_PARALLEL
9571 (mode,
9572 gen_rtvec (2,
9573 gen_rtx_EXPR_LIST (VOIDmode,
9574 gen_rtx_REG (mode, fpr_reg_base),
9575 const0_rtx),
9576 gen_rtx_EXPR_LIST (VOIDmode,
9577 gen_rtx_REG (mode, gpr_reg_base),
9578 const0_rtx)));
9580 else
9582 /* See if we should pass this parameter in a general register. */
9583 if (TARGET_SOFT_FLOAT
9584 /* Indirect calls in the normal 32bit ABI require all arguments
9585 to be passed in general registers. */
9586 || (!TARGET_PORTABLE_RUNTIME
9587 && !TARGET_64BIT
9588 && !TARGET_ELF32
9589 && cum->indirect)
9590 /* If the parameter is not a scalar floating-point parameter,
9591 then it belongs in GPRs. */
9592 || GET_MODE_CLASS (mode) != MODE_FLOAT
9593 /* Structure with single SFmode field belongs in GPR. */
9594 || (type && AGGREGATE_TYPE_P (type)))
9595 retval = gen_rtx_REG (mode, gpr_reg_base);
9596 else
9597 retval = gen_rtx_REG (mode, fpr_reg_base);
9599 return retval;
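/* Illustrative mapping implied by the bases computed above: on the
   64-bit target, argument slot N maps to GPR 26-N and FP register
   32+N, so an unprototyped outgoing "double" in slot 3 produces a
   PARALLEL naming both %fr7 (register 35) and %r23 (register 23).  */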
9602 /* Arguments larger than one word are double word aligned. */
9604 static unsigned int
9605 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9607 bool singleword = (type
9608 ? (integer_zerop (TYPE_SIZE (type))
9609 || !TREE_CONSTANT (TYPE_SIZE (type))
9610 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9611 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9613 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
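/* For example, a 12-byte struct, or a "long long" on the 32-bit
   target, is aligned to MAX_PARM_BOUNDARY; an "int", a zero-sized
   type, or a variable-sized type only requires PARM_BOUNDARY.  */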
9616 /* If this arg would be passed totally in registers or totally on the stack,
9617 then this routine should return zero. */
9619 static int
9620 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9621 tree type, bool named ATTRIBUTE_UNUSED)
9623 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9624 unsigned int max_arg_words = 8;
9625 unsigned int offset = 0;
9627 if (!TARGET_64BIT)
9628 return 0;
9630 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9631 offset = 1;
9633 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9634 /* Arg fits fully into registers. */
9635 return 0;
9636 else if (cum->words + offset >= max_arg_words)
9637 /* Arg fully on the stack. */
9638 return 0;
9639 else
9640 /* Arg is split. */
9641 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
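/* Worked example (64-bit only, UNITS_PER_WORD == 8): with
   cum->words == 6 and a four-word argument, no alignment padding is
   needed since 6 is even; 6 + 4 exceeds the 8 available slots while
   6 is still below 8, so the argument is split and (8 - 6) * 8 == 16
   bytes are passed in registers, with the rest on the stack.  */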
9645 /* A get_unnamed_section callback for switching to the text section.
9647 This function is only used with SOM. Because we don't support
9648 named subspaces, we can only create a new subspace or switch back
9649 to the default text subspace. */
9651 static void
9652 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9654 gcc_assert (TARGET_SOM);
9655 if (TARGET_GAS)
9657 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9659 /* We only want to emit a .nsubspa directive once at the
9660 start of the function. */
9661 cfun->machine->in_nsubspa = 1;
9663 /* Create a new subspace for the text. This provides
9664 better stub placement and one-only functions. */
9665 if (cfun->decl
9666 && DECL_ONE_ONLY (cfun->decl)
9667 && !DECL_WEAK (cfun->decl))
9669 output_section_asm_op ("\t.SPACE $TEXT$\n"
9670 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9671 "ACCESS=44,SORT=24,COMDAT");
9672 return;
9675 else
9677 /* Either there isn't a current function, or the body of the current
9678 function has been completed. So, we are changing to the
9679 text section to output debugging information. Thus, we
9680 need to forget that we are in the text section so that
9681 varasm.c will call us when text_section is selected again. */
9682 gcc_assert (!cfun || !cfun->machine
9683 || cfun->machine->in_nsubspa == 2);
9684 in_section = NULL;
9686 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9687 return;
9689 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9692 /* A get_unnamed_section callback for switching to comdat data
9693 sections. This function is only used with SOM. */
9695 static void
9696 som_output_comdat_data_section_asm_op (const void *data)
9698 in_section = NULL;
9699 output_section_asm_op (data);
9702 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9704 static void
9705 pa_som_asm_init_sections (void)
9707 text_section
9708 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9710 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9711 is not being generated. */
9712 som_readonly_data_section
9713 = get_unnamed_section (0, output_section_asm_op,
9714 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9716 /* When secondary definitions are not supported, SOM makes readonly
9717 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9718 the comdat flag. */
9719 som_one_only_readonly_data_section
9720 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9721 "\t.SPACE $TEXT$\n"
9722 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9723 "ACCESS=0x2c,SORT=16,COMDAT");
9726 /* When secondary definitions are not supported, SOM makes data one-only
9727 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9728 som_one_only_data_section
9729 = get_unnamed_section (SECTION_WRITE,
9730 som_output_comdat_data_section_asm_op,
9731 "\t.SPACE $PRIVATE$\n"
9732 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9733 "ACCESS=31,SORT=24,COMDAT");
9735 if (flag_tm)
9736 som_tm_clone_table_section
9737 = get_unnamed_section (0, output_section_asm_op,
9738 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9740 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9741 which reference data within the $TEXT$ space (for example constant
9742 strings in the $LIT$ subspace).
9744 The assemblers (GAS and HP as) both have problems with handling
9745 the difference of two symbols, which is the other correct way to
9746 reference constant data during PIC code generation.
9748 So, there's no way to reference constant data which is in the
9749 $TEXT$ space during PIC generation. Instead place all constant
9750 data into the $PRIVATE$ subspace (this reduces sharing, but it
9751 works correctly). */
9752 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9754 /* We must not have a reference to an external symbol defined in a
9755 shared library in a readonly section, else the SOM linker will
9756 complain.
9758 So, we force exception information into the data section. */
9759 exception_section = data_section;
9762 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9764 static section *
9765 pa_som_tm_clone_table_section (void)
9767 return som_tm_clone_table_section;
9770 /* On hpux10, the linker will give an error if we have a reference
9771 in the read-only data section to a symbol defined in a shared
9772 library. Therefore, expressions that might require a reloc
9773 cannot be placed in the read-only data section. */
9775 static section *
9776 pa_select_section (tree exp, int reloc,
9777 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9779 if (TREE_CODE (exp) == VAR_DECL
9780 && TREE_READONLY (exp)
9781 && !TREE_THIS_VOLATILE (exp)
9782 && DECL_INITIAL (exp)
9783 && (DECL_INITIAL (exp) == error_mark_node
9784 || TREE_CONSTANT (DECL_INITIAL (exp)))
9785 && !reloc)
9787 if (TARGET_SOM
9788 && DECL_ONE_ONLY (exp)
9789 && !DECL_WEAK (exp))
9790 return som_one_only_readonly_data_section;
9791 else
9792 return readonly_data_section;
9794 else if (CONSTANT_CLASS_P (exp) && !reloc)
9795 return readonly_data_section;
9796 else if (TARGET_SOM
9797 && TREE_CODE (exp) == VAR_DECL
9798 && DECL_ONE_ONLY (exp)
9799 && !DECL_WEAK (exp))
9800 return som_one_only_data_section;
9801 else
9802 return data_section;
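/* For example, "static const int x = 42;" selects the read-only data
   section (or the one-only read-only section for a non-weak one-only
   DECL under SOM), while a read-only object whose initializer needs a
   reloc is forced out of the read-only sections, normally into the
   data section.  */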
9805 static void
9806 pa_globalize_label (FILE *stream, const char *name)
9808 /* We only handle DATA objects here, functions are globalized in
9809 ASM_DECLARE_FUNCTION_NAME. */
9810 if (! FUNCTION_NAME_P (name))
9812 fputs ("\t.EXPORT ", stream);
9813 assemble_name (stream, name);
9814 fputs (",DATA\n", stream);
9818 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9820 static rtx
9821 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9822 int incoming ATTRIBUTE_UNUSED)
9824 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9827 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9829 bool
9830 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9832 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9833 PA64 ABI says that objects larger than 128 bits are returned in memory.
9834 Note, int_size_in_bytes can return -1 if the size of the object is
9835 variable or larger than the maximum value that can be expressed as
9836 a HOST_WIDE_INT. It can also return zero for an empty type. The
9837 simplest way to handle variable and empty types is to pass them in
9838 memory. This avoids problems in defining the boundaries of argument
9839 slots, allocating registers, etc. */
9840 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9841 || int_size_in_bytes (type) <= 0);
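/* For example, a 12-byte struct is returned in memory under the SOM
   ABI (12 > 8) but in registers under the PA64 ABI (12 <= 16);
   zero-sized and variable-sized types always go in memory.  */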
9844 /* Structure to hold declaration and name of external symbols that are
9845 emitted by GCC. We generate a vector of these symbols and output them
9846 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9847 This avoids putting out names that are never really used. */
9849 typedef struct GTY(()) extern_symbol
9851 tree decl;
9852 const char *name;
9853 } extern_symbol;
9855 /* Define gc'd vector type for extern_symbol. */
9857 /* Vector of extern_symbol entries. */
9858 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9860 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9861 /* Mark DECL (name NAME) as an external reference (assembler output
9862 file FILE). This saves the names to output at the end of the file
9863 if actually referenced. */
9865 void
9866 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9868 gcc_assert (file == asm_out_file);
9869 extern_symbol p = {decl, name};
9870 vec_safe_push (extern_symbols, p);
9873 /* Output text required at the end of an assembler file.
9874 This includes deferred plabels and .import directives for
9875 all external symbols that were actually referenced. */
9877 static void
9878 pa_hpux_file_end (void)
9880 unsigned int i;
9881 extern_symbol *p;
9883 if (!NO_DEFERRED_PROFILE_COUNTERS)
9884 output_deferred_profile_counters ();
9886 output_deferred_plabels ();
9888 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9890 tree decl = p->decl;
9892 if (!TREE_ASM_WRITTEN (decl)
9893 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9894 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9897 vec_free (extern_symbols);
9899 #endif
9901 /* Return true if a change from mode FROM to mode TO for a register
9902 in register class RCLASS is invalid. */
9904 bool
9905 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9906 enum reg_class rclass)
9908 if (from == to)
9909 return false;
9911 /* Reject changes to/from complex and vector modes. */
9912 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9913 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9914 return true;
9916 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9917 return false;
9919 /* There is no way to load QImode or HImode values directly from
9920 memory. SImode loads to the FP registers are not zero extended.
9921 On the 64-bit target, this conflicts with the definition of
9922 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9923 with different sizes in the floating-point registers. */
9924 if (MAYBE_FP_REG_CLASS_P (rclass))
9925 return true;
9927 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9928 in specific sets of registers. Thus, we cannot allow changing
9929 to a larger mode when it's larger than a word. */
9930 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9931 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9932 return true;
9934 return false;
9937 /* Returns TRUE if it is a good idea to tie two pseudo registers
9938 when one has mode MODE1 and one has mode MODE2.
9939 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9940 for any hard reg, then this must be FALSE for correct output.
9942 We should return FALSE for QImode and HImode because these modes
9943 are not ok in the floating-point registers. However, this prevents
9944 tying these modes to SImode and DImode in the general registers.
9945 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9946 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9947 in the floating-point registers. */
9949 bool
9950 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9952 /* Don't tie modes in different classes. */
9953 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9954 return false;
9956 return true;
9960 /* Length in units of the trampoline instruction code. */
9962 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9965 /* Output assembler code for a block containing the constant parts
9966 of a trampoline, leaving space for the variable parts.
9968 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9969 and then branches to the specified routine.
9971 This code template is copied from the text segment to a stack
9972 location, patched by pa_trampoline_init to contain valid values,
9973 and then entered as a subroutine.
9975 It is best to keep this as small as possible to avoid having to
9976 flush multiple lines in the cache. */
9978 static void
9979 pa_asm_trampoline_template (FILE *f)
9981 if (!TARGET_64BIT)
9983 fputs ("\tldw 36(%r22),%r21\n", f);
9984 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
9985 if (ASSEMBLER_DIALECT == 0)
9986 fputs ("\tdepi 0,31,2,%r21\n", f);
9987 else
9988 fputs ("\tdepwi 0,31,2,%r21\n", f);
9989 fputs ("\tldw 4(%r21),%r19\n", f);
9990 fputs ("\tldw 0(%r21),%r21\n", f);
9991 if (TARGET_PA_20)
9993 fputs ("\tbve (%r21)\n", f);
9994 fputs ("\tldw 40(%r22),%r29\n", f);
9995 fputs ("\t.word 0\n", f);
9996 fputs ("\t.word 0\n", f);
9998 else
10000 fputs ("\tldsid (%r21),%r1\n", f);
10001 fputs ("\tmtsp %r1,%sr0\n", f);
10002 fputs ("\tbe 0(%sr0,%r21)\n", f);
10003 fputs ("\tldw 40(%r22),%r29\n", f);
10005 fputs ("\t.word 0\n", f);
10006 fputs ("\t.word 0\n", f);
10007 fputs ("\t.word 0\n", f);
10008 fputs ("\t.word 0\n", f);
10010 else
10012 fputs ("\t.dword 0\n", f);
10013 fputs ("\t.dword 0\n", f);
10014 fputs ("\t.dword 0\n", f);
10015 fputs ("\t.dword 0\n", f);
10016 fputs ("\tmfia %r31\n", f);
10017 fputs ("\tldd 24(%r31),%r1\n", f);
10018 fputs ("\tldd 24(%r1),%r27\n", f);
10019 fputs ("\tldd 16(%r1),%r1\n", f);
10020 fputs ("\tbve (%r1)\n", f);
10021 fputs ("\tldd 32(%r31),%r31\n", f);
10022 fputs ("\t.dword 0 ; fptr\n", f);
10023 fputs ("\t.dword 0 ; static link\n", f);
10027 /* Emit RTL insns to initialize the variable parts of a trampoline.
10028 FNADDR is an RTX for the address of the function's pure code.
10029 CXT is an RTX for the static chain value for the function.
10031 Move the function address to the trampoline template at offset 36.
10032 Move the static chain value to the trampoline template at offset 40.
10033 Move the trampoline address to the trampoline template at offset 44.
10034 Move r19 to trampoline template at offset 48. The latter two
10035 words create a plabel for the indirect call to the trampoline.
10037 A similar sequence is used for the 64-bit port but the plabel is
10038 at the beginning of the trampoline.
10040 Finally, the cache entries for the trampoline code are flushed.
10041 This is necessary to ensure that the trampoline instruction sequence
10042 is written to memory prior to any attempts at prefetching the code
10043 sequence. */
10045 static void
10046 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10048 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10049 rtx start_addr = gen_reg_rtx (Pmode);
10050 rtx end_addr = gen_reg_rtx (Pmode);
10051 rtx line_length = gen_reg_rtx (Pmode);
10052 rtx r_tramp, tmp;
10054 emit_block_move (m_tramp, assemble_trampoline_template (),
10055 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10056 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10058 if (!TARGET_64BIT)
10060 tmp = adjust_address (m_tramp, Pmode, 36);
10061 emit_move_insn (tmp, fnaddr);
10062 tmp = adjust_address (m_tramp, Pmode, 40);
10063 emit_move_insn (tmp, chain_value);
10065 /* Create a fat pointer for the trampoline. */
10066 tmp = adjust_address (m_tramp, Pmode, 44);
10067 emit_move_insn (tmp, r_tramp);
10068 tmp = adjust_address (m_tramp, Pmode, 48);
10069 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10071 /* fdc and fic only use registers for the address to flush,
10072 they do not accept integer displacements. We align the
10073 start and end addresses to the beginning of their respective
10074 cache lines to minimize the number of lines flushed. */
10075 emit_insn (gen_andsi3 (start_addr, r_tramp,
10076 GEN_INT (-MIN_CACHELINE_SIZE)));
10077 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10078 TRAMPOLINE_CODE_SIZE-1));
10079 emit_insn (gen_andsi3 (end_addr, tmp,
10080 GEN_INT (-MIN_CACHELINE_SIZE)));
10081 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10082 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10083 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10084 gen_reg_rtx (Pmode),
10085 gen_reg_rtx (Pmode)));
10087 else
10089 tmp = adjust_address (m_tramp, Pmode, 56);
10090 emit_move_insn (tmp, fnaddr);
10091 tmp = adjust_address (m_tramp, Pmode, 64);
10092 emit_move_insn (tmp, chain_value);
10094 /* Create a fat pointer for the trampoline. */
10095 tmp = adjust_address (m_tramp, Pmode, 16);
10096 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10097 r_tramp, 32)));
10098 tmp = adjust_address (m_tramp, Pmode, 24);
10099 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10101 /* fdc and fic only use registers for the address to flush,
10102 they do not accept integer displacements. We align the
10103 start and end addresses to the beginning of their respective
10104 cache lines to minimize the number of lines flushed. */
10105 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10106 emit_insn (gen_anddi3 (start_addr, tmp,
10107 GEN_INT (-MIN_CACHELINE_SIZE)));
10108 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10109 TRAMPOLINE_CODE_SIZE - 1));
10110 emit_insn (gen_anddi3 (end_addr, tmp,
10111 GEN_INT (-MIN_CACHELINE_SIZE)));
10112 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10113 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10114 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10115 gen_reg_rtx (Pmode),
10116 gen_reg_rtx (Pmode)));
10119 #ifdef HAVE_ENABLE_EXECUTE_STACK
10120 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10121 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10122 #endif
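/* Example of the flush-range arithmetic above, assuming a 32-byte
   MIN_CACHELINE_SIZE: a 32-bit PA 2.0 trampoline at 0x7b0012c8
   (TRAMPOLINE_CODE_SIZE == 32) gives start_addr == 0x7b0012c0 and
   end_addr == (0x7b0012c8 + 31) & -32 == 0x7b0012e0, so exactly two
   cache lines are flushed by the fdc/fic sequences.  */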
10125 /* Perform any machine-specific adjustment in the address of the trampoline.
10126 ADDR contains the address that was passed to pa_trampoline_init.
10127 Adjust the trampoline address to point to the plabel at offset 44. */
10129 static rtx
10130 pa_trampoline_adjust_address (rtx addr)
10132 if (!TARGET_64BIT)
10133 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10134 return addr;
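/* The constant 46 above is the plabel offset (44) plus 2: on the
   32-bit port the low-order two bits of a function pointer flag a
   plabel, which is why the trampoline template tests and clears
   those bits with "bb" and "depi"/"depwi" before branching.  */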
10137 static rtx
10138 pa_delegitimize_address (rtx orig_x)
10140 rtx x = delegitimize_mem_from_attrs (orig_x);
10142 if (GET_CODE (x) == LO_SUM
10143 && GET_CODE (XEXP (x, 1)) == UNSPEC
10144 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10145 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10146 return x;
10149 static rtx
10150 pa_internal_arg_pointer (void)
10152 /* The argument pointer and the hard frame pointer are the same in
10153 the 32-bit runtime, so we don't need a copy. */
10154 if (TARGET_64BIT)
10155 return copy_to_reg (virtual_incoming_args_rtx);
10156 else
10157 return virtual_incoming_args_rtx;
10160 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10161 Frame pointer elimination is automatically handled. */
10163 static bool
10164 pa_can_eliminate (const int from, const int to)
10166 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10167 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10168 return false;
10170 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10171 ? ! frame_pointer_needed
10172 : true);
10175 /* Define the offset between two registers, FROM to be eliminated and its
10176 replacement TO, at the start of a routine. */
10177 HOST_WIDE_INT
10178 pa_initial_elimination_offset (int from, int to)
10180 HOST_WIDE_INT offset;
10182 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10183 && to == STACK_POINTER_REGNUM)
10184 offset = -pa_compute_frame_size (get_frame_size (), 0);
10185 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10186 offset = 0;
10187 else
10188 gcc_unreachable ();
10190 return offset;
10193 static void
10194 pa_conditional_register_usage (void)
10196 int i;
10198 if (!TARGET_64BIT && !TARGET_PA_11)
10200 for (i = 56; i <= FP_REG_LAST; i++)
10201 fixed_regs[i] = call_used_regs[i] = 1;
10202 for (i = 33; i < 56; i += 2)
10203 fixed_regs[i] = call_used_regs[i] = 1;
10205 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10207 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10208 fixed_regs[i] = call_used_regs[i] = 1;
10210 if (flag_pic)
10211 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10214 /* Target hook for c_mode_for_suffix. */
10216 static enum machine_mode
10217 pa_c_mode_for_suffix (char suffix)
10219 if (HPUX_LONG_DOUBLE_LIBRARY)
10221 if (suffix == 'q')
10222 return TFmode;
10225 return VOIDmode;
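/* For example, the constant "1.0q" is given TFmode (128-bit) when an
   HP-UX long-double library is available; any other suffix, or any
   other configuration, yields VOIDmode (no target-specific mode).  */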
10228 /* Target hook for function_section. */
10230 static section *
10231 pa_function_section (tree decl, enum node_frequency freq,
10232 bool startup, bool exit)
10234 /* Put functions in the text section if the target doesn't have named sections. */
10235 if (!targetm_common.have_named_sections)
10236 return text_section;
10238 /* Force nested functions into the same section as the containing
10239 function. */
10240 if (decl
10241 && DECL_SECTION_NAME (decl) == NULL
10242 && DECL_CONTEXT (decl) != NULL_TREE
10243 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10244 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10245 return function_section (DECL_CONTEXT (decl));
10247 /* Otherwise, use the default function section. */
10248 return default_function_section (decl, freq, startup, exit);
10251 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10253 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10254 that need more than three instructions to load prior to reload. This
10255 limit is somewhat arbitrary. It takes three instructions to load a
10256 CONST_INT from memory, but two of them are memory accesses. It may be better
10257 to increase the allowed range for CONST_INTS. We may also be able
10258 to handle CONST_DOUBLES. */
10260 static bool
10261 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10263 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10264 return false;
10266 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10267 return false;
10269 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10270 legitimate constants. The other variants can't be handled by
10271 the move patterns after reload starts. */
10272 if (tls_referenced_p (x))
10273 return false;
10275 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10276 return false;
10278 if (TARGET_64BIT
10279 && HOST_BITS_PER_WIDE_INT > 32
10280 && GET_CODE (x) == CONST_INT
10281 && !reload_in_progress
10282 && !reload_completed
10283 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10284 && !pa_cint_ok_for_move (INTVAL (x)))
10285 return false;
10287 if (function_label_operand (x, mode))
10288 return false;
10290 return true;
10293 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10295 static unsigned int
10296 pa_section_type_flags (tree decl, const char *name, int reloc)
10298 unsigned int flags;
10300 flags = default_section_type_flags (decl, name, reloc);
10302 /* Function labels are placed in the constant pool. This can
10303 cause a section conflict if decls are put in ".data.rel.ro"
10304 or ".data.rel.ro.local" using the __attribute__ construct. */
10305 if (strcmp (name, ".data.rel.ro") == 0
10306 || strcmp (name, ".data.rel.ro.local") == 0)
10307 flags |= SECTION_WRITE | SECTION_RELRO;
10309 return flags;
10312 /* pa_legitimate_address_p recognizes an RTL expression that is a
10313 valid memory address for an instruction. The MODE argument is the
10314 machine mode for the MEM expression that wants to use this address.
10316 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10317 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10318 available with floating point loads and stores, and integer loads.
10319 We get better code by allowing indexed addresses in the initial
10320 RTL generation.
10322 The acceptance of indexed addresses as legitimate implies that we
10323 must provide patterns for doing indexed integer stores, or the move
10324 expanders must force the address of an indexed store to a register.
10325 We have adopted the latter approach.
10327 Another function of pa_legitimate_address_p is to ensure that
10328 the base register is a valid pointer for indexed instructions.
10329 On targets that have non-equivalent space registers, we have to
10330 know at the time of assembler output which register in a REG+REG
10331 pair is the base register. The REG_POINTER flag is sometimes lost
10332 in reload and the following passes, so it can't be relied on during
10333 code generation. Thus, we either have to canonicalize the order
10334 of the registers in REG+REG indexed addresses, or treat REG+REG
10335 addresses separately and provide patterns for both permutations.
10337 The latter approach requires several hundred additional lines of
10338 code in pa.md. The downside to canonicalizing is that a PLUS
10339 in the wrong order can't combine to form a scaled indexed
10340 memory operand. As we won't need to canonicalize the operands if
10341 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10343 We initially break out scaled indexed addresses in canonical order
10344 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10345 scaled indexed addresses during RTL generation. However, fold_rtx
10346 has its own opinion on how the operands of a PLUS should be ordered.
10347 If one of the operands is equivalent to a constant, it will make
10348 that operand the second operand. As the base register is likely to
10349 be equivalent to a SYMBOL_REF, we have made it the second operand.
10351 pa_legitimate_address_p accepts REG+REG as legitimate when the
10352 operands are in the order INDEX+BASE on targets with non-equivalent
10353 space registers, and in any order on targets with equivalent space
10354 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10356 We treat a SYMBOL_REF as legitimate if it is part of the current
10357 function's constant-pool, because such addresses can actually be
10358 output as REG+SMALLINT. */
10360 static bool
10361 pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
10363 if ((REG_P (x)
10364 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10365 : REG_OK_FOR_BASE_P (x)))
10366 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10367 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10368 && REG_P (XEXP (x, 0))
10369 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10370 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10371 return true;
10373 if (GET_CODE (x) == PLUS)
10375 rtx base, index;
10377 /* For REG+REG, the base register should be in XEXP (x, 1),
10378 so check it first. */
10379 if (REG_P (XEXP (x, 1))
10380 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10381 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10382 base = XEXP (x, 1), index = XEXP (x, 0);
10383 else if (REG_P (XEXP (x, 0))
10384 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10385 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10386 base = XEXP (x, 0), index = XEXP (x, 1);
10387 else
10388 return false;
10390 if (GET_CODE (index) == CONST_INT)
10392 if (INT_5_BITS (index))
10393 return true;
10395 /* When INT14_OK_STRICT is false, a secondary reload is needed
10396 to adjust the displacement of SImode and DImode floating point
10397 instructions but this may fail when the register also needs
10398 reloading. So, we return false when STRICT is true. We
10399 also reject long displacements for float mode addresses since
10400 the majority of accesses will use floating point instructions
10401 that don't support 14-bit offsets. */
10402 if (!INT14_OK_STRICT
10403 && (strict || !(reload_in_progress || reload_completed))
10404 && mode != QImode
10405 && mode != HImode)
10406 return false;
10408 return base14_operand (index, mode);
10411 if (!TARGET_DISABLE_INDEXING
10412 /* Only accept the "canonical" INDEX+BASE operand order
10413 on targets with non-equivalent space registers. */
10414 && (TARGET_NO_SPACE_REGS
10415 ? REG_P (index)
10416 : (base == XEXP (x, 1) && REG_P (index)
10417 && (reload_completed
10418 || (reload_in_progress && HARD_REGISTER_P (base))
10419 || REG_POINTER (base))
10420 && (reload_completed
10421 || (reload_in_progress && HARD_REGISTER_P (index))
10422 || !REG_POINTER (index))))
10423 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10424 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10425 : REG_OK_FOR_INDEX_P (index))
10426 && borx_reg_operand (base, Pmode)
10427 && borx_reg_operand (index, Pmode))
10428 return true;
10430 if (!TARGET_DISABLE_INDEXING
10431 && GET_CODE (index) == MULT
10432 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10433 && REG_P (XEXP (index, 0))
10434 && GET_MODE (XEXP (index, 0)) == Pmode
10435 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10436 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10437 && GET_CODE (XEXP (index, 1)) == CONST_INT
10438 && INTVAL (XEXP (index, 1))
10439 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10440 && borx_reg_operand (base, Pmode))
10441 return true;
10443 return false;
10446 if (GET_CODE (x) == LO_SUM)
10448 rtx y = XEXP (x, 0);
10450 if (GET_CODE (y) == SUBREG)
10451 y = SUBREG_REG (y);
10453 if (REG_P (y)
10454 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10455 : REG_OK_FOR_BASE_P (y)))
10457 /* Needed for -fPIC */
10458 if (mode == Pmode
10459 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10460 return true;
10462 if (!INT14_OK_STRICT
10463 && (strict || !(reload_in_progress || reload_completed))
10464 && mode != QImode
10465 && mode != HImode)
10466 return false;
10468 if (CONSTANT_P (XEXP (x, 1)))
10469 return true;
10471 return false;
10474 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10475 return true;
10477 return false;
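/* Illustrative addresses accepted above: (reg R); (plus (reg B)
   (const_int 10)) for any mode via INT_5_BITS; (plus (reg I) (reg B))
   with B flagged as REG_POINTER on targets with non-equivalent space
   registers; and the scaled form (plus (mult (reg I) (const_int 4))
   (reg B)) for SImode accesses when indexing is enabled.  */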
10480 /* Look for machine dependent ways to make the invalid address AD a
10481 valid address.
10483 For the PA, transform:
10485 memory(X + <large int>)
10487 into:
10489 if (<large int> & mask) >= 16
10490 Y = (<large int> & ~mask) + mask + 1 Round up.
10491 else
10492 Y = (<large int> & ~mask) Round down.
10493 Z = X + Y
10494 memory (Z + (<large int> - Y));
10496 This makes reload inheritance and reload_cse work better since Z
10497 can be reused.
10499 There may be more opportunities to improve code with this hook. */
10501 rtx
10502 pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
10503 int opnum, int type,
10504 int ind_levels ATTRIBUTE_UNUSED)
10506 long offset, newoffset, mask;
10507 rtx new_rtx, temp = NULL_RTX;
10509 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10510 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10512 if (optimize && GET_CODE (ad) == PLUS)
10513 temp = simplify_binary_operation (PLUS, Pmode,
10514 XEXP (ad, 0), XEXP (ad, 1));
10516 new_rtx = temp ? temp : ad;
10518 if (optimize
10519 && GET_CODE (new_rtx) == PLUS
10520 && GET_CODE (XEXP (new_rtx, 0)) == REG
10521 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10523 offset = INTVAL (XEXP ((new_rtx), 1));
10525 /* Choose rounding direction. Round up if we are >= halfway. */
10526 if ((offset & mask) >= ((mask + 1) / 2))
10527 newoffset = (offset & ~mask) + mask + 1;
10528 else
10529 newoffset = offset & ~mask;
10531 /* Ensure that long displacements are aligned. */
10532 if (mask == 0x3fff
10533 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10534 || (TARGET_64BIT && (mode) == DImode)))
10535 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10537 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10539 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10540 GEN_INT (newoffset));
10541 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10542 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10543 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10544 opnum, (enum reload_type) type);
10545 return ad;
10549 return NULL_RTX;
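/* Worked example: for an SFmode access (mask == 0x1f when
   INT14_OK_STRICT is false) to (plus (reg R) (const_int 100)),
   100 & 0x1f == 4 is below the halfway point of 16, so newoffset
   rounds down to 96 and the address becomes
   (plus (plus (reg R) (const_int 96)) (const_int 4)); the inner sum
   is pushed to reload, where it can be inherited by nearby
   references to the same base.  */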
10552 /* Output address vector. */
10554 void
10555 pa_output_addr_vec (rtx lab, rtx body)
10557 int idx, vlen = XVECLEN (body, 0);
10559 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10560 if (TARGET_GAS)
10561 fputs ("\t.begin_brtab\n", asm_out_file);
10562 for (idx = 0; idx < vlen; idx++)
10564 ASM_OUTPUT_ADDR_VEC_ELT
10565 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10567 if (TARGET_GAS)
10568 fputs ("\t.end_brtab\n", asm_out_file);
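/* Sketch of the output for a three-entry vector under GAS (the L$
   label numbering is illustrative):

       L$0042:
               .begin_brtab
               .word L$0043
               .word L$0044
               .word L$0045
               .end_brtab  */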
10571 /* Output address difference vector. */
10573 void
10574 pa_output_addr_diff_vec (rtx lab, rtx body)
10576 rtx base = XEXP (XEXP (body, 0), 0);
10577 int idx, vlen = XVECLEN (body, 1);
10579 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10580 if (TARGET_GAS)
10581 fputs ("\t.begin_brtab\n", asm_out_file);
10582 for (idx = 0; idx < vlen; idx++)
10584 ASM_OUTPUT_ADDR_DIFF_ELT
10585 (asm_out_file,
10586 body,
10587 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10588 CODE_LABEL_NUMBER (base));
10590 if (TARGET_GAS)
10591 fputs ("\t.end_brtab\n", asm_out_file);
10594 #include "gt-pa.h"