/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2021 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "common/common-target.h"
#include "langhooks.h"

/* This file should be included last.  */
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  machine_mode store_mode;
  machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
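
/* Informal note: the bypass only applies when the value produced by OUT_INSN
   and the value stored by IN_INSN have the same width, e.g. a 32-bit FP
   result feeding a 32-bit FP store; a result and a store of different sizes
   do not qualify.  */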
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
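
/* Informal note: DO_FRAME_NOTES enables the frame-related notes that the
   prologue/epilogue code attaches to insns so that DWARF2 call-frame
   information can be emitted; a defined INCOMING_RETURN_ADDR_RTX is the
   usual prerequisite for that.  */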
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static inline rtx force_mode (machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
			     rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_cpymem_length (rtx_insn *);
static int compute_clrmem_length (rtx_insn *);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *) ATTRIBUTE_UNUSED;
static void pa_linux_output_function_prologue (FILE *) ATTRIBUTE_UNUSED;
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *);
static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (scalar_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
static void pa_file_end (void);
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t,
				  const function_arg_info &);
static int pa_arg_partial_bytes (cumulative_args_t, const function_arg_info &);
static void pa_function_arg_advance (cumulative_args_t,
				     const function_arg_info &);
static rtx pa_function_arg (cumulative_args_t, const function_arg_info &);
static pad_direction pa_function_arg_padding (machine_mode, const_tree);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					machine_mode,
					secondary_reload_info *);
static bool pa_secondary_memory_needed (machine_mode,
					reg_class_t, reg_class_t);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
					      machine_mode, int *,
					      const_tree, int);
static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (machine_mode, rtx);
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);
static bool pa_callee_copies (cumulative_args_t, const function_arg_info &);
static unsigned int pa_hard_regno_nregs (unsigned int, machine_mode);
static bool pa_hard_regno_mode_ok (unsigned int, machine_mode);
static bool pa_modes_tieable_p (machine_mode, machine_mode);
static bool pa_can_change_mode_class (machine_mode, machine_mode, reg_class_t);
static HOST_WIDE_INT pa_starting_frame_offset (void);
static section * pa_elf_select_rtx_section (machine_mode, rtx,
					    unsigned HOST_WIDE_INT) ATTRIBUTE_UNUSED;
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END pa_file_end

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES pa_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING pa_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED pa_secondary_memory_needed

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS pa_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK pa_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P pa_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS pa_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings

#undef TARGET_STARTING_FRAME_OFFSET
#define TARGET_STARTING_FRAME_OFFSET pa_starting_frame_offset

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of %<-mfixed-range%> must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
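
/* Example (illustrative): -mfixed-range=fr4-fr31 marks fr4 through fr31 as
   fixed so the compiler will not allocate them; several ranges may be given
   separated by commas, e.g. -mfixed-range=fr4-fr15,fr20-fr31.  */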
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "%<-g%> is only supported when using GAS on this processor");
      warning (0, "%<-g%> option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "%<-freorder-blocks-and-partition%> does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif

  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = const_double_from_real_value (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      break;
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */

int
pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}
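
/* Illustrative examples: 5 and -200 fit in 14 bits and can be loaded with
   ldo; 0x12345000 has its low 11 bits clear and can be loaded with ldil;
   0x00ff0000 is a contiguous bit field and can be built with zdepi.  */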
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */

int
pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
{
  unsigned HOST_WIDE_INT x;

  x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
  return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
}
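
/* Worked example (illustrative): for ival = 0x12345000 the masked value is
   zero, so ldil qualifies; for ival = 0x12345001 the low 11 bits are nonzero
   and the test fails; a 64-bit ival of 0xffffffff80000000 masks to
   (unsigned HOST_WIDE_INT) -1 << 31 and also qualifies, since it
   sign-extends cleanly from 32 bits.  */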
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */

int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
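
/* Examples (illustrative): x = 0x1e fits the 5-bit immediate directly and
   passes; x = 0x00ff0000, a contiguous run of ones such as sign-extending a
   negative 5-bit immediate into an 8-bit field produces, also passes;
   x = 0x00ff00f0 fails because two separate bit groups cannot come from a
   single zdepi.  */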
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */

int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
/* True iff depi can be used to compute (reg | MASK).  */

int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
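
/* Illustrative check: mask & -mask isolates the lowest set bit, and adding it
   collapses a contiguous run of ones into a single carry bit, so the final
   power-of-two test accepts masks such as 0x0ff0 and rejects ones with
   separated groups such as 0x0f0f, which a single depi cannot produce.  */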
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx_insn *insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}

      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx_insn *insn;
      rtx tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, arg, Pmode);
  return ret;
}
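
/* Informal note: __tls_get_addr is the standard runtime entry point used by
   the dynamic TLS models; the library call above returns the address of the
   requested thread-local storage block in RET.  */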
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, tmp, t1, t2, tp;
  rtx_insn *insn;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Helper for hppa_legitimize_address.  Given X, return true if it
   is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.

   This respectively represent canonical shift-add rtxs or scaled
   memory addresses.  */
static bool
mem_shadd_or_shadd_rtx_p (rtx x)
{
  return ((GET_CODE (x) == ASHIFT
	   || GET_CODE (x) == MULT)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && ((GET_CODE (x) == ASHIFT
	       && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
	      || (GET_CODE (x) == MULT
		  && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
}
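
/* For instance, (ashift (reg) (const_int 2)) and (mult (reg) (const_int 4))
   both describe the scale-by-4 step of a sh2add instruction or of a scaled
   indexed address; accepting both forms lets callers canonicalize either
   one.  (Illustrative note.)  */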
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Note that the addresses passed into hppa_legitimize_address always
   come from a MEM, so we only have to match the MULT form on incoming
   addresses.  But to be future proof we also match the ASHIFT form.

   However, this routine always places those shift-add sequences into
   registers, so we have to generate the ASHIFT form as our output.

   Put X and Z into registers.  Then put the entire expression into
   a register.  */

static rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
  rtx orig = x;
  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }
  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine cannot
	 handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }
  /* Handle (plus (mult (a) (mem_shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      /* If we were given a MULT, we must fix the constant
	 as we're going to create the ASHIFT form.  */
      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
      if (GET_CODE (XEXP (x, 0)) == MULT)
	shift_val = exact_log2 (shift_val);

      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_ASHIFT (Pmode, reg2,
						      GEN_INT (shift_val)),
				      reg1));
    }
  /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
      && (mode == SFmode || mode == DFmode))
    {
      int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));

      /* If we were given a MULT, we must fix the constant
	 as we're going to create the ASHIFT form.  */
      if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
	shift_val = exact_log2 (shift_val);

      /* Try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_ASHIFT (Pmode,
					      XEXP (XEXP (XEXP (x, 0), 0), 0),
					      GEN_INT (shift_val)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));
	  val /= (1 << shift_val);

	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_ASHIFT (Pmode, reg1,
						    GEN_INT (shift_val)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode,
			    gen_rtx_PLUS (Pmode,
					  gen_rtx_ASHIFT (Pmode, reg2,
							  GEN_INT (shift_val)),
					  reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_ASHIFT (Pmode, reg1,
						      GEN_INT (shift_val)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floatint point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (mem_shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

	      /* If we were given a MULT, we must fix the constant
		 as we're going to create the ASHIFT form.  */
	      if (GET_CODE (XEXP (x, 0)) == MULT)
		shift_val = exact_log2 (shift_val);

	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_ASHIFT (Pmode,
							      reg2,
							      GEN_INT (shift_val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
	    {
	      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

	      /* If we were given a MULT, we must fix the constant
		 as we're going to create the ASHIFT form.  */
	      if (GET_CODE (XEXP (x, 0)) == MULT)
		shift_val = exact_log2 (shift_val);

	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_ASHIFT (Pmode, regx2,
							 GEN_INT (shift_val)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
/* Return true if X represents a (possibly non-canonical) shNadd pattern.
   The machine mode of X is known to be SImode or DImode.  */

static bool
hppa_rtx_costs_shadd_p (rtx x)
{
  if (GET_CODE (x) != PLUS
      || !REG_P (XEXP (x, 1)))
    return false;
  rtx op0 = XEXP (x, 0);
  if (GET_CODE (op0) == ASHIFT
      && CONST_INT_P (XEXP (op0, 1))
      && REG_P (XEXP (op0, 0)))
    {
      unsigned HOST_WIDE_INT x = UINTVAL (XEXP (op0, 1));
      return x == 1 || x == 2 || x == 3;
    }
  if (GET_CODE (op0) == MULT
      && CONST_INT_P (XEXP (op0, 1))
      && REG_P (XEXP (op0, 0)))
    {
      unsigned HOST_WIDE_INT x = UINTVAL (XEXP (op0, 1));
      return x == 2 || x == 4 || x == 8;
    }
  return false;
}
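
/* E.g. (plus (ashift (reg) (const_int 2)) (reg)) and
   (plus (mult (reg) (const_int 4)) (reg)) are both recognized here as the
   sh2add pattern, so the cost code below can charge them as a single
   instruction.  (Illustrative note.)  */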
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      if (outer_code == SET)
	*total = COSTS_N_INSNS (1);
      else if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	}
      else if (mode == DImode)
	{
	  if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	    *total = COSTS_N_INSNS (32);
	  else
	    *total = COSTS_N_INSNS (80);
	}
      else
	{
	  if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	    *total = COSTS_N_INSNS (8);
	  else
	    *total = COSTS_N_INSNS (20);
	}
      return REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1));

    case DIV:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return false;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      if (mode == DImode)
	*total = COSTS_N_INSNS (240);
      else
	*total = COSTS_N_INSNS (60);
      return REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1));

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (mode == DImode)
	{
	  if (TARGET_64BIT)
	    {
	      *total = COSTS_N_INSNS (1);
	      /* Handle shladd,l instructions.  */
	      if (hppa_rtx_costs_shadd_p (x))
		return true;
	    }
	  else
	    *total = COSTS_N_INSNS (2);
	}
      else
	{
	  *total = COSTS_N_INSNS (1);
	  /* Handle shNadd instructions.  */
	  if (hppa_rtx_costs_shadd_p (x))
	    return true;
	}
      return REG_P (XEXP (x, 0))
	     && (REG_P (XEXP (x, 1))
		 || CONST_INT_P (XEXP (x, 1)));

    case ASHIFT:
      if (mode == DImode)
	{
	  if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	    {
	      if (TARGET_64BIT)
		*total = COSTS_N_INSNS (1);
	      else
		*total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else if (TARGET_64BIT)
	    *total = COSTS_N_INSNS (3);
	  else if (speed)
	    *total = COSTS_N_INSNS (13);
	  else
	    *total = COSTS_N_INSNS (18);
	}
      else if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	{
	  if (TARGET_64BIT)
	    *total = COSTS_N_INSNS (2);
	  else
	    *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (TARGET_64BIT)
	*total = COSTS_N_INSNS (4);
      else
	*total = COSTS_N_INSNS (2);
      return REG_P (XEXP (x, 0))
	     && (REG_P (XEXP (x, 1))
		 || CONST_INT_P (XEXP (x, 1)));

    case ASHIFTRT:
      if (mode == DImode)
	{
	  if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	    {
	      if (TARGET_64BIT)
		*total = COSTS_N_INSNS (1);
	      else
		*total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else if (TARGET_64BIT)
	    *total = COSTS_N_INSNS (3);
	  else if (speed)
	    *total = COSTS_N_INSNS (14);
	  else
	    *total = COSTS_N_INSNS (19);
	}
      else if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	{
	  if (TARGET_64BIT)
	    *total = COSTS_N_INSNS (2);
	  else
	    *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (TARGET_64BIT)
	*total = COSTS_N_INSNS (4);
      else
	*total = COSTS_N_INSNS (2);
      return REG_P (XEXP (x, 0))
	     && (REG_P (XEXP (x, 1))
		 || CONST_INT_P (XEXP (x, 1)));

    case LSHIFTRT:
      if (mode == DImode)
	{
	  if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	    {
	      if (TARGET_64BIT)
		*total = COSTS_N_INSNS (1);
	      else
		*total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else if (TARGET_64BIT)
	    *total = COSTS_N_INSNS (2);
	  else if (speed)
	    *total = COSTS_N_INSNS (12);
	  else
	    *total = COSTS_N_INSNS (15);
	}
      else if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (TARGET_64BIT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (2);
      return REG_P (XEXP (x, 0))
	     && (REG_P (XEXP (x, 1))
		 || CONST_INT_P (XEXP (x, 1)));

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  rtx operand0 = operands[0];
  rtx operand1 = operands[1];
  rtx tem;
  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we loose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases, and reloads for other unsupported
     memory operands.  */
  if (scratch_reg
      && FP_REG_P (operand0)
      && (MEM_P (operand1)
	  || (GET_CODE (operand1) == SUBREG
	      && MEM_P (XEXP (operand1, 0)))))
    {
      rtx op1 = operand1;

      if (GET_CODE (op1) == SUBREG)
	op1 = XEXP (op1, 0);

      if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
	{
	  if (!(TARGET_PA_20
		&& !TARGET_ELF32
		&& INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
	      && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
	    {
	      /* SCRATCH_REG will hold an address and maybe the actual data.
		 We want it in WORD_MODE regardless of what mode it was
		 originally given to us.  */
	      scratch_reg = force_mode (word_mode, scratch_reg);

	      /* D might not fit in 14 bits either; for such cases load D
		 into scratch reg.  */
	      if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
		{
		  emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
		  emit_move_insn (scratch_reg,
				  gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
						  Pmode,
						  XEXP (XEXP (op1, 0), 0),
						  scratch_reg));
		}
	      else
		emit_move_insn (scratch_reg, XEXP (op1, 0));
	      op1 = replace_equiv_address (op1, scratch_reg);
	    }
	}
      else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
	       || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
	       || IS_INDEX_ADDR_P (XEXP (op1, 0)))
	{
	  /* Load memory address into SCRATCH_REG.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);
	  emit_move_insn (scratch_reg, XEXP (op1, 0));
	  op1 = replace_equiv_address (op1, scratch_reg);
	}
      emit_insn (gen_rtx_SET (operand0, op1));
      return 1;
    }
  else if (scratch_reg
	   && FP_REG_P (operand1)
	   && (MEM_P (operand0)
	       || (GET_CODE (operand0) == SUBREG
		   && MEM_P (XEXP (operand0, 0)))))
    {
      rtx op0 = operand0;

      if (GET_CODE (op0) == SUBREG)
	op0 = XEXP (op0, 0);

      if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
	{
	  if (!(TARGET_PA_20
		&& !TARGET_ELF32
		&& INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
	      && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
	    {
	      /* SCRATCH_REG will hold an address and maybe the actual data.
		 We want it in WORD_MODE regardless of what mode it was
		 originally given to us.  */
	      scratch_reg = force_mode (word_mode, scratch_reg);

	      /* D might not fit in 14 bits either; for such cases load D
		 into scratch reg.  */
	      if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
		{
		  emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
		  emit_move_insn (scratch_reg,
				  gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
						  Pmode,
						  XEXP (XEXP (op0, 0), 0),
						  scratch_reg));
		}
	      else
		emit_move_insn (scratch_reg, XEXP (op0, 0));
	      op0 = replace_equiv_address (op0, scratch_reg);
	    }
	}
      else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
	       || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
	       || IS_INDEX_ADDR_P (XEXP (op0, 0)))
	{
	  /* Load memory address into SCRATCH_REG.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);
	  emit_move_insn (scratch_reg, XEXP (op0, 0));
	  op0 = replace_equiv_address (op0, scratch_reg);
	}
      emit_insn (gen_rtx_SET (op0, operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.  For the most part,
     this is only necessary for SImode and DImode.

     Use scratch_reg to hold the address of the memory location.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && FP_REG_P (operand0))
    {
      rtx const_mem, xoperands[2];

      if (operand1 == CONST0_RTX (mode))
	{
	  emit_insn (gen_rtx_SET (operand0, operand1));
	  return 1;
	}

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      pa_emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
							     0),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
2034 /* Handle the most common case: storing into a register. */
2035 if (register_operand (operand0
, mode
))
2037 /* Legitimize TLS symbol references. This happens for references
2038 that aren't a legitimate constant. */
2039 if (PA_SYMBOL_REF_TLS_P (operand1
))
2040 operand1
= legitimize_tls_address (operand1
);
2042 if (register_operand (operand1
, mode
)
2043 || (GET_CODE (operand1
) == CONST_INT
2044 && pa_cint_ok_for_move (UINTVAL (operand1
)))
2045 || (operand1
== CONST0_RTX (mode
))
2046 || (GET_CODE (operand1
) == HIGH
2047 && !symbolic_operand (XEXP (operand1
, 0), VOIDmode
))
2048 /* Only `general_operands' can come here, so MEM is ok. */
2049 || GET_CODE (operand1
) == MEM
)
2051 /* Various sets are created during RTL generation which don't
2052 have the REG_POINTER flag correctly set. After the CSE pass,
2053 instruction recognition can fail if we don't consistently
2054 set this flag when performing register copies. This should
2055 also improve the opportunities for creating insns that use
2056 unscaled indexing. */
2057 if (REG_P (operand0
) && REG_P (operand1
))
2059 if (REG_POINTER (operand1
)
2060 && !REG_POINTER (operand0
)
2061 && !HARD_REGISTER_P (operand0
))
2062 copy_reg_pointer (operand0
, operand1
);
2065 /* When MEMs are broken out, the REG_POINTER flag doesn't
2066 get set. In some cases, we can set the REG_POINTER flag
2067 from the declaration for the MEM. */
2068 if (REG_P (operand0
)
2069 && GET_CODE (operand1
) == MEM
2070 && !REG_POINTER (operand0
))
2072 tree decl
= MEM_EXPR (operand1
);
2074 /* Set the register pointer flag and register alignment
2075 if the declaration for this memory reference is a
2081 /* If this is a COMPONENT_REF, use the FIELD_DECL from
2083 if (TREE_CODE (decl
) == COMPONENT_REF
)
2084 decl
= TREE_OPERAND (decl
, 1);
2086 type
= TREE_TYPE (decl
);
2087 type
= strip_array_types (type
);
2089 if (POINTER_TYPE_P (type
))
2090 mark_reg_pointer (operand0
, BITS_PER_UNIT
);
2094 emit_insn (gen_rtx_SET (operand0
, operand1
));
2098 else if (GET_CODE (operand0
) == MEM
)
2100 if (mode
== DFmode
&& operand1
== CONST0_RTX (mode
)
2101 && !(reload_in_progress
|| reload_completed
))
2103 rtx temp
= gen_reg_rtx (DFmode
);
2105 emit_insn (gen_rtx_SET (temp
, operand1
));
2106 emit_insn (gen_rtx_SET (operand0
, temp
));
2109 if (register_operand (operand1
, mode
) || operand1
== CONST0_RTX (mode
))
2111 /* Run this case quickly. */
2112 emit_insn (gen_rtx_SET (operand0
, operand1
));
2115 if (! (reload_in_progress
|| reload_completed
))
2117 operands
[0] = validize_mem (operand0
);
2118 operands
[1] = operand1
= force_reg (mode
, operand1
);
2122 /* Simplify the source if we need to.
2123 Note we do have to handle function labels here, even though we do
2124 not consider them legitimate constants. Loop optimizations can
2125 call the emit_move_xxx with one as a source. */
2126 if ((GET_CODE (operand1
) != HIGH
&& immediate_operand (operand1
, mode
))
2127 || (GET_CODE (operand1
) == HIGH
2128 && symbolic_operand (XEXP (operand1
, 0), mode
))
2129 || function_label_operand (operand1
, VOIDmode
)
2130 || tls_referenced_p (operand1
))
2134 if (GET_CODE (operand1
) == HIGH
)
2137 operand1
= XEXP (operand1
, 0);
2139 if (symbolic_operand (operand1
, mode
))
2141 /* Argh. The assembler and linker can't handle arithmetic
2144 So we force the plabel into memory, load operand0 from
2145 the memory location, then add in the constant part. */
2146 if ((GET_CODE (operand1
) == CONST
2147 && GET_CODE (XEXP (operand1
, 0)) == PLUS
2148 && function_label_operand (XEXP (XEXP (operand1
, 0), 0),
2150 || function_label_operand (operand1
, VOIDmode
))
2152 rtx temp
, const_part
;
2154 /* Figure out what (if any) scratch register to use. */
2155 if (reload_in_progress
|| reload_completed
)
2157 scratch_reg
= scratch_reg
? scratch_reg
: operand0
;
2158 /* SCRATCH_REG will hold an address and maybe the actual
2159 data. We want it in WORD_MODE regardless of what mode it
2160 was originally given to us. */
2161 scratch_reg
= force_mode (word_mode
, scratch_reg
);
2164 scratch_reg
= gen_reg_rtx (Pmode
);
2166 if (GET_CODE (operand1
) == CONST
)
2168 /* Save away the constant part of the expression. */
2169 const_part
= XEXP (XEXP (operand1
, 0), 1);
2170 gcc_assert (GET_CODE (const_part
) == CONST_INT
);
2172 /* Force the function label into memory. */
2173 temp
= force_const_mem (mode
, XEXP (XEXP (operand1
, 0), 0));
2177 /* No constant part. */
2178 const_part
= NULL_RTX
;
2180 /* Force the function label into memory. */
2181 temp
= force_const_mem (mode
, operand1
);
2185 /* Get the address of the memory location. PIC-ify it if
2187 temp
= XEXP (temp
, 0);
2189 temp
= legitimize_pic_address (temp
, mode
, scratch_reg
);
2191 /* Put the address of the memory location into our destination
2194 pa_emit_move_sequence (operands
, mode
, scratch_reg
);
2196 /* Now load from the memory location into our destination
2198 operands
[1] = gen_rtx_MEM (Pmode
, operands
[0]);
2199 pa_emit_move_sequence (operands
, mode
, scratch_reg
);
2201 /* And add back in the constant part. */
2202 if (const_part
!= NULL_RTX
)
2203 expand_inc (operand0
, const_part
);
2213 if (reload_in_progress
|| reload_completed
)
2215 temp
= scratch_reg
? scratch_reg
: operand0
;
2216 /* TEMP will hold an address and maybe the actual
2217 data. We want it in WORD_MODE regardless of what mode it
2218 was originally given to us. */
2219 temp
= force_mode (word_mode
, temp
);
2222 temp
= gen_reg_rtx (Pmode
);
2224 /* Force (const (plus (symbol) (const_int))) to memory
2225 if the const_int will not fit in 14 bits. Although
2226 this requires a relocation, the instruction sequence
2227 needed to load the value is shorter. */
2228 if (GET_CODE (operand1
) == CONST
2229 && GET_CODE (XEXP (operand1
, 0)) == PLUS
2230 && GET_CODE (XEXP (XEXP (operand1
, 0), 1)) == CONST_INT
2231 && !INT_14_BITS (XEXP (XEXP (operand1
, 0), 1)))
2233 rtx x
, m
= force_const_mem (mode
, operand1
);
2235 x
= legitimize_pic_address (XEXP (m
, 0), mode
, temp
);
2236 x
= replace_equiv_address (m
, x
);
2237 insn
= emit_move_insn (operand0
, x
);
2241 operands
[1] = legitimize_pic_address (operand1
, mode
, temp
);
2242 if (REG_P (operand0
) && REG_P (operands
[1]))
2243 copy_reg_pointer (operand0
, operands
[1]);
2244 insn
= emit_move_insn (operand0
, operands
[1]);
2247 /* Put a REG_EQUAL note on this insn. */
2248 set_unique_reg_note (insn
, REG_EQUAL
, operand1
);
2250 /* On the HPPA, references to data space are supposed to use dp,
2251 register 27, but showing it in the RTL inhibits various cse
2252 and loop optimizations. */
2257 if (reload_in_progress
|| reload_completed
)
2259 temp
= scratch_reg
? scratch_reg
: operand0
;
2260 /* TEMP will hold an address and maybe the actual
2261 data. We want it in WORD_MODE regardless of what mode it
2262 was originally given to us. */
2263 temp
= force_mode (word_mode
, temp
);
2266 temp
= gen_reg_rtx (mode
);
2268 /* Loading a SYMBOL_REF into a register makes that register
2269 safe to be used as the base in an indexed address.
2271 Don't mark hard registers though. That loses. */
2272 if (GET_CODE (operand0
) == REG
2273 && REGNO (operand0
) >= FIRST_PSEUDO_REGISTER
)
2274 mark_reg_pointer (operand0
, BITS_PER_UNIT
);
2275 if (REGNO (temp
) >= FIRST_PSEUDO_REGISTER
)
2276 mark_reg_pointer (temp
, BITS_PER_UNIT
);
2279 set
= gen_rtx_SET (operand0
, temp
);
2281 set
= gen_rtx_SET (operand0
,
2282 gen_rtx_LO_SUM (mode
, temp
, operand1
));
2284 emit_insn (gen_rtx_SET (temp
, gen_rtx_HIGH (mode
, operand1
)));
2290 else if (tls_referenced_p (operand1
))
2295 if (GET_CODE (tmp
) == CONST
&& GET_CODE (XEXP (tmp
, 0)) == PLUS
)
2297 addend
= XEXP (XEXP (tmp
, 0), 1);
2298 tmp
= XEXP (XEXP (tmp
, 0), 0);
2301 gcc_assert (GET_CODE (tmp
) == SYMBOL_REF
);
2302 tmp
= legitimize_tls_address (tmp
);
2305 tmp
= gen_rtx_PLUS (mode
, tmp
, addend
);
2306 tmp
= force_operand (tmp
, operands
[0]);
2310 else if (GET_CODE (operand1
) != CONST_INT
2311 || !pa_cint_ok_for_move (UINTVAL (operand1
)))
2316 HOST_WIDE_INT value
= 0;
2317 HOST_WIDE_INT insv
= 0;
2320 if (GET_CODE (operand1
) == CONST_INT
)
2321 value
= INTVAL (operand1
);
2324 && GET_CODE (operand1
) == CONST_INT
2325 && HOST_BITS_PER_WIDE_INT
> 32
2326 && GET_MODE_BITSIZE (GET_MODE (operand0
)) > 32)
2330 /* Extract the low order 32 bits of the value and sign extend.
2331 If the new value is the same as the original value, we can
2332 can use the original value as-is. If the new value is
2333 different, we use it and insert the most-significant 32-bits
2334 of the original value into the final result. */
2335 nval
= ((value
& (((HOST_WIDE_INT
) 2 << 31) - 1))
2336 ^ ((HOST_WIDE_INT
) 1 << 31)) - ((HOST_WIDE_INT
) 1 << 31);
2339 #if HOST_BITS_PER_WIDE_INT > 32
2340 insv
= value
>= 0 ? value
>> 32 : ~(~value
>> 32);
2344 operand1
= GEN_INT (nval
);
2348 if (reload_in_progress
|| reload_completed
)
2349 temp
= scratch_reg
? scratch_reg
: operand0
;
2351 temp
= gen_reg_rtx (mode
);
2353 /* We don't directly split DImode constants on 32-bit targets
2354 because PLUS uses an 11-bit immediate and the insn sequence
2355 generated is not as efficient as the one using HIGH/LO_SUM. */
2356 if (GET_CODE (operand1
) == CONST_INT
2357 && GET_MODE_BITSIZE (mode
) <= BITS_PER_WORD
2358 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2361 /* Directly break constant into high and low parts. This
2362 provides better optimization opportunities because various
2363 passes recognize constants split with PLUS but not LO_SUM.
2364 We use a 14-bit signed low part except when the addition
2365 of 0x4000 to the high part might change the sign of the
2367 HOST_WIDE_INT low
= value
& 0x3fff;
2368 HOST_WIDE_INT high
= value
& ~ 0x3fff;
2372 if (high
== 0x7fffc000 || (mode
== HImode
&& high
== 0x4000))
2380 emit_insn (gen_rtx_SET (temp
, GEN_INT (high
)));
2381 operands
[1] = gen_rtx_PLUS (mode
, temp
, GEN_INT (low
));
2385 emit_insn (gen_rtx_SET (temp
, gen_rtx_HIGH (mode
, operand1
)));
2386 operands
[1] = gen_rtx_LO_SUM (mode
, temp
, operand1
);
2389 insn
= emit_move_insn (operands
[0], operands
[1]);
2391 /* Now insert the most significant 32 bits of the value
2392 into the register. When we don't have a second register
2393 available, it could take up to nine instructions to load
2394 a 64-bit integer constant. Prior to reload, we force
2395 constants that would take more than three instructions
2396 to load to the constant pool. During and after reload,
2397 we have to handle all possible values. */
2400 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2401 register and the value to be inserted is outside the
2402 range that can be loaded with three depdi instructions. */
2403 if (temp
!= operand0
&& (insv
>= 16384 || insv
< -16384))
2405 operand1
= GEN_INT (insv
);
2407 emit_insn (gen_rtx_SET (temp
,
2408 gen_rtx_HIGH (mode
, operand1
)));
2409 emit_move_insn (temp
, gen_rtx_LO_SUM (mode
, temp
, operand1
));
2411 insn
= emit_insn (gen_insvdi (operand0
, GEN_INT (32),
2414 insn
= emit_insn (gen_insvsi (operand0
, GEN_INT (32),
2419 int len
= 5, pos
= 27;
2421 /* Insert the bits using the depdi instruction. */
2424 HOST_WIDE_INT v5
= ((insv
& 31) ^ 16) - 16;
2425 HOST_WIDE_INT sign
= v5
< 0;
2427 /* Left extend the insertion. */
2428 insv
= (insv
>= 0 ? insv
>> len
: ~(~insv
>> len
));
2429 while (pos
> 0 && (insv
& 1) == sign
)
2431 insv
= (insv
>= 0 ? insv
>> 1 : ~(~insv
>> 1));
2437 insn
= emit_insn (gen_insvdi (operand0
,
2442 insn
= emit_insn (gen_insvsi (operand0
,
2447 len
= pos
> 0 && pos
< 5 ? pos
: 5;
2453 set_unique_reg_note (insn
, REG_EQUAL
, op1
);
2458 /* Now have insn-emit do whatever it normally does. */
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
pa_reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
      break;

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
        tree value;
        unsigned HOST_WIDE_INT ix;

        FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
          if (value)
            reloc |= pa_reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */

const char *
pa_singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
         bit pattern.  */
      REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
        return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
        return "ldil L'%1,%0";
      else if (pa_zdepi_cint_p (intval))
        return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
        return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}
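/* Illustrative note (not part of the original sources): the template chosen
   above depends only on the operand shapes.  Assuming %r26 as the
   destination, some hypothetical cases would be

     (set (reg:SI 26) (const_int 10))          -> "ldi 10,%r26"
     (set (reg:SI 26) (const_int 0x12345678))  -> "ldil L'...,%r26" then
                                                  "ldo R'...(%r26),%r26"
     (set (reg:SI 26) (mem:SI ...))            -> "ldw ...,%r26"

   The ldil/ldo pair is the general 32-bit immediate load; ldi and the
   depwi,z form are the cheaper single-insn special cases.  */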
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32 - lsb; len++)
        {
          if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
            break;
        }

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}
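/* Worked example (illustration only, not from the original sources): for
   IMM = 0x1e0 the loop above finds lsb = 5 and shifts IMM down to 0xf.
   Bit 4 of the shifted value is clear, so len = 4 and the results are

     op[0] = 0xf            value to deposit
     op[1] = 31 - 5 = 26    bit position in HP numbering
     op[2] = 4              field length

   which corresponds to "depwi,z 15,26,4,reg" recreating 0x1e0.  */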
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len, maxlen;

  maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < maxlen; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < maxlen - lsb; len++)
        {
          if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
            break;
        }

      /* Extend length if host is narrow and IMM is negative.  */
      if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
        len += 32;

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb;
  op[2] = len;
}
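/* Worked example (illustration only): for IMM = 0x3c0000000 on a 64-bit
   host, maxlen = 64, lsb = 30 and the shifted value is 0xf, so len = 4 and

     op[0] = 0xf
     op[1] = 63 - 30 = 33
     op[2] = 4

   i.e. a single "depdi,z 15,33,4,reg" recreates the constant.  */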
2630 /* Output assembler code to perform a doubleword move insn
2631 with operands OPERANDS. */
2634 pa_output_move_double (rtx
*operands
)
2636 enum { REGOP
, OFFSOP
, MEMOP
, CNSTOP
, RNDOP
} optype0
, optype1
;
2638 rtx addreg0
= 0, addreg1
= 0;
2641 /* First classify both operands. */
2643 if (REG_P (operands
[0]))
2645 else if (offsettable_memref_p (operands
[0]))
2647 else if (GET_CODE (operands
[0]) == MEM
)
2652 if (REG_P (operands
[1]))
2654 else if (CONSTANT_P (operands
[1]))
2656 else if (offsettable_memref_p (operands
[1]))
2658 else if (GET_CODE (operands
[1]) == MEM
)
2663 /* Check for the cases that the operand constraints are not
2664 supposed to allow to happen. */
2665 gcc_assert (optype0
== REGOP
|| optype1
== REGOP
);
2667 /* Handle copies between general and floating registers. */
2669 if (optype0
== REGOP
&& optype1
== REGOP
2670 && FP_REG_P (operands
[0]) ^ FP_REG_P (operands
[1]))
2672 if (FP_REG_P (operands
[0]))
2674 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands
);
2675 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands
);
2676 return "{fldds|fldd} -16(%%sp),%0";
2680 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands
);
2681 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands
);
2682 return "{ldws|ldw} -12(%%sp),%R0";
2686 /* Handle auto decrementing and incrementing loads and stores
2687 specifically, since the structure of the function doesn't work
2688 for them without major modification. Do it better when we learn
2689 this port about the general inc/dec addressing of PA.
2690 (This was written by tege. Chide him if it doesn't work.) */
2692 if (optype0
== MEMOP
)
2694 /* We have to output the address syntax ourselves, since print_operand
2695 doesn't deal with the addresses we want to use. Fix this later. */
2697 rtx addr
= XEXP (operands
[0], 0);
2698 if (GET_CODE (addr
) == POST_INC
|| GET_CODE (addr
) == POST_DEC
)
2700 rtx high_reg
= gen_rtx_SUBREG (SImode
, operands
[1], 0);
2702 operands
[0] = XEXP (addr
, 0);
2703 gcc_assert (GET_CODE (operands
[1]) == REG
2704 && GET_CODE (operands
[0]) == REG
);
2706 gcc_assert (!reg_overlap_mentioned_p (high_reg
, addr
));
2708 /* No overlap between high target register and address
2709 register. (We do this in a non-obvious way to
2710 save a register file writeback) */
2711 if (GET_CODE (addr
) == POST_INC
)
2712 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2713 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2715 else if (GET_CODE (addr
) == PRE_INC
|| GET_CODE (addr
) == PRE_DEC
)
2717 rtx high_reg
= gen_rtx_SUBREG (SImode
, operands
[1], 0);
2719 operands
[0] = XEXP (addr
, 0);
2720 gcc_assert (GET_CODE (operands
[1]) == REG
2721 && GET_CODE (operands
[0]) == REG
);
2723 gcc_assert (!reg_overlap_mentioned_p (high_reg
, addr
));
2724 /* No overlap between high target register and address
2725 register. (We do this in a non-obvious way to save a
2726 register file writeback) */
2727 if (GET_CODE (addr
) == PRE_INC
)
2728 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2729 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2732 if (optype1
== MEMOP
)
2734 /* We have to output the address syntax ourselves, since print_operand
2735 doesn't deal with the addresses we want to use. Fix this later. */
2737 rtx addr
= XEXP (operands
[1], 0);
2738 if (GET_CODE (addr
) == POST_INC
|| GET_CODE (addr
) == POST_DEC
)
2740 rtx high_reg
= gen_rtx_SUBREG (SImode
, operands
[0], 0);
2742 operands
[1] = XEXP (addr
, 0);
2743 gcc_assert (GET_CODE (operands
[0]) == REG
2744 && GET_CODE (operands
[1]) == REG
);
2746 if (!reg_overlap_mentioned_p (high_reg
, addr
))
2748 /* No overlap between high target register and address
2749 register. (We do this in a non-obvious way to
2750 save a register file writeback) */
2751 if (GET_CODE (addr
) == POST_INC
)
2752 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2753 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2757 /* This is an undefined situation. We should load into the
2758 address register *and* update that register. Probably
2759 we don't need to handle this at all. */
2760 if (GET_CODE (addr
) == POST_INC
)
2761 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2762 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2765 else if (GET_CODE (addr
) == PRE_INC
|| GET_CODE (addr
) == PRE_DEC
)
2767 rtx high_reg
= gen_rtx_SUBREG (SImode
, operands
[0], 0);
2769 operands
[1] = XEXP (addr
, 0);
2770 gcc_assert (GET_CODE (operands
[0]) == REG
2771 && GET_CODE (operands
[1]) == REG
);
2773 if (!reg_overlap_mentioned_p (high_reg
, addr
))
2775 /* No overlap between high target register and address
2776 register. (We do this in a non-obvious way to
2777 save a register file writeback) */
2778 if (GET_CODE (addr
) == PRE_INC
)
2779 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2780 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2784 /* This is an undefined situation. We should load into the
2785 address register *and* update that register. Probably
2786 we don't need to handle this at all. */
2787 if (GET_CODE (addr
) == PRE_INC
)
2788 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2789 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2792 else if (GET_CODE (addr
) == PLUS
2793 && GET_CODE (XEXP (addr
, 0)) == MULT
)
2797 /* Load address into left half of destination register. */
2798 xoperands
[0] = gen_rtx_SUBREG (SImode
, operands
[0], 0);
2799 xoperands
[1] = XEXP (addr
, 1);
2800 xoperands
[2] = XEXP (XEXP (addr
, 0), 0);
2801 xoperands
[3] = XEXP (XEXP (addr
, 0), 1);
2802 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2804 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2806 else if (GET_CODE (addr
) == PLUS
2807 && REG_P (XEXP (addr
, 0))
2808 && REG_P (XEXP (addr
, 1)))
2812 /* Load address into left half of destination register. */
2813 xoperands
[0] = gen_rtx_SUBREG (SImode
, operands
[0], 0);
2814 xoperands
[1] = XEXP (addr
, 0);
2815 xoperands
[2] = XEXP (addr
, 1);
2816 output_asm_insn ("{addl|add,l} %1,%2,%0",
2818 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2822 /* If an operand is an unoffsettable memory ref, find a register
2823 we can increment temporarily to make it refer to the second word. */
2825 if (optype0
== MEMOP
)
2826 addreg0
= find_addr_reg (XEXP (operands
[0], 0));
2828 if (optype1
== MEMOP
)
2829 addreg1
= find_addr_reg (XEXP (operands
[1], 0));
2831 /* Ok, we can do one word at a time.
2832 Normally we do the low-numbered word first.
2834 In either case, set up in LATEHALF the operands to use
2835 for the high-numbered word and in some cases alter the
2836 operands in OPERANDS to be suitable for the low-numbered word. */
2838 if (optype0
== REGOP
)
2839 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
2840 else if (optype0
== OFFSOP
)
2841 latehalf
[0] = adjust_address_nv (operands
[0], SImode
, 4);
2843 latehalf
[0] = operands
[0];
2845 if (optype1
== REGOP
)
2846 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
2847 else if (optype1
== OFFSOP
)
2848 latehalf
[1] = adjust_address_nv (operands
[1], SImode
, 4);
2849 else if (optype1
== CNSTOP
)
2851 if (GET_CODE (operands
[1]) == HIGH
)
2853 operands
[1] = XEXP (operands
[1], 0);
2856 split_double (operands
[1], &operands
[1], &latehalf
[1]);
2859 latehalf
[1] = operands
[1];
2861 /* If the first move would clobber the source of the second one,
2862 do them in the other order.
2864 This can happen in two cases:
2866 mem -> register where the first half of the destination register
2867 is the same register used in the memory's address. Reload
2868 can create such insns.
2870 mem in this case will be either register indirect or register
2871 indirect plus a valid offset.
2873 register -> register move where REGNO(dst) == REGNO(src + 1)
2874 someone (Tim/Tege?) claimed this can happen for parameter loads.
2876 Handle mem -> register case first. */
2877 if (optype0
== REGOP
2878 && (optype1
== MEMOP
|| optype1
== OFFSOP
)
2879 && refers_to_regno_p (REGNO (operands
[0]), operands
[1]))
2881 /* Do the late half first. */
2883 output_asm_insn ("ldo 4(%0),%0", &addreg1
);
2884 output_asm_insn (pa_singlemove_string (latehalf
), latehalf
);
2888 output_asm_insn ("ldo -4(%0),%0", &addreg1
);
2889 return pa_singlemove_string (operands
);
2892 /* Now handle register -> register case. */
2893 if (optype0
== REGOP
&& optype1
== REGOP
2894 && REGNO (operands
[0]) == REGNO (operands
[1]) + 1)
2896 output_asm_insn (pa_singlemove_string (latehalf
), latehalf
);
2897 return pa_singlemove_string (operands
);
2900 /* Normal case: do the two words, low-numbered first. */
2902 output_asm_insn (pa_singlemove_string (operands
), operands
);
2904 /* Make any unoffsettable addresses point at high-numbered word. */
2906 output_asm_insn ("ldo 4(%0),%0", &addreg0
);
2908 output_asm_insn ("ldo 4(%0),%0", &addreg1
);
2910 /* Do high-numbered word. */
2912 output_asm_insn ("ldil L'%1,%0", latehalf
);
2914 output_asm_insn (pa_singlemove_string (latehalf
), latehalf
);
2916 /* Undo the adds we just did. */
2918 output_asm_insn ("ldo -4(%0),%0", &addreg0
);
2920 output_asm_insn ("ldo -4(%0),%0", &addreg1
);
/* Output assembler code to perform a doubleword move insn
   with fp register operands.  */

const char *
pa_output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
          || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
        output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
        output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
         arbitrary address here including pre/post increment/decrement.

         so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}
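/* Example (illustration only): for an address such as
   (plus (plus (reg %r25) (reg %r26)) (const_int 4)) the loop above walks
   down the PLUS chain and returns %r25, which the doubleword move code can
   then temporarily increment to address the second word.  */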
2982 /* Emit code to perform a block move.
2984 OPERANDS[0] is the destination pointer as a REG, clobbered.
2985 OPERANDS[1] is the source pointer as a REG, clobbered.
2986 OPERANDS[2] is a register for temporary storage.
2987 OPERANDS[3] is a register for temporary storage.
2988 OPERANDS[4] is the size as a CONST_INT
2989 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2990 OPERANDS[6] is another temporary register. */
2993 pa_output_block_move (rtx
*operands
, int size_is_constant ATTRIBUTE_UNUSED
)
2995 int align
= INTVAL (operands
[5]);
2996 unsigned long n_bytes
= INTVAL (operands
[4]);
2998 /* We can't move more than a word at a time because the PA
2999 has no longer integer move insns. (Could use fp mem ops?) */
3000 if (align
> (TARGET_64BIT
? 8 : 4))
3001 align
= (TARGET_64BIT
? 8 : 4);
3003 /* Note that we know each loop below will execute at least twice
3004 (else we would have open-coded the copy). */
3008 /* Pre-adjust the loop counter. */
3009 operands
[4] = GEN_INT (n_bytes
- 16);
3010 output_asm_insn ("ldi %4,%2", operands
);
3013 output_asm_insn ("ldd,ma 8(%1),%3", operands
);
3014 output_asm_insn ("ldd,ma 8(%1),%6", operands
);
3015 output_asm_insn ("std,ma %3,8(%0)", operands
);
3016 output_asm_insn ("addib,>= -16,%2,.-12", operands
);
3017 output_asm_insn ("std,ma %6,8(%0)", operands
);
3019 /* Handle the residual. There could be up to 7 bytes of
3020 residual to copy! */
3021 if (n_bytes
% 16 != 0)
3023 operands
[4] = GEN_INT (n_bytes
% 8);
3024 if (n_bytes
% 16 >= 8)
3025 output_asm_insn ("ldd,ma 8(%1),%3", operands
);
3026 if (n_bytes
% 8 != 0)
3027 output_asm_insn ("ldd 0(%1),%6", operands
);
3028 if (n_bytes
% 16 >= 8)
3029 output_asm_insn ("std,ma %3,8(%0)", operands
);
3030 if (n_bytes
% 8 != 0)
3031 output_asm_insn ("stdby,e %6,%4(%0)", operands
);
3036 /* Pre-adjust the loop counter. */
3037 operands
[4] = GEN_INT (n_bytes
- 8);
3038 output_asm_insn ("ldi %4,%2", operands
);
3041 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands
);
3042 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands
);
3043 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands
);
3044 output_asm_insn ("addib,>= -8,%2,.-12", operands
);
3045 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands
);
3047 /* Handle the residual. There could be up to 7 bytes of
3048 residual to copy! */
3049 if (n_bytes
% 8 != 0)
3051 operands
[4] = GEN_INT (n_bytes
% 4);
3052 if (n_bytes
% 8 >= 4)
3053 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands
);
3054 if (n_bytes
% 4 != 0)
3055 output_asm_insn ("ldw 0(%1),%6", operands
);
3056 if (n_bytes
% 8 >= 4)
3057 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands
);
3058 if (n_bytes
% 4 != 0)
3059 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands
);
3064 /* Pre-adjust the loop counter. */
3065 operands
[4] = GEN_INT (n_bytes
- 4);
3066 output_asm_insn ("ldi %4,%2", operands
);
3069 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands
);
3070 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands
);
3071 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands
);
3072 output_asm_insn ("addib,>= -4,%2,.-12", operands
);
3073 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands
);
3075 /* Handle the residual. */
3076 if (n_bytes
% 4 != 0)
3078 if (n_bytes
% 4 >= 2)
3079 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands
);
3080 if (n_bytes
% 2 != 0)
3081 output_asm_insn ("ldb 0(%1),%6", operands
);
3082 if (n_bytes
% 4 >= 2)
3083 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands
);
3084 if (n_bytes
% 2 != 0)
3085 output_asm_insn ("stb %6,0(%0)", operands
);
3090 /* Pre-adjust the loop counter. */
3091 operands
[4] = GEN_INT (n_bytes
- 2);
3092 output_asm_insn ("ldi %4,%2", operands
);
3095 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands
);
3096 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands
);
3097 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands
);
3098 output_asm_insn ("addib,>= -2,%2,.-12", operands
);
3099 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands
);
3101 /* Handle the residual. */
3102 if (n_bytes
% 2 != 0)
3104 output_asm_insn ("ldb 0(%1),%3", operands
);
3105 output_asm_insn ("stb %3,0(%0)", operands
);
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_cpymem_length (rtx_insn *insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than four bytes at a time because the PA
     has no longer integer move insns.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns += 2;

      if ((n_bytes % align) != 0)
        n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
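/* Worked example (illustration only): for n_bytes = 30 and align = 4 on a
   32-bit target, the unrolled loop costs 6 insns; 30 % 8 = 6 >= 4 adds two
   more and 30 % 4 = 2 adds another two, giving 10 insns and a length
   attribute of 10 * 4 = 40 bytes.  */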
3149 /* Emit code to perform a block clear.
3151 OPERANDS[0] is the destination pointer as a REG, clobbered.
3152 OPERANDS[1] is a register for temporary storage.
3153 OPERANDS[2] is the size as a CONST_INT
3154 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3157 pa_output_block_clear (rtx
*operands
, int size_is_constant ATTRIBUTE_UNUSED
)
3159 int align
= INTVAL (operands
[3]);
3160 unsigned long n_bytes
= INTVAL (operands
[2]);
3162 /* We can't clear more than a word at a time because the PA
3163 has no longer integer move insns. */
3164 if (align
> (TARGET_64BIT
? 8 : 4))
3165 align
= (TARGET_64BIT
? 8 : 4);
3167 /* Note that we know each loop below will execute at least twice
3168 (else we would have open-coded the copy). */
3172 /* Pre-adjust the loop counter. */
3173 operands
[2] = GEN_INT (n_bytes
- 16);
3174 output_asm_insn ("ldi %2,%1", operands
);
3177 output_asm_insn ("std,ma %%r0,8(%0)", operands
);
3178 output_asm_insn ("addib,>= -16,%1,.-4", operands
);
3179 output_asm_insn ("std,ma %%r0,8(%0)", operands
);
3181 /* Handle the residual. There could be up to 7 bytes of
3182 residual to copy! */
3183 if (n_bytes
% 16 != 0)
3185 operands
[2] = GEN_INT (n_bytes
% 8);
3186 if (n_bytes
% 16 >= 8)
3187 output_asm_insn ("std,ma %%r0,8(%0)", operands
);
3188 if (n_bytes
% 8 != 0)
3189 output_asm_insn ("stdby,e %%r0,%2(%0)", operands
);
3194 /* Pre-adjust the loop counter. */
3195 operands
[2] = GEN_INT (n_bytes
- 8);
3196 output_asm_insn ("ldi %2,%1", operands
);
3199 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands
);
3200 output_asm_insn ("addib,>= -8,%1,.-4", operands
);
3201 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands
);
3203 /* Handle the residual. There could be up to 7 bytes of
3204 residual to copy! */
3205 if (n_bytes
% 8 != 0)
3207 operands
[2] = GEN_INT (n_bytes
% 4);
3208 if (n_bytes
% 8 >= 4)
3209 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands
);
3210 if (n_bytes
% 4 != 0)
3211 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands
);
3216 /* Pre-adjust the loop counter. */
3217 operands
[2] = GEN_INT (n_bytes
- 4);
3218 output_asm_insn ("ldi %2,%1", operands
);
3221 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands
);
3222 output_asm_insn ("addib,>= -4,%1,.-4", operands
);
3223 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands
);
3225 /* Handle the residual. */
3226 if (n_bytes
% 4 != 0)
3228 if (n_bytes
% 4 >= 2)
3229 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands
);
3230 if (n_bytes
% 2 != 0)
3231 output_asm_insn ("stb %%r0,0(%0)", operands
);
3236 /* Pre-adjust the loop counter. */
3237 operands
[2] = GEN_INT (n_bytes
- 2);
3238 output_asm_insn ("ldi %2,%1", operands
);
3241 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands
);
3242 output_asm_insn ("addib,>= -2,%1,.-4", operands
);
3243 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands
);
3245 /* Handle the residual. */
3246 if (n_bytes
% 2 != 0)
3247 output_asm_insn ("stb %%r0,0(%0)", operands
);
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx_insn *insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no longer integer move insns.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns += 1;

      if ((n_bytes % align) != 0)
        n_insns += 1;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
        if ((mask & (1 << ls0)) == 0)
          break;

      for (ls1 = ls0; ls1 < 32; ls1++)
        if ((mask & (1 << ls1)) != 0)
          break;

      for (ms0 = ls1; ms0 < 32; ms0++)
        if ((mask & (1 << ms0)) == 0)
          break;

      gcc_assert (ms0 == 32);

      if (ls1 == 32)
        {
          len = ls0;

          gcc_assert (len);

          operands[2] = GEN_INT (len);
          return "{extru|extrw,u} %1,31,%2,%0";
        }
      else
        {
          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          p = 31 - ls0;
          len = ls1 - ls0;

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "{depi|depwi} 0,%2,%3,%0";
        }
    }
  else
    return "and %1,%2,%0";
}
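/* Worked examples (illustration only):

     operands[2] = 0x000000ff  ->  ls0 = 8, ls1 = 32, so the mask keeps the
        low 8 bits and we emit "extrw,u %1,31,8,%0".

     operands[2] = 0xffffff0f  ->  ls0 = 4, ls1 = 8, a 4-bit hole at bits
        4..7, so we emit "depwi 0,27,4,%0" (position 31 - 4, length 4).

   Constant masks that are not of this form are assumed to have been
   rejected by the insn's predicates before reaching this routine; a
   register mask falls through to the plain "and".  */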
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
          break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
          break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
          break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
        {
          len = ls0;

          gcc_assert (len);

          operands[2] = GEN_INT (len);
          return "extrd,u %1,63,%2,%0";
        }
      else
        {
          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          p = 63 - ls0;
          len = ls1 - ls0;

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "depdi 0,%2,%3,%0";
        }
    }
  else
    return "and %1,%2,%0";
}
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}
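/* Worked example (illustration only): operands[2] = 0x00000ff0 gives
   bs0 = 4 and bs1 = 12, a contiguous run of eight ones, so p = 31 - 4 = 27,
   len = 8 and we emit "depwi -1,27,8,%0".  Constants whose set bits are not
   contiguous are assumed to be screened out by the insn's predicates.  */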
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
              || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}
/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  bool result;
  tree decl = NULL;

  /* When we have a SYMBOL_REF with a SYMBOL_REF_DECL, we need to call
     assemble_external and set the SYMBOL_REF_DECL to NULL before
     calling output_addr_const.  Otherwise, it may call assemble_external
     in the midst of outputting the assembler code for the SYMBOL_REF.
     We restore the SYMBOL_REF_DECL after the output is done.  */
  if (GET_CODE (x) == SYMBOL_REF)
    {
      decl = SYMBOL_REF_DECL (x);
      if (decl)
        {
          assemble_external (decl);
          SET_SYMBOL_REF_DECL (x, NULL);
        }
    }

  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);

      /* We don't want an OPD when generating fast indirect calls.  */
      if (!TARGET_FAST_INDIRECT_CALLS)
        fputs ("P%", asm_out_file);

      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      result = true;
    }
  else
    result = default_assemble_integer (x, size, aligned_p);

  if (decl)
    SET_SYMBOL_REF_DECL (x, decl);

  return result;
}
/* Output an ascii string.  */

void
pa_output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
        {
          unsigned int c = (unsigned char) p[i + io];

          if (c == '\"' || c == '\\')
            partial_output[co++] = '\\';
          if (c >= ' ' && c < 0177)
            partial_output[co++] = c;
          else
            {
              unsigned int hexd;
              partial_output[co++] = '\\';
              partial_output[co++] = 'x';
              hexd = c / 16 - 0 + '0';
              if (hexd > '9')
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
              hexd = c % 16 - 0 + '0';
              if (hexd > '9')
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
            }
        }
      if (chars_output + co > 243)
        {
          fputs ("\"\n\t.STRING \"", file);
          chars_output = 0;
        }
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}
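/* Example (illustration only): pa_output_ascii (file, "hi\n", 3) emits

	.STRING "hi\x0a"

   Printable characters are copied through, quotes and backslashes get a
   leading backslash, and everything else becomes a \xNN escape, with the
   string restarted whenever an input line would become too long for the
   HP assembler.  */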
3551 /* Try to rewrite floating point comparisons & branches to avoid
3552 useless add,tr insns.
3554 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3555 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3556 first attempt to remove useless add,tr insns. It is zero
3557 for the second pass as reorg sometimes leaves bogus REG_DEAD
3560 When CHECK_NOTES is zero we can only eliminate add,tr insns
3561 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3564 remove_useless_addtr_insns (int check_notes
)
3567 static int pass
= 0;
3569 /* This is fairly cheap, so always run it when optimizing. */
3573 int fbranch_count
= 0;
3575 /* Walk all the insns in this function looking for fcmp & fbranch
3576 instructions. Keep track of how many of each we find. */
3577 for (insn
= get_insns (); insn
; insn
= next_insn (insn
))
3581 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3582 if (! NONJUMP_INSN_P (insn
) && ! JUMP_P (insn
))
3585 tmp
= PATTERN (insn
);
3587 /* It must be a set. */
3588 if (GET_CODE (tmp
) != SET
)
3591 /* If the destination is CCFP, then we've found an fcmp insn. */
3592 tmp
= SET_DEST (tmp
);
3593 if (GET_CODE (tmp
) == REG
&& REGNO (tmp
) == 0)
3599 tmp
= PATTERN (insn
);
3600 /* If this is an fbranch instruction, bump the fbranch counter. */
3601 if (GET_CODE (tmp
) == SET
3602 && SET_DEST (tmp
) == pc_rtx
3603 && GET_CODE (SET_SRC (tmp
)) == IF_THEN_ELSE
3604 && GET_CODE (XEXP (SET_SRC (tmp
), 0)) == NE
3605 && GET_CODE (XEXP (XEXP (SET_SRC (tmp
), 0), 0)) == REG
3606 && REGNO (XEXP (XEXP (SET_SRC (tmp
), 0), 0)) == 0)
3614 /* Find all floating point compare + branch insns. If possible,
3615 reverse the comparison & the branch to avoid add,tr insns. */
3616 for (insn
= get_insns (); insn
; insn
= next_insn (insn
))
3621 /* Ignore anything that isn't an INSN. */
3622 if (! NONJUMP_INSN_P (insn
))
3625 tmp
= PATTERN (insn
);
3627 /* It must be a set. */
3628 if (GET_CODE (tmp
) != SET
)
3631 /* The destination must be CCFP, which is register zero. */
3632 tmp
= SET_DEST (tmp
);
3633 if (GET_CODE (tmp
) != REG
|| REGNO (tmp
) != 0)
3636 /* INSN should be a set of CCFP.
3638 See if the result of this insn is used in a reversed FP
3639 conditional branch. If so, reverse our condition and
3640 the branch. Doing so avoids useless add,tr insns. */
3641 next
= next_insn (insn
);
3644 /* Jumps, calls and labels stop our search. */
3645 if (JUMP_P (next
) || CALL_P (next
) || LABEL_P (next
))
3648 /* As does another fcmp insn. */
3649 if (NONJUMP_INSN_P (next
)
3650 && GET_CODE (PATTERN (next
)) == SET
3651 && GET_CODE (SET_DEST (PATTERN (next
))) == REG
3652 && REGNO (SET_DEST (PATTERN (next
))) == 0)
3655 next
= next_insn (next
);
3658 /* Is NEXT_INSN a branch? */
3659 if (next
&& JUMP_P (next
))
3661 rtx pattern
= PATTERN (next
);
3663 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3664 and CCFP dies, then reverse our conditional and the branch
3665 to avoid the add,tr. */
3666 if (GET_CODE (pattern
) == SET
3667 && SET_DEST (pattern
) == pc_rtx
3668 && GET_CODE (SET_SRC (pattern
)) == IF_THEN_ELSE
3669 && GET_CODE (XEXP (SET_SRC (pattern
), 0)) == NE
3670 && GET_CODE (XEXP (XEXP (SET_SRC (pattern
), 0), 0)) == REG
3671 && REGNO (XEXP (XEXP (SET_SRC (pattern
), 0), 0)) == 0
3672 && GET_CODE (XEXP (SET_SRC (pattern
), 1)) == PC
3673 && (fcmp_count
== fbranch_count
3675 && find_regno_note (next
, REG_DEAD
, 0))))
3677 /* Reverse the branch. */
3678 tmp
= XEXP (SET_SRC (pattern
), 1);
3679 XEXP (SET_SRC (pattern
), 1) = XEXP (SET_SRC (pattern
), 2);
3680 XEXP (SET_SRC (pattern
), 2) = tmp
;
3681 INSN_CODE (next
) = -1;
3683 /* Reverse our condition. */
3684 tmp
= PATTERN (insn
);
3685 PUT_CODE (XEXP (tmp
, 1),
3686 (reverse_condition_maybe_unordered
3687 (GET_CODE (XEXP (tmp
, 1)))));
/* You may have trouble believing this, but this is the 32 bit HP-PA
   stack layout.  Wow!

   Offset		Contents

   Variable arguments	(optional; any number may be allocated)

   SP-(4*(N+9))		arg word N
	:		    :
   SP-56		arg word 5
   SP-52		arg word 4

   Fixed arguments	(must be allocated; may remain unused)

   SP-48		arg word 3
   SP-44		arg word 2
   SP-40		arg word 1
   SP-36		arg word 0

   Frame Marker

   SP-32		External Data Pointer (DP)
   SP-28		External sr4
   SP-24		External/stub RP (RP')
   SP-20		Current RP
   SP-16		Static Link
   SP-12		Clean up
   SP-8			Calling Stub RP (RP'')
   SP-4			Previous SP

   Top of Frame

   SP-0			Stack Pointer (points to next available address)

*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   things simple.

   Offset		Contents

   Local fp value:

   SP (FP')		Previous FP
   SP + 4		Alignment filler (sigh)
   SP + 8		Space for locals reserved here.
   .
   .
   .

   SP + n		All call saved register used.
   .
   .
   .

   SP + o		All call saved fp registers used.
   .
   .
   .

   SP + p (SP')		points to next available address.

*/

/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;
/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.  */

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest, src, basereg;
  rtx_insn *insn;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
        {
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_rtx_SET (tmpreg,
                                     gen_rtx_PLUS (Pmode, basereg, delta)));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (gen_rtx_MEM (word_mode,
                                                gen_rtx_PLUS (word_mode,
                                                              basereg,
                                                              delta)),
                                   src));
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}
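/* Illustrative note (added commentary): the three cases above correspond to
   three code shapes.  Assuming the 32-bit ABI, the return-pointer save
   store_reg (2, -20, STACK_POINTER_REGNUM) fits the 14-bit displacement and
   becomes a single

	stw %r2,-20(%r30)

   while a displacement outside 14 bits is reached through %r1, roughly an
   "addil" of the high part followed by a store using the low part.  */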
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx basereg, srcreg, delta;
  rtx_insn *insn;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
         in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx_insn *insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             plus_constant (Pmode,
                                            gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (tmpreg,
                                   gen_rtx_PLUS (Pmode, basereg, delta)));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
                      gen_rtx_PLUS (Pmode, basereg,
                                    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}
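/* Illustrative note (added commentary): for a small adjustment such as
   set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM, 128, 1) this
   emits a single "ldo 128(%r30),%r30"; for a displacement outside 14 bits
   the 32-bit path goes through %r1, roughly "addil L'disp,%r30" followed by
   "ldo R'disp(%r1),%r30".  */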
HOST_WIDE_INT
pa_compute_frame_size (poly_int64 size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in pa_expand_prologue and pa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the TARGET_STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += pa_starting_frame_offset ();

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
        continue;
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (df_regs_ever_live_p (i))
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (df_regs_ever_live_p (i)
        || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
      {
        freg_saved = 1;

        /* We always save both halves of the FP register, so always
           increment the frame size by 8 bytes.  */
        size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
        *fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += crtl->outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!crtl->is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
          & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
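/* Worked example (illustration only; assumes the usual 32-bit parameters:
   an 8-byte starting frame offset, a 32-byte fixed frame marker and a
   64-byte preferred stack boundary).  A non-leaf function with 40 bytes of
   locals, no callee-save registers and 16 bytes of outgoing arguments
   accumulates 40 + 8 + 16 + 32 = 96 bytes, which the final rounding turns
   into a 128-byte frame.  */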
/* Output function label, and associated .PROC and .CALLINFO statements.  */

void
pa_output_function_label (FILE *file)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* pa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (crtl->is_leaf)
    fputs (",NO_CALLS", file);
  else
    fputs (",CALLS", file);
  if (rp_saved)
    fputs (",SAVE_RP", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);
}
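/* Example (illustration only): for a small non-leaf function whose frame
   works out to 128 bytes and which saves its return pointer, the directives
   emitted here would look roughly like

	foo:
		.PROC
		.CALLINFO FRAME=128,CALLS,SAVE_RP
		.ENTRY

   with ENTRY_GR/ENTRY_FR appended only when callee-saved general or
   floating-point registers are stored by the prologue.  */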
/* Output function prologue.  */

static void
pa_output_function_prologue (FILE *file)
{
  pa_output_function_label (file);
  remove_useless_addtr_insns (0);
}

/* The label is output by ASM_DECLARE_FUNCTION_NAME on linux.  */

static void
pa_linux_output_function_prologue (FILE *file ATTRIBUTE_UNUSED)
{
  remove_useless_addtr_insns (0);
}
4045 pa_expand_prologue (void)
4047 int merge_sp_adjust_with_store
= 0;
4048 HOST_WIDE_INT size
= get_frame_size ();
4049 HOST_WIDE_INT offset
;
4058 /* Compute total size for frame pointer, filler, locals and rounding to
4059 the next word boundary. Similar code appears in pa_compute_frame_size
4060 and must be changed in tandem with this code. */
4061 local_fsize
= (size
+ UNITS_PER_WORD
- 1) & ~(UNITS_PER_WORD
- 1);
4062 if (local_fsize
|| frame_pointer_needed
)
4063 local_fsize
+= pa_starting_frame_offset ();
4065 actual_fsize
= pa_compute_frame_size (size
, &save_fregs
);
4066 if (flag_stack_usage_info
)
4067 current_function_static_stack_size
= actual_fsize
;
4069 /* Compute a few things we will use often. */
4070 tmpreg
= gen_rtx_REG (word_mode
, 1);
4072 /* Save RP first. The calling conventions manual states RP will
4073 always be stored into the caller's frame at sp - 20 or sp - 16
4074 depending on which ABI is in use. */
4075 if (df_regs_ever_live_p (2) || crtl
->calls_eh_return
)
4077 store_reg (2, TARGET_64BIT
? -16 : -20, STACK_POINTER_REGNUM
);
  /* Allocate the local frame and set up the frame pointer if needed.  */
  if (actual_fsize != 0)
    {
      if (frame_pointer_needed)
        {
          /* Copy the old frame pointer temporarily into %r1.  Set up the
             new stack pointer, then store away the saved old frame pointer
             into the stack at sp and at the same time update the stack
             pointer by actual_fsize bytes.  Two versions, first
             handles small (<8k) frames.  The second handles large (>=8k)
             frames.  */
          insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;

          insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;

          if (VAL_14_BITS_P (actual_fsize))
            store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
          else
            {
              /* It is incorrect to store the saved frame pointer at *sp,
                 then increment sp (writes beyond the current stack boundary).

                 So instead use stwm to store at *sp and post-increment the
                 stack pointer as an atomic operation.  Then increment sp to
                 finish allocating the new frame.  */
              HOST_WIDE_INT adjust1 = 8192 - 64;
              HOST_WIDE_INT adjust2 = actual_fsize - adjust1;

              store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
              set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                              adjust2, 1);
            }
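          /* Illustrative example of the split above: adjust1 is always
             8192 - 64 == 8128 bytes, so for actual_fsize == 16384 the
             frame pointer is stored with a post-increment of 8128 and the
             stack pointer is then advanced by the remaining
             adjust2 == 8256 bytes.  */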
          /* We set SAVE_SP in frames that need a frame pointer.  Thus,
             we need to store the previous stack pointer (frame pointer)
             into the frame marker on targets that use the HP unwind
             library.  This allows the HP unwind library to be used to
             unwind GCC frames.  However, we are not fully compatible
             with the HP library because our frame layout differs from
             that specified in the HP runtime specification.

             We don't want a frame note on this instruction as the frame
             marker moves during dynamic stack allocation.

             This instruction also serves as a blockage to prevent
             register spills from being scheduled before the stack
             pointer is raised.  This is necessary as we store
             registers using the frame pointer as a base register,
             and the frame pointer is set before sp is raised.  */
          if (TARGET_HPUX_UNWIND_LIBRARY)
            {
              rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
                                       GEN_INT (TARGET_64BIT ? -8 : -4));

              emit_move_insn (gen_rtx_MEM (word_mode, addr),
                              hard_frame_pointer_rtx);
            }
          else
            emit_insn (gen_blockage ());
        }
      else
        {
          /* ...no frame pointer needed.  */

          /* In some cases we can perform the first callee register save
             and allocate the stack frame at the same time.  If so, just
             make a note of it and defer allocating the frame until saving
             the callee registers.  */
          if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
            merge_sp_adjust_with_store = 1;
          /* Cannot optimize.  Adjust the stack frame by actual_fsize
             bytes.  */
          else
            set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                            actual_fsize, 1);
        }
    }
4164 /* Normal register save.
4166 Do not save the frame pointer in the frame_pointer_needed case. It
4167 was done earlier. */
4168 if (frame_pointer_needed
)
4170 offset
= local_fsize
;
4172 /* Saving the EH return data registers in the frame is the simplest
4173 way to get the frame unwind information emitted. We put them
4174 just before the general registers. */
4175 if (DO_FRAME_NOTES
&& crtl
->calls_eh_return
)
4177 unsigned int i
, regno
;
4181 regno
= EH_RETURN_DATA_REGNO (i
);
4182 if (regno
== INVALID_REGNUM
)
4185 store_reg (regno
, offset
, HARD_FRAME_POINTER_REGNUM
);
4186 offset
+= UNITS_PER_WORD
;
4190 for (i
= 18; i
>= 4; i
--)
4191 if (df_regs_ever_live_p (i
) && !call_used_or_fixed_reg_p (i
))
4193 store_reg (i
, offset
, HARD_FRAME_POINTER_REGNUM
);
4194 offset
+= UNITS_PER_WORD
;
4197 /* Account for %r3 which is saved in a special place. */
4200 /* No frame pointer needed. */
4203 offset
= local_fsize
- actual_fsize
;
4205 /* Saving the EH return data registers in the frame is the simplest
4206 way to get the frame unwind information emitted. */
4207 if (DO_FRAME_NOTES
&& crtl
->calls_eh_return
)
4209 unsigned int i
, regno
;
4213 regno
= EH_RETURN_DATA_REGNO (i
);
4214 if (regno
== INVALID_REGNUM
)
4217 /* If merge_sp_adjust_with_store is nonzero, then we can
4218 optimize the first save. */
4219 if (merge_sp_adjust_with_store
)
4221 store_reg_modify (STACK_POINTER_REGNUM
, regno
, -offset
);
4222 merge_sp_adjust_with_store
= 0;
4225 store_reg (regno
, offset
, STACK_POINTER_REGNUM
);
4226 offset
+= UNITS_PER_WORD
;
4230 for (i
= 18; i
>= 3; i
--)
4231 if (df_regs_ever_live_p (i
) && !call_used_or_fixed_reg_p (i
))
4233 /* If merge_sp_adjust_with_store is nonzero, then we can
4234 optimize the first GR save. */
4235 if (merge_sp_adjust_with_store
)
4237 store_reg_modify (STACK_POINTER_REGNUM
, i
, -offset
);
4238 merge_sp_adjust_with_store
= 0;
4241 store_reg (i
, offset
, STACK_POINTER_REGNUM
);
4242 offset
+= UNITS_PER_WORD
;
4246 /* If we wanted to merge the SP adjustment with a GR save, but we never
4247 did any GR saves, then just emit the adjustment here. */
4248 if (merge_sp_adjust_with_store
)
4249 set_reg_plus_d (STACK_POINTER_REGNUM
, STACK_POINTER_REGNUM
,
4253 /* The hppa calling conventions say that %r19, the pic offset
4254 register, is saved at sp - 32 (in this function's frame)
4255 when generating PIC code. FIXME: What is the correct thing
4256 to do for functions which make no calls and allocate no
4257 frame? Do we need to allocate a frame, or can we just omit
4258 the save? For now we'll just omit the save.
4260 We don't want a note on this insn as the frame marker can
4261 move if there is a dynamic stack allocation. */
4262 if (flag_pic
&& actual_fsize
!= 0 && !TARGET_64BIT
)
4264 rtx addr
= gen_rtx_PLUS (word_mode
, stack_pointer_rtx
, GEN_INT (-32));
4266 emit_move_insn (gen_rtx_MEM (word_mode
, addr
), pic_offset_table_rtx
);
  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;
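  /* For example, an offset of 52 rounds up to (52 + 7) & ~7 == 56, the
     next doubleword (8-byte) boundary; offsets already on an 8-byte
     boundary are unchanged.  */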
4273 /* Floating point register store. */
4278 /* First get the frame or stack pointer to the start of the FP register
4280 if (frame_pointer_needed
)
4282 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM
, offset
, 0);
4283 base
= hard_frame_pointer_rtx
;
4287 set_reg_plus_d (1, STACK_POINTER_REGNUM
, offset
, 0);
4288 base
= stack_pointer_rtx
;
4291 /* Now actually save the FP registers. */
4292 for (i
= FP_SAVED_REG_LAST
; i
>= FP_SAVED_REG_FIRST
; i
-= FP_REG_STEP
)
4294 if (df_regs_ever_live_p (i
)
4295 || (! TARGET_64BIT
&& df_regs_ever_live_p (i
+ 1)))
4299 addr
= gen_rtx_MEM (DFmode
,
4300 gen_rtx_POST_INC (word_mode
, tmpreg
));
4301 reg
= gen_rtx_REG (DFmode
, i
);
4302 insn
= emit_move_insn (addr
, reg
);
4305 RTX_FRAME_RELATED_P (insn
) = 1;
4308 rtx mem
= gen_rtx_MEM (DFmode
,
4309 plus_constant (Pmode
, base
,
4311 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
4312 gen_rtx_SET (mem
, reg
));
4316 rtx meml
= gen_rtx_MEM (SFmode
,
4317 plus_constant (Pmode
, base
,
4319 rtx memr
= gen_rtx_MEM (SFmode
,
4320 plus_constant (Pmode
, base
,
4322 rtx regl
= gen_rtx_REG (SFmode
, i
);
4323 rtx regr
= gen_rtx_REG (SFmode
, i
+ 1);
4324 rtx setl
= gen_rtx_SET (meml
, regl
);
4325 rtx setr
= gen_rtx_SET (memr
, regr
);
4328 RTX_FRAME_RELATED_P (setl
) = 1;
4329 RTX_FRAME_RELATED_P (setr
) = 1;
4330 vec
= gen_rtvec (2, setl
, setr
);
4331 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
4332 gen_rtx_SEQUENCE (VOIDmode
, vec
));
4335 offset
+= GET_MODE_SIZE (DFmode
);
/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
        {
          emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
          src = gen_rtx_MEM (word_mode, tmpreg);
        }
      else
        src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}
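/* Rough usage sketch (illustrative, not from the sources): a call such as
   load_reg (4, 20000, STACK_POINTER_REGNUM) cannot encode 20000 in a
   14-bit displacement, so on 32-bit targets it takes the final branch
   above and forms the address with the usual PA addil/ldw pair, roughly
   "addil L'20000,%r30" followed by "ldw R'20000(%r1),%r4".  */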
/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (unsigned int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      unsigned int old_total = total_code_bytes;

      total_code_bytes += nbytes;

      /* Be prepared to handle overflows.  */
      if (old_total > total_code_bytes)
        total_code_bytes = UINT_MAX;
    }
}
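/* Note on the overflow check above (illustrative): old_total is an
   unsigned int, so if it were 0xfffffff0 and nbytes were 0x20 the
   addition would wrap to 0x10, which is smaller than old_total; the
   running count is then clamped to UINT_MAX rather than silently
   wrapping.  */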
/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */
4408 pa_output_function_epilogue (FILE *file
)
4410 rtx_insn
*insn
= get_last_insn ();
4413 /* pa_expand_epilogue does the dirty work now. We just need
4414 to output the assembler directives which denote the end
4417 To make debuggers happy, emit a nop if the epilogue was completely
4418 eliminated due to a volatile call as the last insn in the
4419 current function. That way the return address (in %r2) will
4420 always point to a valid instruction in the current function. */
4422 /* Get the last real insn. */
4424 insn
= prev_real_insn (insn
);
4426 /* If it is a sequence, then look inside. */
4427 if (insn
&& NONJUMP_INSN_P (insn
) && GET_CODE (PATTERN (insn
)) == SEQUENCE
)
4428 insn
= as_a
<rtx_sequence
*> (PATTERN (insn
))-> insn (0);
4430 /* If insn is a CALL_INSN, then it must be a call to a volatile
4431 function (otherwise there would be epilogue insns). */
4432 if (insn
&& CALL_P (insn
))
4434 fputs ("\tnop\n", file
);
4440 fputs ("\t.EXIT\n\t.PROCEND\n", file
);
4442 if (TARGET_SOM
&& TARGET_GAS
)
4444 /* We are done with this subspace except possibly for some additional
4445 debug information. Forget that we are in this subspace to ensure
4446 that the next function is output in its own subspace. */
4448 cfun
->machine
->in_nsubspa
= 2;
4451 /* Thunks do their own insn accounting. */
4455 if (INSN_ADDRESSES_SET_P ())
4457 last_address
= extra_nop
? 4 : 0;
4458 insn
= get_last_nonnote_insn ();
4461 last_address
+= INSN_ADDRESSES (INSN_UID (insn
));
4463 last_address
+= insn_default_length (insn
);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
                      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
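      /* Illustrative example: with a FUNCTION_BOUNDARY of 32 bits
         (4 bytes), a last_address of 0x1235 rounds up to 0x1238, so the
         running code-size total always advances in whole
         function-alignment units.  */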
  else
    last_address = UINT_MAX;

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
4476 pa_expand_epilogue (void)
4479 HOST_WIDE_INT offset
;
4480 HOST_WIDE_INT ret_off
= 0;
4482 int merge_sp_adjust_with_load
= 0;
4484 /* We will use this often. */
4485 tmpreg
= gen_rtx_REG (word_mode
, 1);
4487 /* Try to restore RP early to avoid load/use interlocks when
4488 RP gets used in the return (bv) instruction. This appears to still
4489 be necessary even when we schedule the prologue and epilogue. */
4492 ret_off
= TARGET_64BIT
? -16 : -20;
4493 if (frame_pointer_needed
)
4495 load_reg (2, ret_off
, HARD_FRAME_POINTER_REGNUM
);
4500 /* No frame pointer, and stack is smaller than 8k. */
4501 if (VAL_14_BITS_P (ret_off
- actual_fsize
))
4503 load_reg (2, ret_off
- actual_fsize
, STACK_POINTER_REGNUM
);
4509 /* General register restores. */
4510 if (frame_pointer_needed
)
4512 offset
= local_fsize
;
4514 /* If the current function calls __builtin_eh_return, then we need
4515 to restore the saved EH data registers. */
4516 if (DO_FRAME_NOTES
&& crtl
->calls_eh_return
)
4518 unsigned int i
, regno
;
4522 regno
= EH_RETURN_DATA_REGNO (i
);
4523 if (regno
== INVALID_REGNUM
)
4526 load_reg (regno
, offset
, HARD_FRAME_POINTER_REGNUM
);
4527 offset
+= UNITS_PER_WORD
;
4531 for (i
= 18; i
>= 4; i
--)
4532 if (df_regs_ever_live_p (i
) && !call_used_or_fixed_reg_p (i
))
4534 load_reg (i
, offset
, HARD_FRAME_POINTER_REGNUM
);
4535 offset
+= UNITS_PER_WORD
;
4540 offset
= local_fsize
- actual_fsize
;
4542 /* If the current function calls __builtin_eh_return, then we need
4543 to restore the saved EH data registers. */
4544 if (DO_FRAME_NOTES
&& crtl
->calls_eh_return
)
4546 unsigned int i
, regno
;
4550 regno
= EH_RETURN_DATA_REGNO (i
);
4551 if (regno
== INVALID_REGNUM
)
4554 /* Only for the first load.
4555 merge_sp_adjust_with_load holds the register load
4556 with which we will merge the sp adjustment. */
4557 if (merge_sp_adjust_with_load
== 0
4559 && VAL_14_BITS_P (-actual_fsize
))
4560 merge_sp_adjust_with_load
= regno
;
4562 load_reg (regno
, offset
, STACK_POINTER_REGNUM
);
4563 offset
+= UNITS_PER_WORD
;
4567 for (i
= 18; i
>= 3; i
--)
4569 if (df_regs_ever_live_p (i
) && !call_used_or_fixed_reg_p (i
))
4571 /* Only for the first load.
4572 merge_sp_adjust_with_load holds the register load
4573 with which we will merge the sp adjustment. */
4574 if (merge_sp_adjust_with_load
== 0
4576 && VAL_14_BITS_P (-actual_fsize
))
4577 merge_sp_adjust_with_load
= i
;
4579 load_reg (i
, offset
, STACK_POINTER_REGNUM
);
4580 offset
+= UNITS_PER_WORD
;
4585 /* Align pointer properly (doubleword boundary). */
4586 offset
= (offset
+ 7) & ~7;
4588 /* FP register restores. */
4591 /* Adjust the register to index off of. */
4592 if (frame_pointer_needed
)
4593 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM
, offset
, 0);
4595 set_reg_plus_d (1, STACK_POINTER_REGNUM
, offset
, 0);
4597 /* Actually do the restores now. */
4598 for (i
= FP_SAVED_REG_LAST
; i
>= FP_SAVED_REG_FIRST
; i
-= FP_REG_STEP
)
4599 if (df_regs_ever_live_p (i
)
4600 || (! TARGET_64BIT
&& df_regs_ever_live_p (i
+ 1)))
4602 rtx src
= gen_rtx_MEM (DFmode
,
4603 gen_rtx_POST_INC (word_mode
, tmpreg
));
4604 rtx dest
= gen_rtx_REG (DFmode
, i
);
4605 emit_move_insn (dest
, src
);
4609 /* Emit a blockage insn here to keep these insns from being moved to
4610 an earlier spot in the epilogue, or into the main instruction stream.
4612 This is necessary as we must not cut the stack back before all the
4613 restores are finished. */
4614 emit_insn (gen_blockage ());
4616 /* Reset stack pointer (and possibly frame pointer). The stack
4617 pointer is initially set to fp + 64 to avoid a race condition. */
4618 if (frame_pointer_needed
)
4620 rtx delta
= GEN_INT (-64);
4622 set_reg_plus_d (STACK_POINTER_REGNUM
, HARD_FRAME_POINTER_REGNUM
, 64, 0);
4623 emit_insn (gen_pre_load (hard_frame_pointer_rtx
,
4624 stack_pointer_rtx
, delta
));
4626 /* If we were deferring a callee register restore, do it now. */
4627 else if (merge_sp_adjust_with_load
)
4629 rtx delta
= GEN_INT (-actual_fsize
);
4630 rtx dest
= gen_rtx_REG (word_mode
, merge_sp_adjust_with_load
);
4632 emit_insn (gen_pre_load (dest
, stack_pointer_rtx
, delta
));
4634 else if (actual_fsize
!= 0)
4635 set_reg_plus_d (STACK_POINTER_REGNUM
, STACK_POINTER_REGNUM
,
4638 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4639 frame greater than 8k), do so now. */
4641 load_reg (2, ret_off
, STACK_POINTER_REGNUM
);
4643 if (DO_FRAME_NOTES
&& crtl
->calls_eh_return
)
4645 rtx sa
= EH_RETURN_STACKADJ_RTX
;
4647 emit_insn (gen_blockage ());
4648 emit_insn (TARGET_64BIT
4649 ? gen_subdi3 (stack_pointer_rtx
, stack_pointer_rtx
, sa
)
4650 : gen_subsi3 (stack_pointer_rtx
, stack_pointer_rtx
, sa
));
4655 pa_can_use_return_insn (void)
4657 if (!reload_completed
)
4660 if (frame_pointer_needed
)
4663 if (df_regs_ever_live_p (2))
4669 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4673 hppa_pic_save_rtx (void)
4675 return get_hard_reg_initial_val (word_mode
, PIC_OFFSET_TABLE_REGNUM
);
4678 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4679 #define NO_DEFERRED_PROFILE_COUNTERS 0
4683 /* Vector of funcdef numbers. */
4684 static vec
<int> funcdef_nos
;
4686 /* Output deferred profile counters. */
4688 output_deferred_profile_counters (void)
4693 if (funcdef_nos
.is_empty ())
4696 switch_to_section (data_section
);
4697 align
= MIN (BIGGEST_ALIGNMENT
, LONG_TYPE_SIZE
);
4698 ASM_OUTPUT_ALIGN (asm_out_file
, floor_log2 (align
/ BITS_PER_UNIT
));
4700 for (i
= 0; funcdef_nos
.iterate (i
, &n
); i
++)
4702 targetm
.asm_out
.internal_label (asm_out_file
, "LP", n
);
4703 assemble_integer (const0_rtx
, LONG_TYPE_SIZE
/ BITS_PER_UNIT
, align
, 1);
4706 funcdef_nos
.release ();
4710 hppa_profile_hook (int label_no
)
4712 rtx_code_label
*label_rtx
= gen_label_rtx ();
4713 int reg_parm_stack_space
= REG_PARM_STACK_SPACE (NULL_TREE
);
4714 rtx arg_bytes
, begin_label_rtx
, mcount
, sym
;
4715 rtx_insn
*call_insn
;
4716 char begin_label_name
[16];
4717 bool use_mcount_pcrel_call
;
4719 /* Set up call destination. */
4720 sym
= gen_rtx_SYMBOL_REF (Pmode
, "_mcount");
4721 pa_encode_label (sym
);
4722 mcount
= gen_rtx_MEM (Pmode
, sym
);
4724 /* If we can reach _mcount with a pc-relative call, we can optimize
4725 loading the address of the current function. This requires linker
4726 long branch stub support. */
4727 if (!TARGET_PORTABLE_RUNTIME
4728 && !TARGET_LONG_CALLS
4729 && (TARGET_SOM
|| flag_function_sections
))
4730 use_mcount_pcrel_call
= TRUE
;
4732 use_mcount_pcrel_call
= FALSE
;
4734 ASM_GENERATE_INTERNAL_LABEL (begin_label_name
, FUNC_BEGIN_PROLOG_LABEL
,
4736 begin_label_rtx
= gen_rtx_SYMBOL_REF (SImode
, ggc_strdup (begin_label_name
));
4738 emit_move_insn (gen_rtx_REG (word_mode
, 26), gen_rtx_REG (word_mode
, 2));
4740 if (!use_mcount_pcrel_call
)
4742 /* The address of the function is loaded into %r25 with an instruction-
4743 relative sequence that avoids the use of relocations. We use SImode
4744 for the address of the function in both 32 and 64-bit code to avoid
4745 having to provide DImode versions of the lcla2 pattern. */
4747 emit_insn (gen_lcla2 (gen_rtx_REG (SImode
, 25), label_rtx
));
4749 emit_insn (gen_lcla1 (gen_rtx_REG (SImode
, 25), label_rtx
));
4752 if (!NO_DEFERRED_PROFILE_COUNTERS
)
4754 rtx count_label_rtx
, addr
, r24
;
4755 char count_label_name
[16];
4757 funcdef_nos
.safe_push (label_no
);
4758 ASM_GENERATE_INTERNAL_LABEL (count_label_name
, "LP", label_no
);
4759 count_label_rtx
= gen_rtx_SYMBOL_REF (Pmode
,
4760 ggc_strdup (count_label_name
));
4762 addr
= force_reg (Pmode
, count_label_rtx
);
4763 r24
= gen_rtx_REG (Pmode
, 24);
4764 emit_move_insn (r24
, addr
);
4766 arg_bytes
= GEN_INT (TARGET_64BIT
? 24 : 12);
4767 if (use_mcount_pcrel_call
)
4768 call_insn
= emit_call_insn (gen_call_mcount (mcount
, arg_bytes
,
4771 call_insn
= emit_call_insn (gen_call (mcount
, arg_bytes
));
4773 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
), r24
);
4777 arg_bytes
= GEN_INT (TARGET_64BIT
? 16 : 8);
4778 if (use_mcount_pcrel_call
)
4779 call_insn
= emit_call_insn (gen_call_mcount (mcount
, arg_bytes
,
4782 call_insn
= emit_call_insn (gen_call (mcount
, arg_bytes
));
4785 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
), gen_rtx_REG (SImode
, 25));
4786 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
), gen_rtx_REG (SImode
, 26));
4788 /* Indicate the _mcount call cannot throw, nor will it execute a
4790 make_reg_eh_region_note_nothrow_nononlocal (call_insn
);
4792 /* Allocate space for fixed arguments. */
4793 if (reg_parm_stack_space
> crtl
->outgoing_args_size
)
4794 crtl
->outgoing_args_size
= reg_parm_stack_space
;
4797 /* Fetch the return address for the frame COUNT steps up from
4798 the current frame, after the prologue. FRAMEADDR is the
4799 frame pointer of the COUNT frame.
4801 We want to ignore any export stub remnants here. To handle this,
4802 we examine the code at the return address, and if it is an export
4803 stub, we return a memory rtx for the stub return address stored
4806 The value returned is used in two different ways:
4808 1. To find a function's caller.
4810 2. To change the return address for a function.
4812 This function handles most instances of case 1; however, it will
4813 fail if there are two levels of stubs to execute on the return
4814 path. The only way I believe that can happen is if the return value
4815 needs a parameter relocation, which never happens for C code.
4817 This function handles most instances of case 2; however, it will
4818 fail if we did not originally have stub code on the return path
4819 but will need stub code on the new return path. This can happen if
4820 the caller & callee are both in the main program, but the new
4821 return location is in a shared library. */
4824 pa_return_addr_rtx (int count
, rtx frameaddr
)
  /* The instruction stream at the return address of a PA1.X export stub is:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0x004010a1 | stub+12:  ldsid (sr0,rp),r1
	0x00011820 | stub+16:  mtsp r1,sr0
	0xe0400002 | stub+20:  be,n 0(sr0,rp)

     0xe0400002 must be specified as -532676606 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.

     The instruction stream at the return address of a PA2.0 export stub is:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0xe840d002 | stub+12:  bve,n (rp)  */

  HOST_WIDE_INT insns[4];
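  /* Why the negative constants: the stub opcodes have the sign bit set,
     so as 32-bit two's-complement values 0xe0400002 is
     3762290690 - 2^32 == -532676606 and 0xe840d002 is -398405630;
     writing them this way keeps them valid immediates on 64-bit hosts.  */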
4853 rp
= get_hard_reg_initial_val (Pmode
, 2);
4855 if (TARGET_64BIT
|| TARGET_NO_SPACE_REGS
)
4858 /* If there is no export stub then just use the value saved from
4859 the return pointer register. */
4861 saved_rp
= gen_reg_rtx (Pmode
);
4862 emit_move_insn (saved_rp
, rp
);
4864 /* Get pointer to the instruction stream. We have to mask out the
4865 privilege level from the two low order bits of the return address
4866 pointer here so that ins will point to the start of the first
4867 instruction that would have been executed if we returned. */
4868 ins
= copy_to_reg (gen_rtx_AND (Pmode
, rp
, MASK_RETURN_ADDR
));
4869 label
= gen_label_rtx ();
4873 insns
[0] = 0x4bc23fd1;
4874 insns
[1] = -398405630;
4879 insns
[0] = 0x4bc23fd1;
4880 insns
[1] = 0x004010a1;
4881 insns
[2] = 0x00011820;
4882 insns
[3] = -532676606;
  /* Check the instruction stream at the normal return address for the
     export stub.  If it is an export stub, then our return address is
     really in -24[frameaddr].  */
4890 for (i
= 0; i
< len
; i
++)
4892 rtx op0
= gen_rtx_MEM (SImode
, plus_constant (Pmode
, ins
, i
* 4));
4893 rtx op1
= GEN_INT (insns
[i
]);
4894 emit_cmp_and_jump_insns (op0
, op1
, NE
, NULL
, SImode
, 0, label
);
4897 /* Here we know that our return address points to an export
4898 stub. We don't want to return the address of the export stub,
4899 but rather the return address of the export stub. That return
4900 address is stored at -24[frameaddr]. */
4902 emit_move_insn (saved_rp
,
4904 memory_address (Pmode
,
4905 plus_constant (Pmode
, frameaddr
,
4914 pa_emit_bcond_fp (rtx operands
[])
4916 enum rtx_code code
= GET_CODE (operands
[0]);
4917 rtx operand0
= operands
[1];
4918 rtx operand1
= operands
[2];
4919 rtx label
= operands
[3];
4921 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode
, 0),
4922 gen_rtx_fmt_ee (code
, CCFPmode
, operand0
, operand1
)));
4924 emit_jump_insn (gen_rtx_SET (pc_rtx
,
4925 gen_rtx_IF_THEN_ELSE (VOIDmode
,
4928 gen_rtx_REG (CCFPmode
, 0),
4930 gen_rtx_LABEL_REF (VOIDmode
, label
),
4935 /* Adjust the cost of a scheduling dependency. Return the new cost of
4936 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4939 pa_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
, int cost
,
4942 enum attr_type attr_type
;
4944 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4945 true dependencies as they are described with bypasses now. */
4946 if (pa_cpu
>= PROCESSOR_8000
|| dep_type
== 0)
4949 if (! recog_memoized (insn
))
4952 attr_type
= get_attr_type (insn
);
4957 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4960 if (attr_type
== TYPE_FPLOAD
)
4962 rtx pat
= PATTERN (insn
);
4963 rtx dep_pat
= PATTERN (dep_insn
);
4964 if (GET_CODE (pat
) == PARALLEL
)
4966 /* This happens for the fldXs,mb patterns. */
4967 pat
= XVECEXP (pat
, 0, 0);
4969 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
4970 /* If this happens, we have to extend this to schedule
4971 optimally. Return 0 for now. */
4974 if (reg_mentioned_p (SET_DEST (pat
), SET_SRC (dep_pat
)))
4976 if (! recog_memoized (dep_insn
))
4978 switch (get_attr_type (dep_insn
))
4985 case TYPE_FPSQRTSGL
:
4986 case TYPE_FPSQRTDBL
:
4987 /* A fpload can't be issued until one cycle before a
4988 preceding arithmetic operation has finished if
4989 the target of the fpload is any of the sources
4990 (or destination) of the arithmetic operation. */
4991 return insn_default_latency (dep_insn
) - 1;
4998 else if (attr_type
== TYPE_FPALU
)
5000 rtx pat
= PATTERN (insn
);
5001 rtx dep_pat
= PATTERN (dep_insn
);
5002 if (GET_CODE (pat
) == PARALLEL
)
5004 /* This happens for the fldXs,mb patterns. */
5005 pat
= XVECEXP (pat
, 0, 0);
5007 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
5008 /* If this happens, we have to extend this to schedule
5009 optimally. Return 0 for now. */
5012 if (reg_mentioned_p (SET_DEST (pat
), SET_SRC (dep_pat
)))
5014 if (! recog_memoized (dep_insn
))
5016 switch (get_attr_type (dep_insn
))
5020 case TYPE_FPSQRTSGL
:
5021 case TYPE_FPSQRTDBL
:
5022 /* An ALU flop can't be issued until two cycles before a
5023 preceding divide or sqrt operation has finished if
5024 the target of the ALU flop is any of the sources
5025 (or destination) of the divide or sqrt operation. */
5026 return insn_default_latency (dep_insn
) - 2;
5034 /* For other anti dependencies, the cost is 0. */
5037 case REG_DEP_OUTPUT
:
5038 /* Output dependency; DEP_INSN writes a register that INSN writes some
5040 if (attr_type
== TYPE_FPLOAD
)
5042 rtx pat
= PATTERN (insn
);
5043 rtx dep_pat
= PATTERN (dep_insn
);
5044 if (GET_CODE (pat
) == PARALLEL
)
5046 /* This happens for the fldXs,mb patterns. */
5047 pat
= XVECEXP (pat
, 0, 0);
5049 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
5050 /* If this happens, we have to extend this to schedule
5051 optimally. Return 0 for now. */
5054 if (reg_mentioned_p (SET_DEST (pat
), SET_DEST (dep_pat
)))
5056 if (! recog_memoized (dep_insn
))
5058 switch (get_attr_type (dep_insn
))
5065 case TYPE_FPSQRTSGL
:
5066 case TYPE_FPSQRTDBL
:
5067 /* A fpload can't be issued until one cycle before a
5068 preceding arithmetic operation has finished if
5069 the target of the fpload is the destination of the
5070 arithmetic operation.
5072 Exception: For PA7100LC, PA7200 and PA7300, the cost
5073 is 3 cycles, unless they bundle together. We also
5074 pay the penalty if the second insn is a fpload. */
5075 return insn_default_latency (dep_insn
) - 1;
5082 else if (attr_type
== TYPE_FPALU
)
5084 rtx pat
= PATTERN (insn
);
5085 rtx dep_pat
= PATTERN (dep_insn
);
5086 if (GET_CODE (pat
) == PARALLEL
)
5088 /* This happens for the fldXs,mb patterns. */
5089 pat
= XVECEXP (pat
, 0, 0);
5091 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
5092 /* If this happens, we have to extend this to schedule
5093 optimally. Return 0 for now. */
5096 if (reg_mentioned_p (SET_DEST (pat
), SET_DEST (dep_pat
)))
5098 if (! recog_memoized (dep_insn
))
5100 switch (get_attr_type (dep_insn
))
5104 case TYPE_FPSQRTSGL
:
5105 case TYPE_FPSQRTDBL
:
5106 /* An ALU flop can't be issued until two cycles before a
5107 preceding divide or sqrt operation has finished if
5108 the target of the ALU flop is also the target of
5109 the divide or sqrt operation. */
5110 return insn_default_latency (dep_insn
) - 2;
5118 /* For other output dependencies, the cost is 0. */
/* The 700 can only issue a single insn at a time.
   The 7XXX processors can issue two insns at a time.
   The 8000 can issue 4 insns at a time.  */
static int
pa_issue_rate (void)
{
  switch (pa_cpu)
    {
    case PROCESSOR_700:		return 1;
    case PROCESSOR_7100:	return 2;
    case PROCESSOR_7100LC:	return 2;
    case PROCESSOR_7200:	return 2;
    case PROCESSOR_7300:	return 2;
    case PROCESSOR_8000:	return 4;

    default:
      gcc_unreachable ();
    }
}
5148 /* Return any length plus adjustment needed by INSN which already has
5149 its length computed as LENGTH. Return LENGTH if no adjustment is
5152 Also compute the length of an inline block move here as it is too
5153 complicated to express as a length attribute in pa.md. */
5155 pa_adjust_insn_length (rtx_insn
*insn
, int length
)
5157 rtx pat
= PATTERN (insn
);
5159 /* If length is negative or undefined, provide initial length. */
5160 if ((unsigned int) length
>= INT_MAX
)
5162 if (GET_CODE (pat
) == SEQUENCE
)
5163 insn
= as_a
<rtx_insn
*> (XVECEXP (pat
, 0, 0));
5165 switch (get_attr_type (insn
))
5168 length
= pa_attr_length_millicode_call (insn
);
5171 length
= pa_attr_length_call (insn
, 0);
5174 length
= pa_attr_length_call (insn
, 1);
5177 length
= pa_attr_length_indirect_call (insn
);
5179 case TYPE_SH_FUNC_ADRS
:
5180 length
= pa_attr_length_millicode_call (insn
) + 20;
5187 /* Block move pattern. */
5188 if (NONJUMP_INSN_P (insn
)
5189 && GET_CODE (pat
) == PARALLEL
5190 && GET_CODE (XVECEXP (pat
, 0, 0)) == SET
5191 && GET_CODE (XEXP (XVECEXP (pat
, 0, 0), 0)) == MEM
5192 && GET_CODE (XEXP (XVECEXP (pat
, 0, 0), 1)) == MEM
5193 && GET_MODE (XEXP (XVECEXP (pat
, 0, 0), 0)) == BLKmode
5194 && GET_MODE (XEXP (XVECEXP (pat
, 0, 0), 1)) == BLKmode
)
5195 length
+= compute_cpymem_length (insn
) - 4;
5196 /* Block clear pattern. */
5197 else if (NONJUMP_INSN_P (insn
)
5198 && GET_CODE (pat
) == PARALLEL
5199 && GET_CODE (XVECEXP (pat
, 0, 0)) == SET
5200 && GET_CODE (XEXP (XVECEXP (pat
, 0, 0), 0)) == MEM
5201 && XEXP (XVECEXP (pat
, 0, 0), 1) == const0_rtx
5202 && GET_MODE (XEXP (XVECEXP (pat
, 0, 0), 0)) == BLKmode
)
5203 length
+= compute_clrmem_length (insn
) - 4;
5204 /* Conditional branch with an unfilled delay slot. */
5205 else if (JUMP_P (insn
) && ! simplejump_p (insn
))
5207 /* Adjust a short backwards conditional with an unfilled delay slot. */
5208 if (GET_CODE (pat
) == SET
5210 && JUMP_LABEL (insn
) != NULL_RTX
5211 && ! forward_branch_p (insn
))
5213 else if (GET_CODE (pat
) == PARALLEL
5214 && get_attr_type (insn
) == TYPE_PARALLEL_BRANCH
  /* Adjust dbra insn with short backwards conditional branch with
     unfilled delay slot -- only for case where counter is in a
     general register.  */
5220 else if (GET_CODE (pat
) == PARALLEL
5221 && GET_CODE (XVECEXP (pat
, 0, 1)) == SET
5222 && GET_CODE (XEXP (XVECEXP (pat
, 0, 1), 0)) == REG
5223 && ! FP_REG_P (XEXP (XVECEXP (pat
, 0, 1), 0))
5225 && ! forward_branch_p (insn
))
5231 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5234 pa_print_operand_punct_valid_p (unsigned char code
)
5245 /* Print operand X (an rtx) in assembler syntax to file FILE.
5246 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5247 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5250 pa_print_operand (FILE *file
, rtx x
, int code
)
5255 /* Output a 'nop' if there's nothing for the delay slot. */
5256 if (dbr_sequence_length () == 0)
5257 fputs ("\n\tnop", file
);
5260 /* Output a nullification completer if there's nothing for the */
5261 /* delay slot or nullification is requested. */
5262 if (dbr_sequence_length () == 0 ||
5264 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence
, 0, 0))))
5268 /* Print out the second register name of a register pair.
5269 I.e., R (6) => 7. */
5270 fputs (reg_names
[REGNO (x
) + 1], file
);
5273 /* A register or zero. */
5275 || (x
== CONST0_RTX (DFmode
))
5276 || (x
== CONST0_RTX (SFmode
)))
5278 fputs ("%r0", file
);
5284 /* A register or zero (floating point). */
5286 || (x
== CONST0_RTX (DFmode
))
5287 || (x
== CONST0_RTX (SFmode
)))
5289 fputs ("%fr0", file
);
5298 xoperands
[0] = XEXP (XEXP (x
, 0), 0);
5299 xoperands
[1] = XVECEXP (XEXP (XEXP (x
, 0), 1), 0, 0);
5300 pa_output_global_address (file
, xoperands
[1], 0);
5301 fprintf (file
, "(%s)", reg_names
[REGNO (xoperands
[0])]);
5305 case 'C': /* Plain (C)ondition */
5307 switch (GET_CODE (x
))
5310 fputs ("=", file
); break;
5312 fputs ("<>", file
); break;
5314 fputs (">", file
); break;
5316 fputs (">=", file
); break;
5318 fputs (">>=", file
); break;
5320 fputs (">>", file
); break;
5322 fputs ("<", file
); break;
5324 fputs ("<=", file
); break;
5326 fputs ("<<=", file
); break;
5328 fputs ("<<", file
); break;
5333 case 'N': /* Condition, (N)egated */
5334 switch (GET_CODE (x
))
5337 fputs ("<>", file
); break;
5339 fputs ("=", file
); break;
5341 fputs ("<=", file
); break;
5343 fputs ("<", file
); break;
5345 fputs ("<<", file
); break;
5347 fputs ("<<=", file
); break;
5349 fputs (">=", file
); break;
5351 fputs (">", file
); break;
5353 fputs (">>", file
); break;
5355 fputs (">>=", file
); break;
5360 /* For floating point comparisons. Note that the output
5361 predicates are the complement of the desired mode. The
5362 conditions for GT, GE, LT, LE and LTGT cause an invalid
5363 operation exception if the result is unordered and this
5364 exception is enabled in the floating-point status register. */
5366 switch (GET_CODE (x
))
5369 fputs ("!=", file
); break;
5371 fputs ("=", file
); break;
5373 fputs ("!>", file
); break;
5375 fputs ("!>=", file
); break;
5377 fputs ("!<", file
); break;
5379 fputs ("!<=", file
); break;
5381 fputs ("!<>", file
); break;
5383 fputs ("!?<=", file
); break;
5385 fputs ("!?<", file
); break;
5387 fputs ("!?>=", file
); break;
5389 fputs ("!?>", file
); break;
5391 fputs ("!?=", file
); break;
5393 fputs ("!?", file
); break;
5395 fputs ("?", file
); break;
5400 case 'S': /* Condition, operands are (S)wapped. */
5401 switch (GET_CODE (x
))
5404 fputs ("=", file
); break;
5406 fputs ("<>", file
); break;
5408 fputs ("<", file
); break;
5410 fputs ("<=", file
); break;
5412 fputs ("<<=", file
); break;
5414 fputs ("<<", file
); break;
5416 fputs (">", file
); break;
5418 fputs (">=", file
); break;
5420 fputs (">>=", file
); break;
5422 fputs (">>", file
); break;
5427 case 'B': /* Condition, (B)oth swapped and negate. */
5428 switch (GET_CODE (x
))
5431 fputs ("<>", file
); break;
5433 fputs ("=", file
); break;
5435 fputs (">=", file
); break;
5437 fputs (">", file
); break;
5439 fputs (">>", file
); break;
5441 fputs (">>=", file
); break;
5443 fputs ("<=", file
); break;
5445 fputs ("<", file
); break;
5447 fputs ("<<", file
); break;
5449 fputs ("<<=", file
); break;
5455 gcc_assert (GET_CODE (x
) == CONST_INT
);
5456 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~INTVAL (x
));
5459 gcc_assert (GET_CODE (x
) == CONST_INT
);
5460 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 64 - (INTVAL (x
) & 63));
5463 gcc_assert (GET_CODE (x
) == CONST_INT
);
5464 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 32 - (INTVAL (x
) & 31));
5467 gcc_assert (GET_CODE (x
) == CONST_INT
5468 && (INTVAL (x
) == 1 || INTVAL (x
) == 2 || INTVAL (x
) == 3));
5469 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
));
5472 gcc_assert (GET_CODE (x
) == CONST_INT
&& exact_log2 (INTVAL (x
)) >= 0);
5473 fprintf (file
, "%d", exact_log2 (INTVAL (x
)));
5476 gcc_assert (GET_CODE (x
) == CONST_INT
);
5477 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 63 - (INTVAL (x
) & 63));
5480 gcc_assert (GET_CODE (x
) == CONST_INT
);
5481 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, 31 - (INTVAL (x
) & 31));
5484 if (GET_CODE (x
) == CONST_INT
)
5489 switch (GET_CODE (XEXP (x
, 0)))
5493 if (ASSEMBLER_DIALECT
== 0)
5494 fputs ("s,mb", file
);
5496 fputs (",mb", file
);
5500 if (ASSEMBLER_DIALECT
== 0)
5501 fputs ("s,ma", file
);
5503 fputs (",ma", file
);
5506 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
5507 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == REG
)
5509 if (ASSEMBLER_DIALECT
== 0)
5512 else if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
5513 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == MULT
)
5515 if (ASSEMBLER_DIALECT
== 0)
5516 fputs ("x,s", file
);
5520 else if (code
== 'F' && ASSEMBLER_DIALECT
== 0)
5524 if (code
== 'F' && ASSEMBLER_DIALECT
== 0)
5530 pa_output_global_address (file
, x
, 0);
5533 pa_output_global_address (file
, x
, 1);
5535 case 0: /* Don't do anything special */
5540 compute_zdepwi_operands (INTVAL (x
), op
);
5541 fprintf (file
, "%d,%d,%d", op
[0], op
[1], op
[2]);
5547 compute_zdepdi_operands (INTVAL (x
), op
);
5548 fprintf (file
, "%d,%d,%d", op
[0], op
[1], op
[2]);
5552 /* We can get here from a .vtable_inherit due to our
5553 CONSTANT_ADDRESS_P rejecting perfectly good constant
5559 if (GET_CODE (x
) == REG
)
5561 fputs (reg_names
[REGNO (x
)], file
);
5562 if (TARGET_64BIT
&& FP_REG_P (x
) && GET_MODE_SIZE (GET_MODE (x
)) <= 4)
5568 && GET_MODE_SIZE (GET_MODE (x
)) <= 4
5569 && (REGNO (x
) & 1) == 0)
5572 else if (GET_CODE (x
) == MEM
)
5574 int size
= GET_MODE_SIZE (GET_MODE (x
));
5575 rtx base
= NULL_RTX
;
5576 switch (GET_CODE (XEXP (x
, 0)))
5580 base
= XEXP (XEXP (x
, 0), 0);
5581 fprintf (file
, "-%d(%s)", size
, reg_names
[REGNO (base
)]);
5585 base
= XEXP (XEXP (x
, 0), 0);
5586 fprintf (file
, "%d(%s)", size
, reg_names
[REGNO (base
)]);
5589 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
)
5590 fprintf (file
, "%s(%s)",
5591 reg_names
[REGNO (XEXP (XEXP (XEXP (x
, 0), 0), 0))],
5592 reg_names
[REGNO (XEXP (XEXP (x
, 0), 1))]);
5593 else if (GET_CODE (XEXP (XEXP (x
, 0), 1)) == MULT
)
5594 fprintf (file
, "%s(%s)",
5595 reg_names
[REGNO (XEXP (XEXP (XEXP (x
, 0), 1), 0))],
5596 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
5597 else if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
5598 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == REG
)
5600 /* Because the REG_POINTER flag can get lost during reload,
5601 pa_legitimate_address_p canonicalizes the order of the
5602 index and base registers in the combined move patterns. */
5603 rtx base
= XEXP (XEXP (x
, 0), 1);
5604 rtx index
= XEXP (XEXP (x
, 0), 0);
5606 fprintf (file
, "%s(%s)",
5607 reg_names
[REGNO (index
)], reg_names
[REGNO (base
)]);
5610 output_address (GET_MODE (x
), XEXP (x
, 0));
5613 output_address (GET_MODE (x
), XEXP (x
, 0));
5618 output_addr_const (file
, x
);
5621 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5624 pa_output_global_address (FILE *file
, rtx x
, int round_constant
)
5627 /* Imagine (high (const (plus ...))). */
5628 if (GET_CODE (x
) == HIGH
)
5631 if (GET_CODE (x
) == SYMBOL_REF
&& read_only_operand (x
, VOIDmode
))
5632 output_addr_const (file
, x
);
5633 else if (GET_CODE (x
) == SYMBOL_REF
&& !flag_pic
)
5635 output_addr_const (file
, x
);
5636 fputs ("-$global$", file
);
5638 else if (GET_CODE (x
) == CONST
)
5640 const char *sep
= "";
5641 int offset
= 0; /* assembler wants -$global$ at end */
5642 rtx base
= NULL_RTX
;
5644 switch (GET_CODE (XEXP (XEXP (x
, 0), 0)))
5648 base
= XEXP (XEXP (x
, 0), 0);
5649 output_addr_const (file
, base
);
5652 offset
= INTVAL (XEXP (XEXP (x
, 0), 0));
5658 switch (GET_CODE (XEXP (XEXP (x
, 0), 1)))
5662 base
= XEXP (XEXP (x
, 0), 1);
5663 output_addr_const (file
, base
);
5666 offset
= INTVAL (XEXP (XEXP (x
, 0), 1));
      /* How bogus.  The compiler is apparently responsible for
	 rounding the constant if it uses an LR field selector.

	 The linker and/or assembler seem a better place since
	 they have to do this kind of thing already.

	 If we fail to do this, HP's optimizing linker may eliminate
	 an addil, but not update the ldw/stw/ldo instruction that
	 uses the result of the addil.  */
      offset = ((offset + 0x1000) & ~0x1fff);
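      /* The expression rounds the offset to the nearest 8 KB (0x2000)
	 boundary: e.g. an offset of 0x1801 becomes 0x2000, while 0x0fff
	 becomes 0.  */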
5684 switch (GET_CODE (XEXP (x
, 0)))
5697 gcc_assert (GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
);
5705 if (!read_only_operand (base
, VOIDmode
) && !flag_pic
)
5706 fputs ("-$global$", file
);
5708 fprintf (file
, "%s%d", sep
, offset
);
5711 output_addr_const (file
, x
);
/* Output boilerplate text to appear at the beginning of the file.
   There are several possible versions.  */
#define aputs(x) fputs(x, asm_out_file)

static inline void
pa_file_start_level (void)
{
  if (TARGET_64BIT)
    aputs ("\t.LEVEL 2.0w\n");
  else if (TARGET_PA_20)
    aputs ("\t.LEVEL 2.0\n");
  else if (TARGET_PA_11)
    aputs ("\t.LEVEL 1.1\n");
  else
    aputs ("\t.LEVEL 1.0\n");
}
5731 pa_file_start_space (int sortspace
)
5733 aputs ("\t.SPACE $PRIVATE$");
5736 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5738 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5739 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5740 "\n\t.SPACE $TEXT$");
5743 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5744 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5748 pa_file_start_file (int want_version
)
5750 if (write_symbols
!= NO_DEBUG
)
5752 output_file_directive (asm_out_file
, main_input_filename
);
5754 aputs ("\t.version\t\"01.01\"\n");
5759 pa_file_start_mcount (const char *aswhat
)
5762 fprintf (asm_out_file
, "\t.IMPORT _mcount,%s\n", aswhat
);
5766 pa_elf_file_start (void)
5768 pa_file_start_level ();
5769 pa_file_start_mcount ("ENTRY");
5770 pa_file_start_file (0);
5774 pa_som_file_start (void)
5776 pa_file_start_level ();
5777 pa_file_start_space (0);
5778 aputs ("\t.IMPORT $global$,DATA\n"
5779 "\t.IMPORT $$dyncall,MILLICODE\n");
5780 pa_file_start_mcount ("CODE");
5781 pa_file_start_file (0);
5785 pa_linux_file_start (void)
5787 pa_file_start_file (1);
5788 pa_file_start_level ();
5789 pa_file_start_mcount ("CODE");
5793 pa_hpux64_gas_file_start (void)
5795 pa_file_start_level ();
5796 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5798 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file
, "_mcount", "function");
5800 pa_file_start_file (1);
5804 pa_hpux64_hpas_file_start (void)
5806 pa_file_start_level ();
5807 pa_file_start_space (1);
5808 pa_file_start_mcount ("CODE");
5809 pa_file_start_file (0);
5813 /* Search the deferred plabel list for SYMBOL and return its internal
5814 label. If an entry for SYMBOL is not found, a new entry is created. */
5817 pa_get_deferred_plabel (rtx symbol
)
5819 const char *fname
= XSTR (symbol
, 0);
  /* See if we have already put this function on the list of deferred
     plabels.  This list is generally small, so a linear search is not
     too ugly.  If it proves too slow, replace it with something faster.  */
5825 for (i
= 0; i
< n_deferred_plabels
; i
++)
5826 if (strcmp (fname
, XSTR (deferred_plabels
[i
].symbol
, 0)) == 0)
5829 /* If the deferred plabel list is empty, or this entry was not found
5830 on the list, create a new entry on the list. */
5831 if (deferred_plabels
== NULL
|| i
== n_deferred_plabels
)
5835 if (deferred_plabels
== 0)
5836 deferred_plabels
= ggc_alloc
<deferred_plabel
> ();
5838 deferred_plabels
= GGC_RESIZEVEC (struct deferred_plabel
,
5840 n_deferred_plabels
+ 1);
5842 i
= n_deferred_plabels
++;
5843 deferred_plabels
[i
].internal_label
= gen_label_rtx ();
5844 deferred_plabels
[i
].symbol
= symbol
;
5846 /* Gross. We have just implicitly taken the address of this
5847 function. Mark it in the same manner as assemble_name. */
5848 id
= maybe_get_identifier (targetm
.strip_name_encoding (fname
));
5850 mark_referenced (id
);
5853 return deferred_plabels
[i
].internal_label
;
5857 output_deferred_plabels (void)
5861 /* If we have some deferred plabels, then we need to switch into the
5862 data or readonly data section, and align it to a 4 byte boundary
5863 before outputting the deferred plabels. */
5864 if (n_deferred_plabels
)
5866 switch_to_section (flag_pic
? data_section
: readonly_data_section
);
5867 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
5870 /* Now output the deferred plabels. */
5871 for (i
= 0; i
< n_deferred_plabels
; i
++)
5873 targetm
.asm_out
.internal_label (asm_out_file
, "L",
5874 CODE_LABEL_NUMBER (deferred_plabels
[i
].internal_label
));
5875 assemble_integer (deferred_plabels
[i
].symbol
,
5876 TARGET_64BIT
? 8 : 4, TARGET_64BIT
? 64 : 32, 1);
5880 /* Initialize optabs to point to emulation routines. */
5883 pa_init_libfuncs (void)
5885 if (HPUX_LONG_DOUBLE_LIBRARY
)
5887 set_optab_libfunc (add_optab
, TFmode
, "_U_Qfadd");
5888 set_optab_libfunc (sub_optab
, TFmode
, "_U_Qfsub");
5889 set_optab_libfunc (smul_optab
, TFmode
, "_U_Qfmpy");
5890 set_optab_libfunc (sdiv_optab
, TFmode
, "_U_Qfdiv");
5891 set_optab_libfunc (smin_optab
, TFmode
, "_U_Qmin");
5892 set_optab_libfunc (smax_optab
, TFmode
, "_U_Qfmax");
5893 set_optab_libfunc (sqrt_optab
, TFmode
, "_U_Qfsqrt");
5894 set_optab_libfunc (abs_optab
, TFmode
, "_U_Qfabs");
5895 set_optab_libfunc (neg_optab
, TFmode
, "_U_Qfneg");
5897 set_optab_libfunc (eq_optab
, TFmode
, "_U_Qfeq");
5898 set_optab_libfunc (ne_optab
, TFmode
, "_U_Qfne");
5899 set_optab_libfunc (gt_optab
, TFmode
, "_U_Qfgt");
5900 set_optab_libfunc (ge_optab
, TFmode
, "_U_Qfge");
5901 set_optab_libfunc (lt_optab
, TFmode
, "_U_Qflt");
5902 set_optab_libfunc (le_optab
, TFmode
, "_U_Qfle");
5903 set_optab_libfunc (unord_optab
, TFmode
, "_U_Qfunord");
5905 set_conv_libfunc (sext_optab
, TFmode
, SFmode
, "_U_Qfcnvff_sgl_to_quad");
5906 set_conv_libfunc (sext_optab
, TFmode
, DFmode
, "_U_Qfcnvff_dbl_to_quad");
5907 set_conv_libfunc (trunc_optab
, SFmode
, TFmode
, "_U_Qfcnvff_quad_to_sgl");
5908 set_conv_libfunc (trunc_optab
, DFmode
, TFmode
, "_U_Qfcnvff_quad_to_dbl");
5910 set_conv_libfunc (sfix_optab
, SImode
, TFmode
,
5911 TARGET_64BIT
? "__U_Qfcnvfxt_quad_to_sgl"
5912 : "_U_Qfcnvfxt_quad_to_sgl");
5913 set_conv_libfunc (sfix_optab
, DImode
, TFmode
,
5914 "_U_Qfcnvfxt_quad_to_dbl");
5915 set_conv_libfunc (ufix_optab
, SImode
, TFmode
,
5916 "_U_Qfcnvfxt_quad_to_usgl");
5917 set_conv_libfunc (ufix_optab
, DImode
, TFmode
,
5918 "_U_Qfcnvfxt_quad_to_udbl");
5920 set_conv_libfunc (sfloat_optab
, TFmode
, SImode
,
5921 "_U_Qfcnvxf_sgl_to_quad");
5922 set_conv_libfunc (sfloat_optab
, TFmode
, DImode
,
5923 "_U_Qfcnvxf_dbl_to_quad");
5924 set_conv_libfunc (ufloat_optab
, TFmode
, SImode
,
5925 "_U_Qfcnvxf_usgl_to_quad");
5926 set_conv_libfunc (ufloat_optab
, TFmode
, DImode
,
5927 "_U_Qfcnvxf_udbl_to_quad");
5930 if (TARGET_SYNC_LIBCALL
)
5931 init_sync_libfuncs (8);
/* HP's millicode routines mean something special to the assembler.
   Keep track of which ones we have used.  */

enum millicodes { remI, remU, divI, divU, mulI, end1000 };
static void import_milli (enum millicodes);
static char imported[(int) end1000];
static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
static const char import_string[] = ".IMPORT $$....,MILLICODE";
#define MILLI_START 10
static void
import_milli (enum millicodes code)
{
  char str[sizeof (import_string)];

  if (!imported[(int) code])
    {
      imported[(int) code] = 1;
      strcpy (str, import_string);
      memcpy (str + MILLI_START, milli_names[(int) code], 4);
      output_asm_insn (str, 0);
    }
}
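/* For example, the first call of import_milli (mulI) overwrites the four
   dots in import_string starting at MILLI_START (offset 10), so the
   directive actually emitted is ".IMPORT $$mulI,MILLICODE"; later calls
   for the same code are suppressed by the imported[] flag.  */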
/* The register constraints have put the operands and return value in
   the proper registers.  */

const char *
pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
{
  import_milli (mulI);
  return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}
/* Emit the rtl for doing a division by a constant.  */

/* Do magic division millicodes exist for this value? */
const int pa_magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
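/* Reading the table above: index n is 1 when the port assumes a magic
   $$divI_n/$$divU_n millicode routine exists, so constant divisors 3, 5,
   6, 7, 9, 10, 12, 14 and 15 get the special routines while 1, 2, 4, 8,
   11 and 13 do not.  */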
/* We'll use an array to keep track of the magic millicodes and
   whether or not we've used them already.  [n][0] is signed, [n][1] is
   unsigned.  */

static int div_milli[16][2];
5980 pa_emit_hpdiv_const (rtx
*operands
, int unsignedp
)
5982 if (GET_CODE (operands
[2]) == CONST_INT
5983 && INTVAL (operands
[2]) > 0
5984 && INTVAL (operands
[2]) < 16
5985 && pa_magic_milli
[INTVAL (operands
[2])])
5987 rtx ret
= gen_rtx_REG (SImode
, TARGET_64BIT
? 2 : 31);
5989 emit_move_insn (gen_rtx_REG (SImode
, 26), operands
[1]);
5993 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode
, 29),
5994 gen_rtx_fmt_ee (unsignedp
? UDIV
: DIV
,
5996 gen_rtx_REG (SImode
, 26),
5998 gen_rtx_CLOBBER (VOIDmode
, operands
[4]),
5999 gen_rtx_CLOBBER (VOIDmode
, operands
[3]),
6000 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 26)),
6001 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 25)),
6002 gen_rtx_CLOBBER (VOIDmode
, ret
))));
6003 emit_move_insn (operands
[0], gen_rtx_REG (SImode
, 29));
6010 pa_output_div_insn (rtx
*operands
, int unsignedp
, rtx_insn
*insn
)
6014 /* If the divisor is a constant, try to use one of the special
6016 if (GET_CODE (operands
[0]) == CONST_INT
)
6018 static char buf
[100];
6019 divisor
= INTVAL (operands
[0]);
6020 if (!div_milli
[divisor
][unsignedp
])
6022 div_milli
[divisor
][unsignedp
] = 1;
6024 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands
);
6026 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands
);
6030 sprintf (buf
, "$$divU_" HOST_WIDE_INT_PRINT_DEC
,
6031 INTVAL (operands
[0]));
6032 return pa_output_millicode_call (insn
,
6033 gen_rtx_SYMBOL_REF (SImode
, buf
));
6037 sprintf (buf
, "$$divI_" HOST_WIDE_INT_PRINT_DEC
,
6038 INTVAL (operands
[0]));
6039 return pa_output_millicode_call (insn
,
6040 gen_rtx_SYMBOL_REF (SImode
, buf
));
6043 /* Divisor isn't a special constant. */
6048 import_milli (divU
);
6049 return pa_output_millicode_call (insn
,
6050 gen_rtx_SYMBOL_REF (SImode
, "$$divU"));
6054 import_milli (divI
);
6055 return pa_output_millicode_call (insn
,
6056 gen_rtx_SYMBOL_REF (SImode
, "$$divI"));
/* Output a $$rem millicode to do mod.  */

const char *
pa_output_mod_insn (int unsignedp, rtx_insn *insn)
{
  if (unsignedp)
    {
      import_milli (remU);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remU"));
    }
  else
    {
      import_milli (remI);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remI"));
    }
}
6081 pa_output_arg_descriptor (rtx_insn
*call_insn
)
6083 const char *arg_regs
[4];
6084 machine_mode arg_mode
;
6086 int i
, output_flag
= 0;
6089 /* We neither need nor want argument location descriptors for the
6090 64bit runtime environment or the ELF32 environment. */
6091 if (TARGET_64BIT
|| TARGET_ELF32
)
6094 for (i
= 0; i
< 4; i
++)
6097 /* Specify explicitly that no argument relocations should take place
6098 if using the portable runtime calling conventions. */
6099 if (TARGET_PORTABLE_RUNTIME
)
6101 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
6106 gcc_assert (CALL_P (call_insn
));
6107 for (link
= CALL_INSN_FUNCTION_USAGE (call_insn
);
6108 link
; link
= XEXP (link
, 1))
6110 rtx use
= XEXP (link
, 0);
6112 if (! (GET_CODE (use
) == USE
6113 && GET_CODE (XEXP (use
, 0)) == REG
6114 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use
, 0)))))
6117 arg_mode
= GET_MODE (XEXP (use
, 0));
6118 regno
= REGNO (XEXP (use
, 0));
6119 if (regno
>= 23 && regno
<= 26)
6121 arg_regs
[26 - regno
] = "GR";
6122 if (arg_mode
== DImode
)
6123 arg_regs
[25 - regno
] = "GR";
6125 else if (regno
>= 32 && regno
<= 39)
6127 if (arg_mode
== SFmode
)
6128 arg_regs
[(regno
- 32) / 2] = "FR";
6131 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
6132 arg_regs
[(regno
- 34) / 2] = "FR";
6133 arg_regs
[(regno
- 34) / 2 + 1] = "FU";
6135 arg_regs
[(regno
- 34) / 2] = "FU";
6136 arg_regs
[(regno
- 34) / 2 + 1] = "FR";
6141 fputs ("\t.CALL ", asm_out_file
);
6142 for (i
= 0; i
< 4; i
++)
6147 fputc (',', asm_out_file
);
6148 fprintf (asm_out_file
, "ARGW%d=%s", i
, arg_regs
[i
]);
6151 fputc ('\n', asm_out_file
);
6154 /* Inform reload about cases where moving X with a mode MODE to or from
6155 a register in RCLASS requires an extra scratch or immediate register.
6156 Return the class needed for the immediate register. */
6159 pa_secondary_reload (bool in_p
, rtx x
, reg_class_t rclass_i
,
6160 machine_mode mode
, secondary_reload_info
*sri
)
6163 enum reg_class rclass
= (enum reg_class
) rclass_i
;
6165 /* Handle the easy stuff first. */
6166 if (rclass
== R1_REGS
)
6172 if (rclass
== BASE_REG_CLASS
&& regno
< FIRST_PSEUDO_REGISTER
)
6178 /* If we have something like (mem (mem (...)), we can safely assume the
6179 inner MEM will end up in a general register after reloading, so there's
6180 no need for a secondary reload. */
6181 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == MEM
)
6184 /* Trying to load a constant into a FP register during PIC code
6185 generation requires %r1 as a scratch register. For float modes,
6186 the only legitimate constant is CONST0_RTX. However, there are
6187 a few patterns that accept constant double operands. */
6189 && FP_REG_CLASS_P (rclass
)
6190 && (GET_CODE (x
) == CONST_INT
|| GET_CODE (x
) == CONST_DOUBLE
))
6195 sri
->icode
= CODE_FOR_reload_insi_r1
;
6199 sri
->icode
= CODE_FOR_reload_indi_r1
;
6203 sri
->icode
= CODE_FOR_reload_insf_r1
;
6207 sri
->icode
= CODE_FOR_reload_indf_r1
;
6216 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6217 register when we're generating PIC code or when the operand isn't
6219 if (pa_symbolic_expression_p (x
))
6221 if (GET_CODE (x
) == HIGH
)
6224 if (flag_pic
|| !read_only_operand (x
, VOIDmode
))
6229 sri
->icode
= CODE_FOR_reload_insi_r1
;
6233 sri
->icode
= CODE_FOR_reload_indi_r1
;
6243 /* Profiling showed the PA port spends about 1.3% of its compilation
6244 time in true_regnum from calls inside pa_secondary_reload_class. */
6245 if (regno
>= FIRST_PSEUDO_REGISTER
|| GET_CODE (x
) == SUBREG
)
6246 regno
= true_regnum (x
);
6248 /* Handle reloads for floating point loads and stores. */
6249 if ((regno
>= FIRST_PSEUDO_REGISTER
|| regno
== -1)
6250 && FP_REG_CLASS_P (rclass
))
6256 /* We don't need a secondary reload for indexed memory addresses.
6258 When INT14_OK_STRICT is true, it might appear that we could
6259 directly allow register indirect memory addresses. However,
6260 this doesn't work because we don't support SUBREGs in
6261 floating-point register copies and reload doesn't tell us
6262 when it's going to use a SUBREG. */
6263 if (IS_INDEX_ADDR_P (x
))
6267 /* Request a secondary reload with a general scratch register
6268 for everything else. ??? Could symbolic operands be handled
6269 directly when generating non-pic PA 2.0 code? */
6271 ? direct_optab_handler (reload_in_optab
, mode
)
6272 : direct_optab_handler (reload_out_optab
, mode
));
6276 /* A SAR<->FP register copy requires an intermediate general register
6277 and secondary memory. We need a secondary reload with a general
6278 scratch register for spills. */
6279 if (rclass
== SHIFT_REGS
)
6282 if (regno
>= FIRST_PSEUDO_REGISTER
|| regno
< 0)
6285 ? direct_optab_handler (reload_in_optab
, mode
)
6286 : direct_optab_handler (reload_out_optab
, mode
));
6290 /* Handle FP copy. */
6291 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno
)))
6292 return GENERAL_REGS
;
6295 if (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
6296 && REGNO_REG_CLASS (regno
) == SHIFT_REGS
6297 && FP_REG_CLASS_P (rclass
))
6298 return GENERAL_REGS
;
6303 /* Implement TARGET_SECONDARY_MEMORY_NEEDED. */
6306 pa_secondary_memory_needed (machine_mode mode ATTRIBUTE_UNUSED
,
6307 reg_class_t class1 ATTRIBUTE_UNUSED
,
6308 reg_class_t class2 ATTRIBUTE_UNUSED
)
6310 #ifdef PA_SECONDARY_MEMORY_NEEDED
6311 return PA_SECONDARY_MEMORY_NEEDED (mode
, class1
, class2
);
6317 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6318 is only marked as live on entry by df-scan when it is a fixed
6319 register. It isn't a fixed register in the 64-bit runtime,
6320 so we need to mark it here. */
6323 pa_extra_live_on_entry (bitmap regs
)
6326 bitmap_set_bit (regs
, ARG_POINTER_REGNUM
);
6329 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6330 to prevent it from being deleted. */
6333 pa_eh_return_handler_rtx (void)
6337 tmp
= gen_rtx_PLUS (word_mode
, hard_frame_pointer_rtx
,
6338 TARGET_64BIT
? GEN_INT (-16) : GEN_INT (-20));
6339 tmp
= gen_rtx_MEM (word_mode
, tmp
);
6344 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6345 by invisible reference. As a GCC extension, we also pass anything
6346 with a zero or variable size by reference.
6348 The 64-bit runtime does not describe passing any types by invisible
6349 reference. The internals of GCC can't currently handle passing
6350 empty structures, and zero or variable length arrays when they are
6351 not passed entirely on the stack or by reference. Thus, as a GCC
6352 extension, we pass these types by reference. The HP compiler doesn't
6353 support these types, so hopefully there shouldn't be any compatibility
6354 issues. This may have to be revisited when HP releases a C99 compiler
6355 or updates the ABI. */
6358 pa_pass_by_reference (cumulative_args_t
, const function_arg_info
&arg
)
6360 HOST_WIDE_INT size
= arg
.type_size_in_bytes ();
6364 return size
<= 0 || size
> 8;
/* Implement TARGET_FUNCTION_ARG_PADDING.  */

static pad_direction
pa_function_arg_padding (machine_mode mode, const_tree type)
{
  if (mode == BLKmode
      || (TARGET_64BIT
	  && type
	  && (AGGREGATE_TYPE_P (type)
	      || TREE_CODE (type) == COMPLEX_TYPE
	      || TREE_CODE (type) == VECTOR_TYPE)))
    {
      /* Return PAD_NONE if justification is not required.  */
      if (type
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
	return PAD_NONE;

      /* The directions set here are ignored when a BLKmode argument larger
	 than a word is placed in a register.  Different code is used for
	 the stack and registers.  This makes it difficult to have a
	 consistent data representation for both the stack and registers.
	 For both runtimes, the justification and padding for arguments on
	 the stack and in registers should be identical.  */
      if (TARGET_64BIT)
	/* The 64-bit runtime specifies left justification for aggregates.  */
	return PAD_UPWARD;

      /* The 32-bit runtime architecture specifies right justification.
	 When the argument is passed on the stack, the argument is padded
	 with garbage on the left.  The HP compiler pads with zeros.  */
      return PAD_DOWNWARD;
    }

  if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
    return PAD_DOWNWARD;
  else
    return PAD_NONE;
}
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdargs or varargs is used and fill in an initial
   va_list.  A pointer to this constructor is returned.  */

static rtx
hppa_builtin_saveregs (void)
{
  rtx offset, dest;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  if (TARGET_64BIT)
    {
      int i, off;

      /* Adjust for varargs/stdarg differences.  */
      if (argadj)
	offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
      else
	offset = crtl->args.arg_offset_rtx;

      /* We need to save %r26 .. %r19 inclusive starting at offset -64
	 from the incoming arg pointer and growing to larger addresses.  */
      for (i = 26, off = -64; i >= 19; i--, off += 8)
	emit_move_insn (gen_rtx_MEM (word_mode,
				     plus_constant (Pmode,
						    arg_pointer_rtx, off)),
			gen_rtx_REG (word_mode, i));
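      /* With the loop above each argument register gets a doubleword
	 slot: %r26 is stored at -64 and %r19 ends up at -8 relative to
	 the incoming argument pointer.  */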
      /* The incoming args pointer points just beyond the flushback area;
	 normally this is not a serious concern.  However, when we are doing
	 varargs/stdargs we want to make the arg pointer point to the start
	 of the incoming argument area.  */
      emit_move_insn (virtual_incoming_args_rtx,
		      plus_constant (Pmode, arg_pointer_rtx, -64));

      /* Now return a pointer to the first anonymous argument.  */
      return copy_to_reg (expand_binop (Pmode, add_optab,
					virtual_incoming_args_rtx,
					offset, 0, 0, OPTAB_LIB_WIDEN));
    }

  /* Store general registers on the stack.  */
  dest = gen_rtx_MEM (BLKmode,
		      plus_constant (Pmode, crtl->args.internal_arg_pointer,
				     -16));
  set_mem_alias_set (dest, get_varargs_alias_set ());
  set_mem_align (dest, BITS_PER_WORD);
  move_block_from_reg (23, dest, 4);
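  /* In the 32-bit runtime the four argument registers %r23 .. %r26 are
     flushed back to the 16-byte register save area that starts 16 bytes
     below the internal argument pointer.  */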
  /* move_block_from_reg will emit code to store the argument registers
     individually as scalar stores.

     However, other insns may later load from the same addresses for
     a structure load (passing a struct to a varargs routine).

     The alias code assumes that such aliasing can never happen, so we
     have to keep memory referencing insns from moving up beyond the
     last argument register store.  So we emit a blockage insn here.  */
  emit_insn (gen_blockage ());

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
static void
hppa_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
static tree
hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			   gimple_seq *post_p)
{
  if (TARGET_64BIT)
    {
      /* Args grow upward.  We can use the generic routines.  */
      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }
  else /* !TARGET_64BIT */
    {
      tree ptr = build_pointer_type (type);
      tree valist_type;
      tree t, u;
      unsigned int size, ofs;
      bool indirect;

      indirect = pass_va_arg_by_reference (type);
      if (indirect)
	{
	  type = ptr;
	  ptr = build_pointer_type (type);
	}
      size = int_size_in_bytes (type);
      valist_type = TREE_TYPE (valist);

      /* Args grow down.  Not handled by generic routines.  */

      u = fold_convert (sizetype, size_in_bytes (type));
      u = fold_build1 (NEGATE_EXPR, sizetype, u);
      t = fold_build_pointer_plus (valist, u);

      /* Align to 4 or 8 byte boundary depending on argument size.  */

      u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
      t = fold_convert (valist_type, t);

      t = build2 (MODIFY_EXPR, valist_type, valist, t);

      ofs = (8 - size) % 4;
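      /* Arguments smaller than a word slot are right justified on the
	 32-bit stack, so step past the leading padding bytes to reach
	 the value itself.  */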
      if (ofs != 0)
	t = fold_build_pointer_plus_hwi (t, ofs);

      t = fold_convert (ptr, t);
      t = build_va_arg_indirect_ref (t);

      if (indirect)
	t = build_va_arg_indirect_ref (t);

      return t;
    }
}
/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   Currently, TImode is not valid as the HP 64-bit runtime documentation
   doesn't document the alignment and calling conventions for this type.
   Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
   2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE.  */

static bool
pa_scalar_mode_supported_p (scalar_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
	return true;
      if (precision == DOUBLE_TYPE_SIZE)
	return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
   it branches into the delay slot.  Otherwise, return FALSE.  */

static bool
branch_to_delay_slot_p (rtx_insn *insn)
{
  rtx_insn *jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (jump_insn == insn)
	return TRUE;

      /* We can't rely on the length of asms.  So, we return FALSE when
	 the branch is followed by an asm.  */
      if (!insn
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || asm_noperands (PATTERN (insn)) >= 0
	  || get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.

   This occurs when INSN has an unfilled delay slot and is followed
   by an asm.  Disaster can occur if the asm is empty and the jump
   branches into the delay slot.  So, we add a nop in the delay slot
   when this occurs.  */

static bool
branch_needs_nop_p (rtx_insn *insn)
{
  rtx_insn *jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (!insn || jump_insn == insn)
	return TRUE;

      if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
	    || asm_noperands (PATTERN (insn)) >= 0)
	  && get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, can use nullification
   to skip the following instruction.  This avoids an extra cycle due
   to a mis-predicted branch when we fall through.  */

static bool
use_skip_p (rtx_insn *insn)
{
  rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));

  while (insn)
    {
      insn = next_active_insn (insn);

      /* We can't rely on the length of asms, so we can't skip asms.  */
      if (!insn
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || asm_noperands (PATTERN (insn)) >= 0)
	break;
      if (get_attr_length (insn) == 4
	  && jump_insn == next_active_insn (insn))
	return TRUE;
      if (get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* This routine handles all the normal conditional branch sequences we
   might need to generate.  It handles compare immediate vs compare
   register, nullification of delay slots, varying length branches,
   negated branches, and all combinations of the above.  It returns the
   output appropriate to emit the branch corresponding to all given
   parameters.  */

const char *
pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot)
     is asking for a disaster.  This can happen when not optimizing and
     when jump optimization fails.

     While it is usually safe to emit nothing, this can fail if the
     preceding instruction is a nullified branch with an empty delay
     slot and the same branch target as this branch.  We could check
     for this but jump optimization should eliminate nop jumps.  It
     is always safe to emit a nop.  */
  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* The doubleword form of the cmpib instruction doesn't have the LEU
     and GTU conditions while the cmpb instruction does.  Since we accept
     zero for cmpb, we must ensure that we use cmpb for the comparison.  */
  if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
    operands[2] = gen_rtx_REG (DImode, 0);
  if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
    operands[1] = gen_rtx_REG (DImode, 0);

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     comclr instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
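  /* In the templates below, "{x|y}" selects between assembler dialects:
     the text before the '|' is used for PA 1.x mnemonics (comb, comclr,
     ...) and the text after it for the PA 2.0 forms (cmpb, cmpclr, ...).  */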
  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
    case 4:
    case 8:
      if (useskip)
	strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
      else
	strcpy (buf, "{com%I2b,|cmp%I2b,}");
      if (GET_MODE (operands[1]) == DImode)
	strcat (buf, "*");
      if (negated)
	strcat (buf, "%B3");
      else
	strcat (buf, "%S3");
      if (useskip)
	strcat (buf, " %2,%r1,%%r0");
      else if (nullify)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, ",n %2,%r1,%0%#");
	  else
	    strcat (buf, ",n %2,%r1,%0");
	}
      else
	strcat (buf, " %2,%r1,%0");
      break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
    case 20:
    case 28:
    case 36:
    case 44:
      /* Handle weird backwards branch with a filled delay slot
	 which is nullified.  */
      if (dbr_sequence_length () != 0
	  && ! forward_branch_p (insn)
	  && nullify)
	{
	  strcpy (buf, "{com%I2b,|cmp%I2b,}");
	  if (GET_MODE (operands[1]) == DImode)
	    strcat (buf, "*");
	  if (negated)
	    strcat (buf, "%S3");
	  else
	    strcat (buf, "%B3");
	  strcat (buf, ",n %2,%r1,.+12\n\tb %0");
	}
      /* Handle short backwards branch with an unfilled delay slot.
	 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
	 taken and untaken branches.  */
      else if (dbr_sequence_length () == 0
	       && ! forward_branch_p (insn)
	       && INSN_ADDRESSES_SET_P ()
	       && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	{
	  strcpy (buf, "{com%I2b,|cmp%I2b,}");
	  if (GET_MODE (operands[1]) == DImode)
	    strcat (buf, "*");
	  if (negated)
	    strcat (buf, "%B3 %2,%r1,%0%#");
	  else
	    strcat (buf, "%S3 %2,%r1,%0%#");
	}
      else
	{
	  strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
	  if (GET_MODE (operands[1]) == DImode)
	    strcat (buf, "*");
	  if (negated)
	    strcat (buf, "%S3");
	  else
	    strcat (buf, "%B3");
	  if (TARGET_PA_20)
	    strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
	  else
	    strcat (buf, " %2,%r1,%%r0\n\tb %0");
	}
      break;

    default:
      /* The reversed conditional branch must branch over one additional
	 instruction if the delay slot is filled and needs to be extracted
	 by pa_output_lbranch.  If the delay slot is empty or this is a
	 nullified forward branch, the instruction after the reversed
	 condition branch must be nullified.  */
      if (dbr_sequence_length () == 0
	  || (nullify && forward_branch_p (insn)))
	{
	  nullify = 1;
	  xdelay = 0;
	  operands[4] = GEN_INT (length);
	}
      else
	{
	  xdelay = 1;
	  operands[4] = GEN_INT (length + 4);
	}

      /* Create a reversed conditional branch which branches around
	 the following insns.  */
      if (GET_MODE (operands[1]) != DImode)
	{
	  if (nullify)
	    {
	      if (negated)
		strcpy (buf,
		  "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
	      else
		strcpy (buf,
		  "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
	    }
	  else
	    {
	      if (negated)
		strcpy (buf,
		  "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
	      else
		strcpy (buf,
		  "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
	    }
	}
      else
	{
	  if (nullify)
	    {
	      if (negated)
		strcpy (buf,
		  "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
	      else
		strcpy (buf,
		  "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
	    }
	  else
	    {
	      if (negated)
		strcpy (buf,
		  "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
	      else
		strcpy (buf,
		  "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
	    }
	}

      output_asm_insn (buf, operands);
      return pa_output_lbranch (operands[0], insn, xdelay);
    }
  return buf;
}
/* Output a PIC pc-relative instruction sequence to load the address of
   OPERANDS[0] to register OPERANDS[2].  OPERANDS[0] is a symbol ref
   or a code label.  OPERANDS[1] specifies the register to use to load
   the program counter.  OPERANDS[3] may be used for label generation.
   The sequence is always three instructions in length.  The program
   counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
   Register %r1 is clobbered.  */

static void
pa_output_pic_pcrel_sequence (rtx *operands)
{
  gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
  if (TARGET_PA_20)
    {
      /* We can use mfia to determine the current program counter.  */
      if (TARGET_SOM || !TARGET_GAS)
	{
	  operands[3] = gen_label_rtx ();
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (operands[3]));
	  output_asm_insn ("mfia %1", operands);
	  output_asm_insn ("addil L'%0-%l3,%1", operands);
	  output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
	}
      else
	{
	  output_asm_insn ("mfia %1", operands);
	  output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
	  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
	}
    }
  else
    {
      /* We need to use a branch to determine the current program counter.  */
      output_asm_insn ("{bl|b,l} .+8,%1", operands);
      if (TARGET_SOM || !TARGET_GAS)
	{
	  operands[3] = gen_label_rtx ();
	  output_asm_insn ("addil L'%0-%l3,%1", operands);
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (operands[3]));
	  output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
	}
      else
	{
	  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
	  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
	}
    }
}
/* This routine handles output of long unconditional branches that
   exceed the maximum range of a simple branch instruction.  Since
   we don't have a register available for the branch, we save register
   %r1 in the frame marker, load the branch destination DEST into %r1,
   execute the branch, and restore %r1 in the delay slot of the branch.

   Since long branches may have an insn in the delay slot and the
   delay slot is used to restore %r1, we in general need to extract
   this insn and execute it before the branch.  However, to facilitate
   use of this function by conditional branches, we also provide an
   option to not extract the delay insn so that it will be emitted
   after the long branch.  So, if there is an insn in the delay slot,
   it is extracted if XDELAY is nonzero.

   The lengths of the various long-branch sequences are 20, 16 and 24
   bytes for the portable runtime, non-PIC and PIC cases, respectively.  */

const char *
pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
{
  rtx xoperands[4];

  xoperands[0] = dest;

  /* First, free up the delay slot.  */
  if (xdelay && dbr_sequence_length () != 0)
    {
      /* We can't handle a jump in the delay slot.  */
      gcc_assert (! JUMP_P (NEXT_INSN (insn)));

      final_scan_insn (NEXT_INSN (insn), asm_out_file,
		       optimize, 0, NULL);

      /* Now delete the delay insn.  */
      SET_INSN_DELETED (NEXT_INSN (insn));
    }

  /* Output an insn to save %r1.  The runtime documentation doesn't
     specify whether the "Clean Up" slot in the caller's frame can
     be clobbered by the callee.  It isn't copied by HP's builtin
     alloca, so this suggests that it can be clobbered if necessary.
     The "Static Link" location is copied by HP builtin alloca, so
     we avoid using it.  Using the cleanup slot might be a problem
     if we have to interoperate with languages that pass cleanup
     information.  However, it should be possible to handle these
     situations with GCC's asm feature.

     The "Current RP" slot is reserved for the called procedure, so
     we try to use it when we don't have a frame of our own.  It's
     rather unlikely that we won't have a frame when we need to emit
     a very long branch.

     Really the way to go long term is a register scavenger; go to
     the target of the jump and find a register which we can use
     as a scratch to hold the value in %r1.  Then, we wouldn't have
     to free up the delay slot or clobber a slot that may be needed
     for other purposes.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	/* Use the return pointer slot in the frame marker.  */
	output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
      else
	/* Use the slot at -40 in the frame marker since HP builtin
	   alloca doesn't copy it.  */
	output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	/* Use the return pointer slot in the frame marker.  */
	output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
      else
	/* Use the "Clean Up" slot in the frame marker.  In GCC,
	   the only other use of this location is for copying a
	   floating point double argument from a floating-point
	   register to two general registers.  The copy is done
	   as an "atomic" operation when outputting a call, so it
	   won't interfere with our using the location here.  */
	output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
    }
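  /* %r1 is now safe; materialize the branch target.  One of three
     sequences is used below depending on whether we are generating
     portable runtime, PIC, or plain absolute code.  */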
  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else if (flag_pic)
    {
      xoperands[1] = gen_rtx_REG (Pmode, 1);
      xoperands[2] = xoperands[1];
      pa_output_pic_pcrel_sequence (xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else
    /* Now output a very long branch to the original target.  */
    output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);

  /* Now restore the value of %r1 in the delay slot.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	return "ldd -16(%%r30),%%r1";
      else
	return "ldd -40(%%r30),%%r1";
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	return "ldw -20(%%r30),%%r1";
      else
	return "ldw -12(%%r30),%%r1";
    }
}
/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
	      int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
    case 4:
    case 8:
      if (useskip)
	strcpy (buf, "{extrs,|extrw,s,}");
      else
	strcpy (buf, "bb,");
      if (useskip && GET_MODE (operands[0]) == DImode)
	strcpy (buf, "extrd,s,*");
      else if (GET_MODE (operands[0]) == DImode)
	strcpy (buf, "bb,*");
      if ((which == 0 && negated)
	  || (which == 1 && ! negated))
	strcat (buf, ">=");
      else
	strcat (buf, "<");
      if (useskip)
	strcat (buf, " %0,%1,1,%%r0");
      else if (nullify && negated)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, ",n %0,%1,%3%#");
	  else
	    strcat (buf, ",n %0,%1,%3");
	}
      else if (nullify && ! negated)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, ",n %0,%1,%2%#");
	  else
	    strcat (buf, ",n %0,%1,%2");
	}
      else if (! nullify && negated)
	strcat (buf, " %0,%1,%3");
      else if (! nullify && ! negated)
	strcat (buf, " %0,%1,%2");
      break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
    case 20:
      /* Handle weird backwards branch with a filled delay slot
	 which is nullified.  */
      if (dbr_sequence_length () != 0
	  && ! forward_branch_p (insn)
	  && nullify)
	{
	  strcpy (buf, "bb,");
	  if (GET_MODE (operands[0]) == DImode)
	    strcat (buf, "*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, "<");
	  else
	    strcat (buf, ">=");
	  if (negated)
	    strcat (buf, ",n %0,%1,.+12\n\tb %3");
	  else
	    strcat (buf, ",n %0,%1,.+12\n\tb %2");
	}
      /* Handle short backwards branch with an unfilled delay slot.
	 Using a bb;nop rather than extrs;bl saves 1 cycle for both
	 taken and untaken branches.  */
      else if (dbr_sequence_length () == 0
	       && ! forward_branch_p (insn)
	       && INSN_ADDRESSES_SET_P ()
	       && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	{
	  strcpy (buf, "bb,");
	  if (GET_MODE (operands[0]) == DImode)
	    strcat (buf, "*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, ">=");
	  else
	    strcat (buf, "<");
	  if (negated)
	    strcat (buf, " %0,%1,%3%#");
	  else
	    strcat (buf, " %0,%1,%2%#");
	}
      else
	{
	  if (GET_MODE (operands[0]) == DImode)
	    strcpy (buf, "extrd,s,*");
	  else
	    strcpy (buf, "{extrs,|extrw,s,}");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, "<");
	  else
	    strcat (buf, ">=");
	  if (nullify && negated)
	    strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
	  else if (nullify && ! negated)
	    strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
	  else if (negated)
	    strcat (buf, " %0,%1,1,%%r0\n\tb %3");
	  else
	    strcat (buf, " %0,%1,1,%%r0\n\tb %2");
	}
      break;

    default:
      /* The reversed conditional branch must branch over one additional
	 instruction if the delay slot is filled and needs to be extracted
	 by pa_output_lbranch.  If the delay slot is empty or this is a
	 nullified forward branch, the instruction after the reversed
	 condition branch must be nullified.  */
      if (dbr_sequence_length () == 0
	  || (nullify && forward_branch_p (insn)))
	{
	  nullify = 1;
	  xdelay = 0;
	  operands[4] = GEN_INT (length);
	}
      else
	{
	  xdelay = 1;
	  operands[4] = GEN_INT (length + 4);
	}

      if (GET_MODE (operands[0]) == DImode)
	strcpy (buf, "bb,*");
      else
	strcpy (buf, "bb,");
      if ((which == 0 && negated)
	  || (which == 1 && !negated))
	strcat (buf, "<");
      else
	strcat (buf, ">=");
      if (nullify)
	strcat (buf, ",n %0,%1,.+%4");
      else
	strcat (buf, " %0,%1,.+%4");
      output_asm_insn (buf, operands);
      return pa_output_lbranch (negated ? operands[3] : operands[2],
				insn, xdelay);
    }
  return buf;
}
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
	       int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
    case 4:
    case 8:
      if (useskip)
	strcpy (buf, "{vextrs,|extrw,s,}");
      else
	strcpy (buf, "{bvb,|bb,}");
      if (useskip && GET_MODE (operands[0]) == DImode)
	strcpy (buf, "extrd,s,*");
      else if (GET_MODE (operands[0]) == DImode)
	strcpy (buf, "bb,*");
      if ((which == 0 && negated)
	  || (which == 1 && ! negated))
	strcat (buf, ">=");
      else
	strcat (buf, "<");
      if (useskip)
	strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
      else if (nullify && negated)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
	  else
	    strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
	}
      else if (nullify && ! negated)
	{
	  if (branch_needs_nop_p (insn))
	    strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
	  else
	    strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
	}
      else if (! nullify && negated)
	strcat (buf, "{ %0,%3| %0,%%sar,%3}");
      else if (! nullify && ! negated)
	strcat (buf, "{ %0,%2| %0,%%sar,%2}");
      break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
    case 20:
      /* Handle weird backwards branch with a filled delay slot
	 which is nullified.  */
      if (dbr_sequence_length () != 0
	  && ! forward_branch_p (insn)
	  && nullify)
	{
	  strcpy (buf, "{bvb,|bb,}");
	  if (GET_MODE (operands[0]) == DImode)
	    strcat (buf, "*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, "<");
	  else
	    strcat (buf, ">=");
	  if (negated)
	    strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
	  else
	    strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
	}
      /* Handle short backwards branch with an unfilled delay slot.
	 Using a bb;nop rather than extrs;bl saves 1 cycle for both
	 taken and untaken branches.  */
      else if (dbr_sequence_length () == 0
	       && ! forward_branch_p (insn)
	       && INSN_ADDRESSES_SET_P ()
	       && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	{
	  strcpy (buf, "{bvb,|bb,}");
	  if (GET_MODE (operands[0]) == DImode)
	    strcat (buf, "*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, ">=");
	  else
	    strcat (buf, "<");
	  if (negated)
	    strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
	  else
	    strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
	}
      else
	{
	  strcpy (buf, "{vextrs,|extrw,s,}");
	  if (GET_MODE (operands[0]) == DImode)
	    strcpy (buf, "extrd,s,*");
	  if ((which == 0 && negated)
	      || (which == 1 && ! negated))
	    strcat (buf, "<");
	  else
	    strcat (buf, ">=");
	  if (nullify && negated)
	    strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
	  else if (nullify && ! negated)
	    strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
	  else if (negated)
	    strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
	  else
	    strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
	}
      break;

    default:
      /* The reversed conditional branch must branch over one additional
	 instruction if the delay slot is filled and needs to be extracted
	 by pa_output_lbranch.  If the delay slot is empty or this is a
	 nullified forward branch, the instruction after the reversed
	 condition branch must be nullified.  */
      if (dbr_sequence_length () == 0
	  || (nullify && forward_branch_p (insn)))
	{
	  nullify = 1;
	  xdelay = 0;
	  operands[4] = GEN_INT (length);
	}
      else
	{
	  xdelay = 1;
	  operands[4] = GEN_INT (length + 4);
	}

      if (GET_MODE (operands[0]) == DImode)
	strcpy (buf, "bb,*");
      else
	strcpy (buf, "{bvb,|bb,}");
      if ((which == 0 && negated)
	  || (which == 1 && !negated))
	strcat (buf, "<");
      else
	strcat (buf, ">=");
      if (nullify)
	strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
      else
	strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
      output_asm_insn (buf, operands);
      return pa_output_lbranch (negated ? operands[3] : operands[2],
				insn, xdelay);
    }
  return buf;
}
/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
	return "ldo %1(%0),%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
	  output_asm_insn ("ldw -16(%%r30),%4", operands);
	  output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else
	{
	  output_asm_insn ("ldw %0,%4", operands);
	  return "ldo %1(%4),%4\n\tstw %4,%0";
	}
    }

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	case 8:
	  if (nullify)
	    {
	      if (branch_needs_nop_p (insn))
		return "addib,%C2,n %1,%0,%3%#";
	      else
		return "addib,%C2,n %1,%0,%3";
	    }
	  else
	    return "addib,%C2 %1,%0,%3";

	case 12:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "addib,%N2,n %1,%0,.+12\n\tb %3";
	  /* Handle short backwards branch with an unfilled delay slot.
	     Using an addb;nop rather than addi;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				     - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "addib,%C2 %1,%0,%3%#";

	  /* Handle normal cases.  */
	  if (nullify)
	    return "addi,%N2 %1,%0,%0\n\tb,n %3";
	  else
	    return "addi,%N2 %1,%0,%0\n\tb %3";

	default:
	  /* The reversed conditional branch must branch over one additional
	     instruction if the delay slot is filled and needs to be extracted
	     by pa_output_lbranch.  If the delay slot is empty or this is a
	     nullified forward branch, the instruction after the reversed
	     condition branch must be nullified.  */
	  if (dbr_sequence_length () == 0
	      || (nullify && forward_branch_p (insn)))
	    {
	      nullify = 1;
	      xdelay = 0;
	      operands[4] = GEN_INT (length);
	    }
	  else
	    {
	      xdelay = 1;
	      operands[4] = GEN_INT (length + 4);
	    }

	  if (nullify)
	    output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
	  else
	    output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);

	  return pa_output_lbranch (operands[3], insn, xdelay);
	}
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
	 increment the GR, store the GR into MEM, and finally reload
	 the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
		       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (length == 24)
	return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 28)
	return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	{
	  operands[5] = GEN_INT (length - 16);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
	  output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (length == 12)
	return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else if (length == 16)
	return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
      else
	{
	  operands[5] = GEN_INT (length - 4);
	  output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
}
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
		int reverse_comparison)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
	return "copy %1,%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("stw %1,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else if (which_alternative == 2)
	return "stw %1,%0";
      else
	return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	case 8:
	  if (nullify)
	    {
	      if (branch_needs_nop_p (insn))
		return "movb,%C2,n %1,%0,%3%#";
	      else
		return "movb,%C2,n %1,%0,%3";
	    }
	  else
	    return "movb,%C2 %1,%0,%3";

	case 12:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "movb,%N2,n %1,%0,.+12\n\tb %3";

	  /* Handle short backwards branch with an unfilled delay slot.
	     Using a movb;nop rather than or;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				     - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "movb,%C2 %1,%0,%3%#";

	  /* Handle normal cases.  */
	  if (nullify)
	    return "or,%N2 %1,%%r0,%0\n\tb,n %3";
	  else
	    return "or,%N2 %1,%%r0,%0\n\tb %3";

	default:
	  /* The reversed conditional branch must branch over one additional
	     instruction if the delay slot is filled and needs to be extracted
	     by pa_output_lbranch.  If the delay slot is empty or this is a
	     nullified forward branch, the instruction after the reversed
	     condition branch must be nullified.  */
	  if (dbr_sequence_length () == 0
	      || (nullify && forward_branch_p (insn)))
	    {
	      nullify = 1;
	      xdelay = 0;
	      operands[4] = GEN_INT (length);
	    }
	  else
	    {
	      xdelay = 1;
	      operands[4] = GEN_INT (length + 4);
	    }

	  if (nullify)
	    output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
	  else
	    output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);

	  return pa_output_lbranch (operands[3], insn, xdelay);
	}
    }
  /* Deal with gross reload for FP destination register case.  */
  else if (which_alternative == 1)
    {
      /* Move source register to MEM, perform the branch test, then
	 finally load the FP register from MEM from within the branch's
	 delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (length == 12)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 16)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	{
	  operands[4] = GEN_INT (length - 4);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
	  output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      if (length == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else if (length == 12)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
      else
	{
	  operands[4] = GEN_INT (length);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
			   operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (length == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else if (length == 12)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
      else
	{
	  operands[4] = GEN_INT (length);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
			   operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
}
/* Copy any FP arguments in INSN into integer registers.  */

static void
copy_fp_args (rtx_insn *insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  /* Copy the FP register into an integer register via memory.  */
	  if (arg_mode == SFmode)
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
	      output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	  else
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
	      output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	}
    }
}
/* Compute length of the FP argument copy sequence for INSN.  */

static int
length_fp_args (rtx_insn *insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    length += 8;
	  else
	    length += 12;
	}
    }

  return length;
}
/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by pa_output_millicode_call.
   We include the delay slot in the returned length as it is better to
   over estimate the length than to under estimate it.  */

int
pa_attr_length_millicode_call (rtx_insn *insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }
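  /* If the address sum wrapped, the distance is left at -1 (unknown) so
     the long sequence is used.  In this file 7600000 serves as a
     conservative bound on the reach of a PA 2.0 22-bit pc-relative
     branch, and MAX_PCREL17F_OFFSET on the 17-bit form.  */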
  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
	return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
	return 8;

      return 12;
    }
}
/* INSN is a function call.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  rtx xoperands[4];

  xoperands[0] = call_dest;

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because it
     has an attribute type of sh_func_adrs, it never has a nonzero
     sequence length (i.e., the delay slot is never filled).  */
  if (!TARGET_LONG_CALLS
      && (attr_length == 8
	  || (attr_length == 28
	      && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
    {
      xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
	{
	  /* It might seem that one insn could be saved by accessing
	     the millicode function using the linkage table.  However,
	     this doesn't work in shared libraries and other dynamically
	     loaded objects.  Using a pc-relative sequence also avoids
	     problems related to the implicit use of the gp register.  */
	  xoperands[1] = gen_rtx_REG (Pmode, 1);
	  xoperands[2] = xoperands[1];
	  pa_output_pic_pcrel_sequence (xoperands);
	  output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
	}
      else if (TARGET_PORTABLE_RUNTIME)
	{
	  /* Pure portable runtime doesn't allow be/ble; we also don't
	     have PIC support in the assembler/linker, so this sequence
	     is needed.  */

	  /* Get the address of our target into %r1.  */
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

	  /* Get our return address into %r31.  */
	  output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
	  output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
      else if (!flag_pic)
	{
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  if (TARGET_PA_20)
	    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
	  else
	    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
	}
      else
	{
	  xoperands[1] = gen_rtx_REG (Pmode, 31);
	  xoperands[2] = gen_rtx_REG (Pmode, 1);
	  pa_output_pic_pcrel_sequence (xoperands);

	  /* Adjust return address.  */
	  output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  return "";
}
/* Return the attribute length of the call instruction INSN.  The SIBCALL
   flag indicates whether INSN is a regular call or a sibling call.  The
   length returned must be longer than the code actually generated by
   pa_output_call.  Since branch shortening is done before delay branch
   sequencing, there is no way to determine whether or not the delay
   slot will be filled during branch shortening.  Even when the delay
   slot is filled, we may have to add a nop if the delay slot contains
   a branch that can't reach its target.  Thus, we always have to include
   the delay slot in the length estimate.  This used to be done in
   pa_adjust_insn_length but we do it here now as some sequences always
   fill the delay slot and we can save four bytes in the estimate for
   these sequences.  */

int
pa_attr_length_call (rtx_insn *insn, int sibcall)
{
  int local_call;
  rtx call, call_dest;
  tree call_decl;
  int length = 0;
  rtx pat = PATTERN (insn);
  unsigned long distance = -1;

  gcc_assert (CALL_P (insn));

  if (INSN_ADDRESSES_SET_P ())
    {
      unsigned long total;

      total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  gcc_assert (GET_CODE (pat) == PARALLEL);

  /* Get the call rtx.  */
  call = XVECEXP (pat, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);

  gcc_assert (GET_CODE (call) == CALL);

  /* Determine if this is a local call.  */
  call_dest = XEXP (XEXP (call, 0), 0);
  call_decl = SYMBOL_REF_DECL (call_dest);
  local_call = call_decl && targetm.binds_local_p (call_decl);

  /* pc-relative branch.  */
  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !sibcall && distance < 7600000)
	  || distance < MAX_PCREL17F_OFFSET))
    length += 8;

  /* 64-bit plabel sequence.  */
  else if (TARGET_64BIT && !local_call)
    length += 24;

  /* non-pic long absolute branch sequence.  */
  else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
    length += 12;

  /* long pc-relative branch sequence.  */
  else if (TARGET_LONG_PIC_SDIFF_CALL
	   || (TARGET_GAS && !TARGET_SOM && local_call))
    {
      length += 20;

      if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
	length += 8;
    }

  /* 32-bit plabel sequence.  */
  else
    {
      length += 32;

      if (TARGET_SOM)
	length += length_fp_args (insn);

      if (flag_pic)
	length += 4;

      if (!TARGET_PA_20)
	{
	  if (!sibcall)
	    length += 8;

	  if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
	    length += 8;
	}
    }

  return length;
}
/* INSN is a function call.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
{
  int seq_length = dbr_sequence_length ();
  tree call_decl = SYMBOL_REF_DECL (call_dest);
  int local_call = call_decl && targetm.binds_local_p (call_decl);
  rtx xoperands[4];

  xoperands[0] = call_dest;

  /* Handle the common case where we're sure that the branch will reach
     the beginning of the "$CODE$" subspace.  This is the beginning of
     the current function if we are in a named section.  */
  if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
    {
      xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT && !local_call)
	{
	  /* ??? As far as I can tell, the HP linker doesn't support the
	     long pc-relative sequence described in the 64-bit runtime
	     architecture.  So, we use a slightly longer indirect call.  */
	  xoperands[0] = pa_get_deferred_plabel (call_dest);
	  xoperands[1] = gen_label_rtx ();

	  /* Put the load of %r27 into the delay slot.  We don't need to
	     do anything when generating fast indirect calls.  */
	  if (seq_length != 0)
	    {
	      final_scan_insn (NEXT_INSN (insn), asm_out_file,
			       optimize, 0, NULL);

	      /* Now delete the delay insn.  */
	      SET_INSN_DELETED (NEXT_INSN (insn));
	    }

	  output_asm_insn ("addil LT'%0,%%r27", xoperands);
	  output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
	  output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
	  output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
	  output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
	  output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
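	  /* The doubleword loads above follow the 64-bit function
	     descriptor layout: the entry point lives at offset 16 and
	     the callee's gp value at offset 24, which is reloaded into
	     %r27 from the delay slot of the bve,l.  */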
	}
      else
	{
	  int indirect_call = 0;

	  /* Emit a long call.  There are several different sequences
	     of increasing length and complexity.  In most cases,
	     they don't allow an instruction in the delay slot.  */
	  if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
	      && !TARGET_LONG_PIC_SDIFF_CALL
	      && !(TARGET_GAS && !TARGET_SOM && local_call)
	      && !TARGET_64BIT)
	    indirect_call = 1;

	  if (seq_length != 0
	      && !sibcall
	      && (!TARGET_PA_20
		  || indirect_call
		  || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
	    {
	      /* A non-jump insn in the delay slot.  By definition we can
		 emit this insn before the call (and in fact before argument
		 relocating).  */
	      final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
			       NULL);

	      /* Now delete the delay insn.  */
	      SET_INSN_DELETED (NEXT_INSN (insn));
	      seq_length = 0;
	    }

	  if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
	    {
	      /* This is the best sequence for making long calls in
		 non-pic code.  Unfortunately, GNU ld doesn't provide
		 the stub needed for external calls, and GAS's support
		 for this with the SOM linker is buggy.  It is safe
		 to use this for local calls.  */
	      output_asm_insn ("ldil L'%0,%%r1", xoperands);
	      if (sibcall)
		output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
	      else
		{
		  if (TARGET_PA_20)
		    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
				     xoperands);
		  else
		    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);

		  output_asm_insn ("copy %%r31,%%r2", xoperands);
		  seq_length = 1;
		}
	    }
	  else
	    {
	      /* The HP assembler and linker can handle relocations for
		 the difference of two symbols.  The HP assembler
		 recognizes the sequence as a pc-relative call and
		 the linker provides stubs when needed.  */

	      /* GAS currently can't generate the relocations that
		 are needed for the SOM linker under HP-UX using this
		 sequence.  The GNU linker doesn't generate the stubs
		 that are needed for external calls on TARGET_ELF32
		 with this sequence.  For now, we have to use a longer
		 plabel sequence when using GAS for non local calls.  */
	      if (TARGET_LONG_PIC_SDIFF_CALL
		  || (TARGET_GAS && !TARGET_SOM && local_call))
		{
		  xoperands[1] = gen_rtx_REG (Pmode, 1);
		  xoperands[2] = xoperands[1];
		  pa_output_pic_pcrel_sequence (xoperands);
		}
	      else
		{
		  /* Emit a long plabel-based call sequence.  This is
		     essentially an inline implementation of $$dyncall.
		     We don't actually try to call $$dyncall as this is
		     as difficult as calling the function itself.  */
		  xoperands[0] = pa_get_deferred_plabel (call_dest);
		  xoperands[1] = gen_label_rtx ();

		  /* Since the call is indirect, FP arguments in registers
		     need to be copied to the general registers.  Then, the
		     argument relocation stub will copy them back.  */
		  if (TARGET_SOM)
		    copy_fp_args (insn);

		  if (flag_pic)
		    {
		      output_asm_insn ("addil LT'%0,%%r19", xoperands);
		      output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
		      output_asm_insn ("ldw 0(%%r1),%%r22", xoperands);
		    }
		  else
		    {
		      output_asm_insn ("addil LR'%0-$global$,%%r27",
				       xoperands);
		      output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r22",
				       xoperands);
		    }
8175 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands
);
8176 output_asm_insn ("depi 0,31,2,%%r22", xoperands
);
8177 /* Should this be an ordered load to ensure the target
8178 address is loaded before the global pointer? */
8179 output_asm_insn ("ldw 0(%%r22),%%r1", xoperands
);
8180 output_asm_insn ("ldw 4(%%r22),%%r19", xoperands
);
8182 if (!sibcall
&& !TARGET_PA_20
)
8184 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands
);
8185 if (TARGET_NO_SPACE_REGS
|| (local_call
&& !flag_pic
))
8186 output_asm_insn ("addi 8,%%r2,%%r2", xoperands
);
8188 output_asm_insn ("addi 16,%%r2,%%r2", xoperands
);
8195 output_asm_insn ("bve (%%r1)", xoperands
);
8200 output_asm_insn ("bve,l (%%r1),%%r2", xoperands
);
8201 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands
);
8205 output_asm_insn ("bve,l (%%r1),%%r2", xoperands
);
8210 if (!TARGET_NO_SPACE_REGS
&& (!local_call
|| flag_pic
))
8211 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8216 if (TARGET_NO_SPACE_REGS
|| (local_call
&& !flag_pic
))
8217 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands
);
8219 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands
);
8223 if (TARGET_NO_SPACE_REGS
|| (local_call
&& !flag_pic
))
8224 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands
);
8226 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands
);
8229 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands
);
8231 output_asm_insn ("copy %%r31,%%r2", xoperands
);
8239 if (seq_length
== 0)
8240 output_asm_insn ("nop", xoperands
);
/* Return the attribute length of the indirect call instruction INSN.
   The length must match the code generated by pa_output_indirect_call.
   The returned length includes the delay slot.  Currently, the delay
   slot of an indirect call sequence is not exposed and it is used by
   the sequence itself.  */

int
pa_attr_length_indirect_call (rtx_insn *insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  if (TARGET_64BIT)
    return 12;

  if (TARGET_FAST_INDIRECT_CALLS)
    return 8;

  if (TARGET_PORTABLE_RUNTIME)
    return 16;

  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
	  || distance < MAX_PCREL17F_OFFSET))
    return 8;

  /* Out of reach, can use ble.  */
  if (!flag_pic)
    return 12;

  /* Inline versions of $$dyncall.  */
  if (!optimize_size)
    {
      if (TARGET_NO_SPACE_REGS)
	return 28;

      return 32;
    }

  /* Long PIC pc-relative call.  */
  return 20;
}
const char *
pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
{
  rtx xoperands[4];
  int length;

  if (TARGET_64BIT)
    {
      xoperands[0] = call_dest;
      output_asm_insn ("ldd 16(%0),%%r2\n\t"
		       "bve,l (%%r2),%%r2\n\t"
		       "ldd 24(%0),%%r27", xoperands);
      return "";
    }

  /* First the special case for kernels, level 0 systems, etc.  */
  if (TARGET_FAST_INDIRECT_CALLS)
    {
      pa_output_arg_descriptor (insn);
      if (TARGET_PA_20)
	return "bve,l,n (%%r22),%%r2\n\tnop";
      return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
    }

  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
		       "ldo R'$$dyncall(%%r31),%%r31", xoperands);
      pa_output_arg_descriptor (insn);
      return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
    }

  /* Now the normal case -- we can reach $$dyncall directly or
     we're sure that we can get there via a long-branch stub.

     No need to check target flags as the length uniquely identifies
     the remaining cases.  */
  length = pa_attr_length_indirect_call (insn);
  if (length == 8)
    {
      pa_output_arg_descriptor (insn);

      /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
	 $$dyncall.  Since BLE uses %r31 as the link register, the 22-bit
	 variant of the B,L instruction can't be used on the SOM target.  */
      if (TARGET_PA_20 && !TARGET_SOM)
	return "b,l,n $$dyncall,%%r2\n\tnop";

      return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
    }

  /* Long millicode call, but we are not generating PIC or portable runtime
     code.  */
  if (length == 12)
    {
      output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
      pa_output_arg_descriptor (insn);
      return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
    }

  /* The long PIC pc-relative call sequence is five instructions.  So,
     let's use an inline version of $$dyncall when the calling sequence
     has a roughly similar number of instructions and we are not optimizing
     for size.  We need two instructions to load the return pointer plus
     the $$dyncall implementation.  */
  if (!optimize_size)
    {
      if (TARGET_NO_SPACE_REGS)
	{
	  pa_output_arg_descriptor (insn);
	  output_asm_insn ("bl .+8,%%r2\n\t"
			   "ldo 20(%%r2),%%r2\n\t"
			   "extru,<> %%r22,30,1,%%r0\n\t"
			   "bv,n %%r0(%%r22)\n\t"
			   "ldw -2(%%r22),%%r21\n\t"
			   "bv %%r0(%%r21)\n\t"
			   "ldw 2(%%r22),%%r19", xoperands);
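	  /* The expansion above mirrors $$dyncall: extru,<> tests the
	     plabel bit (bit 30) of the pointer in %r22 and nullifies the
	     direct bv when it is set; in that case the real target is
	     loaded from the plabel at offset -2 and the gp from offset 2
	     before branching.  */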
	  return "";
	}

      pa_output_arg_descriptor (insn);
      output_asm_insn ("bl .+8,%%r2\n\t"
		       "ldo 24(%%r2),%%r2\n\t"
		       "stw %%r2,-24(%%sp)\n\t"
		       "extru,<> %%r22,30,1,%%r0\n\t"
		       "bv,n %%r0(%%r22)\n\t"
		       "ldw -2(%%r22),%%r21\n\t"
		       "bv %%r0(%%r21)\n\t"
		       "ldw 2(%%r22),%%r19", xoperands);
      return "";
    }

  /* We need a long PIC call to $$dyncall.  */
  xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
  xoperands[1] = gen_rtx_REG (Pmode, 2);
  xoperands[2] = gen_rtx_REG (Pmode, 1);
  pa_output_pic_pcrel_sequence (xoperands);
  pa_output_arg_descriptor (insn);
  return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
}
/* In HPUX 8.0's shared library scheme, special relocations are needed
   for function labels if they might be passed to a function
   in a shared library (because shared libraries don't live in code
   space), and special magic is needed to construct their address.  */

void
pa_encode_label (rtx sym)
{
  const char *str = XSTR (sym, 0);
  int len = strlen (str) + 1;
  char *newstr, *p;

  p = newstr = XALLOCAVEC (char, len + 1);
  *p++ = '@';
  strcpy (p, str);

  XSTR (sym, 0) = ggc_alloc_string (newstr, len);
}

static void
pa_encode_section_info (tree decl, rtx rtl, int first)
{
  int old_referenced = 0;

  if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
    old_referenced
      = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;

  default_encode_section_info (decl, rtl, first);

  if (first && TEXT_SPACE_P (decl))
    {
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
      if (TREE_CODE (decl) == FUNCTION_DECL)
	pa_encode_label (XEXP (rtl, 0));
    }
  else if (old_referenced)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
}

/* This is sort of inverse to pa_encode_section_info.  */

static const char *
pa_strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}
/* Returns 1 if OP is a function label involved in a simple addition
   with a constant.  Used to keep certain patterns from matching
   during instruction combination.  */

int
pa_is_function_label_plus_const (rtx op)
{
  /* Strip off any CONST.  */
  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  return (GET_CODE (op) == PLUS
	  && function_label_operand (XEXP (op, 0), VOIDmode)
	  && GET_CODE (XEXP (op, 1)) == CONST_INT);
}
8464 /* Output the assembler code for a thunk function. THUNK_DECL is the
8465 declaration for the thunk function itself, FUNCTION is the decl for
8466 the target function. DELTA is an immediate constant offset to be
8467 added to THIS. If VCALL_OFFSET is nonzero, the word at
8468 *(*this + vcall_offset) should be added to THIS. */
8471 pa_asm_output_mi_thunk (FILE *file
, tree thunk_fndecl
, HOST_WIDE_INT delta
,
8472 HOST_WIDE_INT vcall_offset
, tree function
)
8474 const char *fnname
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl
));
8475 static unsigned int current_thunk_number
;
8476 int val_14
= VAL_14_BITS_P (delta
);
8477 unsigned int old_last_address
= last_address
, nbytes
= 0;
8481 xoperands
[0] = XEXP (DECL_RTL (function
), 0);
8482 xoperands
[1] = XEXP (DECL_RTL (thunk_fndecl
), 0);
8483 xoperands
[2] = GEN_INT (delta
);
8485 assemble_start_function (thunk_fndecl
, fnname
);
8486 final_start_function (emit_barrier (), file
, 1);
      /* Output the thunk.  We know that the function is in the same
         translation unit (i.e., the same space) as the thunk, and that
         thunks are output after their method.  Thus, we don't need an
         external branch to reach the function.  With SOM and GAS,
         functions and thunks are effectively in different sections.
         Thus, we can always use a IA-relative branch and the linker
         will add a long branch stub if necessary.

         However, we have to be careful when generating PIC code on the
         SOM port to ensure that the sequence does not transfer to an
         import stub for the target function as this could clobber the
         return value saved at SP-24.  This would also apply to the
         32-bit linux port if the multi-space model is implemented.  */
      if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
           && !(flag_pic && TREE_PUBLIC (function))
           && (TARGET_GAS || last_address < 262132))
          || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
              && ((targetm_common.have_named_sections
                   && DECL_SECTION_NAME (thunk_fndecl) != NULL
                   /* The GNU 64-bit linker has rather poor stub management.
                      So, we use a long branch from thunks that aren't in
                      the same section as the target function.  */
                   && ((!TARGET_64BIT
                        && (DECL_SECTION_NAME (thunk_fndecl)
                            != DECL_SECTION_NAME (function)))
                       || ((DECL_SECTION_NAME (thunk_fndecl)
                            == DECL_SECTION_NAME (function))
                           && last_address < 262132)))
                  /* In this case, we need to be able to reach the start of
                     the stub table even though the function is likely closer
                     and can be jumped to directly.  */
                  || (targetm_common.have_named_sections
                      && DECL_SECTION_NAME (thunk_fndecl) == NULL
                      && DECL_SECTION_NAME (function) == NULL
                      && total_code_bytes < MAX_PCREL17F_OFFSET)
                  /* Likewise.  */
                  || (!targetm_common.have_named_sections
                      && total_code_bytes < MAX_PCREL17F_OFFSET))))
        {
          if (!val_14)
            output_asm_insn ("addil L'%2,%%r26", xoperands);

          output_asm_insn ("b %0", xoperands);

          if (val_14)
            {
              output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
              nbytes += 8;
            }
          else
            {
              output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
              nbytes += 12;
            }
        }
      else if (TARGET_64BIT)
        {
          rtx xop[4];

          /* We only have one call-clobbered scratch register, so we can't
             make use of the delay slot if delta doesn't fit in 14 bits.  */
          if (!val_14)
            {
              output_asm_insn ("addil L'%2,%%r26", xoperands);
              output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
            }

          /* Load function address into %r1.  */
          xop[0] = xoperands[0];
          xop[1] = gen_rtx_REG (Pmode, 1);
          xop[2] = xop[1];
          pa_output_pic_pcrel_sequence (xop);

          if (val_14)
            {
              output_asm_insn ("bv %%r0(%%r1)", xoperands);
              output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
              nbytes += 20;
            }
          else
            {
              output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
              nbytes += 24;
            }
        }
      else if (TARGET_PORTABLE_RUNTIME)
        {
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);

          if (!val_14)
            output_asm_insn ("ldil L'%2,%%r26", xoperands);

          output_asm_insn ("bv %%r0(%%r22)", xoperands);

          if (val_14)
            {
              output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
              nbytes += 16;
            }
          else
            {
              output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
              nbytes += 20;
            }
        }
      else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
        {
          /* The function is accessible from outside this module.  The only
             way to avoid an import stub between the thunk and function is to
             call the function directly with an indirect sequence similar to
             that used by $$dyncall.  This is possible because $$dyncall acts
             as the import stub in an indirect call.  */
          ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
          xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
          output_asm_insn ("addil LT'%3,%%r19", xoperands);
          output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
          output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
          output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
          output_asm_insn ("depi 0,31,2,%%r22", xoperands);
          output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
          output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);

          if (!val_14)
            {
              output_asm_insn ("addil L'%2,%%r26", xoperands);
              nbytes += 4;
            }

          if (TARGET_PA_20)
            {
              output_asm_insn ("bve (%%r22)", xoperands);
              nbytes += 36;
            }
          else if (TARGET_NO_SPACE_REGS)
            {
              output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
              nbytes += 36;
            }
          else
            {
              output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
              output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
              output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
              nbytes += 44;
            }

          if (val_14)
            output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          else
            output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
        }
      else if (flag_pic)
        {
          rtx xop[4];

          /* Load function address into %r22.  */
          xop[0] = xoperands[0];
          xop[1] = gen_rtx_REG (Pmode, 1);
          xop[2] = gen_rtx_REG (Pmode, 22);
          pa_output_pic_pcrel_sequence (xop);

          if (!val_14)
            output_asm_insn ("addil L'%2,%%r26", xoperands);

          output_asm_insn ("bv %%r0(%%r22)", xoperands);

          if (val_14)
            {
              output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
              nbytes += 20;
            }
          else
            {
              output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
              nbytes += 24;
            }
        }
      else
        {
          if (!val_14)
            output_asm_insn ("addil L'%2,%%r26", xoperands);

          output_asm_insn ("ldil L'%0,%%r22", xoperands);
          output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);

          if (val_14)
            {
              output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
              nbytes += 12;
            }
          else
            {
              output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
              nbytes += 16;
            }
        }
    }
  else
    {
      rtx xop[4];

      /* Add DELTA to THIS.  */
      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 4;
        }
      else
        {
          output_asm_insn ("addil L'%2,%%r26", xoperands);
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 8;
        }

      if (TARGET_64BIT)
        {
          /* Load *(THIS + DELTA) to %r1.  */
          output_asm_insn ("ldd 0(%%r26),%%r1", xoperands);

          val_14 = VAL_14_BITS_P (vcall_offset);
          xoperands[2] = GEN_INT (vcall_offset);

          /* Load *(*(THIS + DELTA) + VCALL_OFFSET) to %r1.  */
          if (val_14)
            {
              output_asm_insn ("ldd %2(%%r1),%%r1", xoperands);
              nbytes += 8;
            }
          else
            {
              output_asm_insn ("addil L'%2,%%r1", xoperands);
              output_asm_insn ("ldd R'%2(%%r1),%%r1", xoperands);
              nbytes += 12;
            }
        }
      else
        {
          /* Load *(THIS + DELTA) to %r1.  */
          output_asm_insn ("ldw 0(%%r26),%%r1", xoperands);

          val_14 = VAL_14_BITS_P (vcall_offset);
          xoperands[2] = GEN_INT (vcall_offset);

          /* Load *(*(THIS + DELTA) + VCALL_OFFSET) to %r1.  */
          if (val_14)
            {
              output_asm_insn ("ldw %2(%%r1),%%r1", xoperands);
              nbytes += 8;
            }
          else
            {
              output_asm_insn ("addil L'%2,%%r1", xoperands);
              output_asm_insn ("ldw R'%2(%%r1),%%r1", xoperands);
              nbytes += 12;
            }
        }
      /* Branch to FUNCTION and add %r1 to THIS in delay slot if possible.  */
      if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
           && !(flag_pic && TREE_PUBLIC (function))
           && (TARGET_GAS || last_address < 262132))
          || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
              && ((targetm_common.have_named_sections
                   && DECL_SECTION_NAME (thunk_fndecl) != NULL
                   /* The GNU 64-bit linker has rather poor stub management.
                      So, we use a long branch from thunks that aren't in
                      the same section as the target function.  */
                   && ((!TARGET_64BIT
                        && (DECL_SECTION_NAME (thunk_fndecl)
                            != DECL_SECTION_NAME (function)))
                       || ((DECL_SECTION_NAME (thunk_fndecl)
                            == DECL_SECTION_NAME (function))
                           && last_address < 262132)))
                  /* In this case, we need to be able to reach the start of
                     the stub table even though the function is likely closer
                     and can be jumped to directly.  */
                  || (targetm_common.have_named_sections
                      && DECL_SECTION_NAME (thunk_fndecl) == NULL
                      && DECL_SECTION_NAME (function) == NULL
                      && total_code_bytes < MAX_PCREL17F_OFFSET)
                  /* Likewise.  */
                  || (!targetm_common.have_named_sections
                      && total_code_bytes < MAX_PCREL17F_OFFSET))))
        {
          nbytes += 4;
          output_asm_insn ("b %0", xoperands);

          /* Add *(*(THIS + DELTA) + VCALL_OFFSET) to THIS.  */
          output_asm_insn ("addl %%r1,%%r26,%%r26", xoperands);
        }
      else if (TARGET_64BIT)
        {
          /* Add *(*(THIS + DELTA) + VCALL_OFFSET) to THIS.  */
          output_asm_insn ("addl %%r1,%%r26,%%r26", xoperands);

          /* Load function address into %r1.  */
          xop[0] = xoperands[0];
          xop[1] = gen_rtx_REG (Pmode, 1);
          xop[2] = xop[1];
          pa_output_pic_pcrel_sequence (xop);

          output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
        }
      else if (TARGET_PORTABLE_RUNTIME)
        {
          /* Load function address into %r22.  */
          output_asm_insn ("ldil L'%0,%%r22", xoperands);
          output_asm_insn ("ldo R'%0(%%r22),%%r22", xoperands);

          output_asm_insn ("bv %%r0(%%r22)", xoperands);

          /* Add *(*(THIS + DELTA) + VCALL_OFFSET) to THIS.  */
          output_asm_insn ("addl %%r1,%%r26,%%r26", xoperands);
        }
      else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
        {
          /* Add *(*(THIS + DELTA) + VCALL_OFFSET) to THIS.  */
          output_asm_insn ("addl %%r1,%%r26,%%r26", xoperands);

          /* The function is accessible from outside this module.  The only
             way to avoid an import stub between the thunk and function is to
             call the function directly with an indirect sequence similar to
             that used by $$dyncall.  This is possible because $$dyncall acts
             as the import stub in an indirect call.  */
          ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
          xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
          output_asm_insn ("addil LT'%3,%%r19", xoperands);
          output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
          output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
          output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
          output_asm_insn ("depi 0,31,2,%%r22", xoperands);
          output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
          output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);

          if (TARGET_PA_20)
            output_asm_insn ("bve,n (%%r22)", xoperands);
          else if (TARGET_NO_SPACE_REGS)
            output_asm_insn ("be,n 0(%%sr4,%%r22)", xoperands);
          else
            {
              output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
              output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
              output_asm_insn ("be,n 0(%%sr0,%%r22)", xoperands);
            }
        }
      else if (flag_pic)
        {
          /* Add *(*(THIS + DELTA) + VCALL_OFFSET) to THIS.  */
          output_asm_insn ("addl %%r1,%%r26,%%r26", xoperands);

          /* Load function address into %r1.  */
          xop[0] = xoperands[0];
          xop[1] = gen_rtx_REG (Pmode, 1);
          xop[2] = xop[1];
          pa_output_pic_pcrel_sequence (xop);

          output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
        }
      else
        {
          /* Load function address into %r22.  */
          output_asm_insn ("ldil L'%0,%%r22", xoperands);
          output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);

          /* Add *(*(THIS + DELTA) + VCALL_OFFSET) to THIS.  */
          output_asm_insn ("addl %%r1,%%r26,%%r26", xoperands);
        }
    }
  final_end_function ();

  if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      switch_to_section (data_section);
      output_asm_insn (".align 4", xoperands);
      ASM_OUTPUT_LABEL (file, label);
      output_asm_insn (".word P'%0", xoperands);
    }

  current_thunk_number++;
  nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
            & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
  last_address += nbytes;
  if (old_last_address > last_address)
    last_address = UINT_MAX;
  update_total_code_bytes (nbytes);
  assemble_end_function (thunk_fndecl, fnname);
}
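
/* For the common short case above (DELTA fits in 14 bits and the target
   is reachable with a PC-relative branch) the emitted thunk is, roughly:

        b       <function>
        ldo     <delta>(%r26),%r26

   i.e. the delta is added to the incoming THIS pointer in %r26 in the
   delay slot of the branch to the target function.  */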
/* Only direct calls to static functions are allowed to be sibling (tail)
   call optimized.

   This restriction is necessary because some linker generated stubs will
   store return pointers into rp' in some cases which might clobber a
   live value already in rp'.

   In a sibcall the current function and the target function share stack
   space.  Thus if the path to the current function and the path to the
   target function save a value in rp', they save the value into the
   same stack slot, which has undesirable consequences.

   Because of the deferred binding nature of shared libraries any function
   with external scope could be in a different load module and thus require
   rp' to be saved when calling that function.  So sibcall optimizations
   can only be safe for static functions.

   Note that GCC never needs return value relocations, so we don't have to
   worry about static calls with return value relocations (which require
   saving rp').

   It is safe to perform a sibcall optimization when the target function
   will never return.  */

static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Sibcalls are not ok because the arg pointer register is not a fixed
     register.  This prevents the sibcall optimization from occurring.  In
     addition, there are problems with stub placement using GNU ld.  This
     is because a normal sibcall branch uses a 17-bit relocation while
     a regular call branch uses a 22-bit relocation.  As a result, more
     care needs to be taken in the placement of long-branch stubs.  */
  if (TARGET_64BIT)
    return false;

  if (TARGET_PORTABLE_RUNTIME)
    return false;

  /* Sibcalls are only ok within a translation unit.  */
  return decl && targetm.binds_local_p (decl);
}
/* ??? Addition is not commutative on the PA due to the weird implicit
   space register selection rules for memory addresses.  Therefore, we
   don't consider a + b == b + a, as this might be inside a MEM.  */

static bool
pa_commutative_p (const_rtx x, int outer_code)
{
  return (COMMUTATIVE_P (x)
          && (TARGET_NO_SPACE_REGS
              || (outer_code != UNKNOWN && outer_code != MEM)
              || GET_CODE (x) != PLUS));
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpyadd instructions.  */

int
pa_fmpyaddoperands (rtx *operands)
{
  machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
         && mode == GET_MODE (operands[2])
         && mode == GET_MODE (operands[3])
         && mode == GET_MODE (operands[4])
         && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
         && GET_CODE (operands[2]) == REG
         && GET_CODE (operands[3]) == REG
         && GET_CODE (operands[4]) == REG
         && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the addition.  One of the input operands must
     be the same as the output operand.  */
  if (! rtx_equal_p (operands[3], operands[4])
      && ! rtx_equal_p (operands[3], operands[5]))
    return 0;

  /* Inout operand of add cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* multiply cannot feed into addition operands.  */
  if (rtx_equal_p (operands[4], operands[0])
      || rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpyadd.  */
  return 1;
}
#if !defined(USE_COLLECT2)
static void
pa_asm_out_constructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef CTORS_SECTION_ASM_OP
  default_ctor_section_asm_out_constructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_constructor (symbol, priority);
# else
  default_stabs_asm_out_constructor (symbol, priority);
# endif
#endif
}

static void
pa_asm_out_destructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef DTORS_SECTION_ASM_OP
  default_dtor_section_asm_out_destructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_destructor (symbol, priority);
# else
  default_stabs_asm_out_destructor (symbol, priority);
# endif
#endif
}
#endif
/* This function places uninitialized global data in the bss section.
   The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
   function on the SOM port to prevent uninitialized global data from
   being placed in the data section.  */

void
pa_asm_output_aligned_bss (FILE *stream,
                           const char *name,
                           unsigned HOST_WIDE_INT size,
                           unsigned int align)
{
  switch_to_section (bss_section);

#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
  ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
#endif

  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
}
/* Both the HP and GNU assemblers under HP-UX provide a .comm directive
   that doesn't allow the alignment of global common storage to be directly
   specified.  The SOM linker aligns common storage based on the rounded
   value of the NUM_BYTES parameter in the .comm directive.  It's not
   possible to use the .align directive as it doesn't affect the alignment
   of the label associated with a .comm directive.  */

void
pa_asm_output_aligned_common (FILE *stream,
                              const char *name,
                              unsigned HOST_WIDE_INT size,
                              unsigned int align)
{
  unsigned int max_common_align;

  max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
  if (align > max_common_align)
    {
      /* Alignment exceeds maximum alignment for global common data.  */
      align = max_common_align;
    }

  switch_to_section (bss_section);

  assemble_name (stream, name);
  fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
           MAX (size, align / BITS_PER_UNIT));
}
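
/* For example, a 2-byte common object requesting 8-byte (64-bit) alignment
   is emitted as ".comm 8" after its name; the SOM linker then rounds the
   reserved storage to 8 bytes, which supplies the requested alignment.  */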
/* We can't use .comm for local common storage as the SOM linker effectively
   treats the symbol as universal and uses the same storage for local symbols
   with the same name in different object files.  The .block directive
   reserves an uninitialized block of storage.  However, it's not common
   storage.  Fortunately, GCC never requests common storage with the same
   name in any given translation unit.  */

void
pa_asm_output_aligned_local (FILE *stream,
                             const char *name,
                             unsigned HOST_WIDE_INT size,
                             unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef LOCAL_ASM_OP
  fprintf (stream, "%s", LOCAL_ASM_OP);
  assemble_name (stream, name);
  fprintf (stream, "\n");
#endif

  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpysub instructions.  */

int
pa_fmpysuboperands (rtx *operands)
{
  machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
         && mode == GET_MODE (operands[2])
         && mode == GET_MODE (operands[3])
         && mode == GET_MODE (operands[4])
         && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
         && GET_CODE (operands[2]) == REG
         && GET_CODE (operands[3]) == REG
         && GET_CODE (operands[4]) == REG
         && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the subtraction.  Subtraction is not a commutative
     operation, so operands[4] must be the same as operand[3].  */
  if (! rtx_equal_p (operands[3], operands[4]))
    return 0;

  /* multiply cannot feed into subtraction.  */
  if (rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* Inout operand of sub cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpysub.  */
  return 1;
}
/* Return 1 if the given constant is 2, 4, or 8.  These are the valid
   constants for a MULT embedded inside a memory address.  */
int
pa_mem_shadd_constant_p (int val)
{
  if (val == 2 || val == 4 || val == 8)
    return 1;
  else
    return 0;
}

/* Return 1 if the given constant is 1, 2, or 3.  These are the valid
   constants for shadd instructions.  */
int
pa_shadd_constant_p (int val)
{
  if (val == 1 || val == 2 || val == 3)
    return 1;
  else
    return 0;
}
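
/* The two sets correspond: a scale factor of 2, 4 or 8 in an address is
   implemented with a shadd shift count of 1, 2 or 3 respectively.  */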
/* Return TRUE if INSN branches forward.  */

static bool
forward_branch_p (rtx_insn *insn)
{
  rtx lab = JUMP_LABEL (insn);

  /* The INSN must have a jump label.  */
  gcc_assert (lab != NULL_RTX);

  if (INSN_ADDRESSES_SET_P ())
    return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));

  while (insn)
    {
      if (insn == lab)
        return true;
      else
        insn = NEXT_INSN (insn);
    }

  return false;
}
/* Output an unconditional move and branch insn.  */

const char *
pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
{
  int length = get_attr_length (insn);

  /* These are the cases in which we win.  */
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
        {
          /* Nothing in the delay slot, fake it by putting the combined
             insn (the copy or add) in the delay slot of a bl.  */
          if (GET_CODE (operands[1]) == CONST_INT)
            return "b %2\n\tldi %1,%0";
          else
            return "b %2\n\tcopy %1,%0";
        }
      else
        {
          /* Something in the delay slot, but we've got a long branch.  */
          if (GET_CODE (operands[1]) == CONST_INT)
            return "ldi %1,%0\n\tb %2";
          else
            return "copy %1,%0\n\tb %2";
        }
    }

  if (GET_CODE (operands[1]) == CONST_INT)
    output_asm_insn ("ldi %1,%0", operands);
  else
    output_asm_insn ("copy %1,%0", operands);
  return pa_output_lbranch (operands[2], insn, 1);
}
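
/* In the winning case the movb,tr form performs the register (or small
   immediate) copy and the unconditional branch in a single four-byte
   instruction; the longer forms fall back to a separate copy and branch.  */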
/* Output an unconditional add and branch insn.  */

const char *
pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
        /* Nothing in the delay slot, fake it by putting the combined
           insn (the copy or add) in the delay slot of a bl.  */
        return "b %3\n\tadd%I1 %1,%0,%0";
      else
        /* Something in the delay slot, but we've got a long branch.  */
        return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return pa_output_lbranch (operands[3], insn, 1);
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.  */

static void
pa_reorg (void)
{
  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();
}

/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform a FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed location.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is an insn away).  These are difficult to use due to the
   branch length restrictions.  */
static void
pa_combine_instructions (void)
{
  rtx_insn *anchor;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  rtx_insn *new_rtx = make_insn_raw (par);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
         Also ignore any special USE insns.  */
      if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
          || GET_CODE (PATTERN (anchor)) == USE
          || GET_CODE (PATTERN (anchor)) == CLOBBER)
        continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
          || anchor_attr == PA_COMBINE_TYPE_FADDSUB
          || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
              && ! forward_branch_p (anchor)))
        {
          rtx_insn *floater;

          for (floater = PREV_INSN (anchor);
               floater;
               floater = PREV_INSN (floater))
            {
              if (NOTE_P (floater)
                  || (NONJUMP_INSN_P (floater)
                      && (GET_CODE (PATTERN (floater)) == USE
                          || GET_CODE (PATTERN (floater)) == CLOBBER)))
                continue;

              /* Anything except a regular INSN will stop our search.  */
              if (! NONJUMP_INSN_P (floater))
                {
                  floater = NULL;
                  break;
                }

              /* See if FLOATER is suitable for combination with the
                 anchor.  */
              floater_attr = get_attr_pa_combine_type (floater);
              if ((anchor_attr == PA_COMBINE_TYPE_FMPY
                   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
                  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                      && floater_attr == PA_COMBINE_TYPE_FMPY))
                {
                  /* If ANCHOR and FLOATER can be combined, then we're
                     done with this pass.  */
                  if (pa_can_combine_p (new_rtx, anchor, floater, 0,
                                        SET_DEST (PATTERN (floater)),
                                        XEXP (SET_SRC (PATTERN (floater)), 0),
                                        XEXP (SET_SRC (PATTERN (floater)), 1)))
                    break;
                }

              else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
                       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
                {
                  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
                    {
                      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
                                            SET_DEST (PATTERN (floater)),
                                            XEXP (SET_SRC (PATTERN (floater)), 0),
                                            XEXP (SET_SRC (PATTERN (floater)), 1)))
                        break;
                    }
                  else
                    {
                      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
                                            SET_DEST (PATTERN (floater)),
                                            SET_SRC (PATTERN (floater)),
                                            SET_SRC (PATTERN (floater))))
                        break;
                    }
                }
            }

          /* If we didn't find anything on the backwards scan try forwards.  */
          if (!floater
              && (anchor_attr == PA_COMBINE_TYPE_FMPY
                  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
            {
              for (floater = anchor; floater; floater = NEXT_INSN (floater))
                {
                  if (NOTE_P (floater)
                      || (NONJUMP_INSN_P (floater)
                          && (GET_CODE (PATTERN (floater)) == USE
                              || GET_CODE (PATTERN (floater)) == CLOBBER)))
                    continue;

                  /* Anything except a regular INSN will stop our search.  */
                  if (! NONJUMP_INSN_P (floater))
                    {
                      floater = NULL;
                      break;
                    }

                  /* See if FLOATER is suitable for combination with the
                     anchor.  */
                  floater_attr = get_attr_pa_combine_type (floater);
                  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
                       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
                      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                          && floater_attr == PA_COMBINE_TYPE_FMPY))
                    {
                      /* If ANCHOR and FLOATER can be combined, then we're
                         done with this pass.  */
                      if (pa_can_combine_p (new_rtx, anchor, floater, 1,
                                            SET_DEST (PATTERN (floater)),
                                            XEXP (SET_SRC (PATTERN (floater)),
                                                  0),
                                            XEXP (SET_SRC (PATTERN (floater)),
                                                  1)))
                        break;
                    }
                }
            }

          /* FLOATER will be nonzero if we found a suitable floating
             insn for combination with ANCHOR.  */
          if (floater
              && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                  || anchor_attr == PA_COMBINE_TYPE_FMPY))
            {
              /* Emit the new instruction and delete the old anchor.  */
              rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
                                       copy_rtx (PATTERN (floater)));
              rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
              emit_insn_before (temp, anchor);

              SET_INSN_DELETED (anchor);

              /* Emit a special USE insn for FLOATER, then delete
                 the floating insn.  */
              temp = copy_rtx (PATTERN (floater));
              emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
              delete_insn (floater);

              continue;
            }
          else if (floater
                   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
            {
              /* Emit the new_jump instruction and delete the old anchor.  */
              rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
                                       copy_rtx (PATTERN (floater)));
              rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
              temp = emit_jump_insn_before (temp, anchor);

              JUMP_LABEL (temp) = JUMP_LABEL (anchor);
              SET_INSN_DELETED (anchor);

              /* Emit a special USE insn for FLOATER, then delete
                 the floating insn.  */
              temp = copy_rtx (PATTERN (floater));
              emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
              delete_insn (floater);
            }
        }
    }
}
static int
pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
                  int reversed, rtx dest,
                  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx_insn *start, *end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
  INSN_CODE (new_rtx) = -1;
  insn_code_number = recog_memoized (new_rtx);
  basic_block bb = BLOCK_FOR_INSN (anchor);
  if (insn_code_number < 0
      || (extract_insn (new_rtx),
          !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There's up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in particular.  */

int
pa_insn_refs_are_delayed (rtx_insn *insn)
{
  return ((NONJUMP_INSN_P (insn)
           && GET_CODE (PATTERN (insn)) != SEQUENCE
           && GET_CODE (PATTERN (insn)) != USE
           && GET_CODE (PATTERN (insn)) != CLOBBER
           && get_attr_type (insn) == TYPE_MILLI));
}
/* Promote the return value, but not the arguments.  */

static machine_mode
pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
                          machine_mode mode,
                          int *punsignedp ATTRIBUTE_UNUSED,
                          const_tree fntype ATTRIBUTE_UNUSED,
                          int for_return)
{
  if (for_return == 0)
    return mode;
  return promote_mode (type, mode, punsignedp);
}
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else promoting
   return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

static rtx
pa_function_value (const_tree valtype,
                   const_tree func ATTRIBUTE_UNUSED,
                   bool outgoing ATTRIBUTE_UNUSED)
{
  machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      HOST_WIDE_INT valsize = int_size_in_bytes (valtype);

      /* Handle aggregates that fit exactly in a word or double word.  */
      if (valsize == UNITS_PER_WORD || valsize == 2 * UNITS_PER_WORD)
        return gen_rtx_REG (TYPE_MODE (valtype), 28);

      if (TARGET_64BIT)
        {
          /* Aggregates with a size less than or equal to 128 bits are
             returned in GR 28(-29).  They are left justified.  The pad
             bits are undefined.  Larger aggregates are returned in
             memory.  */
          rtx loc[2];
          int i, offset = 0;
          int ub = valsize <= UNITS_PER_WORD ? 1 : 2;

          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, 28 + i),
                                          GEN_INT (offset));
              offset += 8;
            }

          return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
        }
      else if (valsize > UNITS_PER_WORD)
        {
          /* Aggregates 5 to 8 bytes in size are returned in general
             registers r28-r29 in the same manner as other non
             floating-point objects.  The data is right-justified and
             zero-extended to 64 bits.  This is opposite to the normal
             justification used on big endian targets and requires
             special treatment.  */
          rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                       gen_rtx_REG (DImode, 28), const0_rtx);
          return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
        }
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
/* Implement the TARGET_LIBCALL_VALUE hook.  */

static rtx
pa_libcall_value (machine_mode mode,
                  const_rtx fun ATTRIBUTE_UNUSED)
{
  if (! TARGET_SOFT_FLOAT
      && (mode == SFmode || mode == DFmode))
    return gen_rtx_REG (mode, 32);
  else
    return gen_rtx_REG (mode, 28);
}
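
/* So, for example, a DFmode libcall result is expected in %fr4 (register
   32) on a hard-float target, and in %r28 when soft float is in effect.  */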
/* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook.  */

static bool
pa_function_value_regno_p (const unsigned int regno)
{
  if (regno == 28
      || (! TARGET_SOFT_FLOAT && regno == 32))
    return true;

  return false;
}
/* Update the data in CUM to advance over argument ARG.  */

static void
pa_function_arg_advance (cumulative_args_t cum_v,
                         const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int arg_size = pa_function_arg_size (arg.mode, arg.type);

  cum->nargs_prototype--;
  cum->words += (arg_size
                 + ((cum->words & 01)
                    && arg.type != NULL_TREE
                    && arg_size > 1));
}
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
static rtx
pa_function_arg (cumulative_args_t cum_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  tree type = arg.type;
  machine_mode mode = arg.mode;
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (arg.end_marker_p ())
    return NULL_RTX;

  arg_size = pa_function_arg_size (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
        return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
        alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
        return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */
  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

         Remember, gprs grow towards smaller register numbers while
         fprs grow to higher register numbers.  Also remember that
         although FP regs are 32-bit addressable, we pretend that
         the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
         treatment.  */
      if (arg_size > 1
          || mode == BLKmode
          || (type && (AGGREGATE_TYPE_P (type)
                       || TREE_CODE (type) == COMPLEX_TYPE
                       || TREE_CODE (type) == VECTOR_TYPE)))
        {
          /* Double-extended precision (80-bit), quad-precision (128-bit)
             and aggregates including complex numbers are aligned on
             128-bit boundaries.  The first eight 64-bit argument slots
             are associated one-to-one, with general registers r26
             through r19, and also with floating-point registers fr4
             through fr11.  Arguments larger than one word are always
             passed in general registers.

             Using a PARALLEL with a word mode register results in left
             justified data on a big-endian target.  */

          rtx loc[8];
          int i, offset = 0, ub = arg_size;

          /* Align the base register.  */
          gpr_reg_base -= alignment;

          ub = MIN (ub, max_arg_words - cum->words - alignment);
          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, gpr_reg_base),
                                          GEN_INT (offset));
              gpr_reg_base -= 1;
              offset += 8;
            }

          return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
        }
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
         which registers we must use.  */
      if (arg_size > 1)
        {
          if (cum->words)
            {
              gpr_reg_base = 23;
              fpr_reg_base = 38;
            }
          else
            {
              gpr_reg_base = 25;
              fpr_reg_base = 34;
            }

          /* Structures 5 to 8 bytes in size are passed in the general
             registers in the same manner as other non floating-point
             objects.  The data is right-justified and zero-extended
             to 64 bits.  This is opposite to the normal justification
             used on big endian targets and requires special treatment.
             We now define BLOCK_REG_PADDING to pad these objects.
             Aggregates, complex and vector types are passed in the same
             manner as structures.  */
          if (mode == BLKmode
              || (type && (AGGREGATE_TYPE_P (type)
                           || TREE_CODE (type) == COMPLEX_TYPE
                           || TREE_CODE (type) == VECTOR_TYPE)))
            {
              rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (DImode, gpr_reg_base),
                                           const0_rtx);
              return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
            }
        }
      else
        {
          /* We have a single word (32 bits).  A simple computation
             will get us the register #s we need.  */
          gpr_reg_base = 26 - cum->words;
          fpr_reg_base = 32 + 2 * cum->words;
        }
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
          is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
          pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
          registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
          sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
         calls with the 32 bit ABI and the HP assembler since there is no
         way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
          && !TARGET_ELF32
          && cum->incoming
          && cum->indirect
          && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
        = gen_rtx_PARALLEL
            (mode,
             gen_rtvec (2,
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, fpr_reg_base),
                                           const0_rtx),
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, gpr_reg_base),
                                           const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
          /* Indirect calls in the normal 32bit ABI require all arguments
             to be passed in general registers.  */
          || (!TARGET_PORTABLE_RUNTIME
              && !TARGET_64BIT
              && !TARGET_ELF32
              && cum->indirect)
          /* If the parameter is not a scalar floating-point parameter,
             then it belongs in GPRs.  */
          || GET_MODE_CLASS (mode) != MODE_FLOAT
          /* Structure with single SFmode field belongs in GPR.  */
          || (type && AGGREGATE_TYPE_P (type)))
        retval = gen_rtx_REG (mode, gpr_reg_base);
      else
        retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
/* Arguments larger than one word are double word aligned.  */

static unsigned int
pa_function_arg_boundary (machine_mode mode, const_tree type)
{
  bool singleword = (type
                     ? (integer_zerop (TYPE_SIZE (type))
                        || !TREE_CONSTANT (TYPE_SIZE (type))
                        || int_size_in_bytes (type) <= UNITS_PER_WORD)
                     : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);

  return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
}
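
/* For instance, in the 32-bit ABI (UNITS_PER_WORD == 4) a DImode or DFmode
   argument gets MAX_PARM_BOUNDARY (double word) alignment, while an int or
   SFmode argument keeps the normal PARM_BOUNDARY alignment.  */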
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (pa_function_arg_size (arg.mode, arg.type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + pa_function_arg_size (arg.mode, arg.type)
      <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
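
/* Worked example for the split case: a four-word argument that starts at
   cum->words == 6 with no padding has only two argument words left in
   registers, so the routine returns 2 * UNITS_PER_WORD bytes (16 on the
   64-bit ABI) and the remaining two words are passed on the stack.  */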
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            {
              output_section_asm_op ("\t.SPACE $TEXT$\n"
                                     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
                                     "ACCESS=44,SORT=24,COMDAT");
              return;
            }
        }
      else
        {
          /* There isn't a current function or the body of the current
             function has been completed.  So, we are changing to the
             text section to output debugging information.  Thus, we
             need to forget that we are in the text section so that
             varasm.c will call us when text_section is selected again.  */
          gcc_assert (!cfun || !cfun->machine
                      || cfun->machine->in_nsubspa == 2);
          in_section = NULL;
        }
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  if (flag_tm)
    som_tm_clone_table_section
      = get_unnamed_section (0, output_section_asm_op,
                             "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");

  /* HPUX ld generates incorrect GOT entries for "T" fixups which
     reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols.  This is the other correct way to
     reference constant data during PIC code generation.

     Thus, we can't put constant data needing relocation in the $TEXT$
     space during PIC generation.

     Previously, we placed all constant data into the $DATA$ subspace
     when generating PIC code.  This reduces sharing, but it works
     correctly.  Now we rely on pa_reloc_rw_mask() for section selection.
     This puts constant data not needing relocation into the $TEXT$ space.  */
  readonly_data_section = som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}

/* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION.  */

static section *
pa_som_tm_clone_table_section (void)
{
  return som_tm_clone_table_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc
   cannot be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !(reloc & pa_reloc_rw_mask ()))
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp)
           && !(reloc & pa_reloc_rw_mask ()))
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
/* Implement pa_elf_select_rtx_section.  If X is a function label operand
   and the function is in a COMDAT group, place the plabel reference in the
   .data.rel.ro.local section.  The linker ignores references to symbols in
   discarded sections from this section.  */

static section *
pa_elf_select_rtx_section (machine_mode mode, rtx x,
                           unsigned HOST_WIDE_INT align)
{
  if (function_label_operand (x, VOIDmode))
    {
      tree decl = SYMBOL_REF_DECL (x);

      if (!decl || (DECL_P (decl) && DECL_COMDAT_GROUP (decl)))
        return get_named_section (NULL, ".data.rel.ro.local", 1);
    }

  return default_elf_select_rtx_section (mode, x, align);
}
/* Implement pa_reloc_rw_mask.  */

static int
pa_reloc_rw_mask (void)
{
  if (flag_pic || (TARGET_SOM && !TARGET_HPUX_11))
    return 3;

  /* HP linker does not support global relocs in readonly memory.  */
  return TARGET_SOM ? 2 : 0;
}
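
/* Roughly, the mask is tested against the relocation kinds seen by the
   generic section-selection code: 3 keeps anything needing a runtime
   relocation out of read-only sections, 2 only symbolic (global)
   relocations, and 0 allows either to stay in read-only data.  */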
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
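
/* Thus a 12-byte structure, for example, is returned in memory under the
   32-bit SOM ABI (8-byte limit) but in registers under PA64 (16-byte
   limit).  */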
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */

/* Vector of extern_symbol pointers.  */
static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;

#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  gcc_assert (file == asm_out_file);
  extern_symbol p = {decl, name};
  vec_safe_push (extern_symbols, p);
}
#endif
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_file_end (void)
{
#ifdef ASM_OUTPUT_EXTERNAL_REAL
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();
#endif

  output_deferred_plabels ();

#ifdef ASM_OUTPUT_EXTERNAL_REAL
  for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  vec_free (extern_symbols);
#endif

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
pa_can_change_mode_class (machine_mode from, machine_mode to,
                          reg_class_t rclass)
{
  if (from == to)
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return true;

  /* Reject changes to/from modes with zero size.  */
  if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return false;

  /* There is no way to load QImode or HImode values directly from memory
     to a FP register.  SImode loads to the FP registers are not zero
     extended.  On the 64-bit target, this conflicts with the definition
     of LOAD_EXTEND_OP.  Thus, we reject all mode changes in the FP registers
     except for DImode to SImode on the 64-bit target.  It is handled by
     register renaming in pa_print_operand.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return TARGET_64BIT && from == DImode && to == SImode;

  /* TARGET_HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return false;

  return true;
}
/* Implement TARGET_MODES_TIEABLE_P.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tieing these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on TARGET_HARD_REGNO_MODE_OK and
   TARGET_CAN_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

static bool
pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
/* Length in units of the trampoline instruction code.  */

#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 36 : 48))


/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
   and then branches to the specified routine.

   This code template is copied from text segment to stack location
   and then patched with pa_trampoline_init to contain valid values,
   and then entered as a subroutine.

   It is best to keep this as small as possible to avoid having to
   flush multiple lines in the cache.  */

static void
pa_asm_trampoline_template (FILE *f)
{
  if (!TARGET_64BIT)
    {
      if (TARGET_PA_20)
        {
          fputs ("\tmfia %r20\n", f);
          fputs ("\tldw 48(%r20),%r22\n", f);
          fputs ("\tcopy %r22,%r21\n", f);
          fputs ("\tbb,>=,n %r22,30,.+16\n", f);
          fputs ("\tdepwi 0,31,2,%r22\n", f);
          fputs ("\tldw 0(%r22),%r21\n", f);
          fputs ("\tldw 4(%r22),%r19\n", f);
          fputs ("\tbve (%r21)\n", f);
          fputs ("\tldw 52(%r1),%r29\n", f);
          fputs ("\t.word 0\n", f);
          fputs ("\t.word 0\n", f);
          fputs ("\t.word 0\n", f);
        }
      else
        {
          if (ASSEMBLER_DIALECT == 0)
            {
              fputs ("\tbl .+8,%r20\n", f);
              fputs ("\tdepi 0,31,2,%r20\n", f);
            }
          else
            {
              fputs ("\tb,l .+8,%r20\n", f);
              fputs ("\tdepwi 0,31,2,%r20\n", f);
            }
          fputs ("\tldw 40(%r20),%r22\n", f);
          fputs ("\tcopy %r22,%r21\n", f);
          fputs ("\tbb,>=,n %r22,30,.+16\n", f);
          if (ASSEMBLER_DIALECT == 0)
            fputs ("\tdepi 0,31,2,%r22\n", f);
          else
            fputs ("\tdepwi 0,31,2,%r22\n", f);
          fputs ("\tldw 0(%r22),%r21\n", f);
          fputs ("\tldw 4(%r22),%r19\n", f);
          fputs ("\tldsid (%r21),%r1\n", f);
          fputs ("\tmtsp %r1,%sr0\n", f);
          fputs ("\tbe 0(%sr0,%r21)\n", f);
          fputs ("\tldw 44(%r20),%r29\n", f);
        }
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
    }
  else
    {
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\tmfia %r31\n", f);
      fputs ("\tldd 24(%r31),%r27\n", f);
      fputs ("\tldd 32(%r31),%r31\n", f);
      fputs ("\tldd 16(%r27),%r1\n", f);
      fputs ("\tbve (%r1)\n", f);
      fputs ("\tldd 24(%r27),%r27\n", f);
      fputs ("\t.dword 0 ; fptr\n", f);
      fputs ("\t.dword 0 ; static link\n", f);
    }
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 48.
   Move the static chain value to trampoline template at offset 52.
   Move the trampoline address to trampoline template at offset 56.
   Move r19 to trampoline template at offset 60.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */
static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 52);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 60);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_andsi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
							    r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }

#ifdef HAVE_ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
#endif
}
/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 56.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (Pmode, addr, 58));
  return addr;
}
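
/* The plabel itself is at offset 56; the extra 2 sets the low-order
   "plabel" bit in the function pointer so that indirect calls, including
   the trampoline template above, recognize the pointer as a plabel and
   dereference it to find the entry point.  */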
static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));

  return x;
}
static rtx
pa_internal_arg_pointer (void)
{
  /* The argument pointer and the hard frame pointer are the same in
     the 32-bit runtime, so we don't need a copy.  */
  if (TARGET_64BIT)
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.  */

static bool
pa_can_eliminate (const int from, const int to)
{
  /* The argument pointer cannot be eliminated in the 64-bit runtime.  */
  if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
    return false;

  return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : true);
}
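
/* That is, replacing the soft frame pointer with the hard frame pointer is
   always allowed, while eliminating the hard frame pointer in favor of the
   stack pointer is only allowed when no frame pointer is needed.  */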
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
pa_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
      && to == STACK_POINTER_REGNUM)
    offset = -pa_compute_frame_size (get_frame_size (), 0);
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
static void
pa_conditional_register_usage (void)
{
  int i;

  if (!TARGET_64BIT && !TARGET_PA_11)
    {
      for (i = 56; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
      for (i = 33; i < 56; i += 2)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
    {
      for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Target hook for c_mode_for_suffix.  */

static machine_mode
pa_c_mode_for_suffix (char suffix)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      if (suffix == 'q')
	return TFmode;
    }

  return VOIDmode;
}
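
/* In other words, when the HP-UX long-double library is in use, the 'q'
   literal suffix selects the 128-bit TFmode type; otherwise no extra
   suffix mode is provided.  */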
/* Target hook for function_section.  */

static section *
pa_function_section (tree decl, enum node_frequency freq,
		     bool startup, bool exit)
{
  /* Put functions in text section if target doesn't have named sections.  */
  if (!targetm_common.have_named_sections)
    return text_section;

  /* Force nested functions into the same section as the containing
     function.  */
  if (decl
      && DECL_SECTION_NAME (decl) == NULL
      && DECL_CONTEXT (decl) != NULL_TREE
      && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
      && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
    return function_section (DECL_CONTEXT (decl));

  /* Otherwise, use the default function section.  */
  return default_function_section (decl, freq, startup, exit);
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   In 64-bit mode, we reject CONST_DOUBLES.  We also reject CONST_INTS
   that need more than three instructions to load prior to reload.  This
   limit is somewhat arbitrary.  It takes three instructions to load a
   CONST_INT from memory but two are memory accesses.  It may be better
   to increase the allowed range for CONST_INTS.  We may also be able
   to handle CONST_DOUBLES.  */

static bool
pa_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
    return false;

  if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
    return false;

  /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
     legitimate constants.  The other variants can't be handled by
     the move patterns after reload starts.  */
  if (tls_referenced_p (x))
    return false;

  if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (TARGET_64BIT
      && HOST_BITS_PER_WIDE_INT > 32
      && GET_CODE (x) == CONST_INT
      && !reload_in_progress
      && !reload_completed
      && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
      && !pa_cint_ok_for_move (UINTVAL (x)))
    return false;

  if (function_label_operand (x, mode))
    return false;

  return true;
}
/* Implement TARGET_SECTION_TYPE_FLAGS.  */

static unsigned int
pa_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags;

  flags = default_section_type_flags (decl, name, reloc);

  /* Function labels are placed in the constant pool.  This can
     cause a section conflict if decls are put in ".data.rel.ro"
     or ".data.rel.ro.local" using the __attribute__ construct.  */
  if (strcmp (name, ".data.rel.ro") == 0
      || strcmp (name, ".data.rel.ro.local") == 0)
    flags |= SECTION_WRITE | SECTION_RELRO;

  return flags;
}
/* pa_legitimate_address_p recognizes an RTL expression that is a
   valid memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
   REG+REG, and REG+(REG*SCALE).  The indexed address forms are only
   available with floating point loads and stores, and integer loads.
   We get better code by allowing indexed addresses in the initial
   RTL generation.

   The acceptance of indexed addresses as legitimate implies that we
   must provide patterns for doing indexed integer stores, or the move
   expanders must force the address of an indexed store to a register.
   We have adopted the latter approach.

   Another function of pa_legitimate_address_p is to ensure that
   the base register is a valid pointer for indexed instructions.
   On targets that have non-equivalent space registers, we have to
   know at the time of assembler output which register in a REG+REG
   pair is the base register.  The REG_POINTER flag is sometimes lost
   in reload and the following passes, so it can't be relied on during
   code generation.  Thus, we either have to canonicalize the order
   of the registers in REG+REG indexed addresses, or treat REG+REG
   addresses separately and provide patterns for both permutations.

   The latter approach requires several hundred additional lines of
   code in pa.md.  The downside to canonicalizing is that a PLUS
   in the wrong order can't combine to form a scaled indexed
   memory operand.  As we won't need to canonicalize the operands if
   the REG_POINTER lossage can be fixed, it seems better to canonicalize.

   We initially break out scaled indexed addresses in canonical order
   in pa_emit_move_sequence.  LEGITIMIZE_ADDRESS also canonicalizes
   scaled indexed addresses during RTL generation.  However, fold_rtx
   has its own opinion on how the operands of a PLUS should be ordered.
   If one of the operands is equivalent to a constant, it will make
   that operand the second operand.  As the base register is likely to
   be equivalent to a SYMBOL_REF, we have made it the second operand.

   pa_legitimate_address_p accepts REG+REG as legitimate when the
   operands are in the order INDEX+BASE on targets with non-equivalent
   space registers, and in any order on targets with equivalent space
   registers.  It accepts both MULT+BASE and BASE+MULT for scaled indexing.

   We treat a SYMBOL_REF as legitimate if it is part of the current
   function's constant-pool, because such addresses can actually be
   output as REG+SMALLINT.  */
bool
pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  if ((REG_P (x)
       && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
		  : REG_OK_FOR_BASE_P (x)))
      || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
	   || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
	  && REG_P (XEXP (x, 0))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
		     : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
    return true;

  if (GET_CODE (x) == PLUS)
    {
      rtx base, index;

      /* For REG+REG, the base register should be in XEXP (x, 1),
	 so check it first.  */
      if (REG_P (XEXP (x, 1))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
		     : REG_OK_FOR_BASE_P (XEXP (x, 1))))
	base = XEXP (x, 1), index = XEXP (x, 0);
      else if (REG_P (XEXP (x, 0))
	       && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
			  : REG_OK_FOR_BASE_P (XEXP (x, 0))))
	base = XEXP (x, 0), index = XEXP (x, 1);
      else
	return false;

      if (GET_CODE (index) == CONST_INT)
	{
	  if (INT_5_BITS (index))
	    return true;

	  /* When INT14_OK_STRICT is false, a secondary reload is needed
	     to adjust the displacement of SImode and DImode floating point
	     instructions but this may fail when the register also needs
	     reloading.  So, we return false when STRICT is true.  We
	     also reject long displacements for float mode addresses since
	     the majority of accesses will use floating point instructions
	     that don't support 14-bit offsets.  */
	  if (!INT14_OK_STRICT
	      && (strict || !(reload_in_progress || reload_completed))
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  return base14_operand (index, mode);
	}

      if (!TARGET_DISABLE_INDEXING
	  /* Only accept the "canonical" INDEX+BASE operand order
	     on targets with non-equivalent space registers.  */
	  && (TARGET_NO_SPACE_REGS
	      ? REG_P (index)
	      : (base == XEXP (x, 1) && REG_P (index)
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (base))
		     || REG_POINTER (base))
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (index))
		     || !REG_POINTER (index))))
	  && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
		     : REG_OK_FOR_INDEX_P (index))
	  && borx_reg_operand (base, Pmode)
	  && borx_reg_operand (index, Pmode))
	return true;

      if (!TARGET_DISABLE_INDEXING
	  && GET_CODE (index) == MULT
	  /* Only accept base operands with the REG_POINTER flag prior to
	     reload on targets with non-equivalent space registers.  */
	  && (TARGET_NO_SPACE_REGS
	      || (base == XEXP (x, 1)
		  && (reload_completed
		      || (reload_in_progress && HARD_REGISTER_P (base))
		      || REG_POINTER (base))))
	  && REG_P (XEXP (index, 0))
	  && GET_MODE (XEXP (index, 0)) == Pmode
	  && MODE_OK_FOR_SCALED_INDEXING_P (mode)
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
		     : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
	  && GET_CODE (XEXP (index, 1)) == CONST_INT
	  && INTVAL (XEXP (index, 1)) == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
	  && borx_reg_operand (base, Pmode))
	return true;

      return false;
    }

  if (GET_CODE (x) == LO_SUM)
    {
      rtx y = XEXP (x, 0);

      if (GET_CODE (y) == SUBREG)
	y = SUBREG_REG (y);

      if (REG_P (y)
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
		     : REG_OK_FOR_BASE_P (y)))
	{
	  /* Needed for -fPIC */
	  if (mode == Pmode
	      && GET_CODE (XEXP (x, 1)) == UNSPEC)
	    return true;

	  if (!INT14_OK_STRICT
	      && (strict || !(reload_in_progress || reload_completed))
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  if (CONSTANT_P (XEXP (x, 1)))
	    return true;
	}
      return false;
    }

  if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
    return true;

  return false;
}
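
/* For example, (plus (reg) (const_int 4)) is accepted for any mode with a
   valid base register since the displacement fits in 5 bits, (plus (reg)
   (reg)) can be accepted as an unscaled index when the mode supports
   indexing, and (plus (mult (reg) (const_int 8)) (reg)) can be accepted as
   a scaled index for an 8-byte mode because the scale factor must match
   the mode size.  */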
/* Look for machine dependent ways to make the invalid address AD a
   valid one.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This makes reload inheritance and reload_cse work better since Z
   can be reused.

   There may be more opportunities to improve code with this hook.  */
static rtx
pa_legitimize_reload_address (rtx ad, machine_mode mode,
			      int opnum, int type,
			      int ind_levels ATTRIBUTE_UNUSED)
{
  long offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
				      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;

  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP ((new_rtx), 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~mask) + mask + 1;
      else
	newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
	  && (GET_MODE_CLASS (mode) == MODE_FLOAT
	      || (TARGET_64BIT && (mode) == DImode)))
	newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
	{
	  temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
			       GEN_INT (newoffset));
	  ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
	  push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  return ad;
	}
    }

  return NULL_RTX;
}
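
/* As an illustration, for an SFmode access where INT14_OK_STRICT is false,
   MASK is 0x1f.  An address X + 100 is then split as follows: 100 & 0x1f
   is 4, which is below the halfway point, so Y is rounded down to 96.
   Since 96 is a valid 14-bit displacement, the address is rewritten as
   (X + 96) + 4; the (X + 96) part is reloaded into a register and the
   remaining displacement of 4 fits the 5-bit offset allowed by the
   floating-point load and store instructions.  */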
/* Output address vector.  */

void
pa_output_addr_vec (rtx lab, rtx body)
{
  int idx, vlen = XVECLEN (body, 0);

  if (!TARGET_SOM)
    fputs ("\t.align 4\n", asm_out_file);
  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
	(asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
/* Output address difference vector.  */

void
pa_output_addr_diff_vec (rtx lab, rtx body)
{
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
	(asm_out_file,
	 body,
	 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
	 CODE_LABEL_NUMBER (base));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
	old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label,
			   profile_probability::guessed_never ());
  return true;
}
/* This function tries to implement an atomic exchange operation using a
   compare_and_swap loop.  VAL is written to *MEM.  The previous contents of
   *MEM are returned, using TARGET if possible.  No memory model is required
   since a compare_and_swap loop is seq-cst.  */

static rtx
pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
{
  machine_mode mode = GET_MODE (mem);

  if (can_compare_and_swap_p (mode, true))
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
/* Implement TARGET_CALLEE_COPIES.  The callee is responsible for copying
   arguments passed by hidden reference in the 32-bit HP runtime.  Users
   can override this behavior for better compatibility with OpenMP at the
   risk of library incompatibilities.  Arguments are always passed by value
   in the 64-bit HP runtime.  */

static bool
pa_callee_copies (cumulative_args_t, const function_arg_info &)
{
  return !TARGET_CALLER_COPIES;
}
/* Implement TARGET_HARD_REGNO_NREGS.  */

static unsigned int
pa_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED, machine_mode mode)
{
  return PA_HARD_REGNO_NREGS (regno, mode);
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
pa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return PA_HARD_REGNO_MODE_OK (regno, mode);
}
/* Implement TARGET_STARTING_FRAME_OFFSET.

   On the 32-bit ports, we reserve one slot for the previous frame
   pointer and one fill slot.  The fill slot is for compatibility
   with HP compiled programs.  On the 64-bit ports, we reserve one
   slot for the previous frame pointer.  */

static HOST_WIDE_INT
pa_starting_frame_offset (void)
{
  return 8;
}
/* Figure out the size in words of the function argument.  The size
   returned by this function should always be greater than zero because
   we pass variable and zero sized objects by reference.  */

HOST_WIDE_INT
pa_function_arg_size (machine_mode mode, const_tree type)
{
  HOST_WIDE_INT size;

  size = mode != BLKmode ? GET_MODE_SIZE (mode) : int_size_in_bytes (type);
  return CEIL (size, UNITS_PER_WORD);