1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992-2015 Free Software Foundation, Inc.
3 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-attr.h"
35 #include "double-int.h"
42 #include "fold-const.h"
43 #include "stor-layout.h"
44 #include "stringpool.h"
52 #include "statistics.h"
54 #include "fixed-value.h"
61 #include "insn-codes.h"
64 #include "diagnostic-core.h"
70 #include "common/common-target.h"
71 #include "target-def.h"
72 #include "langhooks.h"
73 #include "dominance.h"
79 #include "cfgcleanup.h"
80 #include "basic-block.h"
85 /* Return nonzero if there is a bypass for the output of
86 OUT_INSN and the fp store IN_INSN. */
88 pa_fpstore_bypass_p (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
90 machine_mode store_mode
;
91 machine_mode other_mode
;
94 if (recog_memoized (in_insn
) < 0
95 || (get_attr_type (in_insn
) != TYPE_FPSTORE
96 && get_attr_type (in_insn
) != TYPE_FPSTORE_LOAD
)
97 || recog_memoized (out_insn
) < 0)
100 store_mode
= GET_MODE (SET_SRC (PATTERN (in_insn
)));
102 set
= single_set (out_insn
);
106 other_mode
= GET_MODE (SET_SRC (set
));
108 return (GET_MODE_SIZE (store_mode
) == GET_MODE_SIZE (other_mode
));
112 #ifndef DO_FRAME_NOTES
113 #ifdef INCOMING_RETURN_ADDR_RTX
114 #define DO_FRAME_NOTES 1
116 #define DO_FRAME_NOTES 0
120 static void pa_option_override (void);
121 static void copy_reg_pointer (rtx
, rtx
);
122 static void fix_range (const char *);
123 static int hppa_register_move_cost (machine_mode mode
, reg_class_t
,
125 static int hppa_address_cost (rtx
, machine_mode mode
, addr_space_t
, bool);
126 static bool hppa_rtx_costs (rtx
, int, int, int, int *, bool);
127 static inline rtx
force_mode (machine_mode
, rtx
);
128 static void pa_reorg (void);
129 static void pa_combine_instructions (void);
130 static int pa_can_combine_p (rtx_insn
*, rtx_insn
*, rtx_insn
*, int, rtx
,
132 static bool forward_branch_p (rtx_insn
*);
133 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT
, unsigned *);
134 static void compute_zdepdi_operands (unsigned HOST_WIDE_INT
, unsigned *);
135 static int compute_movmem_length (rtx_insn
*);
136 static int compute_clrmem_length (rtx_insn
*);
137 static bool pa_assemble_integer (rtx
, unsigned int, int);
138 static void remove_useless_addtr_insns (int);
139 static void store_reg (int, HOST_WIDE_INT
, int);
140 static void store_reg_modify (int, int, HOST_WIDE_INT
);
141 static void load_reg (int, HOST_WIDE_INT
, int);
142 static void set_reg_plus_d (int, int, HOST_WIDE_INT
, int);
143 static rtx
pa_function_value (const_tree
, const_tree
, bool);
144 static rtx
pa_libcall_value (machine_mode
, const_rtx
);
145 static bool pa_function_value_regno_p (const unsigned int);
146 static void pa_output_function_prologue (FILE *, HOST_WIDE_INT
);
147 static void update_total_code_bytes (unsigned int);
148 static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT
);
149 static int pa_adjust_cost (rtx_insn
*, rtx
, rtx_insn
*, int);
150 static int pa_adjust_priority (rtx_insn
*, int);
151 static int pa_issue_rate (void);
152 static int pa_reloc_rw_mask (void);
153 static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED
;
154 static section
*pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED
;
155 static section
*pa_select_section (tree
, int, unsigned HOST_WIDE_INT
)
157 static void pa_encode_section_info (tree
, rtx
, int);
158 static const char *pa_strip_name_encoding (const char *);
159 static bool pa_function_ok_for_sibcall (tree
, tree
);
160 static void pa_globalize_label (FILE *, const char *)
162 static void pa_asm_output_mi_thunk (FILE *, tree
, HOST_WIDE_INT
,
163 HOST_WIDE_INT
, tree
);
164 #if !defined(USE_COLLECT2)
165 static void pa_asm_out_constructor (rtx
, int);
166 static void pa_asm_out_destructor (rtx
, int);
168 static void pa_init_builtins (void);
169 static rtx
pa_expand_builtin (tree
, rtx
, rtx
, machine_mode mode
, int);
170 static rtx
hppa_builtin_saveregs (void);
171 static void hppa_va_start (tree
, rtx
);
172 static tree
hppa_gimplify_va_arg_expr (tree
, tree
, gimple_seq
*, gimple_seq
*);
173 static bool pa_scalar_mode_supported_p (machine_mode
);
174 static bool pa_commutative_p (const_rtx x
, int outer_code
);
175 static void copy_fp_args (rtx_insn
*) ATTRIBUTE_UNUSED
;
176 static int length_fp_args (rtx_insn
*) ATTRIBUTE_UNUSED
;
177 static rtx
hppa_legitimize_address (rtx
, rtx
, machine_mode
);
178 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED
;
179 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED
;
180 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED
;
181 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED
;
182 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED
;
183 static void pa_som_file_start (void) ATTRIBUTE_UNUSED
;
184 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED
;
185 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED
;
186 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED
;
187 static void output_deferred_plabels (void);
188 static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED
;
189 #ifdef ASM_OUTPUT_EXTERNAL_REAL
190 static void pa_hpux_file_end (void);
192 static void pa_init_libfuncs (void);
193 static rtx
pa_struct_value_rtx (tree
, int);
194 static bool pa_pass_by_reference (cumulative_args_t
, machine_mode
,
196 static int pa_arg_partial_bytes (cumulative_args_t
, machine_mode
,
198 static void pa_function_arg_advance (cumulative_args_t
, machine_mode
,
200 static rtx
pa_function_arg (cumulative_args_t
, machine_mode
,
202 static unsigned int pa_function_arg_boundary (machine_mode
, const_tree
);
203 static struct machine_function
* pa_init_machine_status (void);
204 static reg_class_t
pa_secondary_reload (bool, rtx
, reg_class_t
,
206 secondary_reload_info
*);
207 static void pa_extra_live_on_entry (bitmap
);
208 static machine_mode
pa_promote_function_mode (const_tree
,
212 static void pa_asm_trampoline_template (FILE *);
213 static void pa_trampoline_init (rtx
, tree
, rtx
);
214 static rtx
pa_trampoline_adjust_address (rtx
);
215 static rtx
pa_delegitimize_address (rtx
);
216 static bool pa_print_operand_punct_valid_p (unsigned char);
217 static rtx
pa_internal_arg_pointer (void);
218 static bool pa_can_eliminate (const int, const int);
219 static void pa_conditional_register_usage (void);
220 static machine_mode
pa_c_mode_for_suffix (char);
221 static section
*pa_function_section (tree
, enum node_frequency
, bool, bool);
222 static bool pa_cannot_force_const_mem (machine_mode
, rtx
);
223 static bool pa_legitimate_constant_p (machine_mode
, rtx
);
224 static unsigned int pa_section_type_flags (tree
, const char *, int);
225 static bool pa_legitimate_address_p (machine_mode
, rtx
, bool);
227 /* The following extra sections are only used for SOM. */
228 static GTY(()) section
*som_readonly_data_section
;
229 static GTY(()) section
*som_one_only_readonly_data_section
;
230 static GTY(()) section
*som_one_only_data_section
;
231 static GTY(()) section
*som_tm_clone_table_section
;
233 /* Counts for the number of callee-saved general and floating point
234 registers which were saved by the current function's prologue. */
235 static int gr_saved
, fr_saved
;
237 /* Boolean indicating whether the return pointer was saved by the
238 current function's prologue. */
239 static bool rp_saved
;
241 static rtx
find_addr_reg (rtx
);
243 /* Keep track of the number of bytes we have output in the CODE subspace
244 during this compilation so we'll know when to emit inline long-calls. */
245 unsigned long total_code_bytes
;
247 /* The last address of the previous function plus the number of bytes in
248 associated thunks that have been output. This is used to determine if
249 a thunk can use an IA-relative branch to reach its target function. */
250 static unsigned int last_address
;
252 /* Variables to handle plabels that we discover are necessary at assembly
253 output time. They are output after the current function. */
254 struct GTY(()) deferred_plabel
259 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel
*
261 static size_t n_deferred_plabels
= 0;
263 /* Initialize the GCC target structure. */
265 #undef TARGET_OPTION_OVERRIDE
266 #define TARGET_OPTION_OVERRIDE pa_option_override
268 #undef TARGET_ASM_ALIGNED_HI_OP
269 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
270 #undef TARGET_ASM_ALIGNED_SI_OP
271 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
272 #undef TARGET_ASM_ALIGNED_DI_OP
273 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
274 #undef TARGET_ASM_UNALIGNED_HI_OP
275 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
276 #undef TARGET_ASM_UNALIGNED_SI_OP
277 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
278 #undef TARGET_ASM_UNALIGNED_DI_OP
279 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
280 #undef TARGET_ASM_INTEGER
281 #define TARGET_ASM_INTEGER pa_assemble_integer
283 #undef TARGET_ASM_FUNCTION_PROLOGUE
284 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
285 #undef TARGET_ASM_FUNCTION_EPILOGUE
286 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
288 #undef TARGET_FUNCTION_VALUE
289 #define TARGET_FUNCTION_VALUE pa_function_value
290 #undef TARGET_LIBCALL_VALUE
291 #define TARGET_LIBCALL_VALUE pa_libcall_value
292 #undef TARGET_FUNCTION_VALUE_REGNO_P
293 #define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p
295 #undef TARGET_LEGITIMIZE_ADDRESS
296 #define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address
298 #undef TARGET_SCHED_ADJUST_COST
299 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
300 #undef TARGET_SCHED_ADJUST_PRIORITY
301 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
302 #undef TARGET_SCHED_ISSUE_RATE
303 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
305 #undef TARGET_ENCODE_SECTION_INFO
306 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
307 #undef TARGET_STRIP_NAME_ENCODING
308 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
310 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
311 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
313 #undef TARGET_COMMUTATIVE_P
314 #define TARGET_COMMUTATIVE_P pa_commutative_p
316 #undef TARGET_ASM_OUTPUT_MI_THUNK
317 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
318 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
319 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
321 #undef TARGET_ASM_FILE_END
322 #ifdef ASM_OUTPUT_EXTERNAL_REAL
323 #define TARGET_ASM_FILE_END pa_hpux_file_end
325 #define TARGET_ASM_FILE_END output_deferred_plabels
328 #undef TARGET_ASM_RELOC_RW_MASK
329 #define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask
331 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
332 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p
334 #if !defined(USE_COLLECT2)
335 #undef TARGET_ASM_CONSTRUCTOR
336 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
337 #undef TARGET_ASM_DESTRUCTOR
338 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
341 #undef TARGET_INIT_BUILTINS
342 #define TARGET_INIT_BUILTINS pa_init_builtins
344 #undef TARGET_EXPAND_BUILTIN
345 #define TARGET_EXPAND_BUILTIN pa_expand_builtin
347 #undef TARGET_REGISTER_MOVE_COST
348 #define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
349 #undef TARGET_RTX_COSTS
350 #define TARGET_RTX_COSTS hppa_rtx_costs
351 #undef TARGET_ADDRESS_COST
352 #define TARGET_ADDRESS_COST hppa_address_cost
354 #undef TARGET_MACHINE_DEPENDENT_REORG
355 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
357 #undef TARGET_INIT_LIBFUNCS
358 #define TARGET_INIT_LIBFUNCS pa_init_libfuncs
360 #undef TARGET_PROMOTE_FUNCTION_MODE
361 #define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
362 #undef TARGET_PROMOTE_PROTOTYPES
363 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
365 #undef TARGET_STRUCT_VALUE_RTX
366 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
367 #undef TARGET_RETURN_IN_MEMORY
368 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
369 #undef TARGET_MUST_PASS_IN_STACK
370 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
371 #undef TARGET_PASS_BY_REFERENCE
372 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
373 #undef TARGET_CALLEE_COPIES
374 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
375 #undef TARGET_ARG_PARTIAL_BYTES
376 #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
377 #undef TARGET_FUNCTION_ARG
378 #define TARGET_FUNCTION_ARG pa_function_arg
379 #undef TARGET_FUNCTION_ARG_ADVANCE
380 #define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
381 #undef TARGET_FUNCTION_ARG_BOUNDARY
382 #define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary
384 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
385 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
386 #undef TARGET_EXPAND_BUILTIN_VA_START
387 #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
388 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
389 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
391 #undef TARGET_SCALAR_MODE_SUPPORTED_P
392 #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p
394 #undef TARGET_CANNOT_FORCE_CONST_MEM
395 #define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem
397 #undef TARGET_SECONDARY_RELOAD
398 #define TARGET_SECONDARY_RELOAD pa_secondary_reload
400 #undef TARGET_EXTRA_LIVE_ON_ENTRY
401 #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
403 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
404 #define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
405 #undef TARGET_TRAMPOLINE_INIT
406 #define TARGET_TRAMPOLINE_INIT pa_trampoline_init
407 #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
408 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
409 #undef TARGET_DELEGITIMIZE_ADDRESS
410 #define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
411 #undef TARGET_INTERNAL_ARG_POINTER
412 #define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
413 #undef TARGET_CAN_ELIMINATE
414 #define TARGET_CAN_ELIMINATE pa_can_eliminate
415 #undef TARGET_CONDITIONAL_REGISTER_USAGE
416 #define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
417 #undef TARGET_C_MODE_FOR_SUFFIX
418 #define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
419 #undef TARGET_ASM_FUNCTION_SECTION
420 #define TARGET_ASM_FUNCTION_SECTION pa_function_section
422 #undef TARGET_LEGITIMATE_CONSTANT_P
423 #define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
424 #undef TARGET_SECTION_TYPE_FLAGS
425 #define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
426 #undef TARGET_LEGITIMATE_ADDRESS_P
427 #define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p
429 struct gcc_target targetm
= TARGET_INITIALIZER
;
431 /* Parse the -mfixed-range= option string. */
434 fix_range (const char *const_str
)
437 char *str
, *dash
, *comma
;
439 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
440 REG2 are either register names or register numbers. The effect
441 of this option is to mark the registers in the range from REG1 to
442 REG2 as ``fixed'' so they won't be used by the compiler. This is
443 used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */
445 i
= strlen (const_str
);
446 str
= (char *) alloca (i
+ 1);
447 memcpy (str
, const_str
, i
+ 1);
451 dash
= strchr (str
, '-');
454 warning (0, "value of -mfixed-range must have form REG1-REG2");
459 comma
= strchr (dash
+ 1, ',');
463 first
= decode_reg_name (str
);
466 warning (0, "unknown register name: %s", str
);
470 last
= decode_reg_name (dash
+ 1);
473 warning (0, "unknown register name: %s", dash
+ 1);
481 warning (0, "%s-%s is an empty range", str
, dash
+ 1);
485 for (i
= first
; i
<= last
; ++i
)
486 fixed_regs
[i
] = call_used_regs
[i
] = 1;
495 /* Check if all floating point registers have been fixed. */
496 for (i
= FP_REG_FIRST
; i
<= FP_REG_LAST
; i
++)
501 target_flags
|= MASK_DISABLE_FPREGS
;
504 /* Implement the TARGET_OPTION_OVERRIDE hook. */
507 pa_option_override (void)
510 cl_deferred_option
*opt
;
511 vec
<cl_deferred_option
> *v
512 = (vec
<cl_deferred_option
> *) pa_deferred_options
;
515 FOR_EACH_VEC_ELT (*v
, i
, opt
)
517 switch (opt
->opt_index
)
519 case OPT_mfixed_range_
:
520 fix_range (opt
->arg
);
528 if (flag_pic
&& TARGET_PORTABLE_RUNTIME
)
530 warning (0, "PIC code generation is not supported in the portable runtime model");
533 if (flag_pic
&& TARGET_FAST_INDIRECT_CALLS
)
535 warning (0, "PIC code generation is not compatible with fast indirect calls");
538 if (! TARGET_GAS
&& write_symbols
!= NO_DEBUG
)
540 warning (0, "-g is only supported when using GAS on this processor,");
541 warning (0, "-g option disabled");
542 write_symbols
= NO_DEBUG
;
545 /* We only support the "big PIC" model now. And we always generate PIC
546 code when in 64bit mode. */
547 if (flag_pic
== 1 || TARGET_64BIT
)
550 /* Disable -freorder-blocks-and-partition as we don't support hot and
551 cold partitioning. */
552 if (flag_reorder_blocks_and_partition
)
554 inform (input_location
,
555 "-freorder-blocks-and-partition does not work "
556 "on this architecture");
557 flag_reorder_blocks_and_partition
= 0;
558 flag_reorder_blocks
= 1;
561 /* We can't guarantee that .dword is available for 32-bit targets. */
562 if (UNITS_PER_WORD
== 4)
563 targetm
.asm_out
.aligned_op
.di
= NULL
;
565 /* The unaligned ops are only available when using GAS. */
568 targetm
.asm_out
.unaligned_op
.hi
= NULL
;
569 targetm
.asm_out
.unaligned_op
.si
= NULL
;
570 targetm
.asm_out
.unaligned_op
.di
= NULL
;
573 init_machine_status
= pa_init_machine_status
;
578 PA_BUILTIN_COPYSIGNQ
,
581 PA_BUILTIN_HUGE_VALQ
,
585 static GTY(()) tree pa_builtins
[(int) PA_BUILTIN_max
];
588 pa_init_builtins (void)
590 #ifdef DONT_HAVE_FPUTC_UNLOCKED
592 tree decl
= builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED
);
593 set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED
, decl
,
594 builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED
));
601 if ((decl
= builtin_decl_explicit (BUILT_IN_FINITE
)) != NULL_TREE
)
602 set_user_assembler_name (decl
, "_Isfinite");
603 if ((decl
= builtin_decl_explicit (BUILT_IN_FINITEF
)) != NULL_TREE
)
604 set_user_assembler_name (decl
, "_Isfinitef");
608 if (HPUX_LONG_DOUBLE_LIBRARY
)
612 /* Under HPUX, the __float128 type is a synonym for "long double". */
613 (*lang_hooks
.types
.register_builtin_type
) (long_double_type_node
,
616 /* TFmode support builtins. */
617 ftype
= build_function_type_list (long_double_type_node
,
618 long_double_type_node
,
620 decl
= add_builtin_function ("__builtin_fabsq", ftype
,
621 PA_BUILTIN_FABSQ
, BUILT_IN_MD
,
622 "_U_Qfabs", NULL_TREE
);
623 TREE_READONLY (decl
) = 1;
624 pa_builtins
[PA_BUILTIN_FABSQ
] = decl
;
626 ftype
= build_function_type_list (long_double_type_node
,
627 long_double_type_node
,
628 long_double_type_node
,
630 decl
= add_builtin_function ("__builtin_copysignq", ftype
,
631 PA_BUILTIN_COPYSIGNQ
, BUILT_IN_MD
,
632 "_U_Qfcopysign", NULL_TREE
);
633 TREE_READONLY (decl
) = 1;
634 pa_builtins
[PA_BUILTIN_COPYSIGNQ
] = decl
;
636 ftype
= build_function_type_list (long_double_type_node
, NULL_TREE
);
637 decl
= add_builtin_function ("__builtin_infq", ftype
,
638 PA_BUILTIN_INFQ
, BUILT_IN_MD
,
640 pa_builtins
[PA_BUILTIN_INFQ
] = decl
;
642 decl
= add_builtin_function ("__builtin_huge_valq", ftype
,
643 PA_BUILTIN_HUGE_VALQ
, BUILT_IN_MD
,
645 pa_builtins
[PA_BUILTIN_HUGE_VALQ
] = decl
;
650 pa_expand_builtin (tree exp
, rtx target
, rtx subtarget ATTRIBUTE_UNUSED
,
651 machine_mode mode ATTRIBUTE_UNUSED
,
652 int ignore ATTRIBUTE_UNUSED
)
654 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
655 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
659 case PA_BUILTIN_FABSQ
:
660 case PA_BUILTIN_COPYSIGNQ
:
661 return expand_call (exp
, target
, ignore
);
663 case PA_BUILTIN_INFQ
:
664 case PA_BUILTIN_HUGE_VALQ
:
666 machine_mode target_mode
= TYPE_MODE (TREE_TYPE (exp
));
671 tmp
= CONST_DOUBLE_FROM_REAL_VALUE (inf
, target_mode
);
673 tmp
= validize_mem (force_const_mem (target_mode
, tmp
));
676 target
= gen_reg_rtx (target_mode
);
678 emit_move_insn (target
, tmp
);
689 /* Function to init struct machine_function.
690 This will be called, via a pointer variable,
691 from push_function_context. */
693 static struct machine_function
*
694 pa_init_machine_status (void)
696 return ggc_cleared_alloc
<machine_function
> ();
699 /* If FROM is a probable pointer register, mark TO as a probable
700 pointer register with the same pointer alignment as FROM. */
703 copy_reg_pointer (rtx to
, rtx from
)
705 if (REG_POINTER (from
))
706 mark_reg_pointer (to
, REGNO_POINTER_ALIGN (REGNO (from
)));
709 /* Return 1 if X contains a symbolic expression. We know these
710 expressions will have one of a few well defined forms, so
711 we need only check those forms. */
713 pa_symbolic_expression_p (rtx x
)
716 /* Strip off any HIGH. */
717 if (GET_CODE (x
) == HIGH
)
720 return symbolic_operand (x
, VOIDmode
);
723 /* Accept any constant that can be moved in one instruction into a
726 pa_cint_ok_for_move (HOST_WIDE_INT ival
)
728 /* OK if ldo, ldil, or zdepi, can be used. */
729 return (VAL_14_BITS_P (ival
)
730 || pa_ldil_cint_p (ival
)
731 || pa_zdepi_cint_p (ival
));
734 /* True iff ldil can be used to load this CONST_INT. The least
735 significant 11 bits of the value must be zero and the value must
736 not change sign when extended from 32 to 64 bits. */
738 pa_ldil_cint_p (HOST_WIDE_INT ival
)
740 HOST_WIDE_INT x
= ival
& (((HOST_WIDE_INT
) -1 << 31) | 0x7ff);
742 return x
== 0 || x
== ((HOST_WIDE_INT
) -1 << 31);
745 /* True iff zdepi can be used to generate this CONST_INT.
746 zdepi first sign extends a 5-bit signed number to a given field
747 length, then places this field anywhere in a zero. */
749 pa_zdepi_cint_p (unsigned HOST_WIDE_INT x
)
751 unsigned HOST_WIDE_INT lsb_mask
, t
;
753 /* This might not be obvious, but it's at least fast.
754 This function is critical; we don't have the time loops would take. */
756 t
= ((x
>> 4) + lsb_mask
) & ~(lsb_mask
- 1);
757 /* Return true iff t is a power of two. */
758 return ((t
& (t
- 1)) == 0);
761 /* True iff depi or extru can be used to compute (reg & mask).
762 Accept bit pattern like these:
767 pa_and_mask_p (unsigned HOST_WIDE_INT mask
)
770 mask
+= mask
& -mask
;
771 return (mask
& (mask
- 1)) == 0;
774 /* True iff depi can be used to compute (reg | MASK). */
776 pa_ior_mask_p (unsigned HOST_WIDE_INT mask
)
778 mask
+= mask
& -mask
;
779 return (mask
& (mask
- 1)) == 0;
782 /* Legitimize PIC addresses. If the address is already
783 position-independent, we return ORIG. Newly generated
784 position-independent addresses go to REG. If we need more
785 than one register, we lose. */
788 legitimize_pic_address (rtx orig
, machine_mode mode
, rtx reg
)
792 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig
));
794 /* Labels need special handling. */
795 if (pic_label_operand (orig
, mode
))
799 /* We do not want to go through the movXX expanders here since that
800 would create recursion.
802 Nor do we really want to call a generator for a named pattern
803 since that requires multiple patterns if we want to support
806 So instead we just emit the raw set, which avoids the movXX
807 expanders completely. */
808 mark_reg_pointer (reg
, BITS_PER_UNIT
);
809 insn
= emit_insn (gen_rtx_SET (reg
, orig
));
811 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
812 add_reg_note (insn
, REG_EQUAL
, orig
);
814 /* During and after reload, we need to generate a REG_LABEL_OPERAND note
815 and update LABEL_NUSES because this is not done automatically. */
816 if (reload_in_progress
|| reload_completed
)
818 /* Extract LABEL_REF. */
819 if (GET_CODE (orig
) == CONST
)
820 orig
= XEXP (XEXP (orig
, 0), 0);
821 /* Extract CODE_LABEL. */
822 orig
= XEXP (orig
, 0);
823 add_reg_note (insn
, REG_LABEL_OPERAND
, orig
);
824 /* Make sure we have label and not a note. */
826 LABEL_NUSES (orig
)++;
828 crtl
->uses_pic_offset_table
= 1;
831 if (GET_CODE (orig
) == SYMBOL_REF
)
838 /* Before reload, allocate a temporary register for the intermediate
839 result. This allows the sequence to be deleted when the final
840 result is unused and the insns are trivially dead. */
841 tmp_reg
= ((reload_in_progress
|| reload_completed
)
842 ? reg
: gen_reg_rtx (Pmode
));
844 if (function_label_operand (orig
, VOIDmode
))
846 /* Force function label into memory in word mode. */
847 orig
= XEXP (force_const_mem (word_mode
, orig
), 0);
848 /* Load plabel address from DLT. */
849 emit_move_insn (tmp_reg
,
850 gen_rtx_PLUS (word_mode
, pic_offset_table_rtx
,
851 gen_rtx_HIGH (word_mode
, orig
)));
853 = gen_const_mem (Pmode
,
854 gen_rtx_LO_SUM (Pmode
, tmp_reg
,
855 gen_rtx_UNSPEC (Pmode
,
858 emit_move_insn (reg
, pic_ref
);
859 /* Now load address of function descriptor. */
860 pic_ref
= gen_rtx_MEM (Pmode
, reg
);
864 /* Load symbol reference from DLT. */
865 emit_move_insn (tmp_reg
,
866 gen_rtx_PLUS (word_mode
, pic_offset_table_rtx
,
867 gen_rtx_HIGH (word_mode
, orig
)));
869 = gen_const_mem (Pmode
,
870 gen_rtx_LO_SUM (Pmode
, tmp_reg
,
871 gen_rtx_UNSPEC (Pmode
,
876 crtl
->uses_pic_offset_table
= 1;
877 mark_reg_pointer (reg
, BITS_PER_UNIT
);
878 insn
= emit_move_insn (reg
, pic_ref
);
880 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
881 set_unique_reg_note (insn
, REG_EQUAL
, orig
);
885 else if (GET_CODE (orig
) == CONST
)
889 if (GET_CODE (XEXP (orig
, 0)) == PLUS
890 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
894 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
896 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
897 orig
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
898 base
== reg
? 0 : reg
);
900 if (GET_CODE (orig
) == CONST_INT
)
902 if (INT_14_BITS (orig
))
903 return plus_constant (Pmode
, base
, INTVAL (orig
));
904 orig
= force_reg (Pmode
, orig
);
906 pic_ref
= gen_rtx_PLUS (Pmode
, base
, orig
);
907 /* Likewise, should we set special REG_NOTEs here? */
913 static GTY(()) rtx gen_tls_tga
;
916 gen_tls_get_addr (void)
919 gen_tls_tga
= init_one_libfunc ("__tls_get_addr");
924 hppa_tls_call (rtx arg
)
928 ret
= gen_reg_rtx (Pmode
);
929 emit_library_call_value (gen_tls_get_addr (), ret
,
930 LCT_CONST
, Pmode
, 1, arg
, Pmode
);
936 legitimize_tls_address (rtx addr
)
938 rtx ret
, tmp
, t1
, t2
, tp
;
941 /* Currently, we can't handle anything but a SYMBOL_REF. */
942 if (GET_CODE (addr
) != SYMBOL_REF
)
945 switch (SYMBOL_REF_TLS_MODEL (addr
))
947 case TLS_MODEL_GLOBAL_DYNAMIC
:
948 tmp
= gen_reg_rtx (Pmode
);
950 emit_insn (gen_tgd_load_pic (tmp
, addr
));
952 emit_insn (gen_tgd_load (tmp
, addr
));
953 ret
= hppa_tls_call (tmp
);
956 case TLS_MODEL_LOCAL_DYNAMIC
:
957 ret
= gen_reg_rtx (Pmode
);
958 tmp
= gen_reg_rtx (Pmode
);
961 emit_insn (gen_tld_load_pic (tmp
, addr
));
963 emit_insn (gen_tld_load (tmp
, addr
));
964 t1
= hppa_tls_call (tmp
);
967 t2
= gen_reg_rtx (Pmode
);
968 emit_libcall_block (insn
, t2
, t1
,
969 gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
971 emit_insn (gen_tld_offset_load (ret
, addr
, t2
));
974 case TLS_MODEL_INITIAL_EXEC
:
975 tp
= gen_reg_rtx (Pmode
);
976 tmp
= gen_reg_rtx (Pmode
);
977 ret
= gen_reg_rtx (Pmode
);
978 emit_insn (gen_tp_load (tp
));
980 emit_insn (gen_tie_load_pic (tmp
, addr
));
982 emit_insn (gen_tie_load (tmp
, addr
));
983 emit_move_insn (ret
, gen_rtx_PLUS (Pmode
, tp
, tmp
));
986 case TLS_MODEL_LOCAL_EXEC
:
987 tp
= gen_reg_rtx (Pmode
);
988 ret
= gen_reg_rtx (Pmode
);
989 emit_insn (gen_tp_load (tp
));
990 emit_insn (gen_tle_load (ret
, addr
, tp
));
1000 /* Try machine-dependent ways of modifying an illegitimate address
1001 to be legitimate. If we find one, return the new, valid address.
1002 This macro is used in only one place: `memory_address' in explow.c.
1004 OLDX is the address as it was before break_out_memory_refs was called.
1005 In some cases it is useful to look at this to decide what needs to be done.
1007 It is always safe for this macro to do nothing. It exists to recognize
1008 opportunities to optimize the output.
1010 For the PA, transform:
1012 memory(X + <large int>)
1016 if (<large int> & mask) >= 16
1017 Y = (<large int> & ~mask) + mask + 1 Round up.
1019 Y = (<large int> & ~mask) Round down.
1021 memory (Z + (<large int> - Y));
1023 This is for CSE to find several similar references, and only use one Z.
1025 X can either be a SYMBOL_REF or REG, but because combine cannot
1026 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
1027 D will not fit in 14 bits.
1029 MODE_FLOAT references allow displacements which fit in 5 bits, so use
1032 MODE_INT references allow displacements which fit in 14 bits, so use
1035 This relies on the fact that most mode MODE_FLOAT references will use FP
1036 registers and most mode MODE_INT references will use integer registers.
1037 (In the rare case of an FP register used in an integer MODE, we depend
1038 on secondary reloads to clean things up.)
1041 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
1042 manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed
1043 addressing modes to be used).
1045 Put X and Z into registers. Then put the entire expression into
1049 hppa_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1054 /* We need to canonicalize the order of operands in unscaled indexed
1055 addresses since the code that checks if an address is valid doesn't
1056 always try both orders. */
1057 if (!TARGET_NO_SPACE_REGS
1058 && GET_CODE (x
) == PLUS
1059 && GET_MODE (x
) == Pmode
1060 && REG_P (XEXP (x
, 0))
1061 && REG_P (XEXP (x
, 1))
1062 && REG_POINTER (XEXP (x
, 0))
1063 && !REG_POINTER (XEXP (x
, 1)))
1064 return gen_rtx_PLUS (Pmode
, XEXP (x
, 1), XEXP (x
, 0));
1066 if (tls_referenced_p (x
))
1067 return legitimize_tls_address (x
);
1069 return legitimize_pic_address (x
, mode
, gen_reg_rtx (Pmode
));
1071 /* Strip off CONST. */
1072 if (GET_CODE (x
) == CONST
)
1075 /* Special case. Get the SYMBOL_REF into a register and use indexing.
1076 That should always be safe. */
1077 if (GET_CODE (x
) == PLUS
1078 && GET_CODE (XEXP (x
, 0)) == REG
1079 && GET_CODE (XEXP (x
, 1)) == SYMBOL_REF
)
1081 rtx reg
= force_reg (Pmode
, XEXP (x
, 1));
1082 return force_reg (Pmode
, gen_rtx_PLUS (Pmode
, reg
, XEXP (x
, 0)));
1085 /* Note we must reject symbols which represent function addresses
1086 since the assembler/linker can't handle arithmetic on plabels. */
1087 if (GET_CODE (x
) == PLUS
1088 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1089 && ((GET_CODE (XEXP (x
, 0)) == SYMBOL_REF
1090 && !FUNCTION_NAME_P (XSTR (XEXP (x
, 0), 0)))
1091 || GET_CODE (XEXP (x
, 0)) == REG
))
1093 rtx int_part
, ptr_reg
;
1095 int offset
= INTVAL (XEXP (x
, 1));
1098 mask
= (GET_MODE_CLASS (mode
) == MODE_FLOAT
1099 && !INT14_OK_STRICT
? 0x1f : 0x3fff);
1101 /* Choose which way to round the offset. Round up if we
1102 are >= halfway to the next boundary. */
1103 if ((offset
& mask
) >= ((mask
+ 1) / 2))
1104 newoffset
= (offset
& ~ mask
) + mask
+ 1;
1106 newoffset
= (offset
& ~ mask
);
1108 /* If the newoffset will not fit in 14 bits (ldo), then
1109 handling this would take 4 or 5 instructions (2 to load
1110 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
1111 add the new offset and the SYMBOL_REF.) Combine can
1112 not handle 4->2 or 5->2 combinations, so do not create
1114 if (! VAL_14_BITS_P (newoffset
)
1115 && GET_CODE (XEXP (x
, 0)) == SYMBOL_REF
)
1117 rtx const_part
= plus_constant (Pmode
, XEXP (x
, 0), newoffset
);
1120 gen_rtx_HIGH (Pmode
, const_part
));
1123 gen_rtx_LO_SUM (Pmode
,
1124 tmp_reg
, const_part
));
1128 if (! VAL_14_BITS_P (newoffset
))
1129 int_part
= force_reg (Pmode
, GEN_INT (newoffset
));
1131 int_part
= GEN_INT (newoffset
);
1133 ptr_reg
= force_reg (Pmode
,
1134 gen_rtx_PLUS (Pmode
,
1135 force_reg (Pmode
, XEXP (x
, 0)),
1138 return plus_constant (Pmode
, ptr_reg
, offset
- newoffset
);
1141 /* Handle (plus (mult (a) (shadd_constant)) (b)). */
1143 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 0)) == MULT
1144 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1145 && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x
, 0), 1)))
1146 && (OBJECT_P (XEXP (x
, 1))
1147 || GET_CODE (XEXP (x
, 1)) == SUBREG
)
1148 && GET_CODE (XEXP (x
, 1)) != CONST
)
1150 int val
= INTVAL (XEXP (XEXP (x
, 0), 1));
1154 if (GET_CODE (reg1
) != REG
)
1155 reg1
= force_reg (Pmode
, force_operand (reg1
, 0));
1157 reg2
= XEXP (XEXP (x
, 0), 0);
1158 if (GET_CODE (reg2
) != REG
)
1159 reg2
= force_reg (Pmode
, force_operand (reg2
, 0));
1161 return force_reg (Pmode
, gen_rtx_PLUS (Pmode
,
1162 gen_rtx_MULT (Pmode
,
1168 /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).
1170 Only do so for floating point modes since this is more speculative
1171 and we lose if it's an integer store. */
1172 if (GET_CODE (x
) == PLUS
1173 && GET_CODE (XEXP (x
, 0)) == PLUS
1174 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
1175 && GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)) == CONST_INT
1176 && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x
, 0), 0), 1)))
1177 && (mode
== SFmode
|| mode
== DFmode
))
1180 /* First, try and figure out what to use as a base register. */
1181 rtx reg1
, reg2
, base
, idx
;
1183 reg1
= XEXP (XEXP (x
, 0), 1);
1188 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1189 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
1190 it's a base register below. */
1191 if (GET_CODE (reg1
) != REG
)
1192 reg1
= force_reg (Pmode
, force_operand (reg1
, 0));
1194 if (GET_CODE (reg2
) != REG
)
1195 reg2
= force_reg (Pmode
, force_operand (reg2
, 0));
1197 /* Figure out what the base and index are. */
1199 if (GET_CODE (reg1
) == REG
1200 && REG_POINTER (reg1
))
1203 idx
= gen_rtx_PLUS (Pmode
,
1204 gen_rtx_MULT (Pmode
,
1205 XEXP (XEXP (XEXP (x
, 0), 0), 0),
1206 XEXP (XEXP (XEXP (x
, 0), 0), 1)),
1209 else if (GET_CODE (reg2
) == REG
1210 && REG_POINTER (reg2
))
1219 /* If the index adds a large constant, try to scale the
1220 constant so that it can be loaded with only one insn. */
1221 if (GET_CODE (XEXP (idx
, 1)) == CONST_INT
1222 && VAL_14_BITS_P (INTVAL (XEXP (idx
, 1))
1223 / INTVAL (XEXP (XEXP (idx
, 0), 1)))
1224 && INTVAL (XEXP (idx
, 1)) % INTVAL (XEXP (XEXP (idx
, 0), 1)) == 0)
1226 /* Divide the CONST_INT by the scale factor, then add it to A. */
1227 int val
= INTVAL (XEXP (idx
, 1));
1229 val
/= INTVAL (XEXP (XEXP (idx
, 0), 1));
1230 reg1
= XEXP (XEXP (idx
, 0), 0);
1231 if (GET_CODE (reg1
) != REG
)
1232 reg1
= force_reg (Pmode
, force_operand (reg1
, 0));
1234 reg1
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, reg1
, GEN_INT (val
)));
1236 /* We can now generate a simple scaled indexed address. */
1239 (Pmode
, gen_rtx_PLUS (Pmode
,
1240 gen_rtx_MULT (Pmode
, reg1
,
1241 XEXP (XEXP (idx
, 0), 1)),
1245 /* If B + C is still a valid base register, then add them. */
1246 if (GET_CODE (XEXP (idx
, 1)) == CONST_INT
1247 && INTVAL (XEXP (idx
, 1)) <= 4096
1248 && INTVAL (XEXP (idx
, 1)) >= -4096)
1250 int val
= INTVAL (XEXP (XEXP (idx
, 0), 1));
1253 reg1
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, base
, XEXP (idx
, 1)));
1255 reg2
= XEXP (XEXP (idx
, 0), 0);
1256 if (GET_CODE (reg2
) != CONST_INT
)
1257 reg2
= force_reg (Pmode
, force_operand (reg2
, 0));
1259 return force_reg (Pmode
, gen_rtx_PLUS (Pmode
,
1260 gen_rtx_MULT (Pmode
,
1266 /* Get the index into a register, then add the base + index and
1267 return a register holding the result. */
1269 /* First get A into a register. */
1270 reg1
= XEXP (XEXP (idx
, 0), 0);
1271 if (GET_CODE (reg1
) != REG
)
1272 reg1
= force_reg (Pmode
, force_operand (reg1
, 0));
1274 /* And get B into a register. */
1275 reg2
= XEXP (idx
, 1);
1276 if (GET_CODE (reg2
) != REG
)
1277 reg2
= force_reg (Pmode
, force_operand (reg2
, 0));
1279 reg1
= force_reg (Pmode
,
1280 gen_rtx_PLUS (Pmode
,
1281 gen_rtx_MULT (Pmode
, reg1
,
1282 XEXP (XEXP (idx
, 0), 1)),
1285 /* Add the result to our base register and return. */
1286 return force_reg (Pmode
, gen_rtx_PLUS (Pmode
, base
, reg1
));
1290 /* Uh-oh. We might have an address for x[n-100000]. This needs
1291 special handling to avoid creating an indexed memory address
1292 with x-100000 as the base.
1294 If the constant part is small enough, then it's still safe because
1295 there is a guard page at the beginning and end of the data segment.
1297 Scaled references are common enough that we want to try and rearrange the
1298 terms so that we can use indexing for these addresses too. Only
1299 do the optimization for floatint point modes. */
1301 if (GET_CODE (x
) == PLUS
1302 && pa_symbolic_expression_p (XEXP (x
, 1)))
1304 /* Ugly. We modify things here so that the address offset specified
1305 by the index expression is computed first, then added to x to form
1306 the entire address. */
1308 rtx regx1
, regx2
, regy1
, regy2
, y
;
1310 /* Strip off any CONST. */
1312 if (GET_CODE (y
) == CONST
)
1315 if (GET_CODE (y
) == PLUS
|| GET_CODE (y
) == MINUS
)
1317 /* See if this looks like
1318 (plus (mult (reg) (shadd_const))
1319 (const (plus (symbol_ref) (const_int))))
1321 Where const_int is small. In that case the const
1322 expression is a valid pointer for indexing.
1324 If const_int is big, but can be divided evenly by shadd_const
1325 and added to (reg). This allows more scaled indexed addresses. */
1326 if (GET_CODE (XEXP (y
, 0)) == SYMBOL_REF
1327 && GET_CODE (XEXP (x
, 0)) == MULT
1328 && GET_CODE (XEXP (y
, 1)) == CONST_INT
1329 && INTVAL (XEXP (y
, 1)) >= -4096
1330 && INTVAL (XEXP (y
, 1)) <= 4095
1331 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1332 && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x
, 0), 1))))
1334 int val
= INTVAL (XEXP (XEXP (x
, 0), 1));
1338 if (GET_CODE (reg1
) != REG
)
1339 reg1
= force_reg (Pmode
, force_operand (reg1
, 0));
1341 reg2
= XEXP (XEXP (x
, 0), 0);
1342 if (GET_CODE (reg2
) != REG
)
1343 reg2
= force_reg (Pmode
, force_operand (reg2
, 0));
1345 return force_reg (Pmode
,
1346 gen_rtx_PLUS (Pmode
,
1347 gen_rtx_MULT (Pmode
,
1352 else if ((mode
== DFmode
|| mode
== SFmode
)
1353 && GET_CODE (XEXP (y
, 0)) == SYMBOL_REF
1354 && GET_CODE (XEXP (x
, 0)) == MULT
1355 && GET_CODE (XEXP (y
, 1)) == CONST_INT
1356 && INTVAL (XEXP (y
, 1)) % INTVAL (XEXP (XEXP (x
, 0), 1)) == 0
1357 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1358 && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x
, 0), 1))))
1361 = force_reg (Pmode
, GEN_INT (INTVAL (XEXP (y
, 1))
1362 / INTVAL (XEXP (XEXP (x
, 0), 1))));
1363 regx2
= XEXP (XEXP (x
, 0), 0);
1364 if (GET_CODE (regx2
) != REG
)
1365 regx2
= force_reg (Pmode
, force_operand (regx2
, 0));
1366 regx2
= force_reg (Pmode
, gen_rtx_fmt_ee (GET_CODE (y
), Pmode
,
1370 gen_rtx_PLUS (Pmode
,
1371 gen_rtx_MULT (Pmode
, regx2
,
1372 XEXP (XEXP (x
, 0), 1)),
1373 force_reg (Pmode
, XEXP (y
, 0))));
1375 else if (GET_CODE (XEXP (y
, 1)) == CONST_INT
1376 && INTVAL (XEXP (y
, 1)) >= -4096
1377 && INTVAL (XEXP (y
, 1)) <= 4095)
1379 /* This is safe because of the guard page at the
1380 beginning and end of the data space. Just
1381 return the original address. */
1386 /* Doesn't look like one we can optimize. */
1387 regx1
= force_reg (Pmode
, force_operand (XEXP (x
, 0), 0));
1388 regy1
= force_reg (Pmode
, force_operand (XEXP (y
, 0), 0));
1389 regy2
= force_reg (Pmode
, force_operand (XEXP (y
, 1), 0));
1390 regx1
= force_reg (Pmode
,
1391 gen_rtx_fmt_ee (GET_CODE (y
), Pmode
,
1393 return force_reg (Pmode
, gen_rtx_PLUS (Pmode
, regx1
, regy1
));
1401 /* Implement the TARGET_REGISTER_MOVE_COST hook.
1403 Compute extra cost of moving data between one register class
1406 Make moves from SAR so expensive they should never happen. We used to
1407 have 0xffff here, but that generates overflow in rare cases.
1409 Copies involving a FP register and a non-FP register are relatively
1410 expensive because they must go through memory.
1412 Other copies are reasonably cheap. */
1415 hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
1416 reg_class_t from
, reg_class_t to
)
1418 if (from
== SHIFT_REGS
)
1420 else if (to
== SHIFT_REGS
&& FP_REG_CLASS_P (from
))
1422 else if ((FP_REG_CLASS_P (from
) && ! FP_REG_CLASS_P (to
))
1423 || (FP_REG_CLASS_P (to
) && ! FP_REG_CLASS_P (from
)))
1429 /* For the HPPA, REG and REG+CONST is cost 0
1430 and addresses involving symbolic constants are cost 2.
1432 PIC addresses are very expensive.
1434 It is no coincidence that this has the same structure
1435 as pa_legitimate_address_p. */
1438 hppa_address_cost (rtx X
, machine_mode mode ATTRIBUTE_UNUSED
,
1439 addr_space_t as ATTRIBUTE_UNUSED
,
1440 bool speed ATTRIBUTE_UNUSED
)
1442 switch (GET_CODE (X
))
1455 /* Compute a (partial) cost for rtx X. Return true if the complete
1456 cost has been computed, and false if subexpressions should be
1457 scanned. In either case, *TOTAL contains the cost result. */
1460 hppa_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
1461 int *total
, bool speed ATTRIBUTE_UNUSED
)
1468 if (INTVAL (x
) == 0)
1470 else if (INT_14_BITS (x
))
1487 if ((x
== CONST0_RTX (DFmode
) || x
== CONST0_RTX (SFmode
))
1488 && outer_code
!= SET
)
1495 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
1497 *total
= COSTS_N_INSNS (3);
1501 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1502 factor
= GET_MODE_SIZE (GET_MODE (x
)) / 4;
1506 if (TARGET_PA_11
&& !TARGET_DISABLE_FPREGS
&& !TARGET_SOFT_FLOAT
)
1507 *total
= factor
* factor
* COSTS_N_INSNS (8);
1509 *total
= factor
* factor
* COSTS_N_INSNS (20);
1513 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
1515 *total
= COSTS_N_INSNS (14);
1523 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1524 factor
= GET_MODE_SIZE (GET_MODE (x
)) / 4;
1528 *total
= factor
* factor
* COSTS_N_INSNS (60);
1531 case PLUS
: /* this includes shNadd insns */
1533 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
1535 *total
= COSTS_N_INSNS (3);
1539 /* A size N times larger than UNITS_PER_WORD needs N times as
1540 many insns, taking N times as long. */
1541 factor
= GET_MODE_SIZE (GET_MODE (x
)) / UNITS_PER_WORD
;
1544 *total
= factor
* COSTS_N_INSNS (1);
1550 *total
= COSTS_N_INSNS (1);
1558 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1559 new rtx with the correct mode. */
1561 force_mode (machine_mode mode
, rtx orig
)
1563 if (mode
== GET_MODE (orig
))
1566 gcc_assert (REGNO (orig
) < FIRST_PSEUDO_REGISTER
);
1568 return gen_rtx_REG (mode
, REGNO (orig
));
1571 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1574 pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
1576 return tls_referenced_p (x
);
1579 /* Emit insns to move operands[1] into operands[0].
1581 Return 1 if we have written out everything that needs to be done to
1582 do the move. Otherwise, return 0 and the caller will emit the move
1585 Note SCRATCH_REG may not be in the proper mode depending on how it
1586 will be used. This routine is responsible for creating a new copy
1587 of SCRATCH_REG in the proper mode. */
1590 pa_emit_move_sequence (rtx
*operands
, machine_mode mode
, rtx scratch_reg
)
1592 register rtx operand0
= operands
[0];
1593 register rtx operand1
= operands
[1];
1596 /* We can only handle indexed addresses in the destination operand
1597 of floating point stores. Thus, we need to break out indexed
1598 addresses from the destination operand. */
1599 if (GET_CODE (operand0
) == MEM
&& IS_INDEX_ADDR_P (XEXP (operand0
, 0)))
1601 gcc_assert (can_create_pseudo_p ());
1603 tem
= copy_to_mode_reg (Pmode
, XEXP (operand0
, 0));
1604 operand0
= replace_equiv_address (operand0
, tem
);
1607 /* On targets with non-equivalent space registers, break out unscaled
1608 indexed addresses from the source operand before the final CSE.
1609 We have to do this because the REG_POINTER flag is not correctly
1610 carried through various optimization passes and CSE may substitute
1611 a pseudo without the pointer set for one with the pointer set. As
1612 a result, we loose various opportunities to create insns with
1613 unscaled indexed addresses. */
1614 if (!TARGET_NO_SPACE_REGS
1615 && !cse_not_expected
1616 && GET_CODE (operand1
) == MEM
1617 && GET_CODE (XEXP (operand1
, 0)) == PLUS
1618 && REG_P (XEXP (XEXP (operand1
, 0), 0))
1619 && REG_P (XEXP (XEXP (operand1
, 0), 1)))
1621 = replace_equiv_address (operand1
,
1622 copy_to_mode_reg (Pmode
, XEXP (operand1
, 0)));
1625 && reload_in_progress
&& GET_CODE (operand0
) == REG
1626 && REGNO (operand0
) >= FIRST_PSEUDO_REGISTER
)
1627 operand0
= reg_equiv_mem (REGNO (operand0
));
1628 else if (scratch_reg
1629 && reload_in_progress
&& GET_CODE (operand0
) == SUBREG
1630 && GET_CODE (SUBREG_REG (operand0
)) == REG
1631 && REGNO (SUBREG_REG (operand0
)) >= FIRST_PSEUDO_REGISTER
)
1633 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1634 the code which tracks sets/uses for delete_output_reload. */
1635 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand0
),
1636 reg_equiv_mem (REGNO (SUBREG_REG (operand0
))),
1637 SUBREG_BYTE (operand0
));
1638 operand0
= alter_subreg (&temp
, true);
1642 && reload_in_progress
&& GET_CODE (operand1
) == REG
1643 && REGNO (operand1
) >= FIRST_PSEUDO_REGISTER
)
1644 operand1
= reg_equiv_mem (REGNO (operand1
));
1645 else if (scratch_reg
1646 && reload_in_progress
&& GET_CODE (operand1
) == SUBREG
1647 && GET_CODE (SUBREG_REG (operand1
)) == REG
1648 && REGNO (SUBREG_REG (operand1
)) >= FIRST_PSEUDO_REGISTER
)
1650 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1651 the code which tracks sets/uses for delete_output_reload. */
1652 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand1
),
1653 reg_equiv_mem (REGNO (SUBREG_REG (operand1
))),
1654 SUBREG_BYTE (operand1
));
1655 operand1
= alter_subreg (&temp
, true);
1658 if (scratch_reg
&& reload_in_progress
&& GET_CODE (operand0
) == MEM
1659 && ((tem
= find_replacement (&XEXP (operand0
, 0)))
1660 != XEXP (operand0
, 0)))
1661 operand0
= replace_equiv_address (operand0
, tem
);
1663 if (scratch_reg
&& reload_in_progress
&& GET_CODE (operand1
) == MEM
1664 && ((tem
= find_replacement (&XEXP (operand1
, 0)))
1665 != XEXP (operand1
, 0)))
1666 operand1
= replace_equiv_address (operand1
, tem
);
1668 /* Handle secondary reloads for loads/stores of FP registers from
1669 REG+D addresses where D does not fit in 5 or 14 bits, including
1670 (subreg (mem (addr))) cases. */
1672 && fp_reg_operand (operand0
, mode
)
1673 && (MEM_P (operand1
)
1674 || (GET_CODE (operand1
) == SUBREG
1675 && MEM_P (XEXP (operand1
, 0))))
1676 && !floating_point_store_memory_operand (operand1
, mode
))
1678 if (GET_CODE (operand1
) == SUBREG
)
1679 operand1
= XEXP (operand1
, 0);
1681 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1682 it in WORD_MODE regardless of what mode it was originally given
1684 scratch_reg
= force_mode (word_mode
, scratch_reg
);
1686 /* D might not fit in 14 bits either; for such cases load D into
1688 if (reg_plus_base_memory_operand (operand1
, mode
)
1691 && INT_14_BITS (XEXP (XEXP (operand1
, 0), 1))))
1693 emit_move_insn (scratch_reg
, XEXP (XEXP (operand1
, 0), 1));
1694 emit_move_insn (scratch_reg
,
1695 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1
, 0)),
1697 XEXP (XEXP (operand1
, 0), 0),
1701 emit_move_insn (scratch_reg
, XEXP (operand1
, 0));
1702 emit_insn (gen_rtx_SET (operand0
,
1703 replace_equiv_address (operand1
, scratch_reg
)));
1706 else if (scratch_reg
1707 && fp_reg_operand (operand1
, mode
)
1708 && (MEM_P (operand0
)
1709 || (GET_CODE (operand0
) == SUBREG
1710 && MEM_P (XEXP (operand0
, 0))))
1711 && !floating_point_store_memory_operand (operand0
, mode
))
1713 if (GET_CODE (operand0
) == SUBREG
)
1714 operand0
= XEXP (operand0
, 0);
1716 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1717 it in WORD_MODE regardless of what mode it was originally given
1719 scratch_reg
= force_mode (word_mode
, scratch_reg
);
1721 /* D might not fit in 14 bits either; for such cases load D into
1723 if (reg_plus_base_memory_operand (operand0
, mode
)
1726 && INT_14_BITS (XEXP (XEXP (operand0
, 0), 1))))
1728 emit_move_insn (scratch_reg
, XEXP (XEXP (operand0
, 0), 1));
1729 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0
,
1732 XEXP (XEXP (operand0
, 0),
1737 emit_move_insn (scratch_reg
, XEXP (operand0
, 0));
1738 emit_insn (gen_rtx_SET (replace_equiv_address (operand0
, scratch_reg
),
1742 /* Handle secondary reloads for loads of FP registers from constant
1743 expressions by forcing the constant into memory. For the most part,
1744 this is only necessary for SImode and DImode.
1746 Use scratch_reg to hold the address of the memory location. */
1747 else if (scratch_reg
1748 && CONSTANT_P (operand1
)
1749 && fp_reg_operand (operand0
, mode
))
1751 rtx const_mem
, xoperands
[2];
1753 if (operand1
== CONST0_RTX (mode
))
1755 emit_insn (gen_rtx_SET (operand0
, operand1
));
1759 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1760 it in WORD_MODE regardless of what mode it was originally given
1762 scratch_reg
= force_mode (word_mode
, scratch_reg
);
1764 /* Force the constant into memory and put the address of the
1765 memory location into scratch_reg. */
1766 const_mem
= force_const_mem (mode
, operand1
);
1767 xoperands
[0] = scratch_reg
;
1768 xoperands
[1] = XEXP (const_mem
, 0);
1769 pa_emit_move_sequence (xoperands
, Pmode
, 0);
1771 /* Now load the destination register. */
1772 emit_insn (gen_rtx_SET (operand0
,
1773 replace_equiv_address (const_mem
, scratch_reg
)));
1776 /* Handle secondary reloads for SAR. These occur when trying to load
1777 the SAR from memory or a constant. */
1778 else if (scratch_reg
1779 && GET_CODE (operand0
) == REG
1780 && REGNO (operand0
) < FIRST_PSEUDO_REGISTER
1781 && REGNO_REG_CLASS (REGNO (operand0
)) == SHIFT_REGS
1782 && (GET_CODE (operand1
) == MEM
|| GET_CODE (operand1
) == CONST_INT
))
1784 /* D might not fit in 14 bits either; for such cases load D into
1786 if (GET_CODE (operand1
) == MEM
1787 && !memory_address_p (GET_MODE (operand0
), XEXP (operand1
, 0)))
1789 /* We are reloading the address into the scratch register, so we
1790 want to make sure the scratch register is a full register. */
1791 scratch_reg
= force_mode (word_mode
, scratch_reg
);
1793 emit_move_insn (scratch_reg
, XEXP (XEXP (operand1
, 0), 1));
1794 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1
,
1797 XEXP (XEXP (operand1
, 0),
1801 /* Now we are going to load the scratch register from memory,
1802 we want to load it in the same width as the original MEM,
1803 which must be the same as the width of the ultimate destination,
1805 scratch_reg
= force_mode (GET_MODE (operand0
), scratch_reg
);
1807 emit_move_insn (scratch_reg
,
1808 replace_equiv_address (operand1
, scratch_reg
));
1812 /* We want to load the scratch register using the same mode as
1813 the ultimate destination. */
1814 scratch_reg
= force_mode (GET_MODE (operand0
), scratch_reg
);
1816 emit_move_insn (scratch_reg
, operand1
);
1819 /* And emit the insn to set the ultimate destination. We know that
1820 the scratch register has the same mode as the destination at this
1822 emit_move_insn (operand0
, scratch_reg
);
1825 /* Handle the most common case: storing into a register. */
1826 else if (register_operand (operand0
, mode
))
1828 /* Legitimize TLS symbol references. This happens for references
1829 that aren't a legitimate constant. */
1830 if (PA_SYMBOL_REF_TLS_P (operand1
))
1831 operand1
= legitimize_tls_address (operand1
);
1833 if (register_operand (operand1
, mode
)
1834 || (GET_CODE (operand1
) == CONST_INT
1835 && pa_cint_ok_for_move (INTVAL (operand1
)))
1836 || (operand1
== CONST0_RTX (mode
))
1837 || (GET_CODE (operand1
) == HIGH
1838 && !symbolic_operand (XEXP (operand1
, 0), VOIDmode
))
1839 /* Only `general_operands' can come here, so MEM is ok. */
1840 || GET_CODE (operand1
) == MEM
)
1842 /* Various sets are created during RTL generation which don't
1843 have the REG_POINTER flag correctly set. After the CSE pass,
1844 instruction recognition can fail if we don't consistently
1845 set this flag when performing register copies. This should
1846 also improve the opportunities for creating insns that use
1847 unscaled indexing. */
1848 if (REG_P (operand0
) && REG_P (operand1
))
1850 if (REG_POINTER (operand1
)
1851 && !REG_POINTER (operand0
)
1852 && !HARD_REGISTER_P (operand0
))
1853 copy_reg_pointer (operand0
, operand1
);
1856 /* When MEMs are broken out, the REG_POINTER flag doesn't
1857 get set. In some cases, we can set the REG_POINTER flag
1858 from the declaration for the MEM. */
1859 if (REG_P (operand0
)
1860 && GET_CODE (operand1
) == MEM
1861 && !REG_POINTER (operand0
))
1863 tree decl
= MEM_EXPR (operand1
);
1865 /* Set the register pointer flag and register alignment
1866 if the declaration for this memory reference is a
1872 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1874 if (TREE_CODE (decl
) == COMPONENT_REF
)
1875 decl
= TREE_OPERAND (decl
, 1);
1877 type
= TREE_TYPE (decl
);
1878 type
= strip_array_types (type
);
1880 if (POINTER_TYPE_P (type
))
1884 type
= TREE_TYPE (type
);
1885 /* Using TYPE_ALIGN_OK is rather conservative as
1886 only the ada frontend actually sets it. */
1887 align
= (TYPE_ALIGN_OK (type
) ? TYPE_ALIGN (type
)
1889 mark_reg_pointer (operand0
, align
);
1894 emit_insn (gen_rtx_SET (operand0
, operand1
));
1898 else if (GET_CODE (operand0
) == MEM
)
1900 if (mode
== DFmode
&& operand1
== CONST0_RTX (mode
)
1901 && !(reload_in_progress
|| reload_completed
))
1903 rtx temp
= gen_reg_rtx (DFmode
);
1905 emit_insn (gen_rtx_SET (temp
, operand1
));
1906 emit_insn (gen_rtx_SET (operand0
, temp
));
1909 if (register_operand (operand1
, mode
) || operand1
== CONST0_RTX (mode
))
1911 /* Run this case quickly. */
1912 emit_insn (gen_rtx_SET (operand0
, operand1
));
1915 if (! (reload_in_progress
|| reload_completed
))
1917 operands
[0] = validize_mem (operand0
);
1918 operands
[1] = operand1
= force_reg (mode
, operand1
);
1922 /* Simplify the source if we need to.
1923 Note we do have to handle function labels here, even though we do
1924 not consider them legitimate constants. Loop optimizations can
1925 call the emit_move_xxx with one as a source. */
1926 if ((GET_CODE (operand1
) != HIGH
&& immediate_operand (operand1
, mode
))
1927 || (GET_CODE (operand1
) == HIGH
1928 && symbolic_operand (XEXP (operand1
, 0), mode
))
1929 || function_label_operand (operand1
, VOIDmode
)
1930 || tls_referenced_p (operand1
))
      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0),
					  VOIDmode))
	      || function_label_operand (operand1, VOIDmode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      pa_emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      pa_emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }
	  if (flag_pic)
	    {
	      rtx_insn *insn;
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* Force (const (plus (symbol) (const_int))) to memory
		 if the const_int will not fit in 14 bits.  Although
		 this requires a relocation, the instruction sequence
		 needed to load the value is shorter.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
		{
		  rtx x, m = force_const_mem (mode, operand1);

		  x = legitimize_pic_address (XEXP (m, 0), mode, temp);
		  x = replace_equiv_address (m, x);
		  insn = emit_move_insn (operand0, x);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  insn = emit_move_insn (operand0, operands[1]);
		}

	      /* Put a REG_EQUAL note on this insn.  */
	      set_unique_reg_note (insn, REG_EQUAL, operand1);
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);
	      if (ishighonly)
		set = gen_rtx_SET (operand0, temp);
	      else
		set = gen_rtx_SET (operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
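
	      /* For a non-PIC symbol, the HIGH/LO_SUM pair above typically
		 assembles to something like

			ldil L'sym,%r20
			ldo R'sym(%r20),%r19

		 where L' selects the left 21 bits and R' the right 11 bits
		 of the address.  (Illustrative only; the register numbers
		 and "sym" are made-up names for this sketch.)  */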
	    }

	  return 1;
	}
      else if (tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
      else if (GET_CODE (operand1) != CONST_INT
	       || !pa_cint_ok_for_move (INTVAL (operand1)))
	{
	  rtx temp;
	  rtx_insn *insn;
	  rtx op1 = operand1;
	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;
	  int insert = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	  if (TARGET_64BIT
	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	    {
	      HOST_WIDE_INT nval;

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we
		 can use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32-bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
	      if (value != nval)
		{
#if HOST_BITS_PER_WIDE_INT > 32
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
#endif
		  insert = 1;
		  value = nval;
		  operand1 = GEN_INT (nval);
		}
	    }
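
	  /* Worked example (illustrative values only): for
	     value = 0x123456789, the sign-extended low-order 32 bits give
	     nval = 0x23456789.  Since value != nval, insert is set and
	     insv = value >> 32 = 0x1; the code below first loads
	     0x23456789 and then deposits the 0x1 into bits 32..63 of the
	     destination.  */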
	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	  else
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !insert)
	    {
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
		 high part.  */
	      HOST_WIDE_INT low = value & 0x3fff;
	      HOST_WIDE_INT high = value & ~ 0x3fff;

	      if (low >= 0x2000)
		{
		  if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
		    high += 0x2000;
		  else
		    high += 0x4000;
		}

	      low = value - high;

	      emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
	      operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
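
	      /* Worked example: value = 0x6789 gives low = 0x2789 and
		 high = 0x4000.  Since low >= 0x2000, high is biased up
		 to 0x8000 and low becomes 0x6789 - 0x8000 = -6263, which
		 fits in the 14-bit signed immediate.  (Illustrative
		 values.)  */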
	    }
	  else
	    {
	      emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
	      operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
	    }

	  insn = emit_move_insn (operands[0], operands[1]);

	  /* Now insert the most significant 32 bits of the value
	     into the register.  When we don't have a second register
	     available, it could take up to nine instructions to load
	     a 64-bit integer constant.  Prior to reload, we force
	     constants that would take more than three instructions
	     to load to the constant pool.  During and after reload,
	     we have to handle all possible values.  */
	  if (insert)
	    {
	      /* Use a HIGH/LO_SUM/INSV sequence if we have a second
		 register and the value to be inserted is outside the
		 range that can be loaded with three depdi instructions.  */
	      if (temp != operand0 && (insv >= 16384 || insv < -16384))
		{
		  operand1 = GEN_INT (insv);

		  emit_insn (gen_rtx_SET (temp,
					  gen_rtx_HIGH (mode, operand1)));
		  emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
		  if (mode == DImode)
		    emit_insn (gen_insvdi (operand0, GEN_INT (32),
					   const0_rtx, temp));
		  else
		    emit_insn (gen_insvsi (operand0, GEN_INT (32),
					   const0_rtx, temp));
		}
	      else
		{
		  int len = 5, pos = 27;

		  /* Insert the bits using the depdi instruction.  */
		  while (pos >= 0)
		    {
		      HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
		      HOST_WIDE_INT sign = v5 < 0;

		      /* Left extend the insertion.  */
		      insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
		      while (pos > 0 && (insv & 1) == sign)
			{
			  insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
			  len += 1;
			  pos -= 1;
			}

		      if (mode == DImode)
			emit_insn (gen_insvdi (operand0, GEN_INT (len),
					       GEN_INT (pos), GEN_INT (v5)));
		      else
			emit_insn (gen_insvsi (operand0, GEN_INT (len),
					       GEN_INT (pos), GEN_INT (v5)));

		      len = pos > 0 && pos < 5 ? pos : 5;
		      pos -= len;
		    }
		}
	    }

	  set_unique_reg_note (insn, REG_EQUAL, op1);

	  return 1;
	}
    }
  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
pa_reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
      break;

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
	tree value;
	unsigned HOST_WIDE_INT ix;

	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
	  if (value)
	    reloc |= pa_reloc_needed (value);
      }
      break;

    default:
      break;
    }
  return reloc;
}
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */
const char *
pa_singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
	 bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
	return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
	return "ldil L'%1,%0";
      else if (pa_zdepi_cint_p (intval))
	return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
	return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}
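
/* Examples of the constant cases above (illustrative values): a 14-bit
   value such as 42 yields "ldi", a value with the low 11 bits clear such
   as 0x12000 yields a single "ldil", a short deposited bit string such
   as 0x1e0 yields "zdepi"/"depwi,z", and anything else, e.g. 0x12345,
   takes the two-insn "ldil"/"ldo" sequence.  */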
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32 - lsb; len++)
	{
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
	    break;
	}

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}
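
/* E.g., imm = 0x1e0 (binary 111100000): lsb = 5, the shifted string 0xf
   has bit 4 clear, so len = 4 and op = {15, 26, 4}.  "depwi,z 15,26,4"
   deposits 1111 with its rightmost bit at PA bit 26 (weight 2**5),
   reconstructing 15 << 5 = 0x1e0.  (Illustrative; the positions assume
   the op[1] = 31 - lsb convention used above.)  */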
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len, maxlen;

  maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < maxlen; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < maxlen - lsb; len++)
	{
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
	    break;
	}

      /* Extend length if host is narrow and IMM is negative.  */
      if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
	len += 32;

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb;
  op[2] = len;
}
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

const char *
pa_output_move_double (rtx *operands)
{
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
    {
      if (FP_REG_P (operands[0]))
	{
	  output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
	  output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
	  return "{fldds|fldd} -16(%%sp),%0";
	}
      else
	{
	  output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
	  output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
	  return "{ldws|ldw} -12(%%sp),%R0";
	}
    }

  /* Handle auto decrementing and incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do it better when we teach
     this port about the general inc/dec addressing of PA.
     (This was written by tege.  Chide him if it doesn't work.)  */

  if (optype0 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to
	     save a register file writeback)  */
	  if (GET_CODE (addr) == POST_INC)
	    return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
	  return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to save a
	     register file writeback)  */
	  if (GET_CODE (addr) == PRE_INC)
	    return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
	  return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
	}
    }
  if (optype1 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback)  */
	      if (GET_CODE (addr) == POST_INC)
		return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
	      return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == POST_INC)
		return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
	      return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback)  */
	      if (GET_CODE (addr) == PRE_INC)
		return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
	      return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == PRE_INC)
		return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
	      return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PLUS
	       && GET_CODE (XEXP (addr, 0)) == MULT)
	{
	  rtx xoperands[4];

	  /* Load address into left half of destination register.  */
	  xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
	  xoperands[1] = XEXP (addr, 1);
	  xoperands[2] = XEXP (XEXP (addr, 0), 0);
	  xoperands[3] = XEXP (XEXP (addr, 0), 1);
	  output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
			   xoperands);
	  return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
	}
      else if (GET_CODE (addr) == PLUS
	       && REG_P (XEXP (addr, 0))
	       && REG_P (XEXP (addr, 1)))
	{
	  rtx xoperands[3];

	  /* Load address into left half of destination register.  */
	  xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
	  xoperands[1] = XEXP (addr, 0);
	  xoperands[2] = XEXP (addr, 1);
	  output_asm_insn ("{addl|add,l} %1,%2,%0",
			   xoperands);
	  return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
	}
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

	mem -> register where the first half of the destination register
	is the same register used in the memory's address.  Reload
	can create such insns.

	mem in this case will be either register indirect or register
	indirect plus a valid offset.

	register -> register move where REGNO(dst) == REGNO(src + 1)
	someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), operands[1]))
    {
      /* Do the late half first.  */
      if (addreg1)
	output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (pa_singlemove_string (latehalf), latehalf);

      /* Then clobber.  */
      if (addreg1)
	output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return pa_singlemove_string (operands);
    }

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    {
      output_asm_insn (pa_singlemove_string (latehalf), latehalf);
      return pa_singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (pa_singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (pa_singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo -4(%0),%0", &addreg1);

  return "";
}
const char *
pa_output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
	  || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
	output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
	output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
	 arbitrary address here including pre/post increment/decrement.

	 so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}
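
/* E.g., for ADDR = (plus (plus (reg 3) (reg 5)) (const_int 8)) the loop
   first steps past the constant and then returns (reg 3).  (Illustrative
   RTL only.)  */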
/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

const char *
pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[5]);
  unsigned long n_bytes = INTVAL (operands[4]);

  /* We can't move more than a word at a time because the PA
     has no integer move insns longer than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 16);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("ldd,ma 8(%1),%3", operands);
	output_asm_insn ("ldd,ma 8(%1),%6", operands);
	output_asm_insn ("std,ma %3,8(%0)", operands);
	output_asm_insn ("addib,>= -16,%2,.-12", operands);
	output_asm_insn ("std,ma %6,8(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 16 != 0)
	  {
	    operands[4] = GEN_INT (n_bytes % 8);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("ldd,ma 8(%1),%3", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("ldd 0(%1),%6", operands);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("std,ma %3,8(%0)", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("stdby,e %6,%4(%0)", operands);
	  }
	return "";

      case 4:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 8);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
	output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	output_asm_insn ("addib,>= -8,%2,.-12", operands);
	output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 8 != 0)
	  {
	    operands[4] = GEN_INT (n_bytes % 4);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("ldw 0(%1),%6", operands);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
	  }
	return "";

      case 2:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 4);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
	output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	output_asm_insn ("addib,>= -4,%2,.-12", operands);
	output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 4 != 0)
	  {
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("ldb 0(%1),%6", operands);
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("stb %6,0(%0)", operands);
	  }
	return "";

      case 1:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 2);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
	output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
	output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
	output_asm_insn ("addib,>= -2,%2,.-12", operands);
	output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 2 != 0)
	  {
	    output_asm_insn ("ldb 0(%1),%3", operands);
	    output_asm_insn ("stb %3,0(%0)", operands);
	  }
	return "";

      default:
	gcc_unreachable ();
    }
}
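
/* E.g., n_bytes = 23 with align = 4 on a 32-bit target: the counter is
   preset to 15, the loop copies 8 bytes per iteration (two iterations,
   16 bytes), the residual code copies one more word (20 bytes) and then
   uses a single stby,e with offset 3 to store the final 3 bytes.
   (Illustrative.)  */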
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_movmem_length (rtx_insn *insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than a word at a time because the PA
     has no integer move insns longer than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns += 2;

      if ((n_bytes % align) != 0)
	n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
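
/* E.g., n_bytes = 23, align = 4: the base loop counts 6 insns; the
   residual adds 2 (since 23 % 8 >= 4) plus 2 (since 23 % 4 != 0),
   giving 10 insns, i.e. a length of 40 bytes.  (Illustrative.)  */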
/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

const char *
pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[3]);
  unsigned long n_bytes = INTVAL (operands[2]);

  /* We can't clear more than a word at a time because the PA
     has no integer move insns longer than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the clear).  */
  switch (align)
    {
      case 8:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 16);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("std,ma %%r0,8(%0)", operands);
	output_asm_insn ("addib,>= -16,%1,.-4", operands);
	output_asm_insn ("std,ma %%r0,8(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to clear!  */
	if (n_bytes % 16 != 0)
	  {
	    operands[2] = GEN_INT (n_bytes % 8);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("std,ma %%r0,8(%0)", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
	  }
	return "";

      case 4:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 8);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	output_asm_insn ("addib,>= -8,%1,.-4", operands);
	output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to clear!  */
	if (n_bytes % 8 != 0)
	  {
	    operands[2] = GEN_INT (n_bytes % 4);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
	  }
	return "";

      case 2:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 4);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	output_asm_insn ("addib,>= -4,%1,.-4", operands);
	output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 4 != 0)
	  {
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("stb %%r0,0(%0)", operands);
	  }
	return "";

      case 1:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 2);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
	output_asm_insn ("addib,>= -2,%1,.-4", operands);
	output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 2 != 0)
	  output_asm_insn ("stb %%r0,0(%0)", operands);

	return "";

      default:
	gcc_unreachable ();
    }
}
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx_insn *insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no integer move insns longer than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns += 1;

      if ((n_bytes % align) != 0)
	n_insns += 1;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
	if ((mask & (1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < 32; ls1++)
	if ((mask & (1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < 32; ms0++)
	if ((mask & (1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == 32);

      if (ls1 == 32)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "{extru|extrw,u} %1,31,%2,%0";
	}
      else
	{
	  /* We could use this `depi' for the case above as well, but `depi'
	     requires one more register file access than an `extru'.  */

	  p = 31 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "{depi|depwi} 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}
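
/* Examples (illustrative): mask 0x0000ffff gives ls0 = 16, ls1 = 32,
   so "extru %1,31,16,%0" keeps the low 16 bits; mask 0xffff00ff gives
   ls0 = 8, ls1 = 16, so "depi 0,23,8,%0" zeroes the 8-bit field whose
   rightmost bit sits at PA bit 23 (weight 2**8).  */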
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "extrd,u %1,63,%2,%0";
	}
      else
	{
	  /* We could use this `depi' for the case above as well, but `depi'
	     requires one more register file access than an `extru'.  */

	  p = 63 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "depdi 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}
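
/* E.g., mask 0x00ff0000: bs0 = 16, bs1 = 24, so p = 15 and len = 8,
   giving "depi -1,15,8,%0", which sets bits 16..23 of the target.
   (Illustrative.)  */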
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
	      || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}
/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);

      /* We don't want an OPD when generating fast indirect calls.  */
      if (!TARGET_FAST_INDIRECT_CALLS)
	fputs ("P%", asm_out_file);

      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Output an ascii string.  */
void
pa_output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
	{
	  register unsigned int c = (unsigned char) p[i + io];

	  if (c == '\"' || c == '\\')
	    partial_output[co++] = '\\';
	  if (c >= ' ' && c < 0177)
	    partial_output[co++] = c;
	  else
	    {
	      unsigned int hexd;
	      partial_output[co++] = '\\';
	      partial_output[co++] = 'x';
	      hexd = c / 16 - 0 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	      hexd = c % 16 - 0 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	    }
	}
      if (chars_output + co > 243)
	{
	  fputs ("\"\n\t.STRING \"", file);
	  chars_output = 0;
	}
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}
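
/* E.g., the four input bytes "a", double quote, "b", newline come out as

	.STRING "a\"b\x0a"

   the quote is escaped and the newline is written as a \xnn pair by the
   hex conversion above.  (Illustrative.)  */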
/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD
   notes lying around.

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch
   instructions.  */
static void
remove_useless_addtr_insns (int check_notes)
{
  rtx_insn *insn;
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */
  if (optimize > 0)
    {
      int fcmp_count = 0;
      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
	 instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp;

	  /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
	  if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* If the destination is CCFP, then we've found an fcmp insn.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
	    {
	      fcmp_count++;
	      continue;
	    }

	  tmp = PATTERN (insn);
	  /* If this is an fbranch instruction, bump the fbranch counter.  */
	  if (GET_CODE (tmp) == SET
	      && SET_DEST (tmp) == pc_rtx
	      && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
	      && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
	      && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
	      && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
	    {
	      fbranch_count++;
	      continue;
	    }
	}

      /* Find all floating point compare + branch insns.  If possible,
	 reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp;
	  rtx_insn *next;

	  /* Ignore anything that isn't an INSN.  */
	  if (! NONJUMP_INSN_P (insn))
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* The destination must be CCFP, which is register zero.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
	    continue;

	  /* INSN should be a set of CCFP.

	     See if the result of this insn is used in a reversed FP
	     conditional branch.  If so, reverse our condition and
	     the branch.  Doing so avoids useless add,tr insns.  */
	  next = next_insn (insn);
	  while (next)
	    {
	      /* Jumps, calls and labels stop our search.  */
	      if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
		break;

	      /* As does another fcmp insn.  */
	      if (NONJUMP_INSN_P (next)
		  && GET_CODE (PATTERN (next)) == SET
		  && GET_CODE (SET_DEST (PATTERN (next))) == REG
		  && REGNO (SET_DEST (PATTERN (next))) == 0)
		break;

	      next = next_insn (next);
	    }

	  /* Is NEXT_INSN a branch?  */
	  if (next && JUMP_P (next))
	    {
	      rtx pattern = PATTERN (next);

	      /* If it is a reversed fp conditional branch (e.g. uses add,tr)
		 and CCFP dies, then reverse our conditional and the branch
		 to avoid the add,tr.  */
	      if (GET_CODE (pattern) == SET
		  && SET_DEST (pattern) == pc_rtx
		  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
		  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
		  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
		  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
		  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
		  && (fcmp_count == fbranch_count
		      || (check_notes
			  && find_regno_note (next, REG_DEAD, 0))))
		{
		  /* Reverse the branch.  */
		  tmp = XEXP (SET_SRC (pattern), 1);
		  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
		  XEXP (SET_SRC (pattern), 2) = tmp;
		  INSN_CODE (next) = -1;

		  /* Reverse our condition.  */
		  tmp = PATTERN (insn);
		  PUT_CODE (XEXP (tmp, 1),
			    (reverse_condition_maybe_unordered
			     (GET_CODE (XEXP (tmp, 1)))));
		}
	    }
	}
    }

  pass = !pass;
}
/* You may have trouble believing this, but this is the 32 bit HP-PA
   stack layout.  Wow.

   Offset		Contents

   Variable arguments (optional; any number may be allocated)

   SP-(4*(N+9))		arg word N
	:		    :
   SP-56		arg word 5
   SP-52		arg word 4

   Fixed arguments (must be allocated; may remain unused)

   SP-48		arg word 3
   SP-44		arg word 2
   SP-40		arg word 1
   SP-36		arg word 0

   Frame Marker

   SP-32		External Data Pointer (DP)
   SP-28		External sr4
   SP-24		External/stub RP (RP')
   SP-20		Current RP
   SP-16		Static Link
   SP-12		Clean up
   SP-8			Calling Stub RP (RP'')
   SP-4			Previous SP

   Top of Frame

   SP-0			Stack Pointer (points to next available address)

*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   things simple.

   Top of Frame

       SP (FP')		Previous FP
       SP + 4		Alignment filler (sigh)
       SP + 8		Space for locals reserved here.
       .
       .
       .
       SP + n		All call saved register used.
       .
       .
       .
       SP + o		All call saved fp registers used.
       .
       .
       .
       SP + p (SP')	points to next available address.

*/

/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;
/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.*/

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest, src, basereg;
  rtx_insn *insn;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	{
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (tmpreg,
				     gen_rtx_PLUS (Pmode, basereg, delta)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
	add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		      gen_rtx_SET (gen_rtx_MEM (word_mode,
						gen_rtx_PLUS (word_mode,
							      basereg,
							      delta)),
				   src));
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}
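
/* E.g., store_reg (3, 40, STACK_POINTER_REGNUM) emits a single
   "stw %r3,40(%r30)" since 40 fits in 14 bits, while a displacement
   such as 0x11000 on a 32-bit target takes the HIGH/LO_SUM path,
   roughly "addil L'0x11000,%r30" followed by "stw %r3,R'0x11000(%r1)".
   (Illustrative assembly; the exact relocation syntax may differ.)  */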
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx basereg, srcreg, delta;
  rtx_insn *insn;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
	 in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx_insn *insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     plus_constant (Pmode,
					    gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		      gen_rtx_SET (tmpreg,
				   gen_rtx_PLUS (Pmode, basereg, delta)));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
		      gen_rtx_PLUS (Pmode, basereg,
				    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}
HOST_WIDE_INT
pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in pa_expand_prologue and pa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (df_regs_ever_live_p (i))
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (df_regs_ever_live_p (i)
	|| (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
      {
	freg_saved = 1;

	/* We always save both halves of the FP register, so always
	   increment the frame size by 8 bytes.  */
	size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
	*fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += crtl->outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!crtl->is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
	  & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
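
/* E.g., on a 32-bit target with 40 bytes of locals, a frame pointer, no
   callee saves and no outgoing arguments: 40 + 8 (frame pointer + filler)
   = 48, plus the 32-byte frame marker = 80, rounded up to the preferred
   64-byte stack boundary = 128.  (Illustrative; assumes the usual 32-bit
   values of STARTING_FRAME_OFFSET and PREFERRED_STACK_BOUNDARY.)  */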
/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is an int: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

static void
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* pa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (crtl->is_leaf)
    fputs (",NO_CALLS", file);
  else
    fputs (",CALLS", file);
  if (rp_saved)
    fputs (",SAVE_RP", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
}
3808 pa_expand_prologue (void)
3810 int merge_sp_adjust_with_store
= 0;
3811 HOST_WIDE_INT size
= get_frame_size ();
3812 HOST_WIDE_INT offset
;
3821 /* Compute total size for frame pointer, filler, locals and rounding to
3822 the next word boundary. Similar code appears in pa_compute_frame_size
3823 and must be changed in tandem with this code. */
3824 local_fsize
= (size
+ UNITS_PER_WORD
- 1) & ~(UNITS_PER_WORD
- 1);
3825 if (local_fsize
|| frame_pointer_needed
)
3826 local_fsize
+= STARTING_FRAME_OFFSET
;
3828 actual_fsize
= pa_compute_frame_size (size
, &save_fregs
);
3829 if (flag_stack_usage_info
)
3830 current_function_static_stack_size
= actual_fsize
;
3832 /* Compute a few things we will use often. */
3833 tmpreg
= gen_rtx_REG (word_mode
, 1);
3835 /* Save RP first. The calling conventions manual states RP will
3836 always be stored into the caller's frame at sp - 20 or sp - 16
3837 depending on which ABI is in use. */
3838 if (df_regs_ever_live_p (2) || crtl
->calls_eh_return
)
3840 store_reg (2, TARGET_64BIT
? -16 : -20, STACK_POINTER_REGNUM
);
3846 /* Allocate the local frame and set up the frame pointer if needed. */
3847 if (actual_fsize
!= 0)
3849 if (frame_pointer_needed
)
3851 /* Copy the old frame pointer temporarily into %r1. Set up the
3852 new stack pointer, then store away the saved old frame pointer
3853 into the stack at sp and at the same time update the stack
3854 pointer by actual_fsize bytes. Two versions, first
3855 handles small (<8k) frames. The second handles large (>=8k)
3857 insn
= emit_move_insn (tmpreg
, hard_frame_pointer_rtx
);
3859 RTX_FRAME_RELATED_P (insn
) = 1;
3861 insn
= emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
);
3863 RTX_FRAME_RELATED_P (insn
) = 1;
3865 if (VAL_14_BITS_P (actual_fsize
))
3866 store_reg_modify (STACK_POINTER_REGNUM
, 1, actual_fsize
);
3869 /* It is incorrect to store the saved frame pointer at *sp,
3870 then increment sp (writes beyond the current stack boundary).
3872 So instead use stwm to store at *sp and post-increment the
3873 stack pointer as an atomic operation. Then increment sp to
3874 finish allocating the new frame. */
3875 HOST_WIDE_INT adjust1
= 8192 - 64;
3876 HOST_WIDE_INT adjust2
= actual_fsize
- adjust1
;
3878 store_reg_modify (STACK_POINTER_REGNUM
, 1, adjust1
);
3879 set_reg_plus_d (STACK_POINTER_REGNUM
, STACK_POINTER_REGNUM
,
3883 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3884 we need to store the previous stack pointer (frame pointer)
3885 into the frame marker on targets that use the HP unwind
3886 library. This allows the HP unwind library to be used to
3887 unwind GCC frames. However, we are not fully compatible
3888 with the HP library because our frame layout differs from
3889 that specified in the HP runtime specification.
3891 We don't want a frame note on this instruction as the frame
3892 marker moves during dynamic stack allocation.
3894 This instruction also serves as a blockage to prevent
3895 register spills from being scheduled before the stack
3896 pointer is raised. This is necessary as we store
3897 registers using the frame pointer as a base register,
3898 and the frame pointer is set before sp is raised. */
3899 if (TARGET_HPUX_UNWIND_LIBRARY
)
3901 rtx addr
= gen_rtx_PLUS (word_mode
, stack_pointer_rtx
,
3902 GEN_INT (TARGET_64BIT
? -8 : -4));
3904 emit_move_insn (gen_rtx_MEM (word_mode
, addr
),
3905 hard_frame_pointer_rtx
);
3908 emit_insn (gen_blockage ());
3910 /* no frame pointer needed. */
3913 /* In some cases we can perform the first callee register save
3914 and allocating the stack frame at the same time. If so, just
3915 make a note of it and defer allocating the frame until saving
3916 the callee registers. */
3917 if (VAL_14_BITS_P (actual_fsize
) && local_fsize
== 0)
3918 merge_sp_adjust_with_store
= 1;
3919 /* Can not optimize. Adjust the stack frame by actual_fsize
3922 set_reg_plus_d (STACK_POINTER_REGNUM
, STACK_POINTER_REGNUM
,
3927 /* Normal register save.
3929 Do not save the frame pointer in the frame_pointer_needed case. It
3930 was done earlier. */
3931 if (frame_pointer_needed
)
3933 offset
= local_fsize
;
3935 /* Saving the EH return data registers in the frame is the simplest
3936 way to get the frame unwind information emitted. We put them
3937 just before the general registers. */
3938 if (DO_FRAME_NOTES
&& crtl
->calls_eh_return
)
3940 unsigned int i
, regno
;
3944 regno
= EH_RETURN_DATA_REGNO (i
);
3945 if (regno
== INVALID_REGNUM
)
3948 store_reg (regno
, offset
, HARD_FRAME_POINTER_REGNUM
);
3949 offset
+= UNITS_PER_WORD
;
3953 for (i
= 18; i
>= 4; i
--)
3954 if (df_regs_ever_live_p (i
) && ! call_used_regs
[i
])
3956 store_reg (i
, offset
, HARD_FRAME_POINTER_REGNUM
);
3957 offset
+= UNITS_PER_WORD
;
	  /* Account for %r3 which is saved in a special place.  */
	  gr_saved++;
	}
      /* No frame pointer needed.  */
      else
	{
	  offset = local_fsize - actual_fsize;

	  /* Saving the EH return data registers in the frame is the simplest
	     way to get the frame unwind information emitted.  */
	  if (DO_FRAME_NOTES && crtl->calls_eh_return)
	    {
	      unsigned int i, regno;

	      for (i = 0; ; ++i)
		{
		  regno = EH_RETURN_DATA_REGNO (i);
		  if (regno == INVALID_REGNUM)
		    break;

		  /* If merge_sp_adjust_with_store is nonzero, then we can
		     optimize the first save.  */
		  if (merge_sp_adjust_with_store)
		    {
		      store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
		      merge_sp_adjust_with_store = 0;
		    }
		  else
		    store_reg (regno, offset, STACK_POINTER_REGNUM);
		  offset += UNITS_PER_WORD;
		}
	    }

	  for (i = 18; i >= 3; i--)
	    if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	      {
		/* If merge_sp_adjust_with_store is nonzero, then we can
		   optimize the first GR save.  */
		if (merge_sp_adjust_with_store)
		  {
		    store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
		    merge_sp_adjust_with_store = 0;
		  }
		else
		  store_reg (i, offset, STACK_POINTER_REGNUM);
		offset += UNITS_PER_WORD;
		gr_saved++;
	      }

	  /* If we wanted to merge the SP adjustment with a GR save, but we never
	     did any GR saves, then just emit the adjustment here.  */
	  if (merge_sp_adjust_with_store)
	    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			    actual_fsize, 1);
	}

      /* The hppa calling conventions say that %r19, the pic offset
	 register, is saved at sp - 32 (in this function's frame)
	 when generating PIC code.  FIXME:  What is the correct thing
	 to do for functions which make no calls and allocate no
	 frame?  Do we need to allocate a frame, or can we just omit
	 the save?   For now we'll just omit the save.

	 We don't want a note on this insn as the frame marker can
	 move if there is a dynamic stack allocation.  */
      if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
	{
	  rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

	  emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
	}

      /* Align pointer properly (doubleword boundary).  */
      offset = (offset + 7) & ~7;

      /* Floating point register store.  */
      if (save_fregs)
	{
	  rtx base;

	  /* First get the frame or stack pointer to the start of the FP register
	     save area.  */
	  if (frame_pointer_needed)
	    {
	      set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
	      base = hard_frame_pointer_rtx;
	    }
	  else
	    {
	      set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
	      base = stack_pointer_rtx;
	    }

	  /* Now actually save the FP registers.  */
	  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	    {
	      if (df_regs_ever_live_p (i)
		  || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
		{
		  rtx addr, reg;
		  rtx_insn *insn;

		  addr = gen_rtx_MEM (DFmode,
				      gen_rtx_POST_INC (word_mode, tmpreg));
		  reg = gen_rtx_REG (DFmode, i);
		  insn = emit_move_insn (addr, reg);
		  if (DO_FRAME_NOTES)
		    {
		      RTX_FRAME_RELATED_P (insn) = 1;
		      if (TARGET_64BIT)
			{
			  rtx mem = gen_rtx_MEM (DFmode,
						 plus_constant (Pmode, base,
								offset));
			  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
					gen_rtx_SET (mem, reg));
			}
		      else
			{
			  rtx meml = gen_rtx_MEM (SFmode,
						  plus_constant (Pmode, base,
								 offset));
			  rtx memr = gen_rtx_MEM (SFmode,
						  plus_constant (Pmode, base,
								 offset + 4));
			  rtx regl = gen_rtx_REG (SFmode, i);
			  rtx regr = gen_rtx_REG (SFmode, i + 1);
			  rtx setl = gen_rtx_SET (meml, regl);
			  rtx setr = gen_rtx_SET (memr, regr);
			  rtx vec;

			  RTX_FRAME_RELATED_P (setl) = 1;
			  RTX_FRAME_RELATED_P (setr) = 1;
			  vec = gen_rtvec (2, setl, setr);
			  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
					gen_rtx_SEQUENCE (VOIDmode, vec));
			}
		    }
		  offset += GET_MODE_SIZE (DFmode);
		  fr_saved++;
		}
	    }
	}
    }
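
/* Illustrative example (not from the original sources): with no frame
   pointer, local_fsize == 0, a 64 byte frame and only %r3/%r4 live, the
   code above would typically emit something like

	stwm %r4,64(%sp)	; first GR save merged with the SP adjustment
	stw %r3,-60(%sp)	; later saves at increasing offsets

   with the FP save area starting at the next doubleword boundary.  */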
/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
	{
	  emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
	  src = gen_rtx_MEM (word_mode, tmpreg);
	}
      else
	src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}
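
/* Illustrative example (not from the original comments): for a
   displacement that doesn't fit in 14 bits on a 32-bit target, the
   final arm above typically assembles to

	addil L'disp,%base	; %r1 = %base + left portion of disp
	ldw R'disp(%r1),%reg	; right portion folded into the load

   which is the add_high_const/lo_sum pairing mentioned above.  */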
/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (unsigned int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      unsigned int old_total = total_code_bytes;

      total_code_bytes += nbytes;

      /* Be prepared to handle overflows.  */
      if (old_total > total_code_bytes)
	total_code_bytes = UINT_MAX;
    }
}
/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

static void
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rtx_insn *insn = get_last_insn ();
  bool extra_nop;

  /* pa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end
     of a function.

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (NOTE_P (insn))
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && CALL_P (insn))
    {
      fputs ("\tnop\n", file);
      extra_nop = true;
    }
  else
    extra_nop = false;

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We are done with this subspace except possibly for some additional
	 debug information.  Forget that we are in this subspace to ensure
	 that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  /* Thunks do their own insn accounting.  */
  if (cfun->is_thunk)
    return;

  if (INSN_ADDRESSES_SET_P ())
    {
      last_address = extra_nop ? 4 : 0;
      insn = get_last_nonnote_insn ();
      if (insn)
	{
	  last_address += INSN_ADDRESSES (INSN_UID (insn));
	  if (INSN_P (insn))
	    last_address += insn_default_length (insn);
	}
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
		      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
    }
  else
    last_address = UINT_MAX;

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
}
void
pa_expand_epilogue (void)
{
  rtx tmpreg;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;
  int i;
  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (rp_saved)
    {
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
	{
	  load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
	  ret_off = 0;
	}
      else
	{
	  /* No frame pointer, and stack is smaller than 8k.  */
	  if (VAL_14_BITS_P (ret_off - actual_fsize))
	    {
	      load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
	      ret_off = 0;
	    }
	}
    }

  /* General register restores.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	  }
    }
  else
    {
      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* Only for the first load.
		 merge_sp_adjust_with_load holds the register load
		 with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
		merge_sp_adjust_with_load = regno;
	      else
		load_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	{
	  if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	    {
	      /* Only for the first load.
		 merge_sp_adjust_with_load holds the register load
		 with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
		merge_sp_adjust_with_load = i;
	      else
		load_reg (i, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */
  if (save_fregs)
    {
      /* Adjust the register to index off of.  */
      if (frame_pointer_needed)
	set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
      else
	set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	if (df_regs_ever_live_p (i)
	    || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
	  {
	    rtx src = gen_rtx_MEM (DFmode,
				   gen_rtx_POST_INC (word_mode, tmpreg));
	    rtx dest = gen_rtx_REG (DFmode, i);
	    emit_move_insn (dest, src);
	  }
    }

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
  if (frame_pointer_needed)
    {
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (hard_frame_pointer_rtx,
			       stack_pointer_rtx, delta));
    }
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
    {
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
    }
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
		    - actual_fsize, 0);

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
  if (ret_off != 0)
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
		 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
		 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
    }
}
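
/* Illustrative note (not from the original sources): with a frame pointer
   the two insns emitted above typically assemble to

	ldo 64(%r3),%sp		; sp = fp + 64
	ldwm -64(%sp),%r3	; restore old %r3, sp -= 64

   so the stack is cut back and %r3 restored in a single load-and-modify,
   and %sp never points below live frame data.  */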
bool
pa_can_use_return_insn (void)
{
  if (!reload_completed)
    return false;

  if (frame_pointer_needed)
    return false;

  if (df_regs_ever_live_p (2))
    return false;

  if (crtl->profile)
    return false;

  return pa_compute_frame_size (get_frame_size (), 0) == 0;
}
rtx
hppa_pic_save_rtx (void)
{
  return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
}
#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0
#endif

/* Vector of funcdef numbers.  */
static vec<int> funcdef_nos;

/* Output deferred profile counters.  */
static void
output_deferred_profile_counters (void)
{
  unsigned int i;
  int align, n;

  if (funcdef_nos.is_empty ())
    return;

  switch_to_section (data_section);
  align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
  ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));

  for (i = 0; funcdef_nos.iterate (i, &n); i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "LP", n);
      assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
    }

  funcdef_nos.release ();
}
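
/* Illustrative example (not from the original comments): for funcdef
   number 3 on a 32-bit target the loop above emits roughly

	.data
	.align 4
   LP3:
	.word 0

   one zero-initialized counter word per deferred label number.  */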
static void
hppa_profile_hook (int label_no)
{
  /* We use SImode for the address of the function in both 32 and
     64-bit code to avoid having to provide DImode versions of the
     lcla2 and load_offset_label_address insn patterns.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx_code_label *label_rtx = gen_label_rtx ();
  rtx begin_label_rtx;
  rtx_insn *call_insn;
  char begin_label_name[16];

  ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
			       label_no);
  begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));

  if (TARGET_64BIT)
    emit_move_insn (arg_pointer_rtx,
		    gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
				  GEN_INT (64)));

  emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));

  /* The address of the function is loaded into %r25 with an instruction-
     relative sequence that avoids the use of relocations.  The sequence
     is split so that the load_offset_label_address instruction can
     occupy the delay slot of the call to _mcount.  */
  if (TARGET_PA_20)
    emit_insn (gen_lcla2 (reg, label_rtx));
  else
    emit_insn (gen_lcla1 (reg, label_rtx));

  emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
					    reg, begin_label_rtx, label_rtx));

#if !NO_DEFERRED_PROFILE_COUNTERS
  {
    rtx count_label_rtx, addr, r24;
    char count_label_name[16];

    funcdef_nos.safe_push (label_no);
    ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
    count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));

    addr = force_reg (Pmode, count_label_rtx);
    r24 = gen_rtx_REG (Pmode, 24);
    emit_move_insn (r24, addr);

    call_insn =
      emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
					     gen_rtx_SYMBOL_REF (Pmode,
								 "_mcount")),
				GEN_INT (TARGET_64BIT ? 24 : 12)));

    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
  }
#else

  call_insn =
    emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
					   gen_rtx_SYMBOL_REF (Pmode,
							       "_mcount")),
			      GEN_INT (TARGET_64BIT ? 16 : 8)));

#endif

  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));

  /* Indicate the _mcount call cannot throw, nor will it execute a
     non-local goto.  */
  make_reg_eh_region_note_nothrow_nononlocal (call_insn);
}
/* Fetch the return address for the frame COUNT steps up from
   the current frame, after the prologue.  FRAMEADDR is the
   frame pointer of the COUNT frame.

   We want to ignore any export stub remnants here.  To handle this,
   we examine the code at the return address, and if it is an export
   stub, we return a memory rtx for the stub return address stored
   at frame-24.

   The value returned is used in two different ways:

	1. To find a function's caller.

	2. To change the return address for a function.

   This function handles most instances of case 1; however, it will
   fail if there are two levels of stubs to execute on the return
   path.  The only way I believe that can happen is if the return value
   needs a parameter relocation, which never happens for C code.

   This function handles most instances of case 2; however, it will
   fail if we did not originally have stub code on the return path
   but will need stub code on the new return path.  This can happen if
   the caller & callee are both in the main program, but the new
   return location is in a shared library.  */

rtx
pa_return_addr_rtx (int count, rtx frameaddr)
{
  rtx label;
  rtx rp;
  rtx saved_rp;
  rtx ins;

  /* The instruction stream at the return address of a PA1.X export stub is:

     0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
     0x004010a1 | stub+12:  ldsid (sr0,rp),r1
     0x00011820 | stub+16:  mtsp r1,sr0
     0xe0400002 | stub+20:  be,n 0(sr0,rp)

     0xe0400002 must be specified as -532676606 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.

     The instruction stream at the return address of a PA2.0 export stub is:

     0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
     0xe840d002 | stub+12:  bve,n (rp)

     0xe840d002 must be specified as -398405630 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.  */

  HOST_WIDE_INT insns[4];
  int i, len;

  if (count != 0)
    return NULL_RTX;

  rp = get_hard_reg_initial_val (Pmode, 2);

  if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
    return rp;

  /* If there is no export stub then just use the value saved from
     the return pointer register.  */

  saved_rp = gen_reg_rtx (Pmode);
  emit_move_insn (saved_rp, rp);

  /* Get pointer to the instruction stream.  We have to mask out the
     privilege level from the two low order bits of the return address
     pointer here so that ins will point to the start of the first
     instruction that would have been executed if we returned.  */
  ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
  label = gen_label_rtx ();

  if (TARGET_PA_20)
    {
      insns[0] = 0x4bc23fd1;
      insns[1] = -398405630;
      len = 2;
    }
  else
    {
      insns[0] = 0x4bc23fd1;
      insns[1] = 0x004010a1;
      insns[2] = 0x00011820;
      insns[3] = -532676606;
      len = 4;
    }

  /* Check the instruction stream at the normal return address for the
     export stub.  If it is an export stub, then our return address is
     really in -24[frameaddr].  */

  for (i = 0; i < len; i++)
    {
      rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
      rtx op1 = GEN_INT (insns[i]);
      emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
    }

  /* Here we know that our return address points to an export
     stub.  We don't want to return the address of the export stub,
     but rather the return address of the export stub.  That return
     address is stored at -24[frameaddr].  */

  emit_move_insn (saved_rp,
		  gen_rtx_MEM (Pmode,
			       memory_address (Pmode,
					       plus_constant (Pmode, frameaddr,
							      -24))));

  emit_label (label);

  return saved_rp;
}
void
pa_emit_bcond_fp (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[0]);
  rtx operand0 = operands[1];
  rtx operand1 = operands[2];
  rtx label = operands[3];

  emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
			  gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));

  emit_jump_insn (gen_rtx_SET (pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode,
						     gen_rtx_fmt_ee (NE,
							      VOIDmode,
							      gen_rtx_REG (CCFPmode, 0),
							      const0_rtx),
						     gen_rtx_LABEL_REF (VOIDmode,
									label),
						     pc_rtx)));
}
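
/* Illustrative note (not from the original comments): the two insns
   emitted above typically assemble to a sequence like

	fcmp,dbl,= %fr4,%fr5	; set the FP status C-bit
	ftest			; nullify if the C-bit is clear
	b,n .Llabel

   where the branch tests the CCFP "register 0" set by the compare.  */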
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
  enum attr_type attr_type;

  /* Don't adjust costs for a pa8000 chip, also do not adjust any
     true dependencies as they are described with bypasses now.  */
  if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
    return cost;

  if (! recog_memoized (insn))
    return 0;

  attr_type = get_attr_type (insn);

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */

      if (attr_type == TYPE_FPLOAD)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPALU:
		case TYPE_FPMULSGL:
		case TYPE_FPMULDBL:
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* A fpload can't be issued until one cycle before a
		     preceding arithmetic operation has finished if
		     the target of the fpload is any of the sources
		     (or destination) of the arithmetic operation.  */
		  return insn_default_latency (dep_insn) - 1;

		default:
		  return 0;
		}
	    }
	}
      else if (attr_type == TYPE_FPALU)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* An ALU flop can't be issued until two cycles before a
		     preceding divide or sqrt operation has finished if
		     the target of the ALU flop is any of the sources
		     (or destination) of the divide or sqrt operation.  */
		  return insn_default_latency (dep_insn) - 2;

		default:
		  return 0;
		}
	    }
	}

      /* For other anti dependencies, the cost is 0.  */
      return 0;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if (attr_type == TYPE_FPLOAD)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPALU:
		case TYPE_FPMULSGL:
		case TYPE_FPMULDBL:
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* A fpload can't be issued until one cycle before a
		     preceding arithmetic operation has finished if
		     the target of the fpload is the destination of the
		     arithmetic operation.

		     Exception: For PA7100LC, PA7200 and PA7300, the cost
		     is 3 cycles, unless they bundle together.  We also
		     pay the penalty if the second insn is a fpload.  */
		  return insn_default_latency (dep_insn) - 1;

		default:
		  return 0;
		}
	    }
	}
      else if (attr_type == TYPE_FPALU)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* An ALU flop can't be issued until two cycles before a
		     preceding divide or sqrt operation has finished if
		     the target of the ALU flop is also the target of
		     the divide or sqrt operation.  */
		  return insn_default_latency (dep_insn) - 2;

		default:
		  return 0;
		}
	    }
	}

      /* For other output dependencies, the cost is 0.  */
      return 0;

    default:
      gcc_unreachable ();
    }
}
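
/* Worked example (illustrative, not from the original comments): if an
   fpdivdbl has a default latency of 8 cycles and an anti-dependent fpload
   targets one of its sources, the code above returns 8 - 1 = 7, holding
   the load back until the divide is nearly finished.  */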
/* Adjust scheduling priorities.  We use this to try and keep addil
   and the next use of %r1 close together.  */
static int
pa_adjust_priority (rtx_insn *insn, int priority)
{
  rtx set = single_set (insn);
  rtx src, dest;

  if (set)
    {
      src = SET_SRC (set);
      dest = SET_DEST (set);
      if (GET_CODE (src) == LO_SUM
	  && symbolic_operand (XEXP (src, 1), VOIDmode)
	  && ! read_only_operand (XEXP (src, 1), VOIDmode))
	priority >>= 3;

      else if (GET_CODE (src) == MEM
	       && GET_CODE (XEXP (src, 0)) == LO_SUM
	       && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
	       && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
	priority >>= 1;

      else if (GET_CODE (dest) == MEM
	       && GET_CODE (XEXP (dest, 0)) == LO_SUM
	       && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
	       && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
	priority >>= 3;
    }
  return priority;
}
/* The 700 can only issue a single insn at a time.
   The 7XXX processors can issue two insns at a time.
   The 8000 can issue 4 insns at a time.  */
static int
pa_issue_rate (void)
{
  switch (pa_cpu)
    {
    case PROCESSOR_700:		return 1;
    case PROCESSOR_7100:	return 2;
    case PROCESSOR_7100LC:	return 2;
    case PROCESSOR_7200:	return 2;
    case PROCESSOR_7300:	return 2;
    case PROCESSOR_8000:	return 4;

    default:
      gcc_unreachable ();
    }
}
/* Return any length plus adjustment needed by INSN which already has
   its length computed as LENGTH.   Return LENGTH if no adjustment is
   necessary.

   Also compute the length of an inline block move here as it is too
   complicated to express as a length attribute in pa.md.  */
int
pa_adjust_insn_length (rtx_insn *insn, int length)
{
  rtx pat = PATTERN (insn);

  /* If length is negative or undefined, provide initial length.  */
  if ((unsigned int) length >= INT_MAX)
    {
      if (GET_CODE (pat) == SEQUENCE)
	insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));

      switch (get_attr_type (insn))
	{
	case TYPE_MILLI:
	  length = pa_attr_length_millicode_call (insn);
	  break;
	case TYPE_CALL:
	  length = pa_attr_length_call (insn, 0);
	  break;
	case TYPE_SIBCALL:
	  length = pa_attr_length_call (insn, 1);
	  break;
	case TYPE_DYNCALL:
	  length = pa_attr_length_indirect_call (insn);
	  break;
	case TYPE_SH_FUNC_ADRS:
	  length = pa_attr_length_millicode_call (insn) + 20;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Block move pattern.  */
  if (NONJUMP_INSN_P (insn)
      && GET_CODE (pat) == PARALLEL
      && GET_CODE (XVECEXP (pat, 0, 0)) == SET
      && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
      && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
      && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
      && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
    length += compute_movmem_length (insn) - 4;
  /* Block clear pattern.  */
  else if (NONJUMP_INSN_P (insn)
	   && GET_CODE (pat) == PARALLEL
	   && GET_CODE (XVECEXP (pat, 0, 0)) == SET
	   && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
	   && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
	   && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
    length += compute_clrmem_length (insn) - 4;
  /* Conditional branch with an unfilled delay slot.  */
  else if (JUMP_P (insn) && ! simplejump_p (insn))
    {
      /* Adjust a short backwards conditional with an unfilled delay slot.  */
      if (GET_CODE (pat) == SET
	  && length == 4
	  && JUMP_LABEL (insn) != NULL_RTX
	  && ! forward_branch_p (insn))
	length += 4;
      else if (GET_CODE (pat) == PARALLEL
	       && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
	       && length == 4)
	length += 4;
      /* Adjust dbra insn with short backwards conditional branch with
	 unfilled delay slot -- only for case where counter is in a
	 general register.  */
      else if (GET_CODE (pat) == PARALLEL
	       && GET_CODE (XVECEXP (pat, 0, 1)) == SET
	       && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
	       && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
	       && length == 4
	       && ! forward_branch_p (insn))
	length += 4;
    }
  return length;
}
/* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook.  */

static bool
pa_print_operand_punct_valid_p (unsigned char code)
{
  if (code == '@'
      || code == '#'
      || code == '*'
      || code == '^')
    return true;

  return false;
}
/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
pa_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output a 'nop' if there's nothing for the delay slot.  */
      if (dbr_sequence_length () == 0)
	fputs ("\n\tnop", file);
      return;
    case '*':
      /* Output a nullification completer if there's nothing for the */
      /* delay slot or nullification is requested.  */
      if (dbr_sequence_length () == 0 ||
	  (final_sequence &&
	   INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
	fputs (",n", file);
      return;
    case 'R':
      /* Print out the second register name of a register pair.
	 I.e., R (6) => 7.  */
      fputs (reg_names[REGNO (x) + 1], file);
      return;
    case 'r':
      /* A register or zero.  */
      if (x == const0_rtx
	  || (x == CONST0_RTX (DFmode))
	  || (x == CONST0_RTX (SFmode)))
	{
	  fputs ("%r0", file);
	  return;
	}
      else
	break;
    case 'f':
      /* A register or zero (floating point).  */
      if (x == const0_rtx
	  || (x == CONST0_RTX (DFmode))
	  || (x == CONST0_RTX (SFmode)))
	{
	  fputs ("%fr0", file);
	  return;
	}
      else
	break;
    case 'A':
      {
	rtx xoperands[2];

	xoperands[0] = XEXP (XEXP (x, 0), 0);
	xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
	pa_output_global_address (file, xoperands[1], 0);
	fprintf (file, "(%s)", reg_names[REGNO (xoperands[0])]);
	return;
      }

    case 'C':			/* Plain (C)ondition */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("=", file);  break;
	case NE:
	  fputs ("<>", file);  break;
	case GT:
	  fputs (">", file);  break;
	case GE:
	  fputs (">=", file);  break;
	case GEU:
	  fputs (">>=", file);  break;
	case GTU:
	  fputs (">>", file);  break;
	case LT:
	  fputs ("<", file);  break;
	case LE:
	  fputs ("<=", file);  break;
	case LEU:
	  fputs ("<<=", file);  break;
	case LTU:
	  fputs ("<<", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'N':			/* Condition, (N)egated */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("<>", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs ("<=", file);  break;
	case GE:
	  fputs ("<", file);  break;
	case GEU:
	  fputs ("<<", file);  break;
	case GTU:
	  fputs ("<<=", file);  break;
	case LT:
	  fputs (">=", file);  break;
	case LE:
	  fputs (">", file);  break;
	case LEU:
	  fputs (">>", file);  break;
	case LTU:
	  fputs (">>=", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    /* For floating point comparisons.  Note that the output
       predicates are the complement of the desired mode.  The
       conditions for GT, GE, LT, LE and LTGT cause an invalid
       operation exception if the result is unordered and this
       exception is enabled in the floating-point status register.  */
    case 'Y':
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("!=", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs ("!>", file);  break;
	case GE:
	  fputs ("!>=", file);  break;
	case LT:
	  fputs ("!<", file);  break;
	case LE:
	  fputs ("!<=", file);  break;
	case LTGT:
	  fputs ("!<>", file);  break;
	case UNLE:
	  fputs ("!?<=", file);  break;
	case UNLT:
	  fputs ("!?<", file);  break;
	case UNGE:
	  fputs ("!?>=", file);  break;
	case UNGT:
	  fputs ("!?>", file);  break;
	case UNEQ:
	  fputs ("!?=", file);  break;
	case UNORDERED:
	  fputs ("!?", file);  break;
	case ORDERED:
	  fputs ("?", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'S':			/* Condition, operands are (S)wapped.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("=", file);  break;
	case NE:
	  fputs ("<>", file);  break;
	case GT:
	  fputs ("<", file);  break;
	case GE:
	  fputs ("<=", file);  break;
	case GEU:
	  fputs ("<<=", file);  break;
	case GTU:
	  fputs ("<<", file);  break;
	case LT:
	  fputs (">", file);  break;
	case LE:
	  fputs (">=", file);  break;
	case LEU:
	  fputs (">>=", file);  break;
	case LTU:
	  fputs (">>", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'B':			/* Condition, (B)oth swapped and negate.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("<>", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs (">=", file);  break;
	case GE:
	  fputs (">", file);  break;
	case GEU:
	  fputs (">>", file);  break;
	case GTU:
	  fputs (">>=", file);  break;
	case LT:
	  fputs ("<=", file);  break;
	case LE:
	  fputs ("<", file);  break;
	case LEU:
	  fputs ("<<", file);  break;
	case LTU:
	  fputs ("<<=", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'k':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
      return;
    case 'Q':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
      return;
    case 'L':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
      return;
    case 'O':
      gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      return;
    case 'p':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
      return;
    case 'P':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
      return;
    case 'I':
      if (GET_CODE (x) == CONST_INT)
	fputs ("i", file);
      return;
    case 'M':
    case 'F':
      switch (GET_CODE (XEXP (x, 0)))
	{
	case PRE_DEC:
	case PRE_INC:
	  if (ASSEMBLER_DIALECT == 0)
	    fputs ("s,mb", file);
	  else
	    fputs (",mb", file);
	  break;
	case POST_DEC:
	case POST_INC:
	  if (ASSEMBLER_DIALECT == 0)
	    fputs ("s,ma", file);
	  else
	    fputs (",ma", file);
	  break;
	case PLUS:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		fputs ("x", file);
	    }
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
		   || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		fputs ("x,s", file);
	      else
		fputs (",s", file);
	    }
	  else if (code == 'F' && ASSEMBLER_DIALECT == 0)
	    fputs ("s", file);
	  break;
	default:
	  if (code == 'F' && ASSEMBLER_DIALECT == 0)
	    fputs ("s", file);
	  break;
	}
      return;
    case 'G':
      pa_output_global_address (file, x, 0);
      return;
    case 'H':
      pa_output_global_address (file, x, 1);
      return;
    case 0:			/* Don't do anything special */
      break;
    case 'Z':
      {
	unsigned op[3];
	compute_zdepwi_operands (INTVAL (x), op);
	fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
	return;
      }
    case 'z':
      {
	unsigned op[3];
	compute_zdepdi_operands (INTVAL (x), op);
	fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
	return;
      }
    case 'c':
      /* We can get here from a .vtable_inherit due to our
	 CONSTANT_ADDRESS_P rejecting perfectly good constant
	 addresses.  */
      break;
    default:
      gcc_unreachable ();
    }
  if (GET_CODE (x) == REG)
    {
      fputs (reg_names[REGNO (x)], file);
      if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
	{
	  fputs ("R", file);
	  return;
	}
      if (FP_REG_P (x)
	  && GET_MODE_SIZE (GET_MODE (x)) <= 4
	  && (REGNO (x) & 1) == 0)
	fputs ("L", file);
    }
  else if (GET_CODE (x) == MEM)
    {
      int size = GET_MODE_SIZE (GET_MODE (x));
      rtx base = NULL_RTX;
      switch (GET_CODE (XEXP (x, 0)))
	{
	case PRE_DEC:
	case POST_DEC:
	  base = XEXP (XEXP (x, 0), 0);
	  fprintf (file, "-%d(%s)", size, reg_names[REGNO (base)]);
	  break;
	case PRE_INC:
	case POST_INC:
	  base = XEXP (XEXP (x, 0), 0);
	  fprintf (file, "%d(%s)", size, reg_names[REGNO (base)]);
	  break;
	case PLUS:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
	    fprintf (file, "%s(%s)",
		     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
		     reg_names[REGNO (XEXP (XEXP (x, 0), 1))]);
	  else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
	    fprintf (file, "%s(%s)",
		     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
	    {
	      /* Because the REG_POINTER flag can get lost during reload,
		 pa_legitimate_address_p canonicalizes the order of the
		 index and base registers in the combined move patterns.  */
	      rtx base = XEXP (XEXP (x, 0), 1);
	      rtx index = XEXP (XEXP (x, 0), 0);

	      fprintf (file, "%s(%s)",
		       reg_names[REGNO (index)], reg_names[REGNO (base)]);
	    }
	  else
	    output_address (XEXP (x, 0));
	  break;
	default:
	  output_address (XEXP (x, 0));
	  break;
	}
    }
  else
    output_addr_const (file, x);
}
/* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */

void
pa_output_global_address (FILE *file, rtx x, int round_constant)
{

  /* Imagine  (high (const (plus ...))).  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
    output_addr_const (file, x);
  else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
    {
      output_addr_const (file, x);
      fputs ("-$global$", file);
    }
  else if (GET_CODE (x) == CONST)
    {
      const char *sep = "";
      int offset = 0;		/* assembler wants -$global$ at end */
      rtx base = NULL_RTX;

      switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
	{
	case SYMBOL_REF:
	  base = XEXP (XEXP (x, 0), 0);
	  output_addr_const (file, base);
	  break;
	case CONST_INT:
	  offset = INTVAL (XEXP (XEXP (x, 0), 0));
	  break;
	default:
	  gcc_unreachable ();
	}

      switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
	{
	case SYMBOL_REF:
	  base = XEXP (XEXP (x, 0), 1);
	  output_addr_const (file, base);
	  break;
	case CONST_INT:
	  offset = INTVAL (XEXP (XEXP (x, 0), 1));
	  break;
	default:
	  gcc_unreachable ();
	}

      /* How bogus.  The compiler is apparently responsible for
	 rounding the constant if it uses an LR field selector.

	 The linker and/or assembler seem a better place since
	 they have to do this kind of thing already.

	 If we fail to do this, HP's optimizing linker may eliminate
	 an addil, but not update the ldw/stw/ldo instruction that
	 uses the result of the addil.  */
      if (round_constant)
	offset = ((offset + 0x1000) & ~0x1fff);

      switch (GET_CODE (XEXP (x, 0)))
	{
	case PLUS:
	  if (offset < 0)
	    {
	      offset = -offset;
	      sep = "-";
	    }
	  else
	    sep = "+";
	  break;

	case MINUS:
	  gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
	  sep = "-";
	  break;

	default:
	  gcc_unreachable ();
	}

      if (!read_only_operand (base, VOIDmode) && !flag_pic)
	fputs ("-$global$", file);
      if (offset)
	fprintf (file, "%s%d", sep, offset);
    }
  else
    output_addr_const (file, x);
}
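
/* Example output (illustrative, not from the original comments): for the
   address of a writable symbol "foo" plus 8 in non-PIC code this prints
   "foo-$global$+8"; $global$ is the linker-defined base kept in the
   global data pointer at run time.  */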
/* Output boilerplate text to appear at the beginning of the file.
   There are several possible versions.  */
#define aputs(x) fputs(x, asm_out_file)

static void
pa_file_start_level (void)
{
  if (TARGET_64BIT)
    aputs ("\t.LEVEL 2.0w\n");
  else if (TARGET_PA_20)
    aputs ("\t.LEVEL 2.0\n");
  else if (TARGET_PA_11)
    aputs ("\t.LEVEL 1.1\n");
  else
    aputs ("\t.LEVEL 1.0\n");
}

static void
pa_file_start_space (int sortspace)
{
  aputs ("\t.SPACE $PRIVATE$");
  if (sortspace)
    aputs (",SORT=16");
  aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
  if (flag_tm)
    aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
  aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
	 "\n\t.SPACE $TEXT$");
  if (sortspace)
    aputs (",SORT=8");
  aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
	 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
}

static inline void
pa_file_start_file (int want_version)
{
  if (write_symbols != NO_DEBUG)
    {
      output_file_directive (asm_out_file, main_input_filename);
      if (want_version)
	aputs ("\t.version\t\"01.01\"\n");
    }
}

static inline void
pa_file_start_mcount (const char *aswhat)
{
  if (profile_flag)
    fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
}

static void
pa_elf_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_mcount ("ENTRY");
  pa_file_start_file (0);
}

static void
pa_som_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (0);
  aputs ("\t.IMPORT $global$,DATA\n"
	 "\t.IMPORT $$dyncall,MILLICODE\n");
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}

static void
pa_linux_file_start (void)
{
  pa_file_start_file (1);
  pa_file_start_level ();
  pa_file_start_mcount ("CODE");
}

static void
pa_hpux64_gas_file_start (void)
{
  pa_file_start_level ();
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  if (profile_flag)
    ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
#endif
  pa_file_start_file (1);
}

static void
pa_hpux64_hpas_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (1);
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}
#undef aputs
/* Search the deferred plabel list for SYMBOL and return its internal
   label.  If an entry for SYMBOL is not found, a new entry is created.  */

rtx
pa_get_deferred_plabel (rtx symbol)
{
  const char *fname = XSTR (symbol, 0);
  size_t i;

  /* See if we have already put this function on the list of deferred
     plabels.  This list is generally small, so a linear search is not
     too ugly.  If it proves too slow replace it with something faster.  */
  for (i = 0; i < n_deferred_plabels; i++)
    if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
      break;

  /* If the deferred plabel list is empty, or this entry was not found
     on the list, create a new entry on the list.  */
  if (deferred_plabels == NULL || i == n_deferred_plabels)
    {
      tree id;

      if (deferred_plabels == 0)
	deferred_plabels = ggc_alloc<deferred_plabel> ();
      else
	deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
					  deferred_plabels,
					  n_deferred_plabels + 1);

      i = n_deferred_plabels++;
      deferred_plabels[i].internal_label = gen_label_rtx ();
      deferred_plabels[i].symbol = symbol;

      /* Gross.  We have just implicitly taken the address of this
	 function.  Mark it in the same manner as assemble_name.  */
      id = maybe_get_identifier (targetm.strip_name_encoding (fname));
      if (id)
	mark_referenced (id);
    }

  return deferred_plabels[i].internal_label;
}
static void
output_deferred_plabels (void)
{
  size_t i;

  /* If we have some deferred plabels, then we need to switch into the
     data or readonly data section, and align it to a 4 byte boundary
     before outputting the deferred plabels.  */
  if (n_deferred_plabels)
    {
      switch_to_section (flag_pic ? data_section : readonly_data_section);
      ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
    }

  /* Now output the deferred plabels.  */
  for (i = 0; i < n_deferred_plabels; i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "L",
		CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
      assemble_integer (deferred_plabels[i].symbol,
			TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
    }
}
/* Initialize optabs to point to emulation routines.  */

static void
pa_init_libfuncs (void)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
      set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
      set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
      set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
      set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
      set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
      set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
      set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
      set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

      set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
      set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
      set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
      set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
      set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
      set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
      set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");

      set_conv_libfunc (sfix_optab, SImode, TFmode,
			TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
				     : "_U_Qfcnvfxt_quad_to_sgl");
      set_conv_libfunc (sfix_optab, DImode, TFmode,
			"_U_Qfcnvfxt_quad_to_dbl");
      set_conv_libfunc (ufix_optab, SImode, TFmode,
			"_U_Qfcnvfxt_quad_to_usgl");
      set_conv_libfunc (ufix_optab, DImode, TFmode,
			"_U_Qfcnvfxt_quad_to_udbl");

      set_conv_libfunc (sfloat_optab, TFmode, SImode,
			"_U_Qfcnvxf_sgl_to_quad");
      set_conv_libfunc (sfloat_optab, TFmode, DImode,
			"_U_Qfcnvxf_dbl_to_quad");
      set_conv_libfunc (ufloat_optab, TFmode, SImode,
			"_U_Qfcnvxf_usgl_to_quad");
      set_conv_libfunc (ufloat_optab, TFmode, DImode,
			"_U_Qfcnvxf_udbl_to_quad");
    }

  if (TARGET_SYNC_LIBCALL)
    init_sync_libfuncs (UNITS_PER_WORD);
}
/* HP's millicode routines mean something special to the assembler.
   Keep track of which ones we have used.  */

enum millicodes { remI, remU, divI, divU, mulI, end1000 };
static void import_milli (enum millicodes);
static char imported[(int) end1000];
static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
static const char import_string[] = ".IMPORT $$....,MILLICODE";
#define MILLI_START 10

static void
import_milli (enum millicodes code)
{
  char str[sizeof (import_string)];

  if (!imported[(int) code])
    {
      imported[(int) code] = 1;
      strcpy (str, import_string);
      strncpy (str + MILLI_START, milli_names[(int) code], 4);
      output_asm_insn (str, 0);
    }
}
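
/* Illustrative walk-through (not from the original comments): MILLI_START
   is the offset of the "...." placeholder in import_string, and every
   millicode name is four characters, so import_milli (mulI) patches the
   template into

	.IMPORT $$mulI,MILLICODE

   which is emitted at most once per millicode routine per file.  */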
/* The register constraints have put the operands and return value in
   the proper registers.  */

const char *
pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
{
  import_milli (mulI);
  return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}
/* Emit the rtl for doing a division by a constant.  */

/* Do magic division millicodes exist for this value? */
const int pa_magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};

/* We'll use an array to keep track of the magic millicodes and
   whether or not we've used them already. [n][0] is signed, [n][1] is
   unsigned.  */

static int div_milli[16][2];
int
pa_emit_hpdiv_const (rtx *operands, int unsignedp)
{
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) > 0
      && INTVAL (operands[2]) < 16
      && pa_magic_milli[INTVAL (operands[2])])
    {
      rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);

      emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
      emit_insn
	(gen_rtx_PARALLEL
	 (VOIDmode,
	  gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
				     gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
						     SImode,
						     gen_rtx_REG (SImode, 26),
						     operands[2])),
		     gen_rtx_CLOBBER (VOIDmode, operands[4]),
		     gen_rtx_CLOBBER (VOIDmode, operands[3]),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
		     gen_rtx_CLOBBER (VOIDmode, ret))));
      emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
      return 1;
    }
  return 0;
}
const char *
pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
{
  int divisor;

  /* If the divisor is a constant, try to use one of the special
     millicode routines.  */
  if (GET_CODE (operands[0]) == CONST_INT)
    {
      static char buf[100];
      divisor = INTVAL (operands[0]);
      if (!div_milli[divisor][unsignedp])
	{
	  div_milli[divisor][unsignedp] = 1;
	  if (unsignedp)
	    output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
	  else
	    output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
	}
      if (unsignedp)
	{
	  sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
      else
	{
	  sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
    }
  /* Divisor isn't a special constant.  */
  else
    {
      if (unsignedp)
	{
	  import_milli (divU);
	  return pa_output_millicode_call (insn,
					gen_rtx_SYMBOL_REF (SImode, "$$divU"));
	}
      else
	{
	  import_milli (divI);
	  return pa_output_millicode_call (insn,
					gen_rtx_SYMBOL_REF (SImode, "$$divI"));
	}
    }
}
/* Output a $$rem millicode to do mod.  */

const char *
pa_output_mod_insn (int unsignedp, rtx_insn *insn)
{
  if (unsignedp)
    {
      import_milli (remU);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remU"));
    }
  else
    {
      import_milli (remI);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remI"));
    }
}
void
pa_output_arg_descriptor (rtx_insn *call_insn)
{
  const char *arg_regs[4];
  machine_mode arg_mode;
  rtx link;
  int i, output_flag = 0;
  int regno;

  /* We neither need nor want argument location descriptors for the
     64bit runtime environment or the ELF32 environment.  */
  if (TARGET_64BIT || TARGET_ELF32)
    return;

  for (i = 0; i < 4; i++)
    arg_regs[i] = 0;

  /* Specify explicitly that no argument relocations should take place
     if using the portable runtime calling conventions.  */
  if (TARGET_PORTABLE_RUNTIME)
    {
      fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
	     asm_out_file);
      return;
    }

  gcc_assert (CALL_P (call_insn));
  for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
       link; link = XEXP (link, 1))
    {
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));
      if (regno >= 23 && regno <= 26)
	{
	  arg_regs[26 - regno] = "GR";
	  if (arg_mode == DImode)
	    arg_regs[25 - regno] = "GR";
	}
      else if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    arg_regs[(regno - 32) / 2] = "FR";
	  else
	    {
#ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
	      arg_regs[(regno - 34) / 2] = "FR";
	      arg_regs[(regno - 34) / 2 + 1] = "FU";
#else
	      arg_regs[(regno - 34) / 2] = "FU";
	      arg_regs[(regno - 34) / 2 + 1] = "FR";
#endif
	    }
	}
    }
  fputs ("\t.CALL ", asm_out_file);
  for (i = 0; i < 4; i++)
    {
      if (arg_regs[i])
	{
	  if (output_flag++)
	    fputc (',', asm_out_file);
	  fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
	}
    }
  fputc ('\n', asm_out_file);
}
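
/* Example (illustrative, not from the original comments): for a call whose
   first argument word is in %r26 and whose double occupies the next pair
   of argument words in an FP register, the loop above would print roughly

	.CALL ARGW0=GR,ARGW2=FR,ARGW3=FU

   telling the SOM linker how to relocate arguments if the caller and
   callee disagree about argument locations.  */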
/* Inform reload about cases where moving X with a mode MODE to or from
   a register in RCLASS requires an extra scratch or immediate register.
   Return the class needed for the immediate register.  */

static reg_class_t
pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
		     machine_mode mode, secondary_reload_info *sri)
{
  int regno;
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Handle the easy stuff first.  */
  if (rclass == R1_REGS)
    return NO_REGS;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
	return NO_REGS;
    }
  else
    regno = -1;

  /* If we have something like (mem (mem (...)), we can safely assume the
     inner MEM will end up in a general register after reloading, so there's
     no need for a secondary reload.  */
  if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
    return NO_REGS;

  /* Trying to load a constant into a FP register during PIC code
     generation requires %r1 as a scratch register.  For float modes,
     the only legitimate constant is CONST0_RTX.  However, there are
     a few patterns that accept constant double operands.  */
  if (flag_pic
      && FP_REG_CLASS_P (rclass)
      && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      switch (mode)
	{
	case SImode:
	  sri->icode = CODE_FOR_reload_insi_r1;
	  break;

	case DImode:
	  sri->icode = CODE_FOR_reload_indi_r1;
	  break;

	case SFmode:
	  sri->icode = CODE_FOR_reload_insf_r1;
	  break;

	case DFmode:
	  sri->icode = CODE_FOR_reload_indf_r1;
	  break;

	default:
	  gcc_unreachable ();
	}
      return NO_REGS;
    }

  /* Secondary reloads of symbolic expressions require %r1 as a scratch
     register when we're generating PIC code or when the operand isn't
     readonly.  */
  if (pa_symbolic_expression_p (x))
    {
      if (GET_CODE (x) == HIGH)
	x = XEXP (x, 0);

      if (flag_pic || !read_only_operand (x, VOIDmode))
	{
	  switch (mode)
	    {
	    case SImode:
	      sri->icode = CODE_FOR_reload_insi_r1;
	      break;

	    case DImode:
	      sri->icode = CODE_FOR_reload_indi_r1;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  return NO_REGS;
	}
    }

  /* Profiling showed the PA port spends about 1.3% of its compilation
     time in true_regnum from calls inside pa_secondary_reload_class.  */
  if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* Handle reloads for floating point loads and stores.  */
  if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
      && FP_REG_CLASS_P (rclass))
    {
      if (MEM_P (x))
	{
	  /* We don't need a secondary reload for indexed memory addresses.

	     When INT14_OK_STRICT is true, it might appear that we could
	     directly allow register indirect memory addresses.  However,
	     this doesn't work because we don't support SUBREGs in
	     floating-point register copies and reload doesn't tell us
	     when it's going to use a SUBREG.  */
	  if (IS_INDEX_ADDR_P (x))
	    return NO_REGS;

	  /* Request a secondary reload with a general scratch register
	     for everything else.  ??? Could symbolic operands be handled
	     directly when generating non-pic PA 2.0 code?  */
	  sri->icode = (in_p
			? direct_optab_handler (reload_in_optab, mode)
			: direct_optab_handler (reload_out_optab, mode));
	}
      return NO_REGS;
    }

  /* A SAR<->FP register copy requires an intermediate general register
     and secondary memory.  We need a secondary reload with a general
     scratch register for spills.  */
  if (rclass == SHIFT_REGS)
    {
      /* Handle spill.  */
      if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
	{
	  sri->icode = (in_p
			? direct_optab_handler (reload_in_optab, mode)
			: direct_optab_handler (reload_out_optab, mode));
	  return NO_REGS;
	}

      /* Handle FP copy.  */
      if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
	return GENERAL_REGS;
    }

  if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
      && REGNO_REG_CLASS (regno) == SHIFT_REGS
      && FP_REG_CLASS_P (rclass))
    return GENERAL_REGS;

  return NO_REGS;
}
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  The argument pointer
   is only marked as live on entry by df-scan when it is a fixed
   register.  It isn't a fixed register in the 64-bit runtime,
   so we need to mark it here.  */

static void
pa_extra_live_on_entry (bitmap regs)
{
  if (TARGET_64BIT)
    bitmap_set_bit (regs, ARG_POINTER_REGNUM);
}
/* Implement EH_RETURN_HANDLER_RTX.  The MEM needs to be volatile
   to prevent it from being deleted.  */

rtx
pa_eh_return_handler_rtx (void)
{
  rtx tmp;

  tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
		      TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
  tmp = gen_rtx_MEM (word_mode, tmp);
  tmp->volatil = 1;
  return tmp;
}
/* In the 32-bit runtime, arguments larger than eight bytes are passed
   by invisible reference.  As a GCC extension, we also pass anything
   with a zero or variable size by reference.

   The 64-bit runtime does not describe passing any types by invisible
   reference.  The internals of GCC can't currently handle passing
   empty structures, and zero or variable length arrays when they are
   not passed entirely on the stack or by reference.  Thus, as a GCC
   extension, we pass these types by reference.  The HP compiler doesn't
   support these types, so hopefully there shouldn't be any compatibility
   issues.  This may have to be revisited when HP releases a C99 compiler
   or updates the ABI.  */

static bool
pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
		      machine_mode mode, const_tree type,
		      bool named ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  if (TARGET_64BIT)
    return size <= 0;
  else
    return size <= 0 || size > 8;
}
enum direction
pa_function_arg_padding (machine_mode mode, const_tree type)
{
  if (mode == BLKmode
      || (TARGET_64BIT
	  && type
	  && (AGGREGATE_TYPE_P (type)
	      || TREE_CODE (type) == COMPLEX_TYPE
	      || TREE_CODE (type) == VECTOR_TYPE)))
    {
      /* Return none if justification is not required.  */
      if (type
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
	return none;

      /* The directions set here are ignored when a BLKmode argument larger
	 than a word is placed in a register.  Different code is used for
	 the stack and registers.  This makes it difficult to have a
	 consistent data representation for both the stack and registers.
	 For both runtimes, the justification and padding for arguments on
	 the stack and in registers should be identical.  */
      if (TARGET_64BIT)
	/* The 64-bit runtime specifies left justification for aggregates.  */
	return upward;
      else
	/* The 32-bit runtime architecture specifies right justification.
	   When the argument is passed on the stack, the argument is padded
	   with garbage on the left.  The HP compiler pads with zeros.  */
	return downward;
    }

  if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
    return downward;
  else
    return none;
}
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdargs or varargs is used and fill in an initial
   va_list.  A pointer to this constructor is returned.  */

static rtx
hppa_builtin_saveregs (void)
{
  rtx offset, dest;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  if (TARGET_64BIT)
    {
      int i, off;

      /* Adjust for varargs/stdarg differences.  */
      if (argadj)
	offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
      else
	offset = crtl->args.arg_offset_rtx;

      /* We need to save %r26 .. %r19 inclusive starting at offset -64
	 from the incoming arg pointer and growing to larger addresses.  */
      for (i = 26, off = -64; i >= 19; i--, off += 8)
	emit_move_insn (gen_rtx_MEM (word_mode,
				     plus_constant (Pmode,
						    arg_pointer_rtx, off)),
			gen_rtx_REG (word_mode, i));

      /* The incoming args pointer points just beyond the flushback area;
	 normally this is not a serious concern.  However, when we are doing
	 varargs/stdargs we want to make the arg pointer point to the start
	 of the incoming argument area.  */
      emit_move_insn (virtual_incoming_args_rtx,
		      plus_constant (Pmode, arg_pointer_rtx, -64));

      /* Now return a pointer to the first anonymous argument.  */
      return copy_to_reg (expand_binop (Pmode, add_optab,
					virtual_incoming_args_rtx,
					offset, 0, 0, OPTAB_LIB_WIDEN));
    }

  /* Store general registers on the stack.  */
  dest = gen_rtx_MEM (BLKmode,
		      plus_constant (Pmode, crtl->args.internal_arg_pointer,
				     -16));
  set_mem_alias_set (dest, get_varargs_alias_set ());
  set_mem_align (dest, BITS_PER_WORD);
  move_block_from_reg (23, dest, 4);

  /* move_block_from_reg will emit code to store the argument registers
     individually as scalar stores.

     However, other insns may later load from the same addresses for
     a structure load (passing a struct to a varargs routine).

     The alias code assumes that such aliasing can never happen, so we
     have to keep memory referencing insns from moving up beyond the
     last argument register store.  So we emit a blockage insn here.  */
  emit_insn (gen_blockage ());

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
static void
hppa_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
static tree
hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			   gimple_seq *post_p)
{
  if (TARGET_64BIT)
    {
      /* Args grow upward.  We can use the generic routines.  */
      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }
  else /* !TARGET_64BIT */
    {
      tree ptr = build_pointer_type (type);
      tree valist_type;
      tree t, u;
      unsigned int size, ofs;
      bool indirect;

      indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
      if (indirect)
	{
	  type = ptr;
	  ptr = build_pointer_type (type);
	}
      size = int_size_in_bytes (type);
      valist_type = TREE_TYPE (valist);

      /* Args grow down.  Not handled by generic routines.  */

      u = fold_convert (sizetype, size_in_bytes (type));
      u = fold_build1 (NEGATE_EXPR, sizetype, u);
      t = fold_build_pointer_plus (valist, u);

      /* Align to 4 or 8 byte boundary depending on argument size.  */

      u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
      t = fold_convert (valist_type, t);

      t = build2 (MODIFY_EXPR, valist_type, valist, t);

      ofs = (8 - size) % 4;
      if (ofs != 0)
	t = fold_build_pointer_plus_hwi (t, ofs);

      t = fold_convert (ptr, t);
      t = build_va_arg_indirect_ref (t);

      if (indirect)
	t = build_va_arg_indirect_ref (t);

      return t;
    }
}
/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   Currently, TImode is not valid as the HP 64-bit runtime documentation
   doesn't document the alignment and calling conventions for this type.
   Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
   2 * BITS_PER_WORD isn't equal LONG_LONG_TYPE_SIZE.  */

static bool
pa_scalar_mode_supported_p (machine_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
	return true;
      if (precision == DOUBLE_TYPE_SIZE)
	return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
   it branches into the delay slot.  Otherwise, return FALSE.  */

static bool
branch_to_delay_slot_p (rtx_insn *insn)
{
  rtx_insn *jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (jump_insn == insn)
	return TRUE;

      /* We can't rely on the length of asms.  So, we return FALSE when
	 the branch is followed by an asm.  */
      if (!insn
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || extract_asm_operands (PATTERN (insn)) != NULL_RTX
	  || get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.

   This occurs when INSN has an unfilled delay slot and is followed
   by an asm.  Disaster can occur if the asm is empty and the jump
   branches into the delay slot.  So, we add a nop in the delay slot
   when this occurs.  */

static bool
branch_needs_nop_p (rtx_insn *insn)
{
  rtx_insn *jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (!insn || jump_insn == insn)
	return TRUE;

      if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
	  && get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, can use nullification
   to skip the following instruction.  This avoids an extra cycle due
   to a mis-predicted branch when we fall through.  */

static bool
use_skip_p (rtx_insn *insn)
{
  rtx_insn *jump_insn = next_active_insn (JUMP_LABEL (insn));

  while (insn)
    {
      insn = next_active_insn (insn);

      /* We can't rely on the length of asms, so we can't skip asms.  */
      if (!insn
          || GET_CODE (PATTERN (insn)) == ASM_INPUT
          || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
        break;
      if (get_attr_length (insn) == 4
          && jump_insn == next_active_insn (insn))
        return TRUE;
      if (get_attr_length (insn) > 0)
        break;
    }

  return FALSE;
}
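/* Sketch of the transformation this enables (illustrative): a forward
   branch over exactly one 4-byte instruction,

        cmpb,<>,n %r4,%r5,L     ; branch over the next insn
        add %r6,%r7,%r8
     L:

   can instead be emitted as a nullifying compare-and-clear,

        cmpclr,<> %r4,%r5,%r0   ; nullify the next insn if %r4 != %r5
        add %r6,%r7,%r8

   which avoids the branch entirely and thus any misprediction penalty.  */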
/* This routine handles all the normal conditional branch sequences we
   might need to generate.  It handles compare immediate vs compare
   register, nullification of delay slots, varying length branches,
   negated branches, and all combinations of the above.  It returns the
   output appropriate to emit the branch corresponding to all given
   parameters.  */

const char *
pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot)
     is asking for a disaster.  This can happen when not optimizing and
     when jump optimization fails.

     While it is usually safe to emit nothing, this can fail if the
     preceding instruction is a nullified branch with an empty delay
     slot and the same branch target as this branch.  We could check
     for this but jump optimization should eliminate nop jumps.  It
     is always safe to emit a nop.  */
  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* The doubleword form of the cmpib instruction doesn't have the LEU
     and GTU conditions while the cmpb instruction does.  Since we accept
     zero for cmpb, we must ensure that we use cmpb for the comparison.  */
  if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
    operands[2] = gen_rtx_REG (DImode, 0);
  if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
    operands[1] = gen_rtx_REG (DImode, 0);

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     comclr instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
        else
          strcpy (buf, "{com%I2b,|cmp%I2b,}");
        if (GET_MODE (operands[1]) == DImode)
          strcat (buf, "*");
        if (negated)
          strcat (buf, "%B3");
        else
          strcat (buf, "%S3");
        if (useskip)
          strcat (buf, " %2,%r1,%%r0");
        else if (nullify)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %2,%r1,%0%#");
            else
              strcat (buf, ",n %2,%r1,%0");
          }
        else
          strcat (buf, " %2,%r1,%0");
        break;

     /* All long conditionals.  Note a short backward branch with an
        unfilled delay slot is treated just like a long backward branch
        with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "{com%I2b,|cmp%I2b,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%S3");
            else
              strcat (buf, "%B3");
            strcat (buf, ",n %2,%r1,.+12\n\tb %0");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a comb;nop rather than comiclr;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "{com%I2b,|cmp%I2b,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%B3 %2,%r1,%0%#");
            else
              strcat (buf, "%S3 %2,%r1,%0%#");
          }
        else
          {
            strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%S3");
            else
              strcat (buf, "%B3");
            if (nullify)
              strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
            else
              strcat (buf, " %2,%r1,%%r0\n\tb %0");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        /* Create a reversed conditional branch which branches around
           the following insns.  */
        if (GET_MODE (operands[1]) != DImode)
          {
            if (nullify)
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
              }
            else
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
              }
          }
        else
          {
            if (nullify)
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
              }
            else
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
              }
          }

        output_asm_insn (buf, operands);
        return pa_output_lbranch (operands[0], insn, xdelay);
    }
  return buf;
}
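/* A note on the output templates used above and below: the {old|new}
   brace syntax selects between assembler dialects at output time, the
   first alternative being the pre-2.0 mnemonics and the second the
   PA 2.0 ones.  So "{com%I2b,|cmp%I2b,}" becomes "comb,"/"comib," on
   older processors and "cmpb,"/"cmpib," on PA 2.0, with %I2 choosing
   the immediate form when operand 2 is a constant.  */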
/* This routine handles output of long unconditional branches that
   exceed the maximum range of a simple branch instruction.  Since
   we don't have a register available for the branch, we save register
   %r1 in the frame marker, load the branch destination DEST into %r1,
   execute the branch, and restore %r1 in the delay slot of the branch.

   Since long branches may have an insn in the delay slot and the
   delay slot is used to restore %r1, we in general need to extract
   this insn and execute it before the branch.  However, to facilitate
   use of this function by conditional branches, we also provide an
   option to not extract the delay insn so that it will be emitted
   after the long branch.  So, if there is an insn in the delay slot,
   it is extracted if XDELAY is nonzero.

   The lengths of the various long-branch sequences are 20, 16 and 24
   bytes for the portable runtime, non-PIC and PIC cases, respectively.  */

const char *
pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
{
  rtx xoperands[2];

  xoperands[0] = dest;

  /* First, free up the delay slot.  */
  if (xdelay && dbr_sequence_length () != 0)
    {
      /* We can't handle a jump in the delay slot.  */
      gcc_assert (! JUMP_P (NEXT_INSN (insn)));

      final_scan_insn (NEXT_INSN (insn), asm_out_file,
                       optimize, 0, NULL);

      /* Now delete the delay insn.  */
      SET_INSN_DELETED (NEXT_INSN (insn));
    }

  /* Output an insn to save %r1.  The runtime documentation doesn't
     specify whether the "Clean Up" slot in the callers frame can
     be clobbered by the callee.  It isn't copied by HP's builtin
     alloca, so this suggests that it can be clobbered if necessary.
     The "Static Link" location is copied by HP builtin alloca, so
     we avoid using it.  Using the cleanup slot might be a problem
     if we have to interoperate with languages that pass cleanup
     information.  However, it should be possible to handle these
     situations with GCC's asm feature.

     The "Current RP" slot is reserved for the called procedure, so
     we try to use it when we don't have a frame of our own.  It's
     rather unlikely that we won't have a frame when we need to emit
     a very long branch.

     Really the way to go long term is a register scavenger; goto
     the target of the jump and find a register which we can use
     as a scratch to hold the value in %r1.  Then, we wouldn't have
     to free up the delay slot or clobber a slot that may be needed
     for other purposes.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        /* Use the return pointer slot in the frame marker.  */
        output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
      else
        /* Use the slot at -40 in the frame marker since HP builtin
           alloca doesn't copy it.  */
        output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        /* Use the return pointer slot in the frame marker.  */
        output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
      else
        /* Use the "Clean Up" slot in the frame marker.  In GCC,
           the only other use of this location is for copying a
           floating point double argument from a floating-point
           register to two general registers.  The copy is done
           as an "atomic" operation when outputting a call, so it
           won't interfere with our using the location here.  */
        output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
    }

  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
      if (TARGET_SOM || !TARGET_GAS)
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
          output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
        }
      else
        {
          output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
          output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
        }
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else
    /* Now output a very long branch to the original target.  */
    output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);

  /* Now restore the value of %r1 in the delay slot.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        return "ldd -16(%%r30),%%r1";

      return "ldd -40(%%r30),%%r1";
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        return "ldw -20(%%r30),%%r1";

      return "ldw -12(%%r30),%%r1";
    }
}
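/* For example, in 32-bit non-PIC code with a frame, the sequence emitted
   above together with the delay-slot restore returned below is

        stw %r1,-12(%r30)
        ldil L'target,%r1
        be R'target(%sr4,%r1)
        ldw -12(%r30),%r1

   four instructions, matching the 16-byte figure quoted in the header
   comment; the portable runtime and PIC variants add one and two more
   instructions, respectively.  */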
/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
              int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{extrs,|extrw,s,}");
        else
          strcpy (buf, "bb,");
        if (useskip && GET_MODE (operands[0]) == DImode)
          strcpy (buf, "extrd,s,*");
        else if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        if ((which == 0 && negated)
             || (which == 1 && ! negated))
          strcat (buf, ">=");
        else
          strcat (buf, "<");
        if (useskip)
          strcat (buf, " %0,%1,1,%%r0");
        else if (nullify && negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %0,%1,%3%#");
            else
              strcat (buf, ",n %0,%1,%3");
          }
        else if (nullify && ! negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %0,%1,%2%#");
            else
              strcat (buf, ",n %0,%1,%2");
          }
        else if (! nullify && negated)
          strcat (buf, " %0,%1,%3");
        else if (! nullify && ! negated)
          strcat (buf, " %0,%1,%2");
        break;

     /* All long conditionals.  Note a short backward branch with an
        unfilled delay slot is treated just like a long backward branch
        with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "bb,");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (negated)
              strcat (buf, ",n %0,%1,.+12\n\tb %3");
            else
              strcat (buf, ",n %0,%1,.+12\n\tb %2");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a bb;nop rather than extrs;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "bb,");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, ">=");
            else
              strcat (buf, "<");
            if (negated)
              strcat (buf, " %0,%1,%3%#");
            else
              strcat (buf, " %0,%1,%2%#");
          }
        else
          {
            if (GET_MODE (operands[0]) == DImode)
              strcpy (buf, "extrd,s,*");
            else
              strcpy (buf, "{extrs,|extrw,s,}");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (nullify && negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
            else if (nullify && ! negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
            else if (negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb %3");
            else
              strcat (buf, " %0,%1,1,%%r0\n\tb %2");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        else
          strcpy (buf, "bb,");
        if ((which == 0 && negated)
            || (which == 1 && !negated))
          strcat (buf, "<");
        else
          strcat (buf, ">=");
        if (nullify)
          strcat (buf, ",n %0,%1,.+%4");
        else
          strcat (buf, " %0,%1,.+%4");
        output_asm_insn (buf, operands);
        return pa_output_lbranch (negated ? operands[3] : operands[2],
                                  insn, xdelay);
    }
  return buf;
}
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
               int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{vextrs,|extrw,s,}");
        else
          strcpy (buf, "{bvb,|bb,}");
        if (useskip && GET_MODE (operands[0]) == DImode)
          strcpy (buf, "extrd,s,*");
        else if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        if ((which == 0 && negated)
             || (which == 1 && ! negated))
          strcat (buf, ">=");
        else
          strcat (buf, "<");
        if (useskip)
          strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
        else if (nullify && negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
            else
              strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
          }
        else if (nullify && ! negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
            else
              strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
          }
        else if (! nullify && negated)
          strcat (buf, "{ %0,%3| %0,%%sar,%3}");
        else if (! nullify && ! negated)
          strcat (buf, "{ %0,%2| %0,%%sar,%2}");
        break;

     /* All long conditionals.  Note a short backward branch with an
        unfilled delay slot is treated just like a long backward branch
        with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "{bvb,|bb,}");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (negated)
              strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
            else
              strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a bb;nop rather than extrs;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "{bvb,|bb,}");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, ">=");
            else
              strcat (buf, "<");
            if (negated)
              strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
            else
              strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
          }
        else
          {
            strcpy (buf, "{vextrs,|extrw,s,}");
            if (GET_MODE (operands[0]) == DImode)
              strcpy (buf, "extrd,s,*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (nullify && negated)
              strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
            else if (nullify && ! negated)
              strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
            else if (negated)
              strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
            else
              strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        else
          strcpy (buf, "{bvb,|bb,}");
        if ((which == 0 && negated)
            || (which == 1 && !negated))
          strcat (buf, "<");
        else
          strcat (buf, ">=");
        if (nullify)
          strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
        else
          strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
        output_asm_insn (buf, operands);
        return pa_output_lbranch (negated ? operands[3] : operands[2],
                                  insn, xdelay);
    }
  return buf;
}
/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */
const char *
pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
        return "ldo %1(%0),%0";
      else if (which_alternative == 1)
        {
          output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
          output_asm_insn ("ldw -16(%%r30),%4", operands);
          output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
          return "{fldws|fldw} -16(%%r30),%0";
        }
      else
        {
          output_asm_insn ("ldw %0,%4", operands);
          return "ldo %1(%4),%4\n\tstw %4,%0";
        }
    }

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
         as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
        nullify = 1;

      /* If this is a short forward conditional branch which did not get
         its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
        nullify = forward_branch_p (insn);

      switch (length)
        {
        case 4:
          if (nullify)
            {
              if (branch_needs_nop_p (insn))
                return "addib,%C2,n %1,%0,%3%#";
              else
                return "addib,%C2,n %1,%0,%3";
            }
          else
            return "addib,%C2 %1,%0,%3";

        case 8:
          /* Handle weird backwards branch with a filled delay slot
             which is nullified.  */
          if (dbr_sequence_length () != 0
              && ! forward_branch_p (insn)
              && nullify)
            return "addib,%N2,n %1,%0,.+12\n\tb %3";
          /* Handle short backwards branch with an unfilled delay slot.
             Using a addb;nop rather than addi;bl saves 1 cycle for both
             taken and untaken branches.  */
          else if (dbr_sequence_length () == 0
                   && ! forward_branch_p (insn)
                   && INSN_ADDRESSES_SET_P ()
                   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
            return "addib,%C2 %1,%0,%3%#";

          /* Handle normal cases.  */
          if (nullify)
            return "addi,%N2 %1,%0,%0\n\tb,n %3";
          else
            return "addi,%N2 %1,%0,%0\n\tb %3";

        default:
          /* The reversed conditional branch must branch over one additional
             instruction if the delay slot is filled and needs to be extracted
             by pa_output_lbranch.  If the delay slot is empty or this is a
             nullified forward branch, the instruction after the reversed
             condition branch must be nullified.  */
          if (dbr_sequence_length () == 0
              || (nullify && forward_branch_p (insn)))
            {
              nullify = 1;
              xdelay = 0;
              operands[4] = GEN_INT (length);
            }
          else
            {
              xdelay = 1;
              operands[4] = GEN_INT (length + 4);
            }

          if (nullify)
            output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
          else
            output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);

          return pa_output_lbranch (operands[3], insn, xdelay);
        }
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
         increment the GR, store the GR into MEM, and finally reload
         the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
                       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (length == 24)
        return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 28)
        return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
        {
          operands[5] = GEN_INT (length - 16);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
          output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
         happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (length == 12)
        return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else if (length == 16)
        return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
      else
        {
          operands[5] = GEN_INT (length - 4);
          output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
}
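/* An illustrative expansion of the common case above (alternative 0,
   length 4, delay slot filled): "addib,%C2 %1,%0,%3" might become

        addib,> -1,%r26,L$loop

   which decrements %r26 and branches back to L$loop while the result
   remains greater than zero, with the already-filled delay slot insn
   executing in the branch shadow.  */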
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */
const char *
pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
                int reverse_comparison)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
        return "copy %1,%0";
      else if (which_alternative == 1)
        {
          output_asm_insn ("stw %1,-16(%%r30)", operands);
          return "{fldws|fldw} -16(%%r30),%0";
        }
      else if (which_alternative == 2)
        return "stw %1,%0";
      else
        return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
         as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
        nullify = 1;

      /* If this is a short forward conditional branch which did not get
         its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
        nullify = forward_branch_p (insn);

      switch (length)
        {
        case 4:
          if (nullify)
            {
              if (branch_needs_nop_p (insn))
                return "movb,%C2,n %1,%0,%3%#";
              else
                return "movb,%C2,n %1,%0,%3";
            }
          else
            return "movb,%C2 %1,%0,%3";

        case 8:
          /* Handle weird backwards branch with a filled delay slot
             which is nullified.  */
          if (dbr_sequence_length () != 0
              && ! forward_branch_p (insn)
              && nullify)
            return "movb,%N2,n %1,%0,.+12\n\tb %3";

          /* Handle short backwards branch with an unfilled delay slot.
             Using a movb;nop rather than or;bl saves 1 cycle for both
             taken and untaken branches.  */
          else if (dbr_sequence_length () == 0
                   && ! forward_branch_p (insn)
                   && INSN_ADDRESSES_SET_P ()
                   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
            return "movb,%C2 %1,%0,%3%#";
          /* Handle normal cases.  */
          if (nullify)
            return "or,%N2 %1,%%r0,%0\n\tb,n %3";
          else
            return "or,%N2 %1,%%r0,%0\n\tb %3";

        default:
          /* The reversed conditional branch must branch over one additional
             instruction if the delay slot is filled and needs to be extracted
             by pa_output_lbranch.  If the delay slot is empty or this is a
             nullified forward branch, the instruction after the reversed
             condition branch must be nullified.  */
          if (dbr_sequence_length () == 0
              || (nullify && forward_branch_p (insn)))
            {
              nullify = 1;
              xdelay = 0;
              operands[4] = GEN_INT (length);
            }
          else
            {
              xdelay = 1;
              operands[4] = GEN_INT (length + 4);
            }

          if (nullify)
            output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
          else
            output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);

          return pa_output_lbranch (operands[3], insn, xdelay);
        }
    }
  /* Deal with gross reload for FP destination register case.  */
  else if (which_alternative == 1)
    {
      /* Move source register to MEM, perform the branch test, then
         finally load the FP register from MEM from within the branch's
         delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (length == 12)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 16)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
        {
          operands[4] = GEN_INT (length - 4);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
          output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
         happens in the branch's delay slot.  */
      if (length == 8)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else if (length == 12)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
      else
        {
          operands[4] = GEN_INT (length);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
                           operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (length == 8)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else if (length == 12)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
      else
        {
          operands[4] = GEN_INT (length);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
                           operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
}
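/* Similarly for movb (illustrative): alternative 0 with length 4 emits
   "movb,%C2 %1,%0,%3", for example

        movb,= %r4,%r5,L$done

   which copies %r4 into %r5 and branches to L$done if the value moved
   is zero.  */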
/* Copy any FP arguments in INSN into integer registers.  */
static void
copy_fp_args (rtx_insn *insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
          && GET_CODE (XEXP (use, 0)) == REG
          && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
        {
          /* Copy the FP register into an integer register via memory.  */
          if (arg_mode == SFmode)
            {
              xoperands[0] = XEXP (use, 0);
              xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
              output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
              output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
            }
          else
            {
              xoperands[0] = XEXP (use, 0);
              xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
              output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
              output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
              output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
            }
        }
    }
}
/* Compute length of the FP argument copy sequence for INSN.  */
static int
length_fp_args (rtx_insn *insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
          && GET_CODE (XEXP (use, 0)) == REG
          && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
        {
          if (arg_mode == SFmode)
            length += 8;
          else
            length += 12;
        }
    }

  return length;
}
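/* For example, a call passing one SFmode and one DFmode argument in FP
   registers gets 8 + 12 = 20 bytes added here, matching the two and
   three instructions respectively that copy_fp_args emits for those
   modes.  */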
/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by pa_output_millicode_call.
   We include the delay slot in the returned length as it is better to
   overestimate the length than to underestimate it.  */

int
pa_attr_length_millicode_call (rtx_insn *insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
        return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
        return 8;

      if (!flag_pic)
        return 12;

      return 24;
    }
}
/* INSN is a function call.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  rtx xoperands[3];

  xoperands[0] = call_dest;
  xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because it
     has an attribute type of sh_func_adrs, it never has a nonzero
     sequence length (i.e., the delay slot is never filled).  */
  if (!TARGET_LONG_CALLS
      && (attr_length == 8
          || (attr_length == 28
              && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
    {
      output_asm_insn ("{bl|b,l} %0,%2", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
        {
          /* It might seem that one insn could be saved by accessing
             the millicode function using the linkage table.  However,
             this doesn't work in shared libraries and other dynamically
             loaded objects.  Using a pc-relative sequence also avoids
             problems related to the implicit use of the gp register.  */
          output_asm_insn ("b,l .+8,%%r1", xoperands);

          if (TARGET_GAS)
            {
              output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
            }
          else
            {
              xoperands[1] = gen_label_rtx ();
              output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (xoperands[1]));
              output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
            }

          output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
        }
      else if (TARGET_PORTABLE_RUNTIME)
        {
          /* Pure portable runtime doesn't allow be/ble; we also don't
             have PIC support in the assembler/linker, so this sequence
             is needed.  */

          /* Get the address of our target into %r1.  */
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

          /* Get our return address into %r31.  */
          output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
          output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

          /* Jump to our target address in %r1.  */
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
        }
      else if (!flag_pic)
        {
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          if (TARGET_PA_20)
            output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
          else
            output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
        }
      else
        {
          output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
          output_asm_insn ("addi 16,%%r1,%%r31", xoperands);

          if (TARGET_SOM || !TARGET_GAS)
            {
              /* The HP assembler can generate relocations for the
                 difference of two symbols.  GAS can do this for a
                 millicode symbol but not an arbitrary external
                 symbol when generating SOM output.  */
              xoperands[1] = gen_label_rtx ();
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (xoperands[1]));
              output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
            }
          else
            {
              output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
                               xoperands);
            }

          /* Jump to our target address in %r1.  */
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
        }
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  return "";
}
/* Return the attribute length of the call instruction INSN.  The SIBCALL
   flag indicates whether INSN is a regular call or a sibling call.  The
   length returned must be longer than the code actually generated by
   pa_output_call.  Since branch shortening is done before delay branch
   sequencing, there is no way to determine whether or not the delay
   slot will be filled during branch shortening.  Even when the delay
   slot is filled, we may have to add a nop if the delay slot contains
   a branch that can't reach its target.  Thus, we always have to include
   the delay slot in the length estimate.  This used to be done in
   pa_adjust_insn_length but we do it here now as some sequences always
   fill the delay slot and we can save four bytes in the estimate for
   these sequences.  */

int
pa_attr_length_call (rtx_insn *insn, int sibcall)
{
  int local_call;
  rtx call, call_dest;
  tree call_decl;
  int length = 0;
  rtx pat = PATTERN (insn);
  unsigned long distance = -1;

  gcc_assert (CALL_P (insn));

  if (INSN_ADDRESSES_SET_P ())
    {
      unsigned long total;

      total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  gcc_assert (GET_CODE (pat) == PARALLEL);

  /* Get the call rtx.  */
  call = XVECEXP (pat, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);

  gcc_assert (GET_CODE (call) == CALL);

  /* Determine if this is a local call.  */
  call_dest = XEXP (XEXP (call, 0), 0);
  call_decl = SYMBOL_REF_DECL (call_dest);
  local_call = call_decl && targetm.binds_local_p (call_decl);

  /* pc-relative branch.  */
  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !sibcall && distance < 7600000)
          || distance < MAX_PCREL17F_OFFSET))
    length += 8;

  /* 64-bit plabel sequence.  */
  else if (TARGET_64BIT && !local_call)
    length += sibcall ? 28 : 24;

  /* non-pic long absolute branch sequence.  */
  else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
    length += 12;

  /* long pc-relative branch sequence.  */
  else if (TARGET_LONG_PIC_SDIFF_CALL
           || (TARGET_GAS && !TARGET_SOM
               && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
    {
      length += 20;

      if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
        length += 8;
    }

  /* 32-bit plabel sequence.  */
  else
    {
      length += 32;

      if (TARGET_SOM)
        length += length_fp_args (insn);

      if (flag_pic)
        length += 4;

      if (!TARGET_PA_20)
        {
          if (!sibcall)
            length += 8;

          if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
            length += 8;
        }
    }

  return length;
}
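/* A worked example: a non-sibling external call in 32-bit SOM PIC code
   on a pre-2.0 processor with space registers enabled falls into the
   32-bit plabel case above and is estimated at 32 + 4 + 8 + 8 = 52
   bytes, plus any FP argument copies counted by length_fp_args.  */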
/* INSN is a function call.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
{
  int seq_length = dbr_sequence_length ();
  tree call_decl = SYMBOL_REF_DECL (call_dest);
  int local_call = call_decl && targetm.binds_local_p (call_decl);
  rtx xoperands[2];

  xoperands[0] = call_dest;

  /* Handle the common case where we're sure that the branch will reach
     the beginning of the "$CODE$" subspace.  This is the beginning of
     the current function if we are in a named section.  */
  if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
    {
      xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT && !local_call)
        {
          /* ??? As far as I can tell, the HP linker doesn't support the
             long pc-relative sequence described in the 64-bit runtime
             architecture.  So, we use a slightly longer indirect call.  */
          xoperands[0] = pa_get_deferred_plabel (call_dest);
          xoperands[1] = gen_label_rtx ();

          /* If this isn't a sibcall, we put the load of %r27 into the
             delay slot.  We can't do this in a sibcall as we don't
             have a second call-clobbered scratch register available.
             We don't need to do anything when generating fast indirect
             calls.  */
          if (seq_length != 0 && !sibcall)
            {
              final_scan_insn (NEXT_INSN (insn), asm_out_file,
                               optimize, 0, NULL);

              /* Now delete the delay insn.  */
              SET_INSN_DELETED (NEXT_INSN (insn));
              seq_length = 0;
            }

          output_asm_insn ("addil LT'%0,%%r27", xoperands);
          output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
          output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);

          if (sibcall)
            {
              output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
              output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
              output_asm_insn ("bve (%%r1)", xoperands);
            }
          else
            {
              output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
              output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
              output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
              seq_length = 1;
            }
        }
      else
        {
          int indirect_call = 0;

          /* Emit a long call.  There are several different sequences
             of increasing length and complexity.  In most cases,
             they don't allow an instruction in the delay slot.  */
          if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
              && !TARGET_LONG_PIC_SDIFF_CALL
              && !(TARGET_GAS && !TARGET_SOM
                   && (TARGET_LONG_PIC_PCREL_CALL || local_call))
              && !TARGET_64BIT)
            indirect_call = 1;

          if (seq_length != 0
              && !sibcall
              && (!TARGET_PA_20
                  || indirect_call
                  || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
            {
              /* A non-jump insn in the delay slot.  By definition we can
                 emit this insn before the call (and in fact before argument
                 relocating).  */
              final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
                               NULL);

              /* Now delete the delay insn.  */
              SET_INSN_DELETED (NEXT_INSN (insn));
              seq_length = 0;
            }

          if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
            {
              /* This is the best sequence for making long calls in
                 non-pic code.  Unfortunately, GNU ld doesn't provide
                 the stub needed for external calls, and GAS's support
                 for this with the SOM linker is buggy.  It is safe
                 to use this for local calls.  */
              output_asm_insn ("ldil L'%0,%%r1", xoperands);
              if (sibcall)
                output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
              else
                {
                  if (TARGET_PA_20)
                    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
                                     xoperands);
                  else
                    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);

                  output_asm_insn ("copy %%r31,%%r2", xoperands);
                  seq_length = 1;
                }
            }
          else
            {
              if (TARGET_LONG_PIC_SDIFF_CALL)
                {
                  /* The HP assembler and linker can handle relocations
                     for the difference of two symbols.  The HP assembler
                     recognizes the sequence as a pc-relative call and
                     the linker provides stubs when needed.  */
                  xoperands[1] = gen_label_rtx ();
                  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
                  output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
                  targetm.asm_out.internal_label (asm_out_file, "L",
                                                  CODE_LABEL_NUMBER (xoperands[1]));
                  output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
                }
              else if (TARGET_GAS && !TARGET_SOM
                       && (TARGET_LONG_PIC_PCREL_CALL || local_call))
                {
                  /*  GAS currently can't generate the relocations that
                      are needed for the SOM linker under HP-UX using this
                      sequence.  The GNU linker doesn't generate the stubs
                      that are needed for external calls on TARGET_ELF32
                      with this sequence.  For now, we have to use a
                      longer plabel sequence when using GAS.  */
                  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
                  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
                                   xoperands);
                  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
                                   xoperands);
                }
              else
                {
                  /* Emit a long plabel-based call sequence.  This is
                     essentially an inline implementation of $$dyncall.
                     We don't actually try to call $$dyncall as this is
                     as difficult as calling the function itself.  */
                  xoperands[0] = pa_get_deferred_plabel (call_dest);
                  xoperands[1] = gen_label_rtx ();

                  /* Since the call is indirect, FP arguments in registers
                     need to be copied to the general registers.  Then, the
                     argument relocation stub will copy them back.  */
                  if (TARGET_SOM)
                    copy_fp_args (insn);

                  if (flag_pic)
                    {
                      output_asm_insn ("addil LT'%0,%%r19", xoperands);
                      output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
                      output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
                    }
                  else
                    {
                      output_asm_insn ("addil LR'%0-$global$,%%r27",
                                       xoperands);
                      output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
                                       xoperands);
                    }

                  output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
                  output_asm_insn ("depi 0,31,2,%%r1", xoperands);
                  output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
                  output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);

                  if (!sibcall && !TARGET_PA_20)
                    {
                      output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
                      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                        output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
                      else
                        output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
                    }
                }

              if (TARGET_PA_20)
                {
                  if (sibcall)
                    output_asm_insn ("bve (%%r1)", xoperands);
                  else
                    {
                      if (indirect_call)
                        {
                          output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
                          output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
                          seq_length = 1;
                        }
                      else
                        output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
                    }
                }
              else
                {
                  if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
                    output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
                                     xoperands);

                  if (sibcall)
                    {
                      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                        output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
                      else
                        output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
                    }
                  else
                    {
                      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                        output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
                      else
                        output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);

                      if (indirect_call)
                        output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
                      else
                        output_asm_insn ("copy %%r31,%%r2", xoperands);
                      seq_length = 1;
                    }
                }
            }
        }
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  return "";
}
/* Return the attribute length of the indirect call instruction INSN.
   The length must match the code generated by pa_output_indirect_call.
   The returned length includes the delay slot.  Currently, the delay
   slot of an indirect call sequence is not exposed and it is used by
   the sequence itself.  */

int
pa_attr_length_indirect_call (rtx_insn *insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  if (TARGET_64BIT)
    return 12;

  if (TARGET_FAST_INDIRECT_CALLS
      || (!TARGET_LONG_CALLS
          && !TARGET_PORTABLE_RUNTIME
          && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
              || distance < MAX_PCREL17F_OFFSET)))
    return 8;

  if (flag_pic)
    return 20;

  if (TARGET_PORTABLE_RUNTIME)
    return 16;

  /* Out of reach, can use ble.  */
  return 12;
}
8056 /* Out of reach, can use ble. */
8061 pa_output_indirect_call (rtx_insn
*insn
, rtx call_dest
)
8067 xoperands
[0] = call_dest
;
8068 output_asm_insn ("ldd 16(%0),%%r2", xoperands
);
8069 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands
);
8073 /* First the special case for kernels, level 0 systems, etc. */
8074 if (TARGET_FAST_INDIRECT_CALLS
)
8075 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8077 /* Now the normal case -- we can reach $$dyncall directly or
8078 we're sure that we can get there via a long-branch stub.
8080 No need to check target flags as the length uniquely identifies
8081 the remaining cases. */
8082 if (pa_attr_length_indirect_call (insn
) == 8)
8084 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8085 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8086 variant of the B,L instruction can't be used on the SOM target. */
8087 if (TARGET_PA_20
&& !TARGET_SOM
)
8088 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8090 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8093 /* Long millicode call, but we are not generating PIC or portable runtime
8095 if (pa_attr_length_indirect_call (insn
) == 12)
8096 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8098 /* Long millicode call for portable runtime. */
8099 if (pa_attr_length_indirect_call (insn
) == 16)
8100 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8102 /* We need a long PIC call to $$dyncall. */
8103 xoperands
[0] = NULL_RTX
;
8104 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands
);
8105 if (TARGET_SOM
|| !TARGET_GAS
)
8107 xoperands
[0] = gen_label_rtx ();
8108 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands
);
8109 targetm
.asm_out
.internal_label (asm_out_file
, "L",
8110 CODE_LABEL_NUMBER (xoperands
[0]));
8111 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands
);
8115 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands
);
8116 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8119 output_asm_insn ("bv %%r0(%%r1)", xoperands
);
8120 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands
);
/* In HPUX 8.0's shared library scheme, special relocations are needed
   for function labels if they might be passed to a function
   in a shared library (because shared libraries don't live in code
   space), and special magic is needed to construct their address.  */

void
pa_encode_label (rtx sym)
{
  const char *str = XSTR (sym, 0);
  int len = strlen (str) + 1;
  char *newstr, *p;

  p = newstr = XALLOCAVEC (char, len + 1);
  *p++ = '@';
  strcpy (p, str);

  XSTR (sym, 0) = ggc_alloc_string (newstr, len);
}
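/* For example, a function symbol "foo" is rewritten to "@foo" here; the
   '@' is removed again by pa_strip_name_encoding below when the plain
   name is needed.  */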
static void
pa_encode_section_info (tree decl, rtx rtl, int first)
{
  int old_referenced = 0;

  if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
    old_referenced
      = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;

  default_encode_section_info (decl, rtl, first);

  if (first && TEXT_SPACE_P (decl))
    {
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
      if (TREE_CODE (decl) == FUNCTION_DECL)
        pa_encode_label (XEXP (rtl, 0));
    }
  else if (old_referenced)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
}
/* This is sort of inverse to pa_encode_section_info.  */

static const char *
pa_strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}
/* Returns 1 if OP is a function label involved in a simple addition
   with a constant.  Used to keep certain patterns from matching
   during instruction combination.  */
int
pa_is_function_label_plus_const (rtx op)
{
  /* Strip off any CONST.  */
  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  return (GET_CODE (op) == PLUS
          && function_label_operand (XEXP (op, 0), VOIDmode)
          && GET_CODE (XEXP (op, 1)) == CONST_INT);
}
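/* So, for instance, (const (plus (symbol_ref "@foo") (const_int 4)))
   returns 1 here when "@foo" satisfies function_label_operand, while a
   plain symbol_ref or a sum with a register returns 0.  */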
/* Output assembly code for a thunk to FUNCTION.  */

static void
pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
                        HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                        tree function)
{
  static unsigned int current_thunk_number;
  int val_14 = VAL_14_BITS_P (delta);
  unsigned int old_last_address = last_address, nbytes = 0;
  char label[16];
  rtx xoperands[4];

  xoperands[0] = XEXP (DECL_RTL (function), 0);
  xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
  xoperands[2] = GEN_INT (delta);

  final_start_function (emit_barrier (), file, 1);

  /* Output the thunk.  We know that the function is in the same
     translation unit (i.e., the same space) as the thunk, and that
     thunks are output after their method.  Thus, we don't need an
     external branch to reach the function.  With SOM and GAS,
     functions and thunks are effectively in different sections.
     Thus, we can always use a IA-relative branch and the linker
     will add a long branch stub if necessary.

     However, we have to be careful when generating PIC code on the
     SOM port to ensure that the sequence does not transfer to an
     import stub for the target function as this could clobber the
     return value saved at SP-24.  This would also apply to the
     32-bit linux port if the multi-space model is implemented.  */
  if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
       && !(flag_pic && TREE_PUBLIC (function))
       && (TARGET_GAS || last_address < 262132))
      || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
          && ((targetm_common.have_named_sections
               && DECL_SECTION_NAME (thunk_fndecl) != NULL
               /* The GNU 64-bit linker has rather poor stub management.
                  So, we use a long branch from thunks that aren't in
                  the same section as the target function.  */
               && ((!TARGET_64BIT
                    && (DECL_SECTION_NAME (thunk_fndecl)
                        != DECL_SECTION_NAME (function)))
                   || ((DECL_SECTION_NAME (thunk_fndecl)
                        == DECL_SECTION_NAME (function))
                       && last_address < 262132)))
              /* In this case, we need to be able to reach the start of
                 the stub table even though the function is likely closer
                 and can be jumped to directly.  */
              || (targetm_common.have_named_sections
                  && DECL_SECTION_NAME (thunk_fndecl) == NULL
                  && DECL_SECTION_NAME (function) == NULL
                  && total_code_bytes < MAX_PCREL17F_OFFSET)
              /* Likewise.  */
              || (!targetm_common.have_named_sections
                  && total_code_bytes < MAX_PCREL17F_OFFSET))))
    {
      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("b %0", xoperands);

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 8;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 12;
        }
    }
  else if (TARGET_64BIT)
    {
      /* We only have one call-clobbered scratch register, so we can't
         make use of the delay slot if delta doesn't fit in 14 bits.  */
      if (!val_14)
        {
          output_asm_insn ("addil L'%2,%%r26", xoperands);
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
        }

      output_asm_insn ("b,l .+8,%%r1", xoperands);

      if (TARGET_GAS)
        {
          output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
          output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
        }
      else
        {
          xoperands[3] = GEN_INT (val_14 ? 8 : 16);
          output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
          output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
        }

      if (val_14)
        {
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 20;
        }
      else
        {
          output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
          nbytes += 24;
        }
    }
  else if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);

      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 16;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 20;
        }
    }
  else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      /* The function is accessible from outside this module.  The only
         way to avoid an import stub between the thunk and function is to
         call the function directly with an indirect sequence similar to
         that used by $$dyncall.  This is possible because $$dyncall acts
         as the import stub in an indirect call.  */
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
      xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
      output_asm_insn ("addil LT'%3,%%r19", xoperands);
      output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
      output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
      output_asm_insn ("depi 0,31,2,%%r22", xoperands);
      output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);

      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      if (TARGET_PA_20)
        {
          output_asm_insn ("bve (%%r22)", xoperands);
          nbytes += 32;
        }
      else if (TARGET_NO_SPACE_REGS)
        {
          output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
          nbytes += 32;
        }
      else
        {
          output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
          output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
          output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
          nbytes += 40;
        }

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 4;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 8;
        }
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);

      if (TARGET_SOM || !TARGET_GAS)
        {
          output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
          output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
        }
      else
        {
          output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
          output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
        }

      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 20;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 24;
        }
    }
  else
    {
      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("ldil L'%0,%%r22", xoperands);
      output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 12;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 16;
        }
    }

  final_end_function ();

  if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      switch_to_section (data_section);
      output_asm_insn (".align 4", xoperands);
      ASM_OUTPUT_LABEL (file, label);
      output_asm_insn (".word P'%0", xoperands);
    }

  current_thunk_number++;
  nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
            & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
  last_address += nbytes;
  if (old_last_address > last_address)
    last_address = UINT_MAX;
  update_total_code_bytes (nbytes);
}
/* Only direct calls to static functions are allowed to be sibling (tail)
   call optimized.

   This restriction is necessary because some linker generated stubs will
   store return pointers into rp' in some cases which might clobber a
   live value already in rp'.

   In a sibcall the current function and the target function share stack
   space.  Thus if the path to the current function and the path to the
   target function save a value in rp', they save the value into the
   same stack slot, which has undesirable consequences.

   Because of the deferred binding nature of shared libraries any function
   with external scope could be in a different load module and thus require
   rp' to be saved when calling that function.  So sibcall optimizations
   can only be safe for static functions.

   Note that GCC never needs return value relocations, so we don't have to
   worry about static calls with return value relocations (which require
   saving rp').

   It is safe to perform a sibcall optimization when the target function
   will never return.  */

static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  if (TARGET_PORTABLE_RUNTIME)
    return false;

  /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
     single subspace mode and the call is not indirect.  As far as I know,
     there is no operating system support for the multiple subspace mode.
     It might be possible to support indirect calls if we didn't use
     $$dyncall (see the indirect sequence generated in pa_output_call).  */
  if (TARGET_ELF32)
    return (decl != NULL_TREE);

  /* Sibcalls are not ok because the arg pointer register is not a fixed
     register.  This prevents the sibcall optimization from occurring.  In
     addition, there are problems with stub placement using GNU ld.  This
     is because a normal sibcall branch uses a 17-bit relocation while
     a regular call branch uses a 22-bit relocation.  As a result, more
     care needs to be taken in the placement of long-branch stubs.  */
  if (TARGET_64BIT)
    return false;

  /* Sibcalls are only ok within a translation unit.  */
  return (decl && !TREE_PUBLIC (decl));
}
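/* A minimal sketch of the rule above (illustration only, not built):
   with the 32-bit SOM target a call to a function with static scope is
   a sibcall candidate, while a call to an external function is not,
   since the latter could resolve to a different load module.  */
#if 0
static int add_one (int x) { return x + 1; }
extern int extern_fn (int x);

int
sibcall_ok (int x)
{
  return add_one (x);		/* static callee: may be tail called */
}

int
sibcall_not_ok (int x)
{
  return extern_fn (x);		/* public callee: full call sequence */
}
#endif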
/* ??? Addition is not commutative on the PA due to the weird implicit
   space register selection rules for memory addresses.  Therefore, we
   don't consider a + b == b + a, as this might be inside a MEM.  */

static bool
pa_commutative_p (const_rtx x, int outer_code)
{
  return (COMMUTATIVE_P (x)
	  && (TARGET_NO_SPACE_REGS
	      || (outer_code != UNKNOWN && outer_code != MEM)
	      || GET_CODE (x) != PLUS));
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpyadd instructions.  */
int
pa_fmpyaddoperands (rtx *operands)
{
  machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the addition.  One of the input operands must
     be the same as the output operand.  */
  if (! rtx_equal_p (operands[3], operands[4])
      && ! rtx_equal_p (operands[3], operands[5]))
    return 0;

  /* Inout operand of add cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* multiply cannot feed into addition operands.  */
  if (rtx_equal_p (operands[4], operands[0])
      || rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpyadd.  */
  return 1;
}
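/* A minimal usage sketch (illustration only, not built; the register
   numbers are arbitrary FP registers chosen for the example).  The six
   operands encode op0 = op1 * op2 for the fmpy and op3 = op4 + op5 for
   the fadd, where the add destination must also be one of its inputs.  */
#if 0
static int
fmpyadd_example (void)
{
  rtx ops[6];

  ops[0] = gen_rtx_REG (DFmode, 38);	/* multiply destination */
  ops[1] = gen_rtx_REG (DFmode, 40);	/* multiply input 1 */
  ops[2] = gen_rtx_REG (DFmode, 42);	/* multiply input 2 */
  ops[3] = gen_rtx_REG (DFmode, 44);	/* add destination (inout) */
  ops[4] = gen_rtx_REG (DFmode, 44);	/* matches ops[3] */
  ops[5] = gen_rtx_REG (DFmode, 46);	/* add input */

  return pa_fmpyaddoperands (ops);	/* returns 1: suitable */
}
#endif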
#if !defined(USE_COLLECT2)
static void
pa_asm_out_constructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef CTORS_SECTION_ASM_OP
  default_ctor_section_asm_out_constructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_constructor (symbol, priority);
# else
  default_stabs_asm_out_constructor (symbol, priority);
# endif
#endif
}

static void
pa_asm_out_destructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef DTORS_SECTION_ASM_OP
  default_dtor_section_asm_out_destructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_destructor (symbol, priority);
# else
  default_stabs_asm_out_destructor (symbol, priority);
# endif
#endif
}
#endif
/* This function places uninitialized global data in the bss section.
   The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
   function on the SOM port to prevent uninitialized global data from
   being placed in the data section.  */

void
pa_asm_output_aligned_bss (FILE *stream,
			   const char *name,
			   unsigned HOST_WIDE_INT size,
			   unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
  ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
#endif

  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}
/* Both the HP and GNU assemblers under HP-UX provide a .comm directive
   that doesn't allow the alignment of global common storage to be directly
   specified.  The SOM linker aligns common storage based on the rounded
   value of the NUM_BYTES parameter in the .comm directive.  It's not
   possible to use the .align directive as it doesn't affect the alignment
   of the label associated with a .comm directive.  */

void
pa_asm_output_aligned_common (FILE *stream,
			      const char *name,
			      unsigned HOST_WIDE_INT size,
			      unsigned int align)
{
  unsigned int max_common_align;

  max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
  if (align > max_common_align)
    {
      warning (0, "alignment (%u) for %s exceeds maximum alignment "
	       "for global common data.  Using %u",
	       align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
      align = max_common_align;
    }

  switch_to_section (bss_section);

  assemble_name (stream, name);
  fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED "\n",
	   MAX (size, align / BITS_PER_UNIT));
}
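/* For example (illustration only): on the 32-bit target a 16 byte
   common symbol gets max_common_align == 64 bits, so a request for
   256-bit alignment is diagnosed and capped at 8 bytes, and the .comm
   NUM_BYTES value emitted is MAX (16, 8) == 16.  */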
/* We can't use .comm for local common storage as the SOM linker effectively
   treats the symbol as universal and uses the same storage for local symbols
   with the same name in different object files.  The .block directive
   reserves an uninitialized block of storage.  However, it's not common
   storage.  Fortunately, GCC never requests common storage with the same
   name in any given translation unit.  */

void
pa_asm_output_aligned_local (FILE *stream,
			     const char *name,
			     unsigned HOST_WIDE_INT size,
			     unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef LOCAL_ASM_OP
  fprintf (stream, "%s", LOCAL_ASM_OP);
  assemble_name (stream, name);
  fprintf (stream, "\n");
#endif

  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpysub instructions.  */
int
pa_fmpysuboperands (rtx *operands)
{
  machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the subtraction.  Subtraction is not a
     commutative operation, so operands[4] must be the same as
     operands[3].  */
  if (! rtx_equal_p (operands[3], operands[4]))
    return 0;

  /* multiply cannot feed into subtraction.  */
  if (rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* Inout operand of sub cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpysub.  */
  return 1;
}
/* Return 1 if the given constant is 2, 4, or 8.  These are the valid
   constants for shadd instructions.  */
int
pa_shadd_constant_p (int val)
{
  if (val == 2 || val == 4 || val == 8)
    return 1;
  else
    return 0;
}
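/* These constants are the scale factors of the sh1add, sh2add and
   sh3add instructions, which compute x * 2 + y, x * 4 + y and
   x * 8 + y respectively.  For example (illustration only):

	sh2add %r4,%r5,%r6	; %r6 = %r4 * 4 + %r5

   so pa_shadd_constant_p (4) returns 1 while pa_shadd_constant_p (3)
   returns 0.  */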
/* Return TRUE if INSN branches forward.  */

static bool
forward_branch_p (rtx_insn *insn)
{
  rtx lab = JUMP_LABEL (insn);

  /* The INSN must have a jump label.  */
  gcc_assert (lab != NULL_RTX);

  if (INSN_ADDRESSES_SET_P ())
    return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));

  while (insn)
    {
      if (insn == lab)
	break;
      else
	insn = NEXT_INSN (insn);
    }

  return (insn == lab);
}
/* Output an unconditional move and branch insn.  */

const char *
pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
{
  int length = get_attr_length (insn);

  /* These are the cases in which we win.  */
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
	{
	  /* Nothing in the delay slot, fake it by putting the combined
	     insn (the copy or add) in the delay slot of a bl.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "b %2\n\tldi %1,%0";
	  else
	    return "b %2\n\tcopy %1,%0";
	}
      else
	{
	  /* Something in the delay slot, but we've got a long branch.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "ldi %1,%0\n\tb %2";
	  else
	    return "copy %1,%0\n\tb %2";
	}
    }

  if (GET_CODE (operands[1]) == CONST_INT)
    output_asm_insn ("ldi %1,%0", operands);
  else
    output_asm_insn ("copy %1,%0", operands);
  return pa_output_lbranch (operands[2], insn, 1);
}
/* Output an unconditional add and branch insn.  */

const char *
pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
	/* Nothing in the delay slot, fake it by putting the combined
	   insn (the copy or add) in the delay slot of a bl.  */
	return "b %3\n\tadd%I1 %1,%0,%0";
      else
	/* Something in the delay slot, but we've got a long branch.  */
	return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return pa_output_lbranch (operands[3], insn, 1);
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.  */

static void
pa_reorg (void)
{
  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();
}
/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform a FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed location.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is a few insns away).  These are difficult to use due to the
   branch length restrictions.  */
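/* As a concrete illustration of the addb case described above (the
   assembly is a sketch, not emitted verbatim by this pass): an add
   followed by a backward unconditional branch, as in

	add %r26,%r25,%r25
	b L$0010

   can be replaced by a single addb using the always-true condition:

	addb,tr %r26,%r25,L$0010  */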
static void
pa_combine_instructions (void)
{
  rtx_insn *anchor;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  rtx_insn *new_rtx = make_insn_raw (par);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
	 Also ignore any special USE insns.  */
      if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
	  || GET_CODE (PATTERN (anchor)) == USE
	  || GET_CODE (PATTERN (anchor)) == CLOBBER)
	continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
	  || anchor_attr == PA_COMBINE_TYPE_FADDSUB
	  || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
	      && ! forward_branch_p (anchor)))
	{
	  rtx_insn *floater;

	  for (floater = PREV_INSN (anchor);
	       floater;
	       floater = PREV_INSN (floater))
	    {
	      if (NOTE_P (floater)
		  || (NONJUMP_INSN_P (floater)
		      && (GET_CODE (PATTERN (floater)) == USE
			  || GET_CODE (PATTERN (floater)) == CLOBBER)))
		continue;

	      /* Anything except a regular INSN will stop our search.  */
	      if (! NONJUMP_INSN_P (floater))
		{
		  floater = NULL;
		  break;
		}

	      /* See if FLOATER is suitable for combination with the
		 anchor.  */
	      floater_attr = get_attr_pa_combine_type (floater);
	      if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		      && floater_attr == PA_COMBINE_TYPE_FMPY))
		{
		  /* If ANCHOR and FLOATER can be combined, then we're
		     done with this pass.  */
		  if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					SET_DEST (PATTERN (floater)),
					XEXP (SET_SRC (PATTERN (floater)), 0),
					XEXP (SET_SRC (PATTERN (floater)), 1)))
		    break;
		}
	      else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
		       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
		{
		  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)), 0),
					    XEXP (SET_SRC (PATTERN (floater)), 1)))
			break;
		    }
		  else
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    SET_SRC (PATTERN (floater)),
					    SET_SRC (PATTERN (floater))))
			break;
		    }
		}
	    }

	  /* If we didn't find anything on the backwards scan try forwards.  */
	  if (!floater
	      && (anchor_attr == PA_COMBINE_TYPE_FMPY
		  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
	    {
	      for (floater = anchor; floater; floater = NEXT_INSN (floater))
		{
		  if (NOTE_P (floater)
		      || (NONJUMP_INSN_P (floater)
			  && (GET_CODE (PATTERN (floater)) == USE
			      || GET_CODE (PATTERN (floater)) == CLOBBER)))
		    continue;

		  /* Anything except a regular INSN will stop our search.  */
		  if (! NONJUMP_INSN_P (floater))
		    {
		      floater = NULL;
		      break;
		    }

		  /* See if FLOATER is suitable for combination with the
		     anchor.  */
		  floater_attr = get_attr_pa_combine_type (floater);
		  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
			  && floater_attr == PA_COMBINE_TYPE_FMPY))
		    {
		      /* If ANCHOR and FLOATER can be combined, then we're
			 done with this pass.  */
		      if (pa_can_combine_p (new_rtx, anchor, floater, 1,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		}
	    }

	  /* FLOATER will be nonzero if we found a suitable floating
	     insn for combination with ANCHOR.  */
	  if (floater
	      && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		  || anchor_attr == PA_COMBINE_TYPE_FMPY))
	    {
	      /* Emit the new instruction and delete the old anchor.  */
	      emit_insn_before (gen_rtx_PARALLEL
				  (VOIDmode,
				   gen_rtvec (2, PATTERN (anchor),
					      PATTERN (floater))),
				anchor);

	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	  else if (floater
		   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
	    {
	      /* Emit the new_jump instruction and delete the old anchor.  */
	      rtx_insn *temp
		= emit_jump_insn_before (gen_rtx_PARALLEL
					   (VOIDmode,
					    gen_rtvec (2, PATTERN (anchor),
						       PATTERN (floater))),
					 anchor);

	      JUMP_LABEL (temp) = JUMP_LABEL (anchor);
	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	}
    }
}
static int
pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
		  int reversed, rtx dest,
		  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx_insn *start, *end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
  INSN_CODE (new_rtx) = -1;
  insn_code_number = recog_memoized (new_rtx);
  basic_block bb = BLOCK_FOR_INSN (anchor);
  if (insn_code_number < 0
      || (extract_insn (new_rtx),
	  !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There are up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in this case.  */
int
pa_insn_refs_are_delayed (rtx_insn *insn)
{
  return ((NONJUMP_INSN_P (insn)
	   && GET_CODE (PATTERN (insn)) != SEQUENCE
	   && GET_CODE (PATTERN (insn)) != USE
	   && GET_CODE (PATTERN (insn)) != CLOBBER
	   && get_attr_type (insn) == TYPE_MILLI));
}
/* Promote the return value, but not the arguments.  */

static machine_mode
pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			  machine_mode mode,
			  int *punsignedp ATTRIBUTE_UNUSED,
			  const_tree fntype ATTRIBUTE_UNUSED,
			  int for_return)
{
  if (for_return == 0)
    return mode;
  return promote_mode (type, mode, punsignedp);
}
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else promoting
   return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

static rtx
pa_function_value (const_tree valtype,
		   const_tree func ATTRIBUTE_UNUSED,
		   bool outgoing ATTRIBUTE_UNUSED)
{
  machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      HOST_WIDE_INT valsize = int_size_in_bytes (valtype);

      /* Handle aggregates that fit exactly in a word or double word.  */
      if ((valsize & (UNITS_PER_WORD - 1)) == 0)
	return gen_rtx_REG (TYPE_MODE (valtype), 28);

      if (TARGET_64BIT)
	{
	  /* Aggregates with a size less than or equal to 128 bits are
	     returned in GR 28(-29).  They are left justified.  The pad
	     bits are undefined.  Larger aggregates are returned in
	     memory.  */
	  rtx loc[2];
	  int i, offset = 0;
	  int ub = valsize <= UNITS_PER_WORD ? 1 : 2;

	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, 28 + i),
					  GEN_INT (offset));
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
	}
      else if (valsize > UNITS_PER_WORD)
	{
	  /* Aggregates 5 to 8 bytes in size are returned in general
	     registers r28-r29 in the same manner as other non
	     floating-point objects.  The data is right-justified and
	     zero-extended to 64 bits.  This is opposite to the normal
	     justification used on big endian targets and requires
	     special treatment.  */
	  rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (DImode, 28), const0_rtx);
	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	}
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
/* Implement the TARGET_LIBCALL_VALUE hook.  */

static rtx
pa_libcall_value (machine_mode mode,
		  const_rtx fun ATTRIBUTE_UNUSED)
{
  if (! TARGET_SOFT_FLOAT
      && (mode == SFmode || mode == DFmode))
    return gen_rtx_REG (mode, 32);
  else
    return gen_rtx_REG (mode, 28);
}
/* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook.  */

static bool
pa_function_value_regno_p (const unsigned int regno)
{
  if (regno == 28
      || (! TARGET_SOFT_FLOAT && regno == 32))
    return true;

  return false;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int arg_size = FUNCTION_ARG_SIZE (mode, type);

  cum->nargs_prototype--;
  cum->words += (arg_size
		 + ((cum->words & 01)
		    && type != NULL_TREE
		    && arg_size > 1));
}
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
static rtx
pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
		 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
	return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
	alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
	return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

	 Remember, gprs grow towards smaller register numbers while
	 fprs grow to higher register numbers.  Also remember that
	 although FP regs are 32-bit addressable, we pretend that
	 the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
	 treatment.  */
      if (arg_size > 1
	  || mode == BLKmode
	  || (type && (AGGREGATE_TYPE_P (type)
		       || TREE_CODE (type) == COMPLEX_TYPE
		       || TREE_CODE (type) == VECTOR_TYPE)))
	{
	  /* Double-extended precision (80-bit), quad-precision (128-bit)
	     and aggregates including complex numbers are aligned on
	     128-bit boundaries.  The first eight 64-bit argument slots
	     are associated one-to-one, with general registers r26
	     through r19, and also with floating-point registers fr4
	     through fr11.  Arguments larger than one word are always
	     passed in general registers.

	     Using a PARALLEL with a word mode register results in left
	     justified data on a big-endian target.  */

	  rtx loc[8];
	  int i, offset = 0, ub = arg_size;

	  /* Align the base register.  */
	  gpr_reg_base -= alignment;

	  ub = MIN (ub, max_arg_words - cum->words - alignment);
	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, gpr_reg_base),
					  GEN_INT (offset));
	      gpr_reg_base -= 1;
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
	}
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
	 which registers we must use.  */
      if (arg_size > 1)
	{
	  if (cum->words)
	    {
	      gpr_reg_base = 23;
	      fpr_reg_base = 38;
	    }
	  else
	    {
	      gpr_reg_base = 25;
	      fpr_reg_base = 34;
	    }

	  /* Structures 5 to 8 bytes in size are passed in the general
	     registers in the same manner as other non floating-point
	     objects.  The data is right-justified and zero-extended
	     to 64 bits.  This is opposite to the normal justification
	     used on big endian targets and requires special treatment.
	     We now define BLOCK_REG_PADDING to pad these objects.
	     Aggregates, complex and vector types are passed in the same
	     manner as structures.  */
	  if (mode == BLKmode
	      || (type && (AGGREGATE_TYPE_P (type)
			   || TREE_CODE (type) == COMPLEX_TYPE
			   || TREE_CODE (type) == VECTOR_TYPE)))
	    {
	      rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (DImode, gpr_reg_base),
					   const0_rtx);
	      return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	    }
	}
      else
	{
	  /* We have a single word (32 bits).  A simple computation
	     will get us the register #s we need.  */
	  gpr_reg_base = 26 - cum->words;
	  fpr_reg_base = 32 + 2 * cum->words;
	}
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
	  is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
	  pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
	  registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
	  sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
	 calls with the 32 bit ABI and the HP assembler since there is no
	 way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
	  && !TARGET_GAS
	  && !cum->incoming
	  && cum->indirect
	  && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
	= gen_rtx_PARALLEL
	    (mode,
	     gen_rtvec (2,
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, fpr_reg_base),
					   const0_rtx),
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, gpr_reg_base),
					   const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
	  /* Indirect calls in the normal 32bit ABI require all arguments
	     to be passed in general registers.  */
	  || (!TARGET_PORTABLE_RUNTIME
	      && !TARGET_64BIT
	      && !TARGET_ELF32
	      && cum->indirect)
	  /* If the parameter is not a scalar floating-point parameter,
	     then it belongs in GPRs.  */
	  || GET_MODE_CLASS (mode) != MODE_FLOAT
	  /* Structure with single SFmode field belongs in GPR.  */
	  || (type && AGGREGATE_TYPE_P (type)))
	retval = gen_rtx_REG (mode, gpr_reg_base);
      else
	retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
/* Arguments larger than one word are double word aligned.  */

static unsigned int
pa_function_arg_boundary (machine_mode mode, const_tree type)
{
  bool singleword = (type
		     ? (integer_zerop (TYPE_SIZE (type))
			|| !TREE_CONSTANT (TYPE_SIZE (type))
			|| int_size_in_bytes (type) <= UNITS_PER_WORD)
		     : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);

  return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
}
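/* For example, on the 32-bit target an int argument gets PARM_BOUNDARY
   (32 bits), while a double or an 8-byte struct exceeds UNITS_PER_WORD
   and gets MAX_PARM_BOUNDARY (64 bits there).  */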
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
		      tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
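/* Worked example (64-bit target only, illustration): with cum->words == 6
   and a 32 byte aggregate (FUNCTION_ARG_SIZE == 4), offset stays 0 since
   6 is even; 6 + 0 + 4 > 8 but 6 + 0 < 8, so the argument is split and
   (8 - 6 - 0) * UNITS_PER_WORD == 16 bytes are passed in registers with
   the remainder on the stack.  */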
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
	{
	  /* We only want to emit a .nsubspa directive once at the
	     start of the function.  */
	  cfun->machine->in_nsubspa = 1;

	  /* Create a new subspace for the text.  This provides
	     better stub placement and one-only functions.  */
	  if (cfun->decl
	      && DECL_ONE_ONLY (cfun->decl)
	      && !DECL_WEAK (cfun->decl))
	    {
	      output_section_asm_op ("\t.SPACE $TEXT$\n"
				     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
				     "ACCESS=44,SORT=24,COMDAT");
	      return;
	    }
	}
      else
	{
	  /* There isn't a current function or the body of the current
	     function has been completed.  So, we are changing to the
	     text section to output debugging information.  Thus, we
	     need to forget that we are in the text section so that
	     varasm.c will call us when text_section is selected again.  */
	  gcc_assert (!cfun || !cfun->machine
		      || cfun->machine->in_nsubspa == 2);
	  in_section = NULL;
	}
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
			   "\t.SPACE $TEXT$\n"
			   "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
			   "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
			   som_output_comdat_data_section_asm_op,
			   "\t.SPACE $PRIVATE$\n"
			   "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
			   "ACCESS=31,SORT=24,COMDAT");

  if (flag_tm)
    som_tm_clone_table_section
      = get_unnamed_section (0, output_section_asm_op,
			     "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     hang.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
/* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION.  */

static section *
pa_som_tm_clone_table_section (void)
{
  return som_tm_clone_table_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
		   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
	  || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
	  && DECL_ONE_ONLY (exp)
	  && !DECL_WEAK (exp))
	return som_one_only_readonly_data_section;
      else
	return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
	   && TREE_CODE (exp) == VAR_DECL
	   && DECL_ONE_ONLY (exp)
	   && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
/* Implement pa_reloc_rw_mask.  */

static int
pa_reloc_rw_mask (void)
{
  /* We force (const (plus (symbol) (const_int))) to memory when the
     const_int doesn't fit in a 14-bit integer.  The SOM linker can't
     handle this construct in read-only memory and we want to avoid
     this for ELF.  So, we always force an RTX needing relocation to
     the data section.  */
  return 3;
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
	  || int_size_in_bytes (type) <= 0);
}
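/* For example, with the 32-bit SOM ABI an 8 byte structure is returned
   in registers while a 12 byte structure is returned in memory, as is
   any variable-sized type for which int_size_in_bytes returns -1.  */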
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */

/* Vector of extern_symbol pointers.  */
static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  gcc_assert (file == asm_out_file);
  extern_symbol p = {decl, name};
  vec_safe_push (extern_symbols, p);
}
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
	  && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
	ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  vec_free (extern_symbols);
}
#endif
/* Return true if a change from mode FROM to mode TO for a register
   in register class RCLASS is invalid.  */

bool
pa_cannot_change_mode_class (machine_mode from, machine_mode to,
			     enum reg_class rclass)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
/* Length in units of the trampoline instruction code.  */

#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))


/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
   and then branches to the specified routine.

   This code template is copied from text segment to stack location
   and then patched with pa_trampoline_init to contain valid values,
   and then entered as a subroutine.

   It is best to keep this as small as possible to avoid having to
   flush multiple lines in the cache.  */

static void
pa_asm_trampoline_template (FILE *f)
{
  if (!TARGET_64BIT)
    {
      fputs ("\tldw 36(%r22),%r21\n", f);
      fputs ("\tbb,>=,n %r21,30,.+16\n", f);
      if (ASSEMBLER_DIALECT == 0)
	fputs ("\tdepi 0,31,2,%r21\n", f);
      else
	fputs ("\tdepwi 0,31,2,%r21\n", f);
      fputs ("\tldw 4(%r21),%r19\n", f);
      fputs ("\tldw 0(%r21),%r21\n", f);
      if (TARGET_PA_20)
	{
	  fputs ("\tbve (%r21)\n", f);
	  fputs ("\tldw 40(%r22),%r29\n", f);
	  fputs ("\t.word 0\n", f);
	  fputs ("\t.word 0\n", f);
	}
      else
	{
	  fputs ("\tldsid (%r21),%r1\n", f);
	  fputs ("\tmtsp %r1,%sr0\n", f);
	  fputs ("\tbe 0(%sr0,%r21)\n", f);
	  fputs ("\tldw 40(%r22),%r29\n", f);
	}
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
    }
  else
    {
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\tmfia %r31\n", f);
      fputs ("\tldd 24(%r31),%r1\n", f);
      fputs ("\tldd 24(%r1),%r27\n", f);
      fputs ("\tldd 16(%r1),%r1\n", f);
      fputs ("\tbve (%r1)\n", f);
      fputs ("\tldd 32(%r31),%r31\n", f);
      fputs ("\t.dword 0  ; fptr\n", f);
      fputs ("\t.dword 0  ; static link\n", f);
    }
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 36.
   Move the static chain value to trampoline template at offset 40.
   Move the trampoline address to trampoline template at offset 44.
   Move r19 to trampoline template at offset 48.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */

static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 36);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 40);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 44);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_andsi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
							    r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }

#ifdef HAVE_ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
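/* A minimal sketch of what exercises this code (GNU C, illustration
   only, not built): taking the address of a nested function forces a
   trampoline onto the stack, and the resulting pointer is really a
   plabel that goes through the code sequence initialized above.  */
#if 0
extern void use_fn (int (*) (int));

void
trampoline_example (int base)
{
  int add_base (int x) { return x + base; }	/* needs the static chain */

  use_fn (add_base);	/* address taken: a trampoline is built */
}
#endif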
/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 44.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
  return addr;
}
static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
  return x;
}

static rtx
pa_internal_arg_pointer (void)
{
  /* The argument pointer and the hard frame pointer are the same in
     the 32-bit runtime, so we don't need a copy.  */
  if (TARGET_64BIT)
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.  */

static bool
pa_can_eliminate (const int from, const int to)
{
  /* The argument pointer cannot be eliminated in the 64-bit runtime.  */
  if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
    return false;

  return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
pa_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
      && to == STACK_POINTER_REGNUM)
    offset = -pa_compute_frame_size (get_frame_size (), 0);
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
static void
pa_conditional_register_usage (void)
{
  int i;

  if (!TARGET_64BIT && !TARGET_PA_11)
    {
      for (i = 56; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
      for (i = 33; i < 56; i += 2)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
    {
      for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Target hook for c_mode_for_suffix.  */

static machine_mode
pa_c_mode_for_suffix (char suffix)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      if (suffix == 'q')
	return TFmode;
    }

  return VOIDmode;
}
/* Target hook for function_section.  */

static section *
pa_function_section (tree decl, enum node_frequency freq,
		     bool startup, bool exit)
{
  /* Put functions in text section if target doesn't have named sections.  */
  if (!targetm_common.have_named_sections)
    return text_section;

  /* Force nested functions into the same section as the containing
     function.  */
  if (decl
      && DECL_SECTION_NAME (decl) == NULL
      && DECL_CONTEXT (decl) != NULL_TREE
      && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
      && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
    return function_section (DECL_CONTEXT (decl));

  /* Otherwise, use the default function section.  */
  return default_function_section (decl, freq, startup, exit);
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   In 64-bit mode, we reject CONST_DOUBLES.  We also reject CONST_INTS
   that need more than three instructions to load prior to reload.  This
   limit is somewhat arbitrary.  It takes three instructions to load a
   CONST_INT from memory but two are memory accesses.  It may be better
   to increase the allowed range for CONST_INTS.  We may also be able
   to handle CONST_DOUBLES.  */

static bool
pa_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
    return false;

  if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
    return false;

  /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
     legitimate constants.  The other variants can't be handled by
     the move patterns after reload starts.  */
  if (tls_referenced_p (x))
    return false;

  if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (TARGET_64BIT
      && HOST_BITS_PER_WIDE_INT > 32
      && GET_CODE (x) == CONST_INT
      && !reload_in_progress
      && !reload_completed
      && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
      && !pa_cint_ok_for_move (INTVAL (x)))
    return false;

  if (function_label_operand (x, mode))
    return false;

  return true;
}
/* Implement TARGET_SECTION_TYPE_FLAGS.  */

static unsigned int
pa_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags;

  flags = default_section_type_flags (decl, name, reloc);

  /* Function labels are placed in the constant pool.  This can
     cause a section conflict if decls are put in ".data.rel.ro"
     or ".data.rel.ro.local" using the __attribute__ construct.  */
  if (strcmp (name, ".data.rel.ro") == 0
      || strcmp (name, ".data.rel.ro.local") == 0)
    flags |= SECTION_WRITE | SECTION_RELRO;

  return flags;
}
/* pa_legitimate_address_p recognizes an RTL expression that is a
   valid memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
   REG+REG, and REG+(REG*SCALE).  The indexed address forms are only
   available with floating point loads and stores, and integer loads.
   We get better code by allowing indexed addresses in the initial
   RTL generation.

   The acceptance of indexed addresses as legitimate implies that we
   must provide patterns for doing indexed integer stores, or the move
   expanders must force the address of an indexed store to a register.
   We have adopted the latter approach.

   Another function of pa_legitimate_address_p is to ensure that
   the base register is a valid pointer for indexed instructions.
   On targets that have non-equivalent space registers, we have to
   know at the time of assembler output which register in a REG+REG
   pair is the base register.  The REG_POINTER flag is sometimes lost
   in reload and the following passes, so it can't be relied on during
   code generation.  Thus, we either have to canonicalize the order
   of the registers in REG+REG indexed addresses, or treat REG+REG
   addresses separately and provide patterns for both permutations.

   The latter approach requires several hundred additional lines of
   code in pa.md.  The downside to canonicalizing is that a PLUS
   in the wrong order can't combine to make a scaled indexed
   memory operand.  As we won't need to canonicalize the operands if
   the REG_POINTER lossage can be fixed, it seems better to canonicalize.

   We initially break out scaled indexed addresses in canonical order
   in pa_emit_move_sequence.  LEGITIMIZE_ADDRESS also canonicalizes
   scaled indexed addresses during RTL generation.  However, fold_rtx
   has its own opinion on how the operands of a PLUS should be ordered.
   If one of the operands is equivalent to a constant, it will make
   that operand the second operand.  As the base register is likely to
   be equivalent to a SYMBOL_REF, we have made it the second operand.

   pa_legitimate_address_p accepts REG+REG as legitimate when the
   operands are in the order INDEX+BASE on targets with non-equivalent
   space registers, and in any order on targets with equivalent space
   registers.  It accepts both MULT+BASE and BASE+MULT for scaled indexing.

   We treat a SYMBOL_REF as legitimate if it is part of the current
   function's constant-pool, because such addresses can actually be
   output as REG+SMALLINT.  */

static bool
pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  if ((REG_P (x)
       && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
		  : REG_OK_FOR_BASE_P (x)))
      || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
	   || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
	  && REG_P (XEXP (x, 0))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
		     : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
    return true;

  if (GET_CODE (x) == PLUS)
    {
      rtx base = 0, index = 0;

      /* For REG+REG, the base register should be in XEXP (x, 1),
	 so check it first.  */
      if (REG_P (XEXP (x, 1))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
		     : REG_OK_FOR_BASE_P (XEXP (x, 1))))
	base = XEXP (x, 1), index = XEXP (x, 0);
      else if (REG_P (XEXP (x, 0))
	       && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
			  : REG_OK_FOR_BASE_P (XEXP (x, 0))))
	base = XEXP (x, 0), index = XEXP (x, 1);
      else
	return false;

      if (GET_CODE (index) == CONST_INT)
	{
	  if (INT_5_BITS (index))
	    return true;

	  /* When INT14_OK_STRICT is false, a secondary reload is needed
	     to adjust the displacement of SImode and DImode floating point
	     instructions but this may fail when the register also needs
	     reloading.  So, we return false when STRICT is true.  We
	     also reject long displacements for float mode addresses since
	     the majority of accesses will use floating point instructions
	     that don't support 14-bit offsets.  */
	  if (!INT14_OK_STRICT
	      && (strict || !(reload_in_progress || reload_completed))
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  return base14_operand (index, mode);
	}

      if (!TARGET_DISABLE_INDEXING
	  /* Only accept the "canonical" INDEX+BASE operand order
	     on targets with non-equivalent space registers.  */
	  && (TARGET_NO_SPACE_REGS
	      ? REG_P (index)
	      : (base == XEXP (x, 1) && REG_P (index)
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (base))
		     || REG_POINTER (base))
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (index))
		     || !REG_POINTER (index))))
	  && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
		     : REG_OK_FOR_INDEX_P (index))
	  && borx_reg_operand (base, Pmode)
	  && borx_reg_operand (index, Pmode))
	return true;

      if (!TARGET_DISABLE_INDEXING
	  && GET_CODE (index) == MULT
	  && MODE_OK_FOR_SCALED_INDEXING_P (mode)
	  && REG_P (XEXP (index, 0))
	  && GET_MODE (XEXP (index, 0)) == Pmode
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
		     : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
	  && GET_CODE (XEXP (index, 1)) == CONST_INT
	  && INTVAL (XEXP (index, 1))
	     == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
	  && borx_reg_operand (base, Pmode))
	return true;

      return false;
    }

  if (GET_CODE (x) == LO_SUM)
    {
      rtx y = XEXP (x, 0);

      if (GET_CODE (y) == SUBREG)
	y = SUBREG_REG (y);

      if (REG_P (y)
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
		     : REG_OK_FOR_BASE_P (y)))
	{
	  /* Needed for -fPIC */
	  if (mode == Pmode
	      && GET_CODE (XEXP (x, 1)) == UNSPEC)
	    return true;

	  if (!INT14_OK_STRICT
	      && (strict || !(reload_in_progress || reload_completed))
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  if (CONSTANT_P (XEXP (x, 1)))
	    return true;
	}
      return false;
    }

  if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
    return true;

  return false;
}
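
/* Illustrative sketch, not part of the original sources: the
   displacement classes tested above, written out as standalone
   predicates.  INT_5_BITS accepts the signed 5-bit range -16..15 used
   by short displacements, and base14_operand builds on the signed
   14-bit range -8192..8191 used by long displacements.  For unscaled
   indexing on targets with non-equivalent space registers, only the
   canonical order (plus (reg index) (reg/f base)) is accepted, not
   (plus (reg/f base) (reg index)).  The helper names below are
   hypothetical and exist only for this example.  */
#if 0
static int
example_val_5_bits_p (long v)
{
  /* Same test as VAL_5_BITS_P: biasing by 16 maps -16..15 to 0..31.  */
  return (unsigned long) (v + 0x10) < 0x20;
}

static int
example_val_14_bits_p (long v)
{
  /* Same test as VAL_14_BITS_P: biasing by 8192 maps -8192..8191
     to 0..16383.  */
  return (unsigned long) (v + 0x2000) < 0x4000;
}
#endif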

/* Look for machine dependent ways to make the invalid address AD a
   valid address.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This makes reload inheritance and reload_cse work better since Z
   can be reused.

   There may be more opportunities to improve code with this hook.  */

rtx
pa_legitimize_reload_address (rtx ad, machine_mode mode,
			      int opnum, int type,
			      int ind_levels ATTRIBUTE_UNUSED)
{
  long offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
				      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;

  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP ((new_rtx), 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~mask) + mask + 1;
      else
	newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
	  && (GET_MODE_CLASS (mode) == MODE_FLOAT
	      || (TARGET_64BIT && (mode) == DImode)))
	newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
	{
	  temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
			       GEN_INT (newoffset));
	  ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
	  push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  return ad;
	}
    }

  return ad;
}
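
/* Worked example, illustrative only: for a float mode access with
   !INT14_OK_STRICT, mask is 0x1f above.  Given memory (X + 123),
   123 & 0x1f == 27 >= 16, so we round up and newoffset ==
   (123 & ~0x1f) + 0x20 == 128.  The address is rewritten as
   (X + 128) + (-5); the new base X + 128 is pushed as a reload, and
   the residual displacement -5 fits the 5-bit field of the float
   loads and stores.  A standalone sketch of the rounding step
   (hypothetical helper name):  */
#if 0
static long
example_round_offset (long offset, long mask)
{
  /* Round up when the masked bits are at least halfway to mask + 1,
     mirroring the code above; example_round_offset (123, 0x1f) == 128.  */
  if ((offset & mask) >= ((mask + 1) / 2))
    return (offset & ~mask) + mask + 1;
  return offset & ~mask;
}
#endif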

/* Output address vector.  */

void
pa_output_addr_vec (rtx lab, rtx body)
{
  int idx, vlen = XVECLEN (body, 0);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
	(asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
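
/* Example output, illustrative only (the exact label and directive
   spelling comes from ASM_OUTPUT_ADDR_VEC_ELT and may differ): for a
   three-entry table the code above emits roughly

	L$0012:
		.begin_brtab
		.word	L$0040
		.word	L$0041
		.word	L$0042
		.end_brtab

   where the .begin_brtab/.end_brtab bracketing marks the branch table
   for GAS and is omitted for other assemblers.  */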

/* Output address difference vector.  */

void
pa_output_addr_diff_vec (rtx lab, rtx body)
{
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
	(asm_out_file,
	 body,
	 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
	 CODE_LABEL_NUMBER (base));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
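
/* Example output, illustrative only: a difference vector emits each
   case label's distance from the base label, roughly

	L$0012:
		.begin_brtab
		.word	L$0040-L$0013
		.word	L$0041-L$0013
		.word	L$0042-L$0013
		.end_brtab

   which keeps the table position-independent.  */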