/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
			    const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					enum machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
						   enum machine_mode, int *,
						   const_tree, int);
static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
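
/* A small usage sketch (hypothetical invocation, not part of GCC):
   -mfixed-range=fr4-fr31 hands fix_range the string below, which marks
   fr4 through fr31 as fixed and call-used; since that covers every
   allocatable FP register, MASK_DISABLE_FPREGS is also set.  */
#if 0
fix_range ("fr4-fr31");
#endif
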
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
	  && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
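
/* A standalone sketch of the same test (illustrative only, not part of
   GCC; assumes a 64-bit HOST_WIDE_INT).  ldil supplies bits 31..11 of a
   value, so the low 11 bits must already be zero, and bits 63..31 must
   all agree so the value survives 32->64-bit sign extension:

     ldil_ok (0x12345800) => 1   (low 11 bits clear, bit 31 clear)
     ldil_ok (0x12345678) => 0   (low 11 bits nonzero)  */
#if 0
#include <stdint.h>

static int
ldil_ok (int64_t ival)
{
  int64_t x = ival & (((int64_t) -1 << 31) | 0x7ff);
  return x == 0 || x == ((int64_t) -1 << 31);
}
#endif
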
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
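
/* Worked example of the check above (author's illustration; 64-bit
   unsigned arithmetic assumed).  For x = 0x1f0, a run of five 1s
   starting at bit 4:

     lsb_mask = x & -x                         = 0x10
     t = ((x >> 4) + lsb_mask) & ~(lsb_mask-1) = (0x1f + 0x10) & ~0xf
                                               = 0x20

   0x20 is a power of two, so 0x1f0 is accepted: zdepi can deposit the
   sign-extended 5-bit value -1 (all ones) at position 4 to form it.  */
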
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
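
/* Worked example (author's illustration; CONST_INT values are
   sign-extended to 64 bits).  For mask = 0xfffffffffffff00f, a
   1..10..01..1 pattern: ~mask = 0xff0, and adding its lowest set bit
   (0x10) yields 0x1000, a power of two, so the mask is accepted and a
   single depi/extru can implement the AND.  */
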
/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
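
/* Worked example (author's illustration): for MASK = 0xff0, a single
   contiguous run of ones, mask & -mask = 0x10 and 0xff0 + 0x10 =
   0x1000, a power of two, so depi can set those bits in one
   instruction.  */
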
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}

      crtl->uses_pic_offset_table = 1;
      return reg;
    }

  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
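
/* For a 32-bit PIC reference to a data symbol, the RTL built above is
   a DLT lookup that, to the author's understanding (an illustration,
   not something this file emits verbatim), assembles to roughly:

	addil LT'sym,%r19	; %r1 = %r19 + left part of DLT offset
	ldw RT'sym(%r1),%r28	; load sym's address from the DLT slot

   with %r19 as the PIC offset table register.  */
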
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
      case TLS_MODEL_GLOBAL_DYNAMIC:
	tmp = gen_reg_rtx (Pmode);
	if (flag_pic)
	  emit_insn (gen_tgd_load_pic (tmp, addr));
	else
	  emit_insn (gen_tgd_load (tmp, addr));
	ret = hppa_tls_call (tmp);
	break;

      case TLS_MODEL_LOCAL_DYNAMIC:
	ret = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	start_sequence ();
	if (flag_pic)
	  emit_insn (gen_tld_load_pic (tmp, addr));
	else
	  emit_insn (gen_tld_load (tmp, addr));
	t1 = hppa_tls_call (tmp);
	insn = get_insns ();
	end_sequence ();
	t2 = gen_reg_rtx (Pmode);
	emit_libcall_block (insn, t2, t1,
			    gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					    UNSPEC_TLSLDBASE));
	emit_insn (gen_tld_offset_load (ret, addr, t2));
	break;

      case TLS_MODEL_INITIAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	if (flag_pic)
	  emit_insn (gen_tie_load_pic (tmp, addr));
	else
	  emit_insn (gen_tie_load (tmp, addr));
	emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
	break;

      case TLS_MODEL_LOCAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	emit_insn (gen_tle_load (ret, addr, tp));
	break;

      default:
	gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (pa_tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
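
/* Standalone sketch of the offset-rounding rule used above for the
   MODE_INT mask (illustrative only; not part of GCC).  For example,
   round_offset (0x5432) => 0x4000, so memory (X + 0x5432) becomes
   memory ((X + 0x4000) + 0x1432), and 0x1432 fits in 14 bits.  */
#if 0
static int
round_offset (int offset)
{
  int mask = 0x3fff;

  if ((offset & mask) >= ((mask + 1) / 2))
    return (offset & ~mask) + mask + 1;	/* Round up.  */
  else
    return offset & ~mask;		/* Round down.  */
}
#endif
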
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case SYMBOL_REF:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return pa_tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && (MEM_P (operand1)
	  || (GET_CODE (operand1) == SUBREG
	      && MEM_P (XEXP (operand1, 0))))
      && !floating_point_store_memory_operand (operand1, mode))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (reg_plus_base_memory_operand (operand1, mode)
	  && !(TARGET_PA_20
	       && !TARGET_ELF32
	       && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && (MEM_P (operand0)
	       || (GET_CODE (operand0) == SUBREG
		   && MEM_P (XEXP (operand0, 0))))
	   && !floating_point_store_memory_operand (operand0, mode))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (reg_plus_base_memory_operand (operand0, mode)
	  && !(TARGET_PA_20
	       && !TARGET_ELF32
	       && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.  For the most part,
     this is only necessary for SImode and DImode.

     Use scratch_reg to hold the address of the memory location.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      if (operand1 == CONST0_RTX (mode))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      pa_emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     operand0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      /* Legitimize TLS symbol references.  This happens for references
	 that aren't a legitimate constant.  */
      if (PA_SYMBOL_REF_TLS_P (operand1))
	operand1 = legitimize_tls_address (operand1);

      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && pa_cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  */
	      if (decl)
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  type = strip_array_types (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode))
      || function_label_operand (operand1, VOIDmode)
      || pa_tls_referenced_p (operand1))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0),
					  VOIDmode))
	      || function_label_operand (operand1, VOIDmode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      pa_emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      pa_emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;
	  if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress))
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  pa_emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
	    }
	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
      else if (GET_CODE (operand1) != CONST_INT
	       || !pa_cint_ok_for_move (INTVAL (operand1)))
	{
	  rtx insn, temp;
	  rtx op1 = operand1;
	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;
	  int insert = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	  if (TARGET_64BIT
	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	    {
	      HOST_WIDE_INT nval;

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we can
		 use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32-bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
	      if (value != nval)
		{
#if HOST_BITS_PER_WIDE_INT > 32
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
#endif
		  insert = 1;
		  value = nval;
		  operand1 = GEN_INT (nval);
		}
	    }
	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	  else
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !insert)
	    {
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
		 high part.  */
	      HOST_WIDE_INT low = value & 0x3fff;
	      HOST_WIDE_INT high = value & ~ 0x3fff;

	      if (low >= 0x2000)
		{
		  if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
		    high += 0x2000;
		  else
		    high += 0x4000;
		}

	      low = value - high;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
	      operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	    }
	  else
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, temp,
				      gen_rtx_HIGH (mode, operand1)));
	      operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
	    }
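	  /* Worked example of the PLUS split above (values chosen for
	     illustration, not from the original source): for value ==
	     0x12347678 in SImode, low = value & 0x3fff = 0x3678 and
	     high = 0x12344000.  Since low >= 0x2000, high is bumped by
	     0x4000 to 0x12348000 and the low part becomes
	     value - high = -0x988, keeping the displacement within the
	     signed 14-bit range of "ldo".  */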
	  insn = emit_move_insn (operands[0], operands[1]);

	  /* Now insert the most significant 32 bits of the value
	     into the register.  When we don't have a second register
	     available, it could take up to nine instructions to load
	     a 64-bit integer constant.  Prior to reload, we force
	     constants that would take more than three instructions
	     to load to the constant pool.  During and after reload,
	     we have to handle all possible values.  */
	  if (insert)
	    {
	      /* Use a HIGH/LO_SUM/INSV sequence if we have a second
		 register and the value to be inserted is outside the
		 range that can be loaded with three depdi instructions.  */
	      if (temp != operand0 && (insv >= 16384 || insv < -16384))
		{
		  operand1 = GEN_INT (insv);

		  emit_insn (gen_rtx_SET (VOIDmode, temp,
					  gen_rtx_HIGH (mode, operand1)));
		  emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
		  if (mode == DImode)
		    emit_insn (gen_insvdi (operand0, GEN_INT (32),
					   const0_rtx, temp));
		  else
		    emit_insn (gen_insvsi (operand0, GEN_INT (32),
					   const0_rtx, temp));
		}
	      else
		{
		  int len = 5, pos = 27;

		  /* Insert the bits using the depdi instruction.  */
		  while (pos >= 0)
		    {
		      HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
		      HOST_WIDE_INT sign = v5 < 0;

		      /* Left extend the insertion.  */
		      insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
		      while (pos > 0 && (insv & 1) == sign)
			{
			  insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
			  len += 1;
			  pos -= 1;
			}

		      if (mode == DImode)
			emit_insn (gen_insvdi (operand0, GEN_INT (len),
					       GEN_INT (pos), GEN_INT (v5)));
		      else
			emit_insn (gen_insvsi (operand0, GEN_INT (len),
					       GEN_INT (pos), GEN_INT (v5)));

		      len = pos > 0 && pos < 5 ? pos : 5;
		      pos -= len;
		    }
		}
	    }

	  set_unique_reg_note (insn, REG_EQUAL, op1);

	  return 1;
	}
    }
  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
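/* Note on the depdi insertion loop in pa_emit_move_sequence above: each
   pass deposits a signed 5-bit chunk (V5) and then widens the deposited
   field across any run of bits that merely copy the chunk's sign bit, so
   values whose upper bits are a long sign extension need fewer insertions
   (roughly how a value in [-16384, 16384) fits in three depdi insns).  */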
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
pa_reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
      break;

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
	tree value;
	unsigned HOST_WIDE_INT ix;

	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
	  if (value)
	    reloc |= pa_reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */

const char *
pa_singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
	 bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
	return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
	return "ldil L'%1,%0";
      else if (pa_zdepi_cint_p (intval))
	return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
	return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}
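/* Illustrative template choices from the function above (example values):
   a 14-bit constant such as 42 comes out as "ldi 42,%r"; a constant like
   0x12345000 whose eleven low bits are zero uses a single "ldil"; a
   constant expressible as a deposited 5-bit field uses "zdepi"; anything
   else, e.g. 0x12345678, needs the two-insn "ldil L'"/"ldo R'" pair.  */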
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32 - lsb; len++)
	{
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
	    break;
	}

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}
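/* Example (values chosen for illustration): for IMM == 0x1e0 the least
   significant set bit is bit 5 and the shifted immediate is 0xf, a
   non-negative 5-bit field, so OP[0] = 0xf, OP[1] = 31 - 5 = 26 and
   OP[2] = 4.  OP[1] is the position of the field's rightmost bit in
   PA's numbering, where bit 0 is the most significant.  */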
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len, maxlen;

  maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < maxlen; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < maxlen - lsb; len++)
	{
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
	    break;
	}

      /* Extend length if host is narrow and IMM is negative.  */
      if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
	len += 32;

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb;
  op[2] = len;
}
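/* The 64-bit variant follows the same pattern as the 32-bit one above:
   e.g. (illustrative) IMM == 0x1e00 yields OP[0] = 0xf,
   OP[1] = 63 - 9 = 54 and OP[2] = 4 for a "depdi,z" deposit.  The extra
   length adjustment covers negative immediates on a 32-bit host, where
   the sign extension continues past HOST_WIDE_INT.  */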
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

const char *
pa_output_move_double (rtx *operands)
{
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
    {
      if (FP_REG_P (operands[0]))
	{
	  output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
	  output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
	  return "{fldds|fldd} -16(%%sp),%0";
	}
      else
	{
	  output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
	  output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
	  return "{ldws|ldw} -12(%%sp),%R0";
	}
    }

  /* Handle auto decrementing and incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do it better when we learn
     this port about the general inc/dec addressing of PA.
     (This was written by tege.  Chide him if it doesn't work.)  */

  if (optype0 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to
	     save a register file writeback)  */
	  if (GET_CODE (addr) == POST_INC)
	    return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
	  return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to save a
	     register file writeback)  */
	  if (GET_CODE (addr) == PRE_INC)
	    return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
	  return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
	}
    }
  if (optype1 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback)  */
	      if (GET_CODE (addr) == POST_INC)
		return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
	      return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == POST_INC)
		return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
	      return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback)  */
	      if (GET_CODE (addr) == PRE_INC)
		return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
	      return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == PRE_INC)
		return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
	      return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PLUS
	       && GET_CODE (XEXP (addr, 0)) == MULT)
	{
	  rtx xoperands[4];
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      xoperands[0] = high_reg;
	      xoperands[1] = XEXP (addr, 1);
	      xoperands[2] = XEXP (XEXP (addr, 0), 0);
	      xoperands[3] = XEXP (XEXP (addr, 0), 1);
	      output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
			       xoperands);
	      return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
	    }
	  else
	    {
	      xoperands[0] = high_reg;
	      xoperands[1] = XEXP (addr, 1);
	      xoperands[2] = XEXP (XEXP (addr, 0), 0);
	      xoperands[3] = XEXP (XEXP (addr, 0), 1);
	      output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
			       xoperands);
	      return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
	    }
	}
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

	mem -> register where the first half of the destination register
	is the same register used in the memory's address.  Reload
	can create such insns.

	mem in this case will be either register indirect or register
	indirect plus a valid offset.

	register -> register move where REGNO(dst) == REGNO(src + 1)
	someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
			    operands[1], 0))
    {
      /* Do the late half first.  */
      if (addreg1)
	output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (pa_singlemove_string (latehalf), latehalf);

      /* Then clobber.  */
      if (addreg1)
	output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return pa_singlemove_string (operands);
    }

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    {
      output_asm_insn (pa_singlemove_string (latehalf), latehalf);
      return pa_singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (pa_singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (pa_singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo -4(%0),%0", &addreg1);

  return "";
}
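/* Overlap example for the reordering above (illustrative registers):
   loading the pair %r4/%r5 from 0(%r4) must not clobber the address
   before the second word is fetched, so the late half is moved first:

	ldw 4(%r4),%r5
	ldw 0(%r4),%r4

   whereas a non-overlapping move does the low-numbered word first.  */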
const char *
pa_output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
	  || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
	output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
	output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
	 arbitrary address here including pre/post increment/decrement.

	 so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}
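/* For example, given ADDR == (plus (reg %r3) (const_int 8)), the loop
   above steps into the REG operand and returns %r3, the register that
   can be temporarily incremented to address the second word.  */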
/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

const char *
pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[5]);
  unsigned long n_bytes = INTVAL (operands[4]);

  /* We can't move more than a word at a time because the PA
     has no longer integer move insns.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 16);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("ldd,ma 8(%1),%3", operands);
	output_asm_insn ("ldd,ma 8(%1),%6", operands);
	output_asm_insn ("std,ma %3,8(%0)", operands);
	output_asm_insn ("addib,>= -16,%2,.-12", operands);
	output_asm_insn ("std,ma %6,8(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 16 != 0)
	  {
	    operands[4] = GEN_INT (n_bytes % 8);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("ldd,ma 8(%1),%3", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("ldd 0(%1),%6", operands);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("std,ma %3,8(%0)", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("stdby,e %6,%4(%0)", operands);
	  }
	return "";

      case 4:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 8);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
	output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	output_asm_insn ("addib,>= -8,%2,.-12", operands);
	output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 8 != 0)
	  {
	    operands[4] = GEN_INT (n_bytes % 4);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("ldw 0(%1),%6", operands);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
	  }
	return "";

      case 2:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 4);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
	output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	output_asm_insn ("addib,>= -4,%2,.-12", operands);
	output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 4 != 0)
	  {
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("ldb 0(%1),%6", operands);
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("stb %6,0(%0)", operands);
	  }
	return "";

      case 1:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 2);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
	output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
	output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
	output_asm_insn ("addib,>= -2,%2,.-12", operands);
	output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 2 != 0)
	  {
	    output_asm_insn ("ldb 0(%1),%3", operands);
	    output_asm_insn ("stb %3,0(%0)", operands);
	  }
	return "";

      default:
	gcc_unreachable ();
    }
}
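/* Worked example (illustrative sizes): for n_bytes == 23 with word
   alignment, the 4-byte-aligned loop above copies 16 bytes in two
   iterations (counter preset to 15, decremented by 8 until negative);
   the residual code then moves one more word (23 % 8 >= 4) and finishes
   with "stbys,e" storing the final 23 % 4 == 3 bytes.  */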
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_movmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than four bytes at a time because the PA
     has no longer integer move insns.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns += 2;

      if ((n_bytes % align) != 0)
	n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
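/* Example count (illustrative): align == 4 and n_bytes == 23 gives the
   6-insn loop, plus 2 insns because 23 % 8 >= 4 and 2 more because
   23 % 4 != 0, i.e. 10 insns or 40 bytes of code.  */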
/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

const char *
pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[3]);
  unsigned long n_bytes = INTVAL (operands[2]);

  /* We can't clear more than a word at a time because the PA
     has no longer integer move insns.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 16);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("std,ma %%r0,8(%0)", operands);
	output_asm_insn ("addib,>= -16,%1,.-4", operands);
	output_asm_insn ("std,ma %%r0,8(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 16 != 0)
	  {
	    operands[2] = GEN_INT (n_bytes % 8);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("std,ma %%r0,8(%0)", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
	  }
	return "";

      case 4:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 8);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	output_asm_insn ("addib,>= -8,%1,.-4", operands);
	output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 8 != 0)
	  {
	    operands[2] = GEN_INT (n_bytes % 4);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
	  }
	return "";

      case 2:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 4);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	output_asm_insn ("addib,>= -4,%1,.-4", operands);
	output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 4 != 0)
	  {
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("stb %%r0,0(%0)", operands);
	  }
	return "";

      case 1:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 2);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
	output_asm_insn ("addib,>= -2,%1,.-4", operands);
	output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 2 != 0)
	  output_asm_insn ("stb %%r0,0(%0)", operands);

	return "";

      default:
	gcc_unreachable ();
    }
}
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no longer integer move insns.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns++;

      if ((n_bytes % align) != 0)
	n_insns++;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
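/* Example count (illustrative): align == 4 and n_bytes == 23 gives the
   4-insn loop plus one insn for each residual test, i.e. 6 insns or
   24 bytes; smaller than the copy case since no loads are needed.  */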
const char *
pa_output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
	if ((mask & (1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < 32; ls1++)
	if ((mask & (1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < 32; ms0++)
	if ((mask & (1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == 32);

      if (ls1 == 32)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "{extru|extrw,u} %1,31,%2,%0";
	}
      else
	{
	  /* We could use this `depi' for the case above as well, but `depi'
	     requires one more register file access than an `extru'.  */

	  p = 31 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "{depi|depwi} 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}
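/* Mask examples (illustrative): operands[2] == 0x7f selects the
   "extru %1,31,7,%0" form (a right-aligned field), while a mask such as
   0xffffff0f, whose zeros form one interior 4-bit field ending at PA bit
   position 27, selects "depi 0,27,4,%0" to clear that field.  */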
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "extrd,u %1,63,%2,%0";
	}
      else
	{
	  /* We could use this `depi' for the case above as well, but `depi'
	     requires one more register file access than an `extru'.  */

	  p = 63 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "depdi 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}
const char *
pa_output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}
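/* Mask example (illustrative): operands[2] == 0x00000ff0, a single run
   of eight ones whose rightmost bit is bit 4, yields p == 27 and
   len == 8, so the insn emitted is "depi -1,27,8,%0".  */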
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
	      || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}
/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Output an ascii string.  */
void
pa_output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
	{
	  register unsigned int c = (unsigned char) p[i + io];

	  if (c == '\"' || c == '\\')
	    partial_output[co++] = '\\';
	  if (c >= ' ' && c < 0177)
	    partial_output[co++] = c;
	  else
	    {
	      unsigned int hexd;
	      partial_output[co++] = '\\';
	      partial_output[co++] = 'x';
	      hexd = c / 16 - 0 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	      hexd = c % 16 - 0 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	    }
	}
      if (chars_output + co > 243)
	{
	  fputs ("\"\n\t.STRING \"", file);
	  chars_output = 0;
	}
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}
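/* Example output (illustrative): the 4-byte input "a\"b\n" is emitted as

	.STRING "a\"b\x0a"

   with the quote escaped and the newline rendered as a \xnn pair; a new
   .STRING directive is started whenever a line nears the assembler's
   input-length limit.  */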
/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD
   notes lying around.

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch
   instructions.  */
static void
remove_useless_addtr_insns (int check_notes)
{
  rtx insn;
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */
  if (optimize > 0)
    {
      int fcmp_count = 0;
      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
	 instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp;

	  /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
	  if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* If the destination is CCFP, then we've found an fcmp insn.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
	    {
	      fcmp_count++;
	      continue;
	    }

	  tmp = PATTERN (insn);
	  /* If this is an fbranch instruction, bump the fbranch counter.  */
	  if (GET_CODE (tmp) == SET
	      && SET_DEST (tmp) == pc_rtx
	      && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
	      && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
	      && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
	      && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
	    {
	      fbranch_count++;
	      continue;
	    }
	}

      /* Find all floating point compare + branch insns.  If possible,
	 reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp, next;

	  /* Ignore anything that isn't an INSN.  */
	  if (! NONJUMP_INSN_P (insn))
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* The destination must be CCFP, which is register zero.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
	    continue;

	  /* INSN should be a set of CCFP.

	     See if the result of this insn is used in a reversed FP
	     conditional branch.  If so, reverse our condition and
	     the branch.  Doing so avoids useless add,tr insns.  */
	  next = next_insn (insn);
	  while (next)
	    {
	      /* Jumps, calls and labels stop our search.  */
	      if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
		break;

	      /* As does another fcmp insn.  */
	      if (NONJUMP_INSN_P (next)
		  && GET_CODE (PATTERN (next)) == SET
		  && GET_CODE (SET_DEST (PATTERN (next))) == REG
		  && REGNO (SET_DEST (PATTERN (next))) == 0)
		break;

	      next = next_insn (next);
	    }

	  /* Is NEXT_INSN a branch?  */
	  if (next && JUMP_P (next))
	    {
	      rtx pattern = PATTERN (next);

	      /* If it is a reversed fp conditional branch (e.g. uses add,tr)
		 and CCFP dies, then reverse our conditional and the branch
		 to avoid the add,tr.  */
	      if (GET_CODE (pattern) == SET
		  && SET_DEST (pattern) == pc_rtx
		  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
		  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
		  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
		  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
		  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
		  && (fcmp_count == fbranch_count
		      || (check_notes
			  && find_regno_note (next, REG_DEAD, 0))))
		{
		  /* Reverse the branch.  */
		  tmp = XEXP (SET_SRC (pattern), 1);
		  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
		  XEXP (SET_SRC (pattern), 2) = tmp;
		  INSN_CODE (next) = -1;

		  /* Reverse our condition.  */
		  tmp = PATTERN (insn);
		  PUT_CODE (XEXP (tmp, 1),
			    (reverse_condition_maybe_unordered
			     (GET_CODE (XEXP (tmp, 1)))));
		}
	    }
	}
    }

  pass = !pass;
}
/* You may have trouble believing this, but this is the 32 bit HP-PA
   stack layout.  Wow.

   Offset		Contents

   Variable arguments	(optional; any number may be allocated)

   SP-(4*(N+9))		arg word N
	:		    :
   SP-56		arg word 5
   SP-52		arg word 4

   Fixed arguments	(must be allocated; may remain unused)

   SP-48		arg word 3
   SP-44		arg word 2
   SP-40		arg word 1
   SP-36		arg word 0

   Frame Marker

   SP-32		External Data Pointer (DP)
   SP-28		External sr4
   SP-24		External/stub RP (RP')
   SP-20		Current RP
   SP-16		Static Link
   SP-12		Clean up
   SP-8			Calling Stub RP (RP'')
   SP-4			Previous RP

   Top of Frame

   SP-0			Stack Pointer (points to next available address)

*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   things simple.

   Offset		Contents

   SP (FP')		Previous FP
   SP + 4		Alignment filler (sigh)
   SP + 8		Space for locals reserved here.
	.
	.
	.
   SP + n		All call saved register used.
	.
	.
	.
   SP + o		All call saved fp registers used.
	.
	.
	.
   SP + p (SP')		points to next available address.

*/

/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;
/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.  */

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx insn, dest, src, basereg;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	{
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (VOIDmode, tmpreg,
				     gen_rtx_PLUS (Pmode, basereg, delta)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
	add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		      gen_rtx_SET (VOIDmode,
				   gen_rtx_MEM (word_mode,
						gen_rtx_PLUS (word_mode,
							      basereg,
							      delta)),
				   src));
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx insn, basereg, srcreg, delta;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
	 in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     plus_constant (Pmode,
					    gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		      gen_rtx_SET (VOIDmode, tmpreg,
				   gen_rtx_PLUS (Pmode, basereg, delta)));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
		      gen_rtx_PLUS (Pmode, basereg,
				    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}
HOST_WIDE_INT
pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in pa_expand_prologue and pa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (df_regs_ever_live_p (i))
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (df_regs_ever_live_p (i)
	|| (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
      {
	freg_saved = 1;

	/* We always save both halves of the FP register, so always
	   increment the frame size by 8 bytes.  */
	size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
	*fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += crtl->outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!crtl->is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
	  & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
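/* Worked example (illustrative, assuming the 32-bit ABI values of an
   8-byte STARTING_FRAME_OFFSET and a 64-byte preferred stack boundary):
   10 bytes of locals round up to 12, the frame pointer slot and filler
   add 8, and a non-leaf function adds the 32-byte frame marker for a
   subtotal of 52; the final rounding then returns a 64-byte frame.  */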
/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is an int: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

static void
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* pa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (crtl->is_leaf)
    fputs (",NO_CALLS", file);
  else
    fputs (",CALLS", file);
  if (rp_saved)
    fputs (",SAVE_RP", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
}
void
pa_expand_prologue (void)
{
  int merge_sp_adjust_with_store = 0;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;
  int i;
  rtx insn, tmpreg;

  gr_saved = 0;
  fr_saved = 0;
  save_fregs = 0;

  /* Compute total size for frame pointer, filler, locals and rounding to
     the next word boundary.  Similar code appears in pa_compute_frame_size
     and must be changed in tandem with this code.  */
  local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
  if (local_fsize || frame_pointer_needed)
    local_fsize += STARTING_FRAME_OFFSET;

  actual_fsize = pa_compute_frame_size (size, &save_fregs);
  if (flag_stack_usage_info)
    current_function_static_stack_size = actual_fsize;

  /* Compute a few things we will use often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Save RP first.  The calling conventions manual states RP will
     always be stored into the caller's frame at sp - 20 or sp - 16
     depending on which ABI is in use.  */
  if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
    {
      store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
      rp_saved = true;
    }
  else
    rp_saved = false;

  /* Allocate the local frame and set up the frame pointer if needed.  */
  if (actual_fsize != 0)
    {
      if (frame_pointer_needed)
	{
	  /* Copy the old frame pointer temporarily into %r1.  Set up the
	     new stack pointer, then store away the saved old frame pointer
	     into the stack at sp and at the same time update the stack
	     pointer by actual_fsize bytes.  Two versions, first
	     handles small (<8k) frames.  The second handles large (>=8k)
	     frames.  */
	  insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
	  if (DO_FRAME_NOTES)
	    RTX_FRAME_RELATED_P (insn) = 1;

	  insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
	  if (DO_FRAME_NOTES)
	    RTX_FRAME_RELATED_P (insn) = 1;

	  if (VAL_14_BITS_P (actual_fsize))
	    store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
	  else
	    {
	      /* It is incorrect to store the saved frame pointer at *sp,
		 then increment sp (writes beyond the current stack boundary).

		 So instead use stwm to store at *sp and post-increment the
		 stack pointer as an atomic operation.  Then increment sp to
		 finish allocating the new frame.  */
	      HOST_WIDE_INT adjust1 = 8192 - 64;
	      HOST_WIDE_INT adjust2 = actual_fsize - adjust1;

	      store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
	      set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			      adjust2, 1);
	    }

	  /* We set SAVE_SP in frames that need a frame pointer.  Thus,
	     we need to store the previous stack pointer (frame pointer)
	     into the frame marker on targets that use the HP unwind
	     library.  This allows the HP unwind library to be used to
	     unwind GCC frames.  However, we are not fully compatible
	     with the HP library because our frame layout differs from
	     that specified in the HP runtime specification.

	     We don't want a frame note on this instruction as the frame
	     marker moves during dynamic stack allocation.

	     This instruction also serves as a blockage to prevent
	     register spills from being scheduled before the stack
	     pointer is raised.  This is necessary as we store
	     registers using the frame pointer as a base register,
	     and the frame pointer is set before sp is raised.  */
	  if (TARGET_HPUX_UNWIND_LIBRARY)
	    {
	      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
				       GEN_INT (TARGET_64BIT ? -8 : -4));

	      emit_move_insn (gen_rtx_MEM (word_mode, addr),
			      hard_frame_pointer_rtx);
	    }
	  else
	    emit_insn (gen_blockage ());
	}
      /* no frame pointer needed.  */
      else
	{
	  /* In some cases we can perform the first callee register save
	     and allocating the stack frame at the same time.  If so, just
	     make a note of it and defer allocating the frame until saving
	     the callee registers.  */
	  if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
	    merge_sp_adjust_with_store = 1;
	  /* Can not optimize.  Adjust the stack frame by actual_fsize
	     bytes.  */
	  else
	    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			    actual_fsize, 1);
	}
    }

  /* Normal register save.

     Do not save the frame pointer in the frame_pointer_needed case.  It
     was done earlier.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* Saving the EH return data registers in the frame is the simplest
	 way to get the frame unwind information emitted.  We put them
	 just before the general registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	    gr_saved++;
	  }
      /* Account for %r3 which is saved in a special place.  */
      gr_saved++;
    }
  /* No frame pointer needed.  */
  else
    {
      offset = local_fsize - actual_fsize;

      /* Saving the EH return data registers in the frame is the simplest
	 way to get the frame unwind information emitted.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* If merge_sp_adjust_with_store is nonzero, then we can
		 optimize the first save.  */
	      if (merge_sp_adjust_with_store)
		{
		  store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
		  merge_sp_adjust_with_store = 0;
		}
	      else
		store_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    /* If merge_sp_adjust_with_store is nonzero, then we can
	       optimize the first GR save.  */
	    if (merge_sp_adjust_with_store)
	      {
		store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
		merge_sp_adjust_with_store = 0;
	      }
	    else
	      store_reg (i, offset, STACK_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	    gr_saved++;
	  }

      /* If we wanted to merge the SP adjustment with a GR save, but we never
	 did any GR saves, then just emit the adjustment here.  */
      if (merge_sp_adjust_with_store)
	set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			actual_fsize, 1);
    }

  /* The hppa calling conventions say that %r19, the pic offset
     register, is saved at sp - 32 (in this function's frame)
     when generating PIC code.  FIXME:  What is the correct thing
     to do for functions which make no calls and allocate no
     frame?  Do we need to allocate a frame, or can we just omit
     the save?   For now we'll just omit the save.

     We don't want a note on this insn as the frame marker can
     move if there is a dynamic stack allocation.  */
  if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
    {
      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

      emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* Floating point register store.  */
  if (save_fregs)
    {
      rtx base;

      /* First get the frame or stack pointer to the start of the FP register
	 save area.  */
      if (frame_pointer_needed)
	{
	  set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
	  base = hard_frame_pointer_rtx;
	}
      else
	{
	  set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
	  base = stack_pointer_rtx;
	}

      /* Now actually save the FP registers.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	{
	  if (df_regs_ever_live_p (i)
	      || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
	    {
	      rtx addr, insn, reg;
	      addr = gen_rtx_MEM (DFmode,
				  gen_rtx_POST_INC (word_mode, tmpreg));
	      reg = gen_rtx_REG (DFmode, i);
	      insn = emit_move_insn (addr, reg);
	      if (DO_FRAME_NOTES)
		{
		  RTX_FRAME_RELATED_P (insn) = 1;
		  if (TARGET_64BIT)
		    {
		      rtx mem = gen_rtx_MEM (DFmode,
					     plus_constant (Pmode, base,
							    offset));
		      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
				    gen_rtx_SET (VOIDmode, mem, reg));
		    }
		  else
		    {
		      rtx meml = gen_rtx_MEM (SFmode,
					      plus_constant (Pmode, base,
							     offset));
		      rtx memr = gen_rtx_MEM (SFmode,
					      plus_constant (Pmode, base,
							     offset + 4));
		      rtx regl = gen_rtx_REG (SFmode, i);
		      rtx regr = gen_rtx_REG (SFmode, i + 1);
		      rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
		      rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
		      rtvec vec;

		      RTX_FRAME_RELATED_P (setl) = 1;
		      RTX_FRAME_RELATED_P (setr) = 1;
		      vec = gen_rtvec (2, setl, setr);
		      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
				    gen_rtx_SEQUENCE (VOIDmode, vec));
		    }
		}
	      offset += GET_MODE_SIZE (DFmode);
	      fr_saved++;
	    }
	}
    }
}
/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
	{
	  emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
	  src = gen_rtx_MEM (word_mode, tmpreg);
	}
      else
	src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}
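
/* Illustrative example (not from the original sources): with BASE = 3
   (%r3) and DISP = 0x12345, the displacement does not fit in a 14-bit
   signed immediate, so the final branch above builds a HIGH/LO_SUM pair
   through scratch register %r1, which assembles to something like

	addil L'0x12345,%r3	; %r1 = %r3 + left (high) part of DISP
	ldw R'0x12345(%r1),dest	; right (low) part folded into the load

   where L' and R' are the assembler's left/right field selectors.  */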
/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (unsigned int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      unsigned int old_total = total_code_bytes;

      total_code_bytes += nbytes;

      /* Be prepared to handle overflows.  */
      if (old_total > total_code_bytes)
	total_code_bytes = UINT_MAX;
    }
}
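
/* Worked example (illustrative only): if total_code_bytes is 0xfffffff0
   and NBYTES is 0x20, the addition wraps around to 0x10, which is less
   than the old total; the overflow check above then latches the count
   at UINT_MAX instead of silently restarting it from a small value.  */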
/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

static void
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rtx insn = get_last_insn ();

  /* pa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end
     of a function.

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (NOTE_P (insn))
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && CALL_P (insn))
    fputs ("\tnop\n", file);

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We're done with this subspace except possibly for some additional
	 debug information.  Forget that we are in this subspace to ensure
	 that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  if (INSN_ADDRESSES_SET_P ())
    {
      insn = get_last_nonnote_insn ();
      last_address += INSN_ADDRESSES (INSN_UID (insn));
      if (INSN_P (insn))
	last_address += insn_default_length (insn);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
		      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
    }
  else
    last_address = UINT_MAX;

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
}
void
pa_expand_epilogue (void)
{
  rtx tmpreg;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;
  int i;
  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (rp_saved)
    {
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
	{
	  load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
	  ret_off = 0;
	}
      else
	{
	  /* No frame pointer, and stack is smaller than 8k.  */
	  if (VAL_14_BITS_P (ret_off - actual_fsize))
	    {
	      load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
	      ret_off = 0;
	    }
	}
    }

  /* General register restores.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	  }
    }
  else
    {
      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* Only for the first load.
		 merge_sp_adjust_with_load holds the register load
		 with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
		merge_sp_adjust_with_load = regno;
	      else
		load_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    /* Only for the first load.
	       merge_sp_adjust_with_load holds the register load
	       with which we will merge the sp adjustment.  */
	    if (merge_sp_adjust_with_load == 0
		&& local_fsize == 0
		&& VAL_14_BITS_P (-actual_fsize))
	      merge_sp_adjust_with_load = i;
	    else
	      load_reg (i, offset, STACK_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	  }
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */
  if (save_fregs)
    {
      /* Adjust the register to index off of.  */
      if (frame_pointer_needed)
	set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
      else
	set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	if (df_regs_ever_live_p (i)
	    || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
	  {
	    rtx src = gen_rtx_MEM (DFmode,
				   gen_rtx_POST_INC (word_mode, tmpreg));
	    rtx dest = gen_rtx_REG (DFmode, i);
	    emit_move_insn (dest, src);
	  }
    }

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
  if (frame_pointer_needed)
    {
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (hard_frame_pointer_rtx,
			       stack_pointer_rtx, delta));
    }
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
    {
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
    }
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
		    - actual_fsize, 0);

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
  if (ret_off != 0)
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
		 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
		 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
    }
}
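
/* A note on merge_sp_adjust_with_load (illustrative, under the
   assumption of a small frame with no frame pointer): when
   -actual_fsize fits in 14 bits, one callee register restore is
   deferred in the loops above and then folded into the stack cut by
   gen_pre_load, so the restore and the stack-pointer adjustment
   retire as a single load-with-base-modification rather than a load
   plus a separate pointer adjustment.  */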
bool
pa_can_use_return_insn (void)
{
  if (!reload_completed)
    return false;

  if (frame_pointer_needed)
    return false;

  if (df_regs_ever_live_p (2))
    return false;

  if (crtl->profile)
    return false;

  return pa_compute_frame_size (get_frame_size (), 0) == 0;
}

rtx
hppa_pic_save_rtx (void)
{
  return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
}
#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0
#endif

/* Vector of funcdef numbers.  */
static vec<int> funcdef_nos;

/* Output deferred profile counters.  */
static void
output_deferred_profile_counters (void)
{
  unsigned int i;
  int align, n;

  if (funcdef_nos.is_empty ())
    return;

  switch_to_section (data_section);
  align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
  ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));

  for (i = 0; funcdef_nos.iterate (i, &n); i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "LP", n);
      assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
    }

  funcdef_nos.release ();
}
void
hppa_profile_hook (int label_no)
{
  /* We use SImode for the address of the function in both 32 and
     64-bit code to avoid having to provide DImode versions of the
     lcla2 and load_offset_label_address insn patterns.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx label_rtx = gen_label_rtx ();
  rtx begin_label_rtx, call_insn;
  char begin_label_name[16];

  ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
			       label_no);
  begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));

  if (TARGET_64BIT)
    emit_move_insn (arg_pointer_rtx,
		    gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
				  GEN_INT (64)));

  emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));

  /* The address of the function is loaded into %r25 with an instruction-
     relative sequence that avoids the use of relocations.  The sequence
     is split so that the load_offset_label_address instruction can
     occupy the delay slot of the call to _mcount.  */
  if (TARGET_PA_20)
    emit_insn (gen_lcla2 (reg, label_rtx));
  else
    emit_insn (gen_lcla1 (reg, label_rtx));

  emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
					    reg, begin_label_rtx, label_rtx));

#if !NO_DEFERRED_PROFILE_COUNTERS
  {
    rtx count_label_rtx, addr, r24;
    char count_label_name[16];

    funcdef_nos.safe_push (label_no);
    ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
    count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));

    addr = force_reg (Pmode, count_label_rtx);
    r24 = gen_rtx_REG (Pmode, 24);
    emit_move_insn (r24, addr);

    call_insn =
      emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
					     gen_rtx_SYMBOL_REF (Pmode,
								 "_mcount")),
				GEN_INT (TARGET_64BIT ? 24 : 12)));

    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
  }
#else
  call_insn =
    emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
					   gen_rtx_SYMBOL_REF (Pmode,
							       "_mcount")),
			      GEN_INT (TARGET_64BIT ? 16 : 8)));
#endif

  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));

  /* Indicate the _mcount call cannot throw, nor will it execute a
     non-local goto.  */
  make_reg_eh_region_note_nothrow_nononlocal (call_insn);
}
/* Fetch the return address for the frame COUNT steps up from
   the current frame, after the prologue.  FRAMEADDR is the
   frame pointer of the COUNT frame.

   We want to ignore any export stub remnants here.  To handle this,
   we examine the code at the return address, and if it is an export
   stub, we return a memory rtx for the stub return address stored
   at frame-24.

   The value returned is used in two different ways:

	1. To find a function's caller.

	2. To change the return address for a function.

   This function handles most instances of case 1; however, it will
   fail if there are two levels of stubs to execute on the return
   path.  The only way I believe that can happen is if the return value
   needs a parameter relocation, which never happens for C code.

   This function handles most instances of case 2; however, it will
   fail if we did not originally have stub code on the return path
   but will need stub code on the new return path.  This can happen if
   the caller & callee are both in the main program, but the new
   return location is in a shared library.  */

rtx
pa_return_addr_rtx (int count, rtx frameaddr)
{
  rtx label;
  rtx rp;
  rtx saved_rp;
  rtx ins;

  /* The instruction stream at the return address of a PA1.X export stub is:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0x004010a1 | stub+12:  ldsid (sr0,rp),r1
	0x00011820 | stub+16:  mtsp r1,sr0
	0xe0400002 | stub+20:  be,n 0(sr0,rp)

     0xe0400002 must be specified as -532676606 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.

     The instruction stream at the return address of a PA2.0 export stub is:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0xe840d002 | stub+12:  bve,n (rp)
  */

  HOST_WIDE_INT insns[4];
  int i, len;

  if (count != 0)
    return NULL_RTX;

  rp = get_hard_reg_initial_val (Pmode, 2);

  if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
    return rp;

  /* If there is no export stub then just use the value saved from
     the return pointer register.  */

  saved_rp = gen_reg_rtx (Pmode);
  emit_move_insn (saved_rp, rp);

  /* Get pointer to the instruction stream.  We have to mask out the
     privilege level from the two low order bits of the return address
     pointer here so that ins will point to the start of the first
     instruction that would have been executed if we returned.  */
  ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
  label = gen_label_rtx ();

  if (TARGET_PA_20)
    {
      insns[0] = 0x4bc23fd1;
      insns[1] = -398405630;
      len = 2;
    }
  else
    {
      insns[0] = 0x4bc23fd1;
      insns[1] = 0x004010a1;
      insns[2] = 0x00011820;
      insns[3] = -532676606;
      len = 4;
    }

  /* Check the instruction stream at the normal return address for the
     export stub.  If it is an export stub, then our return address is
     really in -24[frameaddr].  */

  for (i = 0; i < len; i++)
    {
      rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
      rtx op1 = GEN_INT (insns[i]);
      emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
    }

  /* Here we know that our return address points to an export
     stub.  We don't want to return the address of the export stub,
     but rather the return address of the export stub.  That return
     address is stored at -24[frameaddr].  */

  emit_move_insn (saved_rp,
		  gen_rtx_MEM (Pmode,
			       memory_address (Pmode,
					       plus_constant (Pmode, frameaddr,
							      -24))));

  emit_label (label);

  return saved_rp;
}
void
pa_emit_bcond_fp (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[0]);
  rtx operand0 = operands[1];
  rtx operand1 = operands[2];
  rtx label = operands[3];

  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
			  gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode,
						     gen_rtx_fmt_ee (NE,
							      VOIDmode,
							      gen_rtx_REG (CCFPmode, 0),
							      const0_rtx),
						     gen_rtx_LABEL_REF (VOIDmode, label),
						     pc_rtx)));
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  /* Don't adjust costs for a pa8000 chip, also do not adjust any
     true dependencies as they are described with bypasses now.  */
  if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
    return cost;

  if (! recog_memoized (insn))
    return 0;

  attr_type = get_attr_type (insn);

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */

      if (attr_type == TYPE_FPLOAD)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPALU:
		case TYPE_FPMULSGL:
		case TYPE_FPMULDBL:
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* A fpload can't be issued until one cycle before a
		     preceding arithmetic operation has finished if
		     the target of the fpload is any of the sources
		     (or destination) of the arithmetic operation.  */
		  return insn_default_latency (dep_insn) - 1;

		default:
		  return 0;
		}
	    }
	}
      else if (attr_type == TYPE_FPALU)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* An ALU flop can't be issued until two cycles before a
		     preceding divide or sqrt operation has finished if
		     the target of the ALU flop is any of the sources
		     (or destination) of the divide or sqrt operation.  */
		  return insn_default_latency (dep_insn) - 2;

		default:
		  return 0;
		}
	    }
	}

      /* For other anti dependencies, the cost is 0.  */
      return 0;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if (attr_type == TYPE_FPLOAD)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPALU:
		case TYPE_FPMULSGL:
		case TYPE_FPMULDBL:
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* A fpload can't be issued until one cycle before a
		     preceding arithmetic operation has finished if
		     the target of the fpload is the destination of the
		     arithmetic operation.

		     Exception: For PA7100LC, PA7200 and PA7300, the cost
		     is 3 cycles, unless they bundle together.  We also
		     pay the penalty if the second insn is a fpload.  */
		  return insn_default_latency (dep_insn) - 1;

		default:
		  return 0;
		}
	    }
	}
      else if (attr_type == TYPE_FPALU)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* An ALU flop can't be issued until two cycles before a
		     preceding divide or sqrt operation has finished if
		     the target of the ALU flop is also the target of
		     the divide or sqrt operation.  */
		  return insn_default_latency (dep_insn) - 2;

		default:
		  return 0;
		}
	    }
	}

      /* For other output dependencies, the cost is 0.  */
      return 0;

    default:
      gcc_unreachable ();
    }
}
/* Adjust scheduling priorities.  We use this to try and keep addil
   and the next use of %r1 close together.  */
static int
pa_adjust_priority (rtx insn, int priority)
{
  rtx set = single_set (insn);
  rtx src, dest;
  if (set)
    {
      src = SET_SRC (set);
      dest = SET_DEST (set);
      if (GET_CODE (src) == LO_SUM
	  && symbolic_operand (XEXP (src, 1), VOIDmode)
	  && ! read_only_operand (XEXP (src, 1), VOIDmode))
	priority >>= 3;

      else if (GET_CODE (src) == MEM
	       && GET_CODE (XEXP (src, 0)) == LO_SUM
	       && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
	       && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
	priority >>= 1;

      else if (GET_CODE (dest) == MEM
	       && GET_CODE (XEXP (dest, 0)) == LO_SUM
	       && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
	       && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
	priority >>= 3;
    }
  return priority;
}
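
/* For instance (illustrative only): a symbolic store such as

	addil LR'sym-$global$,%r27
	stw %r4,RR'sym-$global$(%r1)

   has the LO_SUM-destination shape matched by the last test above.
   Reducing the priority of the LO_SUM user makes it less likely that
   unrelated insns are scheduled between the addil and the use of %r1.  */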
/* The 700 can only issue a single insn at a time.
   The 7XXX processors can issue two insns at a time.
   The 8000 can issue 4 insns at a time.  */
static int
pa_issue_rate (void)
{
  switch (pa_cpu)
    {
    case PROCESSOR_700:		return 1;
    case PROCESSOR_7100:	return 2;
    case PROCESSOR_7100LC:	return 2;
    case PROCESSOR_7200:	return 2;
    case PROCESSOR_7300:	return 2;
    case PROCESSOR_8000:	return 4;

    default:
      gcc_unreachable ();
    }
}
/* Return any length plus adjustment needed by INSN which already has
   its length computed as LENGTH.  Return LENGTH if no adjustment is
   necessary.

   Also compute the length of an inline block move here as it is too
   complicated to express as a length attribute in pa.md.  */
int
pa_adjust_insn_length (rtx insn, int length)
{
  rtx pat = PATTERN (insn);

  /* If length is negative or undefined, provide initial length.  */
  if ((unsigned int) length >= INT_MAX)
    {
      if (GET_CODE (pat) == SEQUENCE)
	insn = XVECEXP (pat, 0, 0);

      switch (get_attr_type (insn))
	{
	case TYPE_MILLI:
	  length = pa_attr_length_millicode_call (insn);
	  break;
	case TYPE_CALL:
	  length = pa_attr_length_call (insn, 0);
	  break;
	case TYPE_SIBCALL:
	  length = pa_attr_length_call (insn, 1);
	  break;
	case TYPE_DYNCALL:
	  length = pa_attr_length_indirect_call (insn);
	  break;
	case TYPE_SH_FUNC_ADRS:
	  length = pa_attr_length_millicode_call (insn) + 20;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Block move pattern.  */
  if (NONJUMP_INSN_P (insn)
      && GET_CODE (pat) == PARALLEL
      && GET_CODE (XVECEXP (pat, 0, 0)) == SET
      && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
      && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
      && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
      && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
    length += compute_movmem_length (insn) - 4;
  /* Block clear pattern.  */
  else if (NONJUMP_INSN_P (insn)
	   && GET_CODE (pat) == PARALLEL
	   && GET_CODE (XVECEXP (pat, 0, 0)) == SET
	   && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
	   && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
	   && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
    length += compute_clrmem_length (insn) - 4;
  /* Conditional branch with an unfilled delay slot.  */
  else if (JUMP_P (insn) && ! simplejump_p (insn))
    {
      /* Adjust a short backwards conditional with an unfilled delay slot.  */
      if (GET_CODE (pat) == SET
	  && length == 4
	  && JUMP_LABEL (insn) != NULL_RTX
	  && ! forward_branch_p (insn))
	length += 4;
      else if (GET_CODE (pat) == PARALLEL
	       && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
	       && length == 4)
	length += 4;
      /* Adjust dbra insn with short backwards conditional branch with
	 unfilled delay slot -- only for case where counter is in a
	 general register.  */
      else if (GET_CODE (pat) == PARALLEL
	       && GET_CODE (XVECEXP (pat, 0, 1)) == SET
	       && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
	       && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
	       && length == 4
	       && ! forward_branch_p (insn))
	length += 4;
    }
  return length;
}
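
/* Example of the branch adjustment above (illustrative only): a 4-byte
   backward conditional branch whose delay slot was left unfilled must
   be emitted with a nop in the slot, so each of the branch cases adds
   4 bytes to the computed length.  */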
/* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook.  */

static bool
pa_print_operand_punct_valid_p (unsigned char code)
{
  if (code == '@'
      || code == '#'
      || code == '*'
      || code == '^')
    return true;

  return false;
}
/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
pa_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output a 'nop' if there's nothing for the delay slot.  */
      if (dbr_sequence_length () == 0)
	fputs ("\n\tnop", file);
      return;
    case '*':
      /* Output a nullification completer if there's nothing for the
	 delay slot or nullification is requested.  */
      if (dbr_sequence_length () == 0 ||
	  (final_sequence &&
	   INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
	fputs (",n", file);
      return;
    case 'R':
      /* Print out the second register name of a register pair.
	 I.e., R (6) => 7.  */
      fputs (reg_names[REGNO (x) + 1], file);
      return;
    case 'r':
      /* A register or zero.  */
      if (x == const0_rtx
	  || (x == CONST0_RTX (DFmode))
	  || (x == CONST0_RTX (SFmode)))
	{
	  fputs ("%r0", file);
	  return;
	}
      else
	break;
    case 'f':
      /* A register or zero (floating point).  */
      if (x == const0_rtx
	  || (x == CONST0_RTX (DFmode))
	  || (x == CONST0_RTX (SFmode)))
	{
	  fputs ("%fr0", file);
	  return;
	}
      else
	break;
    case 'A':
      {
	rtx xoperands[2];

	xoperands[0] = XEXP (XEXP (x, 0), 0);
	xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
	pa_output_global_address (file, xoperands[1], 0);
	fprintf (file, "(%s)", reg_names[REGNO (xoperands[0])]);
	return;
      }

    case 'C':			/* Plain (C)ondition */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("=", file);  break;
	case NE:
	  fputs ("<>", file);  break;
	case GT:
	  fputs (">", file);  break;
	case GE:
	  fputs (">=", file);  break;
	case GEU:
	  fputs (">>=", file);  break;
	case GTU:
	  fputs (">>", file);  break;
	case LT:
	  fputs ("<", file);  break;
	case LE:
	  fputs ("<=", file);  break;
	case LEU:
	  fputs ("<<=", file);  break;
	case LTU:
	  fputs ("<<", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'N':			/* Condition, (N)egated */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("<>", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs ("<=", file);  break;
	case GE:
	  fputs ("<", file);  break;
	case GEU:
	  fputs ("<<", file);  break;
	case GTU:
	  fputs ("<<=", file);  break;
	case LT:
	  fputs (">=", file);  break;
	case LE:
	  fputs (">", file);  break;
	case LEU:
	  fputs (">>", file);  break;
	case LTU:
	  fputs (">>=", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    /* For floating point comparisons.  Note that the output
       predicates are the complement of the desired mode.  The
       conditions for GT, GE, LT, LE and LTGT cause an invalid
       operation exception if the result is unordered and this
       exception is enabled in the floating-point status register.  */
    case 'Y':
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("!=", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs ("!>", file);  break;
	case GE:
	  fputs ("!>=", file);  break;
	case LT:
	  fputs ("!<", file);  break;
	case LE:
	  fputs ("!<=", file);  break;
	case LTGT:
	  fputs ("!<>", file);  break;
	case UNLE:
	  fputs ("!?<=", file);  break;
	case UNLT:
	  fputs ("!?<", file);  break;
	case UNGE:
	  fputs ("!?>=", file);  break;
	case UNGT:
	  fputs ("!?>", file);  break;
	case UNEQ:
	  fputs ("!?=", file);  break;
	case UNORDERED:
	  fputs ("!?", file);  break;
	case ORDERED:
	  fputs ("?", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'S':			/* Condition, operands are (S)wapped.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("=", file);  break;
	case NE:
	  fputs ("<>", file);  break;
	case GT:
	  fputs ("<", file);  break;
	case GE:
	  fputs ("<=", file);  break;
	case GEU:
	  fputs ("<<=", file);  break;
	case GTU:
	  fputs ("<<", file);  break;
	case LT:
	  fputs (">", file);  break;
	case LE:
	  fputs (">=", file);  break;
	case LEU:
	  fputs (">>=", file);  break;
	case LTU:
	  fputs (">>", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'B':			/* Condition, (B)oth swapped and negate.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("<>", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs (">=", file);  break;
	case GE:
	  fputs (">", file);  break;
	case GEU:
	  fputs (">>", file);  break;
	case GTU:
	  fputs (">>=", file);  break;
	case LT:
	  fputs ("<=", file);  break;
	case LE:
	  fputs ("<", file);  break;
	case LEU:
	  fputs ("<<", file);  break;
	case LTU:
	  fputs ("<<=", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'k':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
      return;
    case 'Q':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
      return;
    case 'L':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
      return;
    case 'O':
      gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      return;
    case 'p':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
      return;
    case 'P':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
      return;
    case 'I':
      if (GET_CODE (x) == CONST_INT)
	fputs ("i", file);
      return;
    case 'M':
    case 'F':
      switch (GET_CODE (XEXP (x, 0)))
	{
	case PRE_DEC:
	case PRE_INC:
	  if (ASSEMBLER_DIALECT == 0)
	    fputs ("s,mb", file);
	  else
	    fputs (",mb", file);
	  break;
	case POST_DEC:
	case POST_INC:
	  if (ASSEMBLER_DIALECT == 0)
	    fputs ("s,ma", file);
	  else
	    fputs (",ma", file);
	  break;
	case PLUS:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		fputs ("x", file);
	    }
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
		   || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		fputs ("x,s", file);
	      else
		fputs (",s", file);
	    }
	  else if (code == 'F' && ASSEMBLER_DIALECT == 0)
	    fputs ("s", file);
	  break;
	default:
	  if (code == 'F' && ASSEMBLER_DIALECT == 0)
	    fputs ("s", file);
	  break;
	}
      return;
    case 'G':
      pa_output_global_address (file, x, 0);
      return;
    case 'H':
      pa_output_global_address (file, x, 1);
      return;
    case 0:			/* Don't do anything special */
      break;
    case 'Z':
      {
	unsigned op[3];
	compute_zdepwi_operands (INTVAL (x), op);
	fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
	return;
      }
    case 'z':
      {
	unsigned op[3];
	compute_zdepdi_operands (INTVAL (x), op);
	fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
	return;
      }
    case 'c':
      /* We can get here from a .vtable_inherit due to our
	 CONSTANT_ADDRESS_P rejecting perfectly good constant
	 addresses.  */
      break;
    default:
      gcc_unreachable ();
    }
  if (GET_CODE (x) == REG)
    {
      fputs (reg_names[REGNO (x)], file);
      if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
	{
	  fputs ("R", file);
	  return;
	}
      if (FP_REG_P (x)
	  && GET_MODE_SIZE (GET_MODE (x)) <= 4
	  && (REGNO (x) & 1) == 0)
	fputs ("L", file);
    }
  else if (GET_CODE (x) == MEM)
    {
      int size = GET_MODE_SIZE (GET_MODE (x));
      rtx base = NULL_RTX;
      switch (GET_CODE (XEXP (x, 0)))
	{
	case PRE_DEC:
	case POST_DEC:
	  base = XEXP (XEXP (x, 0), 0);
	  fprintf (file, "-%d(%s)", size, reg_names[REGNO (base)]);
	  break;
	case PRE_INC:
	case POST_INC:
	  base = XEXP (XEXP (x, 0), 0);
	  fprintf (file, "%d(%s)", size, reg_names[REGNO (base)]);
	  break;
	case PLUS:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
	    fprintf (file, "%s(%s)",
		     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
		     reg_names[REGNO (XEXP (XEXP (x, 0), 1))]);
	  else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
	    fprintf (file, "%s(%s)",
		     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
	    {
	      /* Because the REG_POINTER flag can get lost during reload,
		 pa_legitimate_address_p canonicalizes the order of the
		 index and base registers in the combined move patterns.  */
	      rtx base = XEXP (XEXP (x, 0), 1);
	      rtx index = XEXP (XEXP (x, 0), 0);

	      fprintf (file, "%s(%s)",
		       reg_names[REGNO (index)], reg_names[REGNO (base)]);
	    }
	  else
	    output_address (XEXP (x, 0));
	  break;
	default:
	  output_address (XEXP (x, 0));
	  break;
	}
    }
  else
    output_addr_const (file, x);
}
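
/* Usage sketch (illustrative only): given operand 3 of (lt (reg A)
   (reg B)), "%C3" prints "<", "%S3" prints ">" (operands swapped) and
   "%B3" prints "<=" (swapped and negated), so one output template can
   emit the plain, swapped, or negated form of a comparison.  */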
/* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */

void
pa_output_global_address (FILE *file, rtx x, int round_constant)
{
  /* Imagine  (high (const (plus ...))).  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
    output_addr_const (file, x);
  else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
    {
      output_addr_const (file, x);
      fputs ("-$global$", file);
    }
  else if (GET_CODE (x) == CONST)
    {
      const char *sep = "";
      int offset = 0;		/* assembler wants -$global$ at end */
      rtx base = NULL_RTX;

      switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
	{
	case SYMBOL_REF:
	  base = XEXP (XEXP (x, 0), 0);
	  output_addr_const (file, base);
	  break;
	case CONST_INT:
	  offset = INTVAL (XEXP (XEXP (x, 0), 0));
	  break;
	default:
	  gcc_unreachable ();
	}

      switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
	{
	case SYMBOL_REF:
	  base = XEXP (XEXP (x, 0), 1);
	  output_addr_const (file, base);
	  break;
	case CONST_INT:
	  offset = INTVAL (XEXP (XEXP (x, 0), 1));
	  break;
	default:
	  gcc_unreachable ();
	}

      /* How bogus.  The compiler is apparently responsible for
	 rounding the constant if it uses an LR field selector.

	 The linker and/or assembler seem a better place since
	 they have to do this kind of thing already.

	 If we fail to do this, HP's optimizing linker may eliminate
	 an addil, but not update the ldw/stw/ldo instruction that
	 uses the result of the addil.  */
      if (round_constant)
	offset = ((offset + 0x1000) & ~0x1fff);

      switch (GET_CODE (XEXP (x, 0)))
	{
	case PLUS:
	  if (offset < 0)
	    {
	      offset = -offset;
	      sep = "-";
	    }
	  else
	    sep = "+";
	  break;

	case MINUS:
	  gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
	  sep = "-";
	  break;

	default:
	  gcc_unreachable ();
	}

      if (!read_only_operand (base, VOIDmode) && !flag_pic)
	fputs ("-$global$", file);
      if (offset)
	fprintf (file, "%s%d", sep, offset);
    }
  else
    output_addr_const (file, x);
}
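
/* Example output (illustrative only): for the rtx
   (const (plus (symbol_ref "foo") (const_int 4))) in non-PIC code,
   where "foo" is not read-only, this routine prints "foo-$global$+4";
   with round_constant set, the +4 would first be rounded to a
   multiple of 0x2000 for the LR field selector, giving offset 0.  */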
/* Output boilerplate text to appear at the beginning of the file.
   There are several possible versions.  */
#define aputs(x) fputs(x, asm_out_file)

static void
pa_file_start_level (void)
{
  if (TARGET_64BIT)
    aputs ("\t.LEVEL 2.0w\n");
  else if (TARGET_PA_20)
    aputs ("\t.LEVEL 2.0\n");
  else if (TARGET_PA_11)
    aputs ("\t.LEVEL 1.1\n");
  else
    aputs ("\t.LEVEL 1.0\n");
}

static void
pa_file_start_space (int sortspace)
{
  aputs ("\t.SPACE $PRIVATE$");
  if (sortspace)
    aputs (",SORT=16");
  aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
  if (flag_tm)
    aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
  aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
	 "\n\t.SPACE $TEXT$");
  if (sortspace)
    aputs (",SORT=8");
  aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
	 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
}

static void
pa_file_start_file (int want_version)
{
  if (write_symbols != NO_DEBUG)
    {
      output_file_directive (asm_out_file, main_input_filename);
      if (want_version)
	aputs ("\t.version\t\"01.01\"\n");
    }
}

static void
pa_file_start_mcount (const char *aswhat)
{
  if (profile_flag)
    fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
}

static void
pa_elf_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_mcount ("ENTRY");
  pa_file_start_file (0);
}

static void
pa_som_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (0);
  aputs ("\t.IMPORT $global$,DATA\n"
	 "\t.IMPORT $$dyncall,MILLICODE\n");
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}

static void
pa_linux_file_start (void)
{
  pa_file_start_file (1);
  pa_file_start_level ();
  pa_file_start_mcount ("CODE");
}

static void
pa_hpux64_gas_file_start (void)
{
  pa_file_start_level ();
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  if (profile_flag)
    ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
#endif
  pa_file_start_file (1);
}

static void
pa_hpux64_hpas_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (1);
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}
#undef aputs
/* Search the deferred plabel list for SYMBOL and return its internal
   label.  If an entry for SYMBOL is not found, a new entry is created.  */

rtx
pa_get_deferred_plabel (rtx symbol)
{
  const char *fname = XSTR (symbol, 0);
  size_t i;

  /* See if we have already put this function on the list of deferred
     plabels.  This list is generally small, so a linear search is not
     too ugly.  If it proves too slow replace it with something faster.  */
  for (i = 0; i < n_deferred_plabels; i++)
    if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
      break;

  /* If the deferred plabel list is empty, or this entry was not found
     on the list, create a new entry on the list.  */
  if (deferred_plabels == NULL || i == n_deferred_plabels)
    {
      tree id;

      if (deferred_plabels == 0)
	deferred_plabels = ggc_alloc_deferred_plabel ();
      else
	deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
					  deferred_plabels,
					  n_deferred_plabels + 1);

      i = n_deferred_plabels++;
      deferred_plabels[i].internal_label = gen_label_rtx ();
      deferred_plabels[i].symbol = symbol;

      /* Gross.  We have just implicitly taken the address of this
	 function.  Mark it in the same manner as assemble_name.  */
      id = maybe_get_identifier (targetm.strip_name_encoding (fname));
      if (id)
	mark_referenced (id);
    }

  return deferred_plabels[i].internal_label;
}

static void
output_deferred_plabels (void)
{
  size_t i;

  /* If we have some deferred plabels, then we need to switch into the
     data or readonly data section, and align it to a 4 byte boundary
     before outputting the deferred plabels.  */
  if (n_deferred_plabels)
    {
      switch_to_section (flag_pic ? data_section : readonly_data_section);
      ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
    }

  /* Now output the deferred plabels.  */
  for (i = 0; i < n_deferred_plabels; i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "L",
		 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
      assemble_integer (deferred_plabels[i].symbol,
			TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
    }
}
/* Initialize optabs to point to emulation routines.  */

static void
pa_init_libfuncs (void)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
      set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
      set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
      set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
      set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
      set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
      set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
      set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
      set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

      set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
      set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
      set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
      set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
      set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
      set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
      set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");

      set_conv_libfunc (sfix_optab, SImode, TFmode,
			TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
				     : "_U_Qfcnvfxt_quad_to_sgl");
      set_conv_libfunc (sfix_optab, DImode, TFmode,
			"_U_Qfcnvfxt_quad_to_dbl");
      set_conv_libfunc (ufix_optab, SImode, TFmode,
			"_U_Qfcnvfxt_quad_to_usgl");
      set_conv_libfunc (ufix_optab, DImode, TFmode,
			"_U_Qfcnvfxt_quad_to_udbl");

      set_conv_libfunc (sfloat_optab, TFmode, SImode,
			"_U_Qfcnvxf_sgl_to_quad");
      set_conv_libfunc (sfloat_optab, TFmode, DImode,
			"_U_Qfcnvxf_dbl_to_quad");
      set_conv_libfunc (ufloat_optab, TFmode, SImode,
			"_U_Qfcnvxf_usgl_to_quad");
      set_conv_libfunc (ufloat_optab, TFmode, DImode,
			"_U_Qfcnvxf_udbl_to_quad");
    }

  if (TARGET_SYNC_LIBCALL)
    init_sync_libfuncs (UNITS_PER_WORD);
}
/* HP's millicode routines mean something special to the assembler.
   Keep track of which ones we have used.  */

enum millicodes { remI, remU, divI, divU, mulI, end1000 };
static void import_milli (enum millicodes);
static char imported[(int) end1000];
static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
static const char import_string[] = ".IMPORT $$....,MILLICODE";
#define MILLI_START 10

static void
import_milli (enum millicodes code)
{
  char str[sizeof (import_string)];

  if (!imported[(int) code])
    {
      imported[(int) code] = 1;
      strcpy (str, import_string);
      strncpy (str + MILLI_START, milli_names[(int) code], 4);
      output_asm_insn (str, 0);
    }
}

/* The register constraints have put the operands and return value in
   the proper registers.  */

const char *
pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
{
  import_milli (mulI);
  return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}
/* Emit the rtl for doing a division by a constant.  */

/* Do magic division millicodes exist for this value? */
const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};

/* We'll use an array to keep track of the magic millicodes and
   whether or not we've used them already. [n][0] is signed, [n][1] is
   unsigned.  */

static int div_milli[16][2];

int
pa_emit_hpdiv_const (rtx *operands, int unsignedp)
{
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) > 0
      && INTVAL (operands[2]) < 16
      && pa_magic_milli[INTVAL (operands[2])])
    {
      rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);

      emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
      emit
	(gen_rtx_PARALLEL
	 (VOIDmode,
	  gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
				     gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
						     SImode,
						     gen_rtx_REG (SImode, 26),
						     operands[2])),
		     gen_rtx_CLOBBER (VOIDmode, operands[4]),
		     gen_rtx_CLOBBER (VOIDmode, operands[3]),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
		     gen_rtx_CLOBBER (VOIDmode, ret))));
      emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
      return 1;
    }
  return 0;
}

const char *
pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
{
  int divisor;

  /* If the divisor is a constant, try to use one of the special
     millicode routines.  */
  if (GET_CODE (operands[0]) == CONST_INT)
    {
      static char buf[100];
      divisor = INTVAL (operands[0]);
      if (!div_milli[divisor][unsignedp])
	{
	  div_milli[divisor][unsignedp] = 1;
	  if (unsignedp)
	    output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
	  else
	    output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
	}
      if (unsignedp)
	{
	  sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
      else
	{
	  sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
    }
  /* Divisor isn't a special constant.  */
  else
    {
      if (unsignedp)
	{
	  import_milli (divU);
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, "$$divU"));
	}
      else
	{
	  import_milli (divI);
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, "$$divI"));
	}
    }
}

/* Output a $$rem millicode to do mod.  */

const char *
pa_output_mod_insn (int unsignedp, rtx insn)
{
  if (unsignedp)
    {
      import_milli (remU);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remU"));
    }
  else
    {
      import_milli (remI);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remI"));
    }
}
void
pa_output_arg_descriptor (rtx call_insn)
{
  const char *arg_regs[4];
  enum machine_mode arg_mode;
  rtx link;
  int i, output_flag = 0;
  int regno;

  /* We neither need nor want argument location descriptors for the
     64bit runtime environment or the ELF32 environment.  */
  if (TARGET_64BIT || TARGET_ELF32)
    return;

  for (i = 0; i < 4; i++)
    arg_regs[i] = 0;

  /* Specify explicitly that no argument relocations should take place
     if using the portable runtime calling conventions.  */
  if (TARGET_PORTABLE_RUNTIME)
    {
      fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
	     asm_out_file);
      return;
    }

  gcc_assert (CALL_P (call_insn));
  for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
       link; link = XEXP (link, 1))
    {
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));
      if (regno >= 23 && regno <= 26)
	{
	  arg_regs[26 - regno] = "GR";
	  if (arg_mode == DImode)
	    arg_regs[25 - regno] = "GR";
	}
      else if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    arg_regs[(regno - 32) / 2] = "FR";
	  else
	    {
#ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
	      arg_regs[(regno - 34) / 2] = "FR";
	      arg_regs[(regno - 34) / 2 + 1] = "FU";
#else
	      arg_regs[(regno - 34) / 2] = "FU";
	      arg_regs[(regno - 34) / 2 + 1] = "FR";
#endif
	    }
	}
    }
  fputs ("\t.CALL ", asm_out_file);
  for (i = 0; i < 4; i++)
    {
      if (arg_regs[i])
	{
	  if (output_flag++)
	    fputc (',', asm_out_file);
	  fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
	}
    }
  fputc ('\n', asm_out_file);
}
/* Inform reload about cases where moving X with a mode MODE to or from
   a register in RCLASS requires an extra scratch or immediate register.
   Return the class needed for the immediate register.  */

static reg_class_t
pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
		     enum machine_mode mode, secondary_reload_info *sri)
{
  int regno;
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Handle the easy stuff first.  */
  if (rclass == R1_REGS)
    return NO_REGS;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
	return NO_REGS;
    }
  else
    regno = -1;

  /* If we have something like (mem (mem (...)), we can safely assume the
     inner MEM will end up in a general register after reloading, so there's
     no need for a secondary reload.  */
  if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
    return NO_REGS;

  /* Trying to load a constant into a FP register during PIC code
     generation requires %r1 as a scratch register.  For float modes,
     the only legitimate constant is CONST0_RTX.  However, there are
     a few patterns that accept constant double operands.  */
  if (flag_pic
      && FP_REG_CLASS_P (rclass)
      && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      switch (mode)
	{
	case SImode:
	  sri->icode = CODE_FOR_reload_insi_r1;
	  break;

	case DImode:
	  sri->icode = CODE_FOR_reload_indi_r1;
	  break;

	case SFmode:
	  sri->icode = CODE_FOR_reload_insf_r1;
	  break;

	case DFmode:
	  sri->icode = CODE_FOR_reload_indf_r1;
	  break;

	default:
	  gcc_unreachable ();
	}
      return NO_REGS;
    }

  /* Secondary reloads of symbolic expressions require %r1 as a scratch
     register when we're generating PIC code or when the operand isn't
     readonly.  */
  if (pa_symbolic_expression_p (x))
    {
      if (GET_CODE (x) == HIGH)
	x = XEXP (x, 0);

      if (flag_pic || !read_only_operand (x, VOIDmode))
	{
	  switch (mode)
	    {
	    case SImode:
	      sri->icode = CODE_FOR_reload_insi_r1;
	      break;

	    case DImode:
	      sri->icode = CODE_FOR_reload_indi_r1;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  return NO_REGS;
	}
    }

  /* Profiling showed the PA port spends about 1.3% of its compilation
     time in true_regnum from calls inside pa_secondary_reload_class.  */
  if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* Handle reloads for floating point loads and stores.  */
  if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
      && FP_REG_CLASS_P (rclass))
    {
      if (MEM_P (x))
	{
	  /* We don't need an intermediate for indexed and LO_SUM DLT
	     memory addresses.  When INT14_OK_STRICT is true, it might
	     appear that we could directly allow register indirect
	     memory addresses.  However, this doesn't work because we
	     don't support SUBREGs in floating-point register copies
	     and reload doesn't tell us when it's going to use a SUBREG.  */
	  if (IS_INDEX_ADDR_P (x)
	      || IS_LO_SUM_DLT_ADDR_P (x))
	    return NO_REGS;

	  /* Request intermediate general register.  */
	  return GENERAL_REGS;
	}

      /* Request a secondary reload with a general scratch register
	 for everything else.  ??? Could symbolic operands be handled
	 directly when generating non-pic PA 2.0 code?  */
      sri->icode = (in_p
		    ? direct_optab_handler (reload_in_optab, mode)
		    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* A SAR<->FP register copy requires an intermediate general register
     and secondary memory.  We need a secondary reload with a general
     scratch register for spills.  */
  if (rclass == SHIFT_REGS)
    {
      /* Handle spill.  */
      if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
	{
	  sri->icode = (in_p
			? direct_optab_handler (reload_in_optab, mode)
			: direct_optab_handler (reload_out_optab, mode));
	  return NO_REGS;
	}

      /* Handle FP copy.  */
      if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
	return GENERAL_REGS;
    }

  if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
      && REGNO_REG_CLASS (regno) == SHIFT_REGS
      && FP_REG_CLASS_P (rclass))
    return GENERAL_REGS;

  return NO_REGS;
}
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  The argument pointer
   is only marked as live on entry by df-scan when it is a fixed
   register.  It isn't a fixed register in the 64-bit runtime,
   so we need to mark it here.  */

static void
pa_extra_live_on_entry (bitmap regs)
{
  if (TARGET_64BIT)
    bitmap_set_bit (regs, ARG_POINTER_REGNUM);
}

/* Implement EH_RETURN_HANDLER_RTX.  The MEM needs to be volatile
   to prevent it from being deleted.  */

rtx
pa_eh_return_handler_rtx (void)
{
  rtx tmp;

  tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
		      TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
  tmp = gen_rtx_MEM (word_mode, tmp);
  tmp->volatil = 1;
  return tmp;
}
/* In the 32-bit runtime, arguments larger than eight bytes are passed
   by invisible reference.  As a GCC extension, we also pass anything
   with a zero or variable size by reference.

   The 64-bit runtime does not describe passing any types by invisible
   reference.  The internals of GCC can't currently handle passing
   empty structures, and zero or variable length arrays when they are
   not passed entirely on the stack or by reference.  Thus, as a GCC
   extension, we pass these types by reference.  The HP compiler doesn't
   support these types, so hopefully there shouldn't be any compatibility
   issues.  This may have to be revisited when HP releases a C99 compiler
   or updates the ABI.  */

static bool
pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
		      enum machine_mode mode, const_tree type,
		      bool named ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  if (TARGET_64BIT)
    return size <= 0;
  else
    return size <= 0 || size > 8;
}
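
/* For example (illustrative only): a 12-byte struct is passed by
   invisible reference in the 32-bit runtime (size > 8), but by value
   in the 64-bit runtime; a zero-sized struct is passed by reference
   in both, per the GCC extension described above.  */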
enum direction
pa_function_arg_padding (enum machine_mode mode, const_tree type)
{
  if (mode == BLKmode
      || (TARGET_64BIT
	  && type
	  && (AGGREGATE_TYPE_P (type)
	      || TREE_CODE (type) == COMPLEX_TYPE
	      || TREE_CODE (type) == VECTOR_TYPE)))
    {
      /* Return none if justification is not required.  */
      if (type
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
	return none;

      /* The directions set here are ignored when a BLKmode argument larger
	 than a word is placed in a register.  Different code is used for
	 the stack and registers.  This makes it difficult to have a
	 consistent data representation for both the stack and registers.
	 For both runtimes, the justification and padding for arguments on
	 the stack and in registers should be identical.  */
      if (TARGET_64BIT)
	/* The 64-bit runtime specifies left justification for aggregates.  */
	return upward;
      else
	/* The 32-bit runtime architecture specifies right justification.
	   When the argument is passed on the stack, the argument is padded
	   with garbage on the left.  The HP compiler pads with zeros.  */
	return downward;
    }

  if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
    return downward;
  else
    return none;
}
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdargs or varargs is used and fill in an initial
   va_list.  A pointer to this constructor is returned.  */

static rtx
hppa_builtin_saveregs (void)
{
  rtx offset, dest;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  if (TARGET_64BIT)
    {
      int i, off;

      /* Adjust for varargs/stdarg differences.  */
      if (argadj)
	offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
      else
	offset = crtl->args.arg_offset_rtx;

      /* We need to save %r26 .. %r19 inclusive starting at offset -64
	 from the incoming arg pointer and growing to larger addresses.  */
      for (i = 26, off = -64; i >= 19; i--, off += 8)
	emit_move_insn (gen_rtx_MEM (word_mode,
				     plus_constant (Pmode,
						    arg_pointer_rtx, off)),
			gen_rtx_REG (word_mode, i));

      /* The incoming args pointer points just beyond the flushback area;
	 normally this is not a serious concern.  However, when we are doing
	 varargs/stdargs we want to make the arg pointer point to the start
	 of the incoming argument area.  */
      emit_move_insn (virtual_incoming_args_rtx,
		      plus_constant (Pmode, arg_pointer_rtx, -64));

      /* Now return a pointer to the first anonymous argument.  */
      return copy_to_reg (expand_binop (Pmode, add_optab,
					virtual_incoming_args_rtx,
					offset, 0, 0, OPTAB_LIB_WIDEN));
    }

  /* Store general registers on the stack.  */
  dest = gen_rtx_MEM (BLKmode,
		      plus_constant (Pmode, crtl->args.internal_arg_pointer,
				     -16));
  set_mem_alias_set (dest, get_varargs_alias_set ());
  set_mem_align (dest, BITS_PER_WORD);
  move_block_from_reg (23, dest, 4);

  /* move_block_from_reg will emit code to store the argument registers
     individually as scalar stores.

     However, other insns may later load from the same addresses for
     a structure load (passing a struct to a varargs routine).

     The alias code assumes that such aliasing can never happen, so we
     have to keep memory referencing insns from moving up beyond the
     last argument register store.  So we emit a blockage insn here.  */
  emit_insn (gen_blockage ());

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
static void
hppa_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
static tree
hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			   gimple_seq *post_p)
{
  if (TARGET_64BIT)
    {
      /* Args grow upward.  We can use the generic routines.  */
      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }
  else /* !TARGET_64BIT */
    {
      tree ptr = build_pointer_type (type);
      tree valist_type;
      tree t, u;
      unsigned int size, ofs;
      bool indirect;

      indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
      if (indirect)
	{
	  type = ptr;
	  ptr = build_pointer_type (type);
	}
      size = int_size_in_bytes (type);
      valist_type = TREE_TYPE (valist);

      /* Args grow down.  Not handled by generic routines.  */

      u = fold_convert (sizetype, size_in_bytes (type));
      u = fold_build1 (NEGATE_EXPR, sizetype, u);
      t = fold_build_pointer_plus (valist, u);

      /* Align to 4 or 8 byte boundary depending on argument size.  */

      u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
      t = fold_convert (valist_type, t);

      t = build2 (MODIFY_EXPR, valist_type, valist, t);

      ofs = (8 - size) % 4;
      if (ofs != 0)
	t = fold_build_pointer_plus_hwi (t, ofs);

      t = fold_convert (ptr, t);
      t = build_va_arg_indirect_ref (t);

      if (indirect)
	t = build_va_arg_indirect_ref (t);

      return t;
    }
}
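
/* Worked example (illustrative only) for the 32-bit case: fetching a
   2-byte argument with valist at address A computes A - 2, aligns it
   with & -4, then adds ofs = (8 - 2) % 4 = 2 so the halfword is read
   from the right-justified end of its 4-byte slot.  */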
/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   Currently, TImode is not valid as the HP 64-bit runtime documentation
   doesn't document the alignment and calling conventions for this type.
   Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
   2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE.  */

static bool
pa_scalar_mode_supported_p (enum machine_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
	return true;
      if (precision == DOUBLE_TYPE_SIZE)
	return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
   it branches into the delay slot.  Otherwise, return FALSE.  */

static bool
branch_to_delay_slot_p (rtx insn)
{
  rtx jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (jump_insn == insn)
	return TRUE;

      /* We can't rely on the length of asms.  So, we return FALSE when
	 the branch is followed by an asm.  */
      if (!insn
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || extract_asm_operands (PATTERN (insn)) != NULL_RTX
	  || get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.

   This occurs when INSN has an unfilled delay slot and is followed
   by an asm.  Disaster can occur if the asm is empty and the jump
   branches into the delay slot.  So, we add a nop in the delay slot
   when this occurs.  */

static bool
branch_needs_nop_p (rtx insn)
{
  rtx jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (!insn || jump_insn == insn)
	return TRUE;

      if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
	  && get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, can use nullification
   to skip the following instruction.  This avoids an extra cycle due
   to a mis-predicted branch when we fall through.  */

static bool
use_skip_p (rtx insn)
{
  rtx jump_insn = next_active_insn (JUMP_LABEL (insn));

  while (insn)
    {
      insn = next_active_insn (insn);

      /* We can't rely on the length of asms, so we can't skip asms.  */
      if (!insn
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
	break;
      if (get_attr_length (insn) == 4
	  && jump_insn == next_active_insn (insn))
	return TRUE;
      if (get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
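/* For reference, a hedged sketch of what use_skip_p enables.  A short
   forward branch over a single insn, e.g.

       cmpb,=,n %r4,%r5,L$1    ; branch over the next insn
       ldi      1,%r28
   L$1:

   can instead be emitted as a compare-and-clear whose condition
   nullifies the following insn when it holds:

       comclr,= %r4,%r5,%r0    ; skip the ldi when %r4 == %r5
       ldi      1,%r28

   removing the branch, and with it the fall-through misprediction
   penalty mentioned above.  The register numbers and label are
   illustrative only.  */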
/* This routine handles all the normal conditional branch sequences we
   might need to generate.  It handles compare immediate vs compare
   register, nullification of delay slots, varying length branches,
   negated branches, and all combinations of the above.  It returns the
   output appropriate to emit the branch corresponding to all given
   parameters.  */

const char *
pa_output_cbranch (rtx *operands, int negated, rtx insn)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot)
     is asking for a disaster.  This can happen when not optimizing and
     when jump optimization fails.

     While it is usually safe to emit nothing, this can fail if the
     preceding instruction is a nullified branch with an empty delay
     slot and the same branch target as this branch.  We could check
     for this but jump optimization should eliminate nop jumps.  It
     is always safe to emit a nop.  */
  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* The doubleword form of the cmpib instruction doesn't have the LEU
     and GTU conditions while the cmpb instruction does.  Since we accept
     zero for cmpb, we must ensure that we use cmpb for the comparison.  */
  if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
    operands[2] = gen_rtx_REG (DImode, 0);
  if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
    operands[1] = gen_rtx_REG (DImode, 0);

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     comclr instruction.  This avoids a single cycle penalty due to
     a mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
      case 4:
	if (useskip)
	  strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
	else
	  strcpy (buf, "{com%I2b,|cmp%I2b,}");
	if (GET_MODE (operands[1]) == DImode)
	  strcat (buf, "*");
	if (negated)
	  strcat (buf, "%B3");
	else
	  strcat (buf, "%S3");
	if (useskip)
	  strcat (buf, " %2,%r1,%%r0");
	else if (nullify)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, ",n %2,%r1,%0%#");
	    else
	      strcat (buf, ",n %2,%r1,%0");
	  }
	else
	  strcat (buf, " %2,%r1,%0");
	break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
      case 8:
	/* Handle weird backwards branch with a filled delay slot
	   which is nullified.  */
	if (dbr_sequence_length () != 0
	    && ! forward_branch_p (insn)
	    && nullify)
	  {
	    strcpy (buf, "{com%I2b,|cmp%I2b,}");
	    if (GET_MODE (operands[1]) == DImode)
	      strcat (buf, "*");
	    if (negated)
	      strcat (buf, "%S3");
	    else
	      strcat (buf, "%B3");
	    strcat (buf, ",n %2,%r1,.+12\n\tb %0");
	  }
	/* Handle short backwards branch with an unfilled delay slot.
	   Using a comb;nop rather than comiclr;bl saves 1 cycle for both
	   taken and untaken branches.  */
	else if (dbr_sequence_length () == 0
		 && ! forward_branch_p (insn)
		 && INSN_ADDRESSES_SET_P ()
		 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				   - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	  {
	    strcpy (buf, "{com%I2b,|cmp%I2b,}");
	    if (GET_MODE (operands[1]) == DImode)
	      strcat (buf, "*");
	    if (negated)
	      strcat (buf, "%B3 %2,%r1,%0%#");
	    else
	      strcat (buf, "%S3 %2,%r1,%0%#");
	  }
	else
	  {
	    strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
	    if (GET_MODE (operands[1]) == DImode)
	      strcat (buf, "*");
	    if (negated)
	      strcat (buf, "%S3");
	    else
	      strcat (buf, "%B3");
	    if (nullify)
	      strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
	    else
	      strcat (buf, " %2,%r1,%%r0\n\tb %0");
	  }
	break;

      default:
	/* The reversed conditional branch must branch over one additional
	   instruction if the delay slot is filled and needs to be extracted
	   by pa_output_lbranch.  If the delay slot is empty or this is a
	   nullified forward branch, the instruction after the reversed
	   condition branch must be nullified.  */
	if (dbr_sequence_length () == 0
	    || (nullify && forward_branch_p (insn)))
	  {
	    nullify = 1;
	    xdelay = 0;
	    operands[4] = GEN_INT (length);
	  }
	else
	  {
	    xdelay = 1;
	    operands[4] = GEN_INT (length + 4);
	  }

	/* Create a reversed conditional branch which branches around
	   the following insns.  */
	if (GET_MODE (operands[1]) != DImode)
	  {
	    if (nullify)
	      {
		if (negated)
		  strcpy (buf,
		    "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
		else
		  strcpy (buf,
		    "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
	      }
	    else
	      {
		if (negated)
		  strcpy (buf,
		    "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
		else
		  strcpy (buf,
		    "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
	      }
	  }
	else
	  {
	    if (nullify)
	      {
		if (negated)
		  strcpy (buf,
		    "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
		else
		  strcpy (buf,
		    "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
	      }
	    else
	      {
		if (negated)
		  strcpy (buf,
		    "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
		else
		  strcpy (buf,
		    "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
	      }
	  }

	output_asm_insn (buf, operands);
	return pa_output_lbranch (operands[0], insn, xdelay);
    }
  return buf;
}
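/* Worked example (illustrative): for a short (length 4), non-negated,
   non-nullified SImode branch, the strcpy/strcat chain above leaves

     buf = "{com%I2b,|cmp%I2b,}%S3 %2,%r1,%0"

   The {old|new} construct selects the PA 1.x or PA 2.0 mnemonic via the
   assembler dialect, %I2 picks the immediate form when operand 2 is a
   constant, and, as used above, %S3 and %B3 print the condition in its
   normal and reversed sense, so the template might assemble to
   something like "cmpb,= %r5,%r4,L$17".  */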
/* This routine handles output of long unconditional branches that
   exceed the maximum range of a simple branch instruction.  Since
   we don't have a register available for the branch, we save register
   %r1 in the frame marker, load the branch destination DEST into %r1,
   execute the branch, and restore %r1 in the delay slot of the branch.

   Since long branches may have an insn in the delay slot and the
   delay slot is used to restore %r1, we in general need to extract
   this insn and execute it before the branch.  However, to facilitate
   use of this function by conditional branches, we also provide an
   option to not extract the delay insn so that it will be emitted
   after the long branch.  So, if there is an insn in the delay slot,
   it is extracted if XDELAY is nonzero.

   The lengths of the various long-branch sequences are 20, 16 and 24
   bytes for the portable runtime, non-PIC and PIC cases, respectively.  */

const char *
pa_output_lbranch (rtx dest, rtx insn, int xdelay)
{
  rtx xoperands[2];

  xoperands[0] = dest;

  /* First, free up the delay slot.  */
  if (xdelay && dbr_sequence_length () != 0)
    {
      /* We can't handle a jump in the delay slot.  */
      gcc_assert (! JUMP_P (NEXT_INSN (insn)));

      final_scan_insn (NEXT_INSN (insn), asm_out_file,
		       optimize, 0, NULL);

      /* Now delete the delay insn.  */
      SET_INSN_DELETED (NEXT_INSN (insn));
    }

  /* Output an insn to save %r1.  The runtime documentation doesn't
     specify whether the "Clean Up" slot in the callers frame can
     be clobbered by the callee.  It isn't copied by HP's builtin
     alloca, so this suggests that it can be clobbered if necessary.
     The "Static Link" location is copied by HP builtin alloca, so
     we avoid using it.  Using the cleanup slot might be a problem
     if we have to interoperate with languages that pass cleanup
     information.  However, it should be possible to handle these
     situations with GCC's asm feature.

     The "Current RP" slot is reserved for the called procedure, so
     we try to use it when we don't have a frame of our own.  It's
     rather unlikely that we won't have a frame when we need to emit
     a very long branch.

     Really the way to go long term is a register scavenger; goto
     the target of the jump and find a register which we can use
     as a scratch to hold the value in %r1.  Then, we wouldn't have
     to free up the delay slot or clobber a slot that may be needed
     for other purposes.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	/* Use the return pointer slot in the frame marker.  */
	output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
      else
	/* Use the slot at -40 in the frame marker since HP builtin
	   alloca doesn't copy it.  */
	output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	/* Use the return pointer slot in the frame marker.  */
	output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
      else
	/* Use the "Clean Up" slot in the frame marker.  In GCC,
	   the only other use of this location is for copying a
	   floating point double argument from a floating-point
	   register to two general registers.  The copy is done
	   as an "atomic" operation when outputting a call, so it
	   won't interfere with our using the location here.  */
	output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
    }

  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
      if (TARGET_SOM || !TARGET_GAS)
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (xoperands[1]));
	  output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
	}
      else
	{
	  output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	}
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else
    /* Now output a very long branch to the original target.  */
    output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);

  /* Now restore the value of %r1 in the delay slot.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	return "ldd -16(%%r30),%%r1";

      return "ldd -40(%%r30),%%r1";
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	return "ldw -20(%%r30),%%r1";

      return "ldw -12(%%r30),%%r1";
    }
}
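/* Illustrative example: in the common 32-bit non-PIC case for a
   function with a frame, the code above plus the returned delay-slot
   template produce

       stw   %r1,-12(%r30)          ; save %r1 in the frame marker
       ldil  L'target,%r1           ; high part of the destination
       be    R'target(%sr4,%r1)     ; very long branch
       ldw   -12(%r30),%r1          ; delay slot: restore %r1

   four insns, matching the 16-byte non-PIC figure in the header
   comment.  "target" is a placeholder label.  */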
/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     a mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
      case 4:
	if (useskip)
	  strcpy (buf, "{extrs,|extrw,s,}");
	else
	  strcpy (buf, "bb,");
	if (useskip && GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "extrd,s,*");
	else if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	if ((which == 0 && negated)
	    || (which == 1 && ! negated))
	  strcat (buf, ">=");
	else
	  strcat (buf, "<");
	if (useskip)
	  strcat (buf, " %0,%1,1,%%r0");
	else if (nullify && negated)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, ",n %0,%1,%3%#");
	    else
	      strcat (buf, ",n %0,%1,%3");
	  }
	else if (nullify && ! negated)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, ",n %0,%1,%2%#");
	    else
	      strcat (buf, ",n %0,%1,%2");
	  }
	else if (! nullify && negated)
	  strcat (buf, " %0,%1,%3");
	else if (! nullify && ! negated)
	  strcat (buf, " %0,%1,%2");
	break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
      case 8:
	/* Handle weird backwards branch with a filled delay slot
	   which is nullified.  */
	if (dbr_sequence_length () != 0
	    && ! forward_branch_p (insn)
	    && nullify)
	  {
	    strcpy (buf, "bb,");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (negated)
	      strcat (buf, ",n %0,%1,.+12\n\tb %3");
	    else
	      strcat (buf, ",n %0,%1,.+12\n\tb %2");
	  }
	/* Handle short backwards branch with an unfilled delay slot.
	   Using a bb;nop rather than extrs;bl saves 1 cycle for both
	   taken and untaken branches.  */
	else if (dbr_sequence_length () == 0
		 && ! forward_branch_p (insn)
		 && INSN_ADDRESSES_SET_P ()
		 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				   - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	  {
	    strcpy (buf, "bb,");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, ">=");
	    else
	      strcat (buf, "<");
	    if (negated)
	      strcat (buf, " %0,%1,%3%#");
	    else
	      strcat (buf, " %0,%1,%2%#");
	  }
	else
	  {
	    if (GET_MODE (operands[0]) == DImode)
	      strcpy (buf, "extrd,s,*");
	    else
	      strcpy (buf, "{extrs,|extrw,s,}");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (nullify && negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
	    else if (nullify && ! negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
	    else if (negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb %3");
	    else
	      strcat (buf, " %0,%1,1,%%r0\n\tb %2");
	  }
	break;

      default:
	/* The reversed conditional branch must branch over one additional
	   instruction if the delay slot is filled and needs to be extracted
	   by pa_output_lbranch.  If the delay slot is empty or this is a
	   nullified forward branch, the instruction after the reversed
	   condition branch must be nullified.  */
	if (dbr_sequence_length () == 0
	    || (nullify && forward_branch_p (insn)))
	  {
	    nullify = 1;
	    xdelay = 0;
	    operands[4] = GEN_INT (length);
	  }
	else
	  {
	    xdelay = 1;
	    operands[4] = GEN_INT (length + 4);
	  }

	if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	else
	  strcpy (buf, "bb,");
	if ((which == 0 && negated)
	    || (which == 1 && !negated))
	  strcat (buf, "<");
	else
	  strcat (buf, ">=");
	if (nullify)
	  strcat (buf, ",n %0,%1,.+%4");
	else
	  strcat (buf, " %0,%1,.+%4");
	output_asm_insn (buf, operands);
	return pa_output_lbranch (negated ? operands[3] : operands[2],
				  insn, xdelay);
    }
  return buf;
}
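/* For reference (illustrative): "bb,<" branches when the selected bit
   of the first operand is 1 and "bb,>=" when it is 0, with the bit
   position counted from the most significant bit.  The ",n" completer
   nullifies the delay slot, and the "*" prefix used for the DImode
   cases above selects the 64-bit (doubleword) form of the condition
   on PA 2.0.  */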
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
	       int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     a mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
      case 4:
	if (useskip)
	  strcpy (buf, "{vextrs,|extrw,s,}");
	else
	  strcpy (buf, "{bvb,|bb,}");
	if (useskip && GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "extrd,s,*");
	else if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	if ((which == 0 && negated)
	    || (which == 1 && ! negated))
	  strcat (buf, ">=");
	else
	  strcat (buf, "<");
	if (useskip)
	  strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
	else if (nullify && negated)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
	    else
	      strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
	  }
	else if (nullify && ! negated)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
	    else
	      strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
	  }
	else if (! nullify && negated)
	  strcat (buf, "{ %0,%3| %0,%%sar,%3}");
	else if (! nullify && ! negated)
	  strcat (buf, "{ %0,%2| %0,%%sar,%2}");
	break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
      case 8:
	/* Handle weird backwards branch with a filled delay slot
	   which is nullified.  */
	if (dbr_sequence_length () != 0
	    && ! forward_branch_p (insn)
	    && nullify)
	  {
	    strcpy (buf, "{bvb,|bb,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (negated)
	      strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
	    else
	      strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
	  }
	/* Handle short backwards branch with an unfilled delay slot.
	   Using a bb;nop rather than extrs;bl saves 1 cycle for both
	   taken and untaken branches.  */
	else if (dbr_sequence_length () == 0
		 && ! forward_branch_p (insn)
		 && INSN_ADDRESSES_SET_P ()
		 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				   - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	  {
	    strcpy (buf, "{bvb,|bb,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, ">=");
	    else
	      strcat (buf, "<");
	    if (negated)
	      strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
	    else
	      strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
	  }
	else
	  {
	    strcpy (buf, "{vextrs,|extrw,s,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcpy (buf, "extrd,s,*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (nullify && negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
	    else if (nullify && ! negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
	    else if (negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
	    else
	      strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
	  }
	break;

      default:
	/* The reversed conditional branch must branch over one additional
	   instruction if the delay slot is filled and needs to be extracted
	   by pa_output_lbranch.  If the delay slot is empty or this is a
	   nullified forward branch, the instruction after the reversed
	   condition branch must be nullified.  */
	if (dbr_sequence_length () == 0
	    || (nullify && forward_branch_p (insn)))
	  {
	    nullify = 1;
	    xdelay = 0;
	    operands[4] = GEN_INT (length);
	  }
	else
	  {
	    xdelay = 1;
	    operands[4] = GEN_INT (length + 4);
	  }

	if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	else
	  strcpy (buf, "{bvb,|bb,}");
	if ((which == 0 && negated)
	    || (which == 1 && !negated))
	  strcat (buf, "<");
	else
	  strcat (buf, ">=");
	if (nullify)
	  strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
	else
	  strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
	output_asm_insn (buf, operands);
	return pa_output_lbranch (negated ? operands[3] : operands[2],
				  insn, xdelay);
    }
  return buf;
}
/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
	return "ldo %1(%0),%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
	  output_asm_insn ("ldw -16(%%r30),%4", operands);
	  output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else
	{
	  output_asm_insn ("ldw %0,%4", operands);
	  return "ldo %1(%4),%4\n\tstw %4,%0";
	}
    }

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	  if (nullify)
	    {
	      if (branch_needs_nop_p (insn))
		return "addib,%C2,n %1,%0,%3%#";
	      else
		return "addib,%C2,n %1,%0,%3";
	    }
	  else
	    return "addib,%C2 %1,%0,%3";

	case 8:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "addib,%N2,n %1,%0,.+12\n\tb %3";
	  /* Handle short backwards branch with an unfilled delay slot.
	     Using an addb;nop rather than addi;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				     - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "addib,%C2 %1,%0,%3%#";

	  /* Handle normal cases.  */
	  if (nullify)
	    return "addi,%N2 %1,%0,%0\n\tb,n %3";
	  else
	    return "addi,%N2 %1,%0,%0\n\tb %3";

	default:
	  /* The reversed conditional branch must branch over one additional
	     instruction if the delay slot is filled and needs to be extracted
	     by pa_output_lbranch.  If the delay slot is empty or this is a
	     nullified forward branch, the instruction after the reversed
	     condition branch must be nullified.  */
	  if (dbr_sequence_length () == 0
	      || (nullify && forward_branch_p (insn)))
	    {
	      nullify = 1;
	      xdelay = 0;
	      operands[4] = GEN_INT (length);
	    }
	  else
	    {
	      xdelay = 1;
	      operands[4] = GEN_INT (length + 4);
	    }

	  if (nullify)
	    output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
	  else
	    output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);

	  return pa_output_lbranch (operands[3], insn, xdelay);
	}
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
	 increment the GR, store the GR into MEM, and finally reload
	 the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
		       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (length == 24)
	return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 28)
	return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	{
	  operands[5] = GEN_INT (length - 16);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
	  output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (length == 12)
	return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else if (length == 16)
	return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
      else
	{
	  operands[5] = GEN_INT (length - 4);
	  output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
}
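/* Worked example (illustrative): the short template returned above,
   "addib,%C2 %1,%0,%3", is an add-immediate-and-branch, e.g.

       addib,<  -1,%r3,L$loop    ; %r3 += -1; branch to L$loop if < 0

   so a decrement-test-and-branch loop (the classic dbra idiom)
   collapses to a single instruction in the common case.  The register
   and label here are placeholders.  */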
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
pa_output_movb (rtx *operands, rtx insn, int which_alternative,
		int reverse_comparison)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
	return "copy %1,%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("stw %1,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else if (which_alternative == 2)
	return "stw %1,%0";
      else
	return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	  if (nullify)
	    {
	      if (branch_needs_nop_p (insn))
		return "movb,%C2,n %1,%0,%3%#";
	      else
		return "movb,%C2,n %1,%0,%3";
	    }
	  else
	    return "movb,%C2 %1,%0,%3";

	case 8:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "movb,%N2,n %1,%0,.+12\n\tb %3";

	  /* Handle short backwards branch with an unfilled delay slot.
	     Using a movb;nop rather than or;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				     - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "movb,%C2 %1,%0,%3%#";
	  /* Handle normal cases.  */
	  if (nullify)
	    return "or,%N2 %1,%%r0,%0\n\tb,n %3";
	  else
	    return "or,%N2 %1,%%r0,%0\n\tb %3";

	default:
	  /* The reversed conditional branch must branch over one additional
	     instruction if the delay slot is filled and needs to be extracted
	     by pa_output_lbranch.  If the delay slot is empty or this is a
	     nullified forward branch, the instruction after the reversed
	     condition branch must be nullified.  */
	  if (dbr_sequence_length () == 0
	      || (nullify && forward_branch_p (insn)))
	    {
	      nullify = 1;
	      xdelay = 0;
	      operands[4] = GEN_INT (length);
	    }
	  else
	    {
	      xdelay = 1;
	      operands[4] = GEN_INT (length + 4);
	    }

	  if (nullify)
	    output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
	  else
	    output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);

	  return pa_output_lbranch (operands[3], insn, xdelay);
	}
    }
  /* Deal with gross reload for FP destination register case.  */
  else if (which_alternative == 1)
    {
      /* Move source register to MEM, perform the branch test, then
	 finally load the FP register from MEM from within the branch's
	 delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (length == 12)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 16)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	{
	  operands[4] = GEN_INT (length - 4);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
	  output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      if (length == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else if (length == 12)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
      else
	{
	  operands[4] = GEN_INT (length);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
			   operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (length == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else if (length == 12)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
      else
	{
	  operands[4] = GEN_INT (length);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
			   operands);
	  return pa_output_lbranch (operands[3], insn, 0);
	}
    }
}
/* Copy any FP arguments in INSN into integer registers.  */
static void
copy_fp_args (rtx insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  /* Copy the FP register into an integer register via memory.  */
	  if (arg_mode == SFmode)
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
	      output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	  else
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
	      output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	}
    }
}
/* Compute length of the FP argument copy sequence for INSN.  */
static int
length_fp_args (rtx insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    length += 8;
	  else
	    length += 12;
	}
    }

  return length;
}
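/* Illustrative sketch (not compiler code, name hypothetical): the
   general register that copy_fp_args above pairs with each FP argument
   register, as implied by its gen_rtx_REG calls.  REGNO uses this
   file's numbering, where 32-39 cover the FP argument registers.  */

static int
fp_arg_gr_sketch (int regno, int is_double)
{
  if (!is_double)
    /* SFmode: regno 32/33 map to %r26, 34/35 to %r25, and so on.  */
    return 26 - (regno - 32) / 2;

  /* DImode pairs start one register lower, matching the DImode copy.  */
  return 25 - (regno - 34) / 2;
}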
/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by pa_output_millicode_call.
   We include the delay slot in the returned length as it is better to
   overestimate the length than to underestimate it.  */

int
pa_attr_length_millicode_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
	return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
	return 8;

      if (!flag_pic)
	return 12;

      return 24;
    }
}
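/* A minimal sketch (illustrative only, name hypothetical) of the
   distance computation used above and in the other length routines.
   TOTAL is zero when the function lives in its own named section;
   when the unsigned sum wraps, the distance is forced to -1 so an
   out-of-range call always selects the long sequence.  */

static unsigned long
call_distance_sketch (unsigned long total, unsigned long ref_addr)
{
  unsigned long distance = total + ref_addr;

  if (distance < total)	/* Overflow: treat the target as unreachable.  */
    distance = -1;

  return distance;
}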
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_millicode_call (rtx insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  int distance;
  rtx seq_insn;
  rtx xoperands[3];

  xoperands[0] = call_dest;
  xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because it
     has an attribute type of sh_func_adrs, it never has a nonzero
     sequence length (i.e., the delay slot is never filled).  */
  if (!TARGET_LONG_CALLS
      && (attr_length == 8
	  || (attr_length == 28
	      && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
    {
      output_asm_insn ("{bl|b,l} %0,%2", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
	{
	  /* It might seem that one insn could be saved by accessing
	     the millicode function using the linkage table.  However,
	     this doesn't work in shared libraries and other dynamically
	     loaded objects.  Using a pc-relative sequence also avoids
	     problems related to the implicit use of the gp register.  */
	  output_asm_insn ("b,l .+8,%%r1", xoperands);

	  if (TARGET_GAS)
	    {
	      output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	    }
	  else
	    {
	      xoperands[1] = gen_label_rtx ();
	      output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
	      targetm.asm_out.internal_label (asm_out_file, "L",
					      CODE_LABEL_NUMBER (xoperands[1]));
	      output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
	    }

	  output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
	}
      else if (TARGET_PORTABLE_RUNTIME)
	{
	  /* Pure portable runtime doesn't allow be/ble; we also don't
	     have PIC support in the assembler/linker, so this sequence
	     is needed.  */

	  /* Get the address of our target into %r1.  */
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

	  /* Get our return address into %r31.  */
	  output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
	  output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
      else if (!flag_pic)
	{
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  if (TARGET_PA_20)
	    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
	  else
	    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
	}
      else
	{
	  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
	  output_asm_insn ("addi 16,%%r1,%%r31", xoperands);

	  if (TARGET_SOM || !TARGET_GAS)
	    {
	      /* The HP assembler can generate relocations for the
		 difference of two symbols.  GAS can do this for a
		 millicode symbol but not an arbitrary external
		 symbol when generating SOM output.  */
	      xoperands[1] = gen_label_rtx ();
	      targetm.asm_out.internal_label (asm_out_file, "L",
					      CODE_LABEL_NUMBER (xoperands[1]));
	      output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
	    }
	  else
	    {
	      output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
			       xoperands);
	    }

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
    return "";

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  /* See if the return address can be adjusted.  Use the containing
     sequence insn's address.  */
  if (INSN_ADDRESSES_SET_P ())
    {
      seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
		  - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance))
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (xoperands[1]));
	}
      else
	/* ??? This branch may not reach its target.  */
	output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    /* ??? This branch may not reach its target.  */
    output_asm_insn ("nop\n\tb,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}
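/* For reference (illustrative): with GAS and PIC, the long millicode
   sequence emitted above for, say, $$mulI is

       bl     .+8,%r1                           ; pc into %r1
       addi   16,%r1,%r31                       ; return point into %r31
       addil  L'$$mulI-$PIC_pcrel$0+8,%r1
       ldo    R'$$mulI-$PIC_pcrel$0+12(%r1),%r1
       bv     %r0(%r1)                          ; enter the millicode
       nop                                      ; unfilled delay slot

   24 bytes, matching the PIC figure in pa_attr_length_millicode_call.
   Note that millicode calls return through %r31 rather than %r2.  */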
/* Return the attribute length of the call instruction INSN.  The SIBCALL
   flag indicates whether INSN is a regular call or a sibling call.  The
   length returned must be longer than the code actually generated by
   pa_output_call.  Since branch shortening is done before delay branch
   sequencing, there is no way to determine whether or not the delay
   slot will be filled during branch shortening.  Even when the delay
   slot is filled, we may have to add a nop if the delay slot contains
   a branch that can't reach its target.  Thus, we always have to include
   the delay slot in the length estimate.  This used to be done in
   pa_adjust_insn_length but we do it here now as some sequences always
   fill the delay slot and we can save four bytes in the estimate for
   these sequences.  */

int
pa_attr_length_call (rtx insn, int sibcall)
{
  int local_call;
  rtx call, call_dest;
  tree call_decl;
  int length = 0;
  rtx pat = PATTERN (insn);
  unsigned long distance = -1;

  gcc_assert (CALL_P (insn));

  if (INSN_ADDRESSES_SET_P ())
    {
      unsigned long total;

      total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  gcc_assert (GET_CODE (pat) == PARALLEL);

  /* Get the call rtx.  */
  call = XVECEXP (pat, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);

  gcc_assert (GET_CODE (call) == CALL);

  /* Determine if this is a local call.  */
  call_dest = XEXP (XEXP (call, 0), 0);
  call_decl = SYMBOL_REF_DECL (call_dest);
  local_call = call_decl && targetm.binds_local_p (call_decl);

  /* pc-relative branch.  */
  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !sibcall && distance < 7600000)
	  || distance < MAX_PCREL17F_OFFSET))
    length += 8;

  /* 64-bit plabel sequence.  */
  else if (TARGET_64BIT && !local_call)
    length += sibcall ? 28 : 24;

  /* non-pic long absolute branch sequence.  */
  else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
    length += 12;

  /* long pc-relative branch sequence.  */
  else if (TARGET_LONG_PIC_SDIFF_CALL
	   || (TARGET_GAS && !TARGET_SOM
	       && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
    {
      length += 20;

      if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
	length += 8;
    }

  /* 32-bit plabel sequence.  */
  else
    {
      length += 32;

      if (TARGET_SOM)
	length += length_fp_args (insn);

      if (flag_pic)
	length += 4;

      if (!TARGET_PA_20)
	{
	  if (!sibcall)
	    length += 8;

	  if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
	    length += 8;
	}
    }

  return length;
}
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_call (rtx insn, rtx call_dest, int sibcall)
{
  int delay_insn_deleted = 0;
  int delay_slot_filled = 0;
  int seq_length = dbr_sequence_length ();
  tree call_decl = SYMBOL_REF_DECL (call_dest);
  int local_call = call_decl && targetm.binds_local_p (call_decl);
  rtx xoperands[2];

  xoperands[0] = call_dest;

  /* Handle the common case where we're sure that the branch will reach
     the beginning of the "$CODE$" subspace.  This is the beginning of
     the current function if we are in a named section.  */
  if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
    {
      xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT && !local_call)
	{
	  /* ??? As far as I can tell, the HP linker doesn't support the
	     long pc-relative sequence described in the 64-bit runtime
	     architecture.  So, we use a slightly longer indirect call.  */
	  xoperands[0] = pa_get_deferred_plabel (call_dest);
	  xoperands[1] = gen_label_rtx ();

	  /* If this isn't a sibcall, we put the load of %r27 into the
	     delay slot.  We can't do this in a sibcall as we don't
	     have a second call-clobbered scratch register available.  */
	  if (seq_length != 0
	      && ! JUMP_P (NEXT_INSN (insn))
	      && !sibcall)
	    {
	      final_scan_insn (NEXT_INSN (insn), asm_out_file,
			       optimize, 0, NULL);

	      /* Now delete the delay insn.  */
	      SET_INSN_DELETED (NEXT_INSN (insn));
	      delay_insn_deleted = 1;
	    }

	  output_asm_insn ("addil LT'%0,%%r27", xoperands);
	  output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
	  output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);

	  if (sibcall)
	    {
	      output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
	      output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
	      output_asm_insn ("bve (%%r1)", xoperands);
	    }
	  else
	    {
	      output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
	      output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
	      output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
	      delay_slot_filled = 1;
	    }
	}
      else
	{
	  int indirect_call = 0;

	  /* Emit a long call.  There are several different sequences
	     of increasing length and complexity.  In most cases,
	     they don't allow an instruction in the delay slot.  */
	  if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
	      && !TARGET_LONG_PIC_SDIFF_CALL
	      && !(TARGET_GAS && !TARGET_SOM
		   && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
	    indirect_call = 1;

	  if (seq_length != 0
	      && ! JUMP_P (NEXT_INSN (insn))
	      && !sibcall
	      && (!TARGET_PA_20
		  || indirect_call
		  || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
	    {
	      /* A non-jump insn in the delay slot.  By definition we can
		 emit this insn before the call (and in fact before argument
		 relocation).  */
	      final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
			       NULL);

	      /* Now delete the delay insn.  */
	      SET_INSN_DELETED (NEXT_INSN (insn));
	      delay_insn_deleted = 1;
	    }

	  if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
	    {
	      /* This is the best sequence for making long calls in
		 non-pic code.  Unfortunately, GNU ld doesn't provide
		 the stub needed for external calls, and GAS's support
		 for this with the SOM linker is buggy.  It is safe
		 to use this for local calls.  */
	      output_asm_insn ("ldil L'%0,%%r1", xoperands);
	      if (sibcall)
		output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
	      else
		{
		  if (TARGET_PA_20)
		    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
				     xoperands);
		  else
		    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);

		  output_asm_insn ("copy %%r31,%%r2", xoperands);
		  delay_slot_filled = 1;
		}
	    }
	  else
	    {
	      if (TARGET_LONG_PIC_SDIFF_CALL)
		{
		  /* The HP assembler and linker can handle relocations
		     for the difference of two symbols.  The HP assembler
		     recognizes the sequence as a pc-relative call and
		     the linker provides stubs when needed.  */
		  xoperands[1] = gen_label_rtx ();
		  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
		  output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
		  targetm.asm_out.internal_label (asm_out_file, "L",
						  CODE_LABEL_NUMBER (xoperands[1]));
		  output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
		}
	      else if (TARGET_GAS && !TARGET_SOM
		       && (TARGET_LONG_PIC_PCREL_CALL || local_call))
		{
		  /*  GAS currently can't generate the relocations that
		      are needed for the SOM linker under HP-UX using this
		      sequence.  The GNU linker doesn't generate the stubs
		      that are needed for external calls on TARGET_ELF32
		      with this sequence.  For now, we have to use a
		      longer plabel sequence when using GAS.  */
		  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
		  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
				   xoperands);
		  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
				   xoperands);
		}
	      else
		{
		  /* Emit a long plabel-based call sequence.  This is
		     essentially an inline implementation of $$dyncall.
		     We don't actually try to call $$dyncall as this is
		     as difficult as calling the function itself.  */
		  xoperands[0] = pa_get_deferred_plabel (call_dest);
		  xoperands[1] = gen_label_rtx ();

		  /* Since the call is indirect, FP arguments in registers
		     need to be copied to the general registers.  Then, the
		     argument relocation stub will copy them back.  */
		  if (TARGET_SOM)
		    copy_fp_args (insn);

		  if (flag_pic)
		    {
		      output_asm_insn ("addil LT'%0,%%r19", xoperands);
		      output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
		      output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
		    }
		  else
		    {
		      output_asm_insn ("addil LR'%0-$global$,%%r27",
				       xoperands);
		      output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
				       xoperands);
		    }

		  output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
		  output_asm_insn ("depi 0,31,2,%%r1", xoperands);
		  output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
		  output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);

		  if (!sibcall && !TARGET_PA_20)
		    {
		      output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
		      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
			output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
		      else
			output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
		    }
		}

	      if (TARGET_PA_20)
		{
		  if (sibcall)
		    output_asm_insn ("bve (%%r1)", xoperands);
		  else
		    {
		      if (indirect_call)
			{
			  output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
			  output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
			  delay_slot_filled = 1;
			}
		      else
			output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
		    }
		}
	      else
		{
		  if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
		    output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
				     xoperands);

		  if (sibcall)
		    {
		      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
			output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
		      else
			output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
		    }
		  else
		    {
		      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
			output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
		      else
			output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);

		      if (indirect_call)
			output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
		      else
			output_asm_insn ("copy %%r31,%%r2", xoperands);
		      delay_slot_filled = 1;
		    }
		}
	    }
	}
    }

  if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0
      || delay_insn_deleted
      || ! JUMP_P (NEXT_INSN (insn)))
    return "";

  /* A sibcall should never have a branch in the delay slot.  */
  gcc_assert (!sibcall);

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
    {
      /* See if the return address can be adjusted.  Use the containing
	 sequence insn's address.  This would break the regular call/return
	 relationship assumed by the table based eh unwinder, so only do that
	 if the call is not possibly throwing.  */
      rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
		      - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance)
	  && !(can_throw_internal (insn) || can_throw_external (insn)))
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (xoperands[1]));
	}
      else
	output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    output_asm_insn ("b,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}
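/* Illustrative note: the pre-PA 2.0 inter-space pieces used above,

       ldsid  (%r1),%r31        ; space id of the target
       mtsp   %r31,%sr0         ; into %sr0 for the external branch
       ble    0(%sr0,%r1)       ; branch and link external
       copy   %r31,%r2          ; delay slot: move return link to %r2

   are what make the 32-bit plabel case so much longer than the plain
   pc-relative call in pa_attr_length_call.  */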
/* Return the attribute length of the indirect call instruction INSN.
   The length must match the code generated by pa_output_indirect_call.
   The returned length includes the delay slot.  Currently, the delay
   slot of an indirect call sequence is not exposed and it is used by
   the sequence itself.  */

int
pa_attr_length_indirect_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  if (TARGET_64BIT)
    return 12;

  if (TARGET_FAST_INDIRECT_CALLS
      || (!TARGET_LONG_CALLS
	  && !TARGET_PORTABLE_RUNTIME
	  && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
	      || distance < MAX_PCREL17F_OFFSET)))
    return 8;

  if (flag_pic)
    return 20;

  if (TARGET_PORTABLE_RUNTIME)
    return 16;

  /* Out of reach, can use ble.  */
  return 12;
}
const char *
pa_output_indirect_call (rtx insn, rtx call_dest)
{
  rtx xoperands[1];

  if (TARGET_64BIT)
    {
      xoperands[0] = call_dest;
      output_asm_insn ("ldd 16(%0),%%r2", xoperands);
      output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
      return "";
    }

  /* First the special case for kernels, level 0 systems, etc.  */
  if (TARGET_FAST_INDIRECT_CALLS)
    return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";

  /* Now the normal case -- we can reach $$dyncall directly or
     we're sure that we can get there via a long-branch stub.

     No need to check target flags as the length uniquely identifies
     the remaining cases.  */
  if (pa_attr_length_indirect_call (insn) == 8)
    {
      /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
	 $$dyncall.  Since BLE uses %r31 as the link register, the 22-bit
	 variant of the B,L instruction can't be used on the SOM target.  */
      if (TARGET_PA_20 && !TARGET_SOM)
	return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
      else
	return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
    }

  /* Long millicode call, but we are not generating PIC or portable runtime
     code.  */
  if (pa_attr_length_indirect_call (insn) == 12)
    return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";

  /* Long millicode call for portable runtime.  */
  if (pa_attr_length_indirect_call (insn) == 16)
    return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";

  /* We need a long PIC call to $$dyncall.  */
  xoperands[0] = NULL_RTX;
  output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
  if (TARGET_SOM || !TARGET_GAS)
    {
      xoperands[0] = gen_label_rtx ();
      output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
      targetm.asm_out.internal_label (asm_out_file, "L",
				      CODE_LABEL_NUMBER (xoperands[0]));
      output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
    }
  else
    {
      output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
      output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
		       xoperands);
    }
  output_asm_insn ("bv %%r0(%%r1)", xoperands);
  output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
  return "";
}
/* In HPUX 8.0's shared library scheme, special relocations are needed
   for function labels if they might be passed to a function
   in a shared library (because shared libraries don't live in code
   space), and special magic is needed to construct their address.  */

void
pa_encode_label (rtx sym)
{
  const char *str = XSTR (sym, 0);
  int len = strlen (str) + 1;
  char *newstr, *p;

  p = newstr = XALLOCAVEC (char, len + 1);
  *p++ = '@';
  strcpy (p, str);

  XSTR (sym, 0) = ggc_alloc_string (newstr, len);
}
static void
pa_encode_section_info (tree decl, rtx rtl, int first)
{
  int old_referenced = 0;

  if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
    old_referenced
      = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;

  default_encode_section_info (decl, rtl, first);

  if (first && TEXT_SPACE_P (decl))
    {
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
      if (TREE_CODE (decl) == FUNCTION_DECL)
	pa_encode_label (XEXP (rtl, 0));
    }
  else if (old_referenced)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
}
/* This is sort of inverse to pa_encode_section_info.  */

static const char *
pa_strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}
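/* Usage sketch: the two encodings stripped above are the '@' added by
   pa_encode_label and the generic '*' user-label prefix, at most one
   character of each, in that order.  For example (illustrative):

     pa_strip_name_encoding ("@foo")  => "foo"
     pa_strip_name_encoding ("*bar")  => "bar"
     pa_strip_name_encoding ("baz")   => "baz"  */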
/* Returns 1 if OP is a function label involved in a simple addition
   with a constant.  Used to keep certain patterns from matching
   during instruction combination.  */
int
pa_is_function_label_plus_const (rtx op)
{
  /* Strip off any CONST.  */
  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  return (GET_CODE (op) == PLUS
	  && function_label_operand (XEXP (op, 0), VOIDmode)
	  && GET_CODE (XEXP (op, 1)) == CONST_INT);
}
8245 /* Output assembly code for a thunk to FUNCTION. */
8248 pa_asm_output_mi_thunk (FILE *file
, tree thunk_fndecl
, HOST_WIDE_INT delta
,
8249 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
8252 static unsigned int current_thunk_number
;
8253 int val_14
= VAL_14_BITS_P (delta
);
8254 unsigned int old_last_address
= last_address
, nbytes
= 0;
8258 xoperands
[0] = XEXP (DECL_RTL (function
), 0);
8259 xoperands
[1] = XEXP (DECL_RTL (thunk_fndecl
), 0);
8260 xoperands
[2] = GEN_INT (delta
);
8262 ASM_OUTPUT_LABEL (file
, XSTR (xoperands
[1], 0));
8263 fprintf (file
, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
8265 /* Output the thunk. We know that the function is in the same
8266 translation unit (i.e., the same space) as the thunk, and that
8267 thunks are output after their method. Thus, we don't need an
8268 external branch to reach the function. With SOM and GAS,
8269 functions and thunks are effectively in different sections.
8270 Thus, we can always use a IA-relative branch and the linker
8271 will add a long branch stub if necessary.
8273 However, we have to be careful when generating PIC code on the
8274 SOM port to ensure that the sequence does not transfer to an
8275 import stub for the target function as this could clobber the
8276 return value saved at SP-24. This would also apply to the
8277 32-bit linux port if the multi-space model is implemented. */
8278 if ((!TARGET_LONG_CALLS
&& TARGET_SOM
&& !TARGET_PORTABLE_RUNTIME
8279 && !(flag_pic
&& TREE_PUBLIC (function
))
8280 && (TARGET_GAS
|| last_address
< 262132))
8281 || (!TARGET_LONG_CALLS
&& !TARGET_SOM
&& !TARGET_PORTABLE_RUNTIME
8282 && ((targetm_common
.have_named_sections
8283 && DECL_SECTION_NAME (thunk_fndecl
) != NULL
8284 /* The GNU 64-bit linker has rather poor stub management.
8285 So, we use a long branch from thunks that aren't in
8286 the same section as the target function. */
8288 && (DECL_SECTION_NAME (thunk_fndecl
)
8289 != DECL_SECTION_NAME (function
)))
8290 || ((DECL_SECTION_NAME (thunk_fndecl
)
8291 == DECL_SECTION_NAME (function
))
8292 && last_address
< 262132)))
8293 || (targetm_common
.have_named_sections
8294 && DECL_SECTION_NAME (thunk_fndecl
) == NULL
8295 && DECL_SECTION_NAME (function
) == NULL
8296 && last_address
< 262132)
8297 || (!targetm_common
.have_named_sections
8298 && last_address
< 262132))))
8301 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8303 output_asm_insn ("b %0", xoperands
);
8307 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8312 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8316 else if (TARGET_64BIT
)
8318 /* We only have one call-clobbered scratch register, so we can't
8319 make use of the delay slot if delta doesn't fit in 14 bits. */
8322 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8323 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8326 output_asm_insn ("b,l .+8,%%r1", xoperands
);
8330 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands
);
8331 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands
);
8335 xoperands
[3] = GEN_INT (val_14
? 8 : 16);
8336 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands
);
8341 output_asm_insn ("bv %%r0(%%r1)", xoperands
);
8342 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8347 output_asm_insn ("bv,n %%r0(%%r1)", xoperands
);
8351 else if (TARGET_PORTABLE_RUNTIME
)
8353 output_asm_insn ("ldil L'%0,%%r1", xoperands
);
8354 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands
);
8357 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8359 output_asm_insn ("bv %%r0(%%r22)", xoperands
);
8363 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8368 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8372 else if (TARGET_SOM
&& flag_pic
&& TREE_PUBLIC (function
))
8374 /* The function is accessible from outside this module. The only
8375 way to avoid an import stub between the thunk and function is to
8376 call the function directly with an indirect sequence similar to
8377 that used by $$dyncall. This is possible because $$dyncall acts
8378 as the import stub in an indirect call. */
8379 ASM_GENERATE_INTERNAL_LABEL (label
, "LTHN", current_thunk_number
);
8380 xoperands
[3] = gen_rtx_SYMBOL_REF (Pmode
, label
);
8381 output_asm_insn ("addil LT'%3,%%r19", xoperands
);
8382 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands
);
8383 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands
);
8384 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands
);
8385 output_asm_insn ("depi 0,31,2,%%r22", xoperands
);
8386 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands
);
8387 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands
);
8391 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8397 output_asm_insn ("bve (%%r22)", xoperands
);
8400 else if (TARGET_NO_SPACE_REGS
)
8402 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands
);
8407 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands
);
8408 output_asm_insn ("mtsp %%r21,%%sr0", xoperands
);
8409 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands
);
8414 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8416 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8420 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands
);
8422 if (TARGET_SOM
|| !TARGET_GAS
)
8424 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands
);
8425 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands
);
8429 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands
);
8430 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands
);
8434 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8436 output_asm_insn ("bv %%r0(%%r22)", xoperands
);
8440 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8445 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8452 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8454 output_asm_insn ("ldil L'%0,%%r22", xoperands
);
8455 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands
);
8459 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8464 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
  final_end_function ();

  fprintf (file, "\t.EXIT\n\t.PROCEND\n");

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We're done with this subspace except possibly for some additional
	 debug information.  Forget that we are in this subspace to ensure
	 that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      switch_to_section (data_section);
      output_asm_insn (".align 4", xoperands);
      ASM_OUTPUT_LABEL (file, label);
      output_asm_insn (".word P'%0", xoperands);
    }

  current_thunk_number++;
  nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
	    & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
  last_address += nbytes;
  if (old_last_address > last_address)
    last_address = UINT_MAX;
  update_total_code_bytes (nbytes);
}
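
/* Illustrative example only: for a thunk whose DELTA fits in 14 bits
   and which qualifies for the short non-PIC sequence above, the
   emitted code is essentially

	b	target_function
	ldo	delta(%r26),%r26

   i.e. the this pointer in %r26 is adjusted in the delay slot of the
   branch.  The longer sequences above differ only in how the target
   address is formed and which scratch registers are available.  */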
/* Only direct calls to static functions are allowed to be sibling (tail)
   call optimized.

   This restriction is necessary because some linker generated stubs will
   store return pointers into rp' in some cases which might clobber a
   live value already in rp'.

   In a sibcall the current function and the target function share stack
   space.  Thus if the path to the current function and the path to the
   target function save a value in rp', they save the value into the
   same stack slot, which has undesirable consequences.

   Because of the deferred binding nature of shared libraries any function
   with external scope could be in a different load module and thus require
   rp' to be saved when calling that function.  So sibcall optimizations
   can only be safe for static functions.

   Note that GCC never needs return value relocations, so we don't have to
   worry about static calls with return value relocations (which require
   saving rp').

   It is safe to perform a sibcall optimization when the target function
   will never return.  */

static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  if (TARGET_PORTABLE_RUNTIME)
    return false;

  /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
     single subspace mode and the call is not indirect.  As far as I know,
     there is no operating system support for the multiple subspace mode.
     It might be possible to support indirect calls if we didn't use
     $$dyncall (see the indirect sequence generated in pa_output_call).  */
  if (TARGET_ELF32)
    return (decl != NULL_TREE);

  /* Sibcalls are not ok because the arg pointer register is not a fixed
     register.  This prevents the sibcall optimization from occurring.  In
     addition, there are problems with stub placement using GNU ld.  This
     is because a normal sibcall branch uses a 17-bit relocation while
     a regular call branch uses a 22-bit relocation.  As a result, more
     care needs to be taken in the placement of long-branch stubs.  */
  if (TARGET_64BIT)
    return false;

  /* Sibcalls are only ok within a translation unit.  */
  return (decl && !TREE_PUBLIC (decl));
}
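
/* For example (illustrative): given

     static int f (int x) { return x + 1; }
     int g (int x) { return f (x); }

   the call to f may be compiled as a sibcall because f has static
   scope, while a call to an extern function is rejected above since it
   could resolve to another load module and need rp' saved by a linker
   stub.  */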
/* ??? Addition is not commutative on the PA due to the weird implicit
   space register selection rules for memory addresses.  Therefore, we
   don't consider a + b == b + a, as this might be inside a MEM.  */

static bool
pa_commutative_p (const_rtx x, int outer_code)
{
  return (COMMUTATIVE_P (x)
	  && (TARGET_NO_SPACE_REGS
	      || (outer_code != UNKNOWN && outer_code != MEM)
	      || GET_CODE (x) != PLUS));
}
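
/* Illustrative case: in (mem:SI (plus:SI (reg A) (reg B))), swapping
   A and B could change which register the hardware uses for implicit
   space register selection, so PLUS is only treated as commutative
   here when it is known not to be part of a memory address.  */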
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpyadd instructions.  */
int
pa_fmpyaddoperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the addition.  One of the input operands must
     be the same as the output operand.  */
  if (! rtx_equal_p (operands[3], operands[4])
      && ! rtx_equal_p (operands[3], operands[5]))
    return 0;

  /* Inout operand of add cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* multiply cannot feed into addition operands.  */
  if (rtx_equal_p (operands[4], operands[0])
      || rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpyadd.  */
  return 1;
}
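
/* Illustrative operand layout: the checks above accept an independent
   pair such as

       t = x * y		(fmpy:  operands[0], [1], [2])
       a = a + b		(fadd:  operands[3], [4], [5])

   where the multiply's registers do not overlap the add's input/output
   operand, so both operations can issue as one fmpyadd instruction.  */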
#if !defined(USE_COLLECT2)
static void
pa_asm_out_constructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef CTORS_SECTION_ASM_OP
  default_ctor_section_asm_out_constructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_constructor (symbol, priority);
# else
  default_stabs_asm_out_constructor (symbol, priority);
# endif
#endif
}

static void
pa_asm_out_destructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef DTORS_SECTION_ASM_OP
  default_dtor_section_asm_out_destructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_destructor (symbol, priority);
# else
  default_stabs_asm_out_destructor (symbol, priority);
# endif
#endif
}
#endif
/* This function places uninitialized global data in the bss section.
   The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
   function on the SOM port to prevent uninitialized global data from
   being placed in the data section.  */

void
pa_asm_output_aligned_bss (FILE *stream,
			   const char *name,
			   unsigned HOST_WIDE_INT size,
			   unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
  ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
#endif

  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}
/* Both the HP and GNU assemblers under HP-UX provide a .comm directive
   that doesn't allow the alignment of global common storage to be directly
   specified.  The SOM linker aligns common storage based on the rounded
   value of the NUM_BYTES parameter in the .comm directive.  It's not
   possible to use the .align directive as it doesn't affect the alignment
   of the label associated with a .comm directive.  */

void
pa_asm_output_aligned_common (FILE *stream,
			      const char *name,
			      unsigned HOST_WIDE_INT size,
			      unsigned int align)
{
  unsigned int max_common_align;

  max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
  if (align > max_common_align)
    {
      warning (0, "alignment (%u) for %s exceeds maximum alignment "
	       "for global common data.  Using %u",
	       align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
      align = max_common_align;
    }

  switch_to_section (bss_section);

  assemble_name (stream, name);
  fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED "\n",
	   MAX (size, align / BITS_PER_UNIT));
}
/* We can't use .comm for local common storage as the SOM linker effectively
   treats the symbol as universal and uses the same storage for local symbols
   with the same name in different object files.  The .block directive
   reserves an uninitialized block of storage.  However, it's not common
   storage.  Fortunately, GCC never requests common storage with the same
   name in any given translation unit.  */

void
pa_asm_output_aligned_local (FILE *stream,
			     const char *name,
			     unsigned HOST_WIDE_INT size,
			     unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef LOCAL_ASM_OP
  fprintf (stream, "%s", LOCAL_ASM_OP);
  assemble_name (stream, name);
  fprintf (stream, "\n");
#endif

  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpysub instructions.  */
int
pa_fmpysuboperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the subtraction.  Subtraction is not a commutative
     operation, so operands[4] must be the same as operands[3].  */
  if (! rtx_equal_p (operands[3], operands[4]))
    return 0;

  /* multiply cannot feed into subtraction.  */
  if (rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* Inout operand of sub cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpysub.  */
  return 1;
}
/* Return 1 if the given constant is 2, 4, or 8.  These are the valid
   constants for shadd instructions.  */
int
pa_shadd_constant_p (int val)
{
  if (val == 2 || val == 4 || val == 8)
    return 1;
  else
    return 0;
}
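
/* For example, the address computation x + y * 4 can be done with a
   single sh2add instruction; the accepted constants 2, 4 and 8
   correspond to the sh1add, sh2add and sh3add forms.  */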
/* Return TRUE if INSN branches forward.  */

static bool
forward_branch_p (rtx insn)
{
  rtx lab = JUMP_LABEL (insn);

  /* The INSN must have a jump label.  */
  gcc_assert (lab != NULL_RTX);

  if (INSN_ADDRESSES_SET_P ())
    return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));

  while (insn)
    {
      if (insn == lab)
	return true;
      else
	insn = NEXT_INSN (insn);
    }

  return false;
}
/* Return 1 if INSN is in the delay slot of a call instruction.  */
int
pa_jump_in_call_delay (rtx insn)
{
  if (! JUMP_P (insn))
    return 0;

  if (PREV_INSN (insn)
      && PREV_INSN (PREV_INSN (insn))
      && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
    {
      rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));

      return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
	      && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
    }
  else
    return 0;
}
/* Output an unconditional move and branch insn.  */

const char *
pa_output_parallel_movb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* These are the cases in which we win.  */
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
	{
	  /* Nothing in the delay slot, fake it by putting the combined
	     insn (the copy or add) in the delay slot of a bl.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "b %2\n\tldi %1,%0";
	  else
	    return "b %2\n\tcopy %1,%0";
	}
      else
	{
	  /* Something in the delay slot, but we've got a long branch.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "ldi %1,%0\n\tb %2";
	  else
	    return "copy %1,%0\n\tb %2";
	}
    }

  if (GET_CODE (operands[1]) == CONST_INT)
    output_asm_insn ("ldi %1,%0", operands);
  else
    output_asm_insn ("copy %1,%0", operands);
  return pa_output_lbranch (operands[2], insn, 1);
}
/* Output an unconditional add and branch insn.  */

const char *
pa_output_parallel_addb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      /* Nothing in the delay slot, fake it by putting the combined
	 insn (the copy or add) in the delay slot of a bl.  */
      if (dbr_sequence_length () == 0)
	return "b %3\n\tadd%I1 %1,%0,%0";
      /* Something in the delay slot, but we've got a long branch.  */
      else
	return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return pa_output_lbranch (operands[3], insn, 1);
}
/* Return nonzero if INSN (a jump insn) immediately follows a call
   to a named function.  This is used to avoid filling the delay slot
   of the jump since it can usually be eliminated by modifying RP in
   the delay slot of the call.  */

int
pa_following_call (rtx insn)
{
  if (! TARGET_JUMP_IN_DELAY)
    return 0;

  /* Find the previous real insn, skipping NOTEs.  */
  insn = PREV_INSN (insn);
  while (insn && NOTE_P (insn))
    insn = PREV_INSN (insn);

  /* Check for CALL_INSNs and millicode calls.  */
  if (insn
      && ((CALL_P (insn)
	   && get_attr_type (insn) != TYPE_DYNCALL)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != SEQUENCE
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && get_attr_type (insn) == TYPE_MILLI)))
    return 1;

  return 0;
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.

   We surround the jump table itself with BEGIN_BRTAB and END_BRTAB
   insns.  Those insns mark where we should emit .begin_brtab and
   .end_brtab directives when using GAS.  This allows for better link
   time optimizations.  */

static void
pa_reorg (void)
{
  rtx insn;

  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();

  /* Still need brtab marker insns.  FIXME: the presence of these
     markers disables output of the branch table to readonly memory,
     and any alignment directives that might be needed.  Possibly,
     the begin_brtab insn should be output before the label for the
     table.  This doesn't matter at the moment since the tables are
     always output in the text section.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      /* Find an ADDR_VEC insn.  */
      if (! JUMP_TABLE_DATA_P (insn))
	continue;

      /* Now generate markers for the beginning and end of the
	 branch table.  */
      emit_insn_before (gen_begin_brtab (), insn);
      emit_insn_after (gen_end_brtab (), insn);
    }
}
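
/* The effect, illustratively: with GAS a branch table is bracketed as

	.begin_brtab
	.word	L$0042
	.word	L$0043
	.end_brtab

   so the linker can adjust the table entries if long-branch stubs are
   inserted between the table and its targets.  */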
/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform a FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed location.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is an insn away).  These are difficult to use due to the
   branch length restrictions.  */
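
/* Illustrative combination: an independent register copy followed by a
   backward unconditional branch, roughly

	copy %r4,%r5
	b L$0100

   can be fused by this pass into a single

	movb,tr %r4,%r5,L$0100

   where the ",tr" (always true) completer makes the branch part
   unconditional, matching the movb case described above.  */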
static void
pa_combine_instructions (void)
{
  rtx anchor, new_rtx;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  new_rtx = make_insn_raw (new_rtx);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
	 Also ignore any special USE insns.  */
      if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
	  || GET_CODE (PATTERN (anchor)) == USE
	  || GET_CODE (PATTERN (anchor)) == CLOBBER)
	continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
	  || anchor_attr == PA_COMBINE_TYPE_FADDSUB
	  || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
	      && ! forward_branch_p (anchor)))
	{
	  rtx floater;

	  for (floater = PREV_INSN (anchor);
	       floater;
	       floater = PREV_INSN (floater))
	    {
	      if (NOTE_P (floater)
		  || (NONJUMP_INSN_P (floater)
		      && (GET_CODE (PATTERN (floater)) == USE
			  || GET_CODE (PATTERN (floater)) == CLOBBER)))
		continue;

	      /* Anything except a regular INSN will stop our search.  */
	      if (! NONJUMP_INSN_P (floater))
		{
		  floater = NULL_RTX;
		  break;
		}

	      /* See if FLOATER is suitable for combination with the
		 anchor.  */
	      floater_attr = get_attr_pa_combine_type (floater);
	      if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		      && floater_attr == PA_COMBINE_TYPE_FMPY))
		{
		  /* If ANCHOR and FLOATER can be combined, then we're
		     done with this pass.  */
		  if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					SET_DEST (PATTERN (floater)),
					XEXP (SET_SRC (PATTERN (floater)), 0),
					XEXP (SET_SRC (PATTERN (floater)), 1)))
		    break;
		}

	      else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
		       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
		{
		  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		  else
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    SET_SRC (PATTERN (floater)),
					    SET_SRC (PATTERN (floater))))
			break;
		    }
		}
	    }

	  /* If we didn't find anything on the backwards scan try forwards.  */
	  if (!floater
	      && (anchor_attr == PA_COMBINE_TYPE_FMPY
		  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
	    {
	      for (floater = anchor; floater; floater = NEXT_INSN (floater))
		{
		  if (NOTE_P (floater)
		      || (NONJUMP_INSN_P (floater)
			  && (GET_CODE (PATTERN (floater)) == USE
			      || GET_CODE (PATTERN (floater)) == CLOBBER)))
		    continue;

		  /* Anything except a regular INSN will stop our search.  */
		  if (! NONJUMP_INSN_P (floater))
		    {
		      floater = NULL_RTX;
		      break;
		    }

		  /* See if FLOATER is suitable for combination with the
		     anchor.  */
		  floater_attr = get_attr_pa_combine_type (floater);
		  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
			  && floater_attr == PA_COMBINE_TYPE_FMPY))
		    {
		      /* If ANCHOR and FLOATER can be combined, then we're
			 done with this pass.  */
		      if (pa_can_combine_p (new_rtx, anchor, floater, 1,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		}
	    }

	  /* FLOATER will be nonzero if we found a suitable floating
	     insn for combination with ANCHOR.  */
	  if (floater
	      && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		  || anchor_attr == PA_COMBINE_TYPE_FMPY))
	    {
	      /* Emit the new instruction and delete the old anchor.  */
	      emit_insn_before (gen_rtx_PARALLEL
				(VOIDmode,
				 gen_rtvec (2, PATTERN (anchor),
					    PATTERN (floater))),
				anchor);

	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	  else if (floater
		   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
	    {
	      rtx temp;
	      /* Emit the new_jump instruction and delete the old anchor.  */
	      temp = emit_jump_insn_before (gen_rtx_PARALLEL
					    (VOIDmode,
					     gen_rtvec (2, PATTERN (anchor),
							PATTERN (floater))),
					    anchor);

	      JUMP_LABEL (temp) = JUMP_LABEL (anchor);
	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	}
    }
}
static int
pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
		  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx start, end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
  INSN_CODE (new_rtx) = -1;
  insn_code_number = recog_memoized (new_rtx);
  if (insn_code_number < 0
      || (extract_insn (new_rtx), ! constrain_operands (1)))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There's up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in particular.  */

int
pa_insn_refs_are_delayed (rtx insn)
{
  return ((NONJUMP_INSN_P (insn)
	   && GET_CODE (PATTERN (insn)) != SEQUENCE
	   && GET_CODE (PATTERN (insn)) != USE
	   && GET_CODE (PATTERN (insn)) != CLOBBER
	   && get_attr_type (insn) == TYPE_MILLI));
}
/* Promote the return value, but not the arguments.  */

static enum machine_mode
pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			  enum machine_mode mode,
			  int *punsignedp ATTRIBUTE_UNUSED,
			  const_tree fntype ATTRIBUTE_UNUSED,
			  int for_return)
{
  if (for_return == 0)
    return mode;
  return promote_mode (type, mode, punsignedp);
}
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else promoting
   return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

static rtx
pa_function_value (const_tree valtype,
		   const_tree func ATTRIBUTE_UNUSED,
		   bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      if (TARGET_64BIT)
	{
	  /* Aggregates with a size less than or equal to 128 bits are
	     returned in GR 28(-29).  They are left justified.  The pad
	     bits are undefined.  Larger aggregates are returned in
	     memory.  */
	  rtx loc[2];
	  int i, offset = 0;
	  int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;

	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, 28 + i),
					  GEN_INT (offset));
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
	}
      else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
	{
	  /* Aggregates 5 to 8 bytes in size are returned in general
	     registers r28-r29 in the same manner as other non
	     floating-point objects.  The data is right-justified and
	     zero-extended to 64 bits.  This is opposite to the normal
	     justification used on big endian targets and requires
	     special treatment.  */
	  rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (DImode, 28), const0_rtx);
	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	}
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
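
/* Illustrative returns under the rules above (32-bit, hard float):
   an int comes back in %r28, a long long in %r28/%r29, and a float
   or double in %fr4 (hard register 32 in GCC's numbering); sub-word
   integers are widened to word_mode first.  */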
/* Implement the TARGET_LIBCALL_VALUE hook.  */

static rtx
pa_libcall_value (enum machine_mode mode,
		  const_rtx fun ATTRIBUTE_UNUSED)
{
  if (! TARGET_SOFT_FLOAT
      && (mode == SFmode || mode == DFmode))
    return gen_rtx_REG (mode, 32);
  else
    return gen_rtx_REG (mode, 28);
}
/* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook.  */

static bool
pa_function_value_regno_p (const unsigned int regno)
{
  if (regno == 28
      || (! TARGET_SOFT_FLOAT && regno == 32))
    return true;

  return false;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int arg_size = FUNCTION_ARG_SIZE (mode, type);

  cum->nargs_prototype--;
  cum->words += (arg_size
		 + ((cum->words & 01)
		    && type != NULL_TREE
		    && arg_size > 1));
}
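
/* Worked example: with cum->words == 1 and a two-word argument, the
   expression above adds arg_size + 1 == 3, so cum->words becomes 4;
   the odd slot 1 is skipped to give the argument double-word
   alignment.  */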
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
static rtx
pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
	return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
	alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
	return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

	 Remember, gprs grow towards smaller register numbers while
	 fprs grow to higher register numbers.  Also remember that
	 although FP regs are 32-bit addressable, we pretend that
	 the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
	 treatment.  */
      if (arg_size > 1
	  || mode == BLKmode
	  || (type && (AGGREGATE_TYPE_P (type)
		       || TREE_CODE (type) == COMPLEX_TYPE
		       || TREE_CODE (type) == VECTOR_TYPE)))
	{
	  /* Double-extended precision (80-bit), quad-precision (128-bit)
	     and aggregates including complex numbers are aligned on
	     128-bit boundaries.  The first eight 64-bit argument slots
	     are associated one-to-one, with general registers r26
	     through r19, and also with floating-point registers fr4
	     through fr11.  Arguments larger than one word are always
	     passed in general registers.

	     Using a PARALLEL with a word mode register results in left
	     justified data on a big-endian target.  */
	  rtx loc[8];
	  int i, offset = 0, ub = arg_size;

	  /* Align the base register.  */
	  gpr_reg_base -= alignment;

	  ub = MIN (ub, max_arg_words - cum->words - alignment);
	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, gpr_reg_base),
					  GEN_INT (offset));
	      gpr_reg_base -= 1;
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
	}
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
	 which registers we must use.  */
      if (arg_size > 1)
	{
	  if (cum->words)
	    {
	      gpr_reg_base = 23;
	      fpr_reg_base = 38;
	    }
	  else
	    {
	      gpr_reg_base = 25;
	      fpr_reg_base = 34;
	    }

	  /* Structures 5 to 8 bytes in size are passed in the general
	     registers in the same manner as other non floating-point
	     objects.  The data is right-justified and zero-extended
	     to 64 bits.  This is opposite to the normal justification
	     used on big endian targets and requires special treatment.
	     We now define BLOCK_REG_PADDING to pad these objects.
	     Aggregates, complex and vector types are passed in the same
	     manner as structures.  */
	  if (mode == BLKmode
	      || (type && (AGGREGATE_TYPE_P (type)
			   || TREE_CODE (type) == COMPLEX_TYPE
			   || TREE_CODE (type) == VECTOR_TYPE)))
	    {
	      rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (DImode, gpr_reg_base),
					   const0_rtx);
	      return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	    }
	}
      else
	{
	  /* We have a single word (32 bits).  A simple computation
	     will get us the register #s we need.  */
	  gpr_reg_base = 26 - cum->words;
	  fpr_reg_base = 32 + 2 * cum->words;
	}
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
	  is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
	  pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
	  registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
	  sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
	 calls with the 32 bit ABI and the HP assembler since there is no
	 way to the specify argument locations in static functions.  */
      || (!TARGET_64BIT
	  && !TARGET_GAS
	  && !cum->incoming
	  && cum->indirect
	  && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
	= gen_rtx_PARALLEL
	    (mode,
	     gen_rtvec (2,
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, fpr_reg_base),
					   const0_rtx),
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, gpr_reg_base),
					   const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
	  /* Indirect calls in the normal 32bit ABI require all arguments
	     to be passed in general registers.  */
	  || (!TARGET_PORTABLE_RUNTIME
	      && !TARGET_64BIT
	      && !TARGET_ELF32
	      && cum->indirect)
	  /* If the parameter is not a scalar floating-point parameter,
	     then it belongs in GPRs.  */
	  || GET_MODE_CLASS (mode) != MODE_FLOAT
	  /* Structure with single SFmode field belongs in GPR.  */
	  || (type && AGGREGATE_TYPE_P (type)))
	retval = gen_rtx_REG (mode, gpr_reg_base);
      else
	retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
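
/* Worked example (32-bit ABI): the first word-sized integer argument
   has cum->words == 0, so gpr_reg_base == 26 and it is passed in
   %r26; subsequent words land in %r25, %r24 and %r23 before the
   routine returns NULL and the argument goes to the stack.  */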
/* Arguments larger than one word are double word aligned.  */

static unsigned int
pa_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  bool singleword = (type
		     ? (integer_zerop (TYPE_SIZE (type))
			|| !TREE_CONSTANT (TYPE_SIZE (type))
			|| int_size_in_bytes (type) <= UNITS_PER_WORD)
		     : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);

  return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
}
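
/* For example: an int or a double that fits in one word gets
   PARM_BOUNDARY, while an aggregate larger than UNITS_PER_WORD (say a
   16-byte struct on the 64-bit target) gets MAX_PARM_BOUNDARY, i.e.
   double-word alignment.  */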
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
		      tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
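
/* Worked example (64-bit; the 32-bit ABI never splits arguments):
   with cum->words == 6 and a four-word argument, 6 + 4 > 8 but
   6 < 8, so (8 - 6 - 0) * UNITS_PER_WORD == 16 bytes are passed in
   the last two register slots and the remainder on the stack.  */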
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
	{
	  /* We only want to emit a .nsubspa directive once at the
	     start of the function.  */
	  cfun->machine->in_nsubspa = 1;

	  /* Create a new subspace for the text.  This provides
	     better stub placement and one-only functions.  */
	  if (cfun->decl
	      && DECL_ONE_ONLY (cfun->decl)
	      && !DECL_WEAK (cfun->decl))
	    {
	      output_section_asm_op ("\t.SPACE $TEXT$\n"
				     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
				     "ACCESS=44,SORT=24,COMDAT");
	      return;
	    }
	}
      else
	{
	  /* There isn't a current function or the body of the current
	     function has been completed.  So, we are changing to the
	     text section to output debugging information.  Thus, we
	     need to forget that we are in the text section so that
	     varasm.c will call us when text_section is selected again.  */
	  gcc_assert (!cfun || !cfun->machine
		      || cfun->machine->in_nsubspa == 2);
	  in_section = NULL;
	}
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
			   "\t.SPACE $TEXT$\n"
			   "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
			   "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
			   som_output_comdat_data_section_asm_op,
			   "\t.SPACE $PRIVATE$\n"
			   "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
			   "ACCESS=31,SORT=24,COMDAT");

  som_tm_clone_table_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
/* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION.  */

static section *
pa_som_tm_clone_table_section (void)
{
  return som_tm_clone_table_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
		   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
	  || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
	  && DECL_ONE_ONLY (exp)
	  && !DECL_WEAK (exp))
	return som_one_only_readonly_data_section;
      else
	return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
	   && TREE_CODE (exp) == VAR_DECL
	   && DECL_ONE_ONLY (exp)
	   && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
	  || int_size_in_bytes (type) <= 0);
}
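
/* For example: an 8-byte struct is returned in registers on both
   targets; a 12-byte struct is returned in memory on the 32-bit
   target but in registers on PA64; and zero-sized or variable-sized
   types (int_size_in_bytes <= 0) always go to memory.  */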
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */

/* Vector of extern_symbol pointers.  */
static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  gcc_assert (file == asm_out_file);
  extern_symbol p = {decl, name};
  vec_safe_push (extern_symbols, p);
}

/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
	  && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
	ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  vec_free (extern_symbols);
}
#endif
/* Return true if a change from mode FROM to mode TO for a register
   in register class RCLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
			     enum reg_class rclass)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
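
/* For example: SImode <-> DImode changes are rejected above for the
   floating-point register classes, while in the general registers a
   change is only rejected when the new mode is both wider than a word
   and wider than the old mode.  */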
/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
/* Length in units of the trampoline instruction code.  */

#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))


/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
   and then branches to the specified routine.

   This code template is copied from text segment to stack location
   and then patched with pa_trampoline_init to contain valid values,
   and then entered as a subroutine.

   It is best to keep this as small as possible to avoid having to
   flush multiple lines in the cache.  */

static void
pa_asm_trampoline_template (FILE *f)
{
  if (!TARGET_64BIT)
    {
      fputs ("\tldw 36(%r22),%r21\n", f);
      fputs ("\tbb,>=,n %r21,30,.+16\n", f);
      if (ASSEMBLER_DIALECT == 0)
	fputs ("\tdepi 0,31,2,%r21\n", f);
      else
	fputs ("\tdepwi 0,31,2,%r21\n", f);
      fputs ("\tldw 4(%r21),%r19\n", f);
      fputs ("\tldw 0(%r21),%r21\n", f);
      if (TARGET_PA_20)
	{
	  fputs ("\tbve (%r21)\n", f);
	  fputs ("\tldw 40(%r22),%r29\n", f);
	  fputs ("\t.word 0\n", f);
	  fputs ("\t.word 0\n", f);
	}
      else
	{
	  fputs ("\tldsid (%r21),%r1\n", f);
	  fputs ("\tmtsp %r1,%sr0\n", f);
	  fputs ("\tbe 0(%sr0,%r21)\n", f);
	  fputs ("\tldw 40(%r22),%r29\n", f);
	}
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
    }
  else
    {
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\tmfia %r31\n", f);
      fputs ("\tldd 24(%r31),%r1\n", f);
      fputs ("\tldd 24(%r1),%r27\n", f);
      fputs ("\tldd 16(%r1),%r1\n", f);
      fputs ("\tbve (%r1)\n", f);
      fputs ("\tldd 32(%r31),%r31\n", f);
      fputs ("\t.dword 0 ; fptr\n", f);
      fputs ("\t.dword 0 ; static link\n", f);
    }
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 36.
   Move the static chain value to trampoline template at offset 40.
   Move the trampoline address to trampoline template at offset 44.
   Move r19 to trampoline template at offset 48.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */

static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 36);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 40);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 44);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_andsi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
							    r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }

#ifdef HAVE_ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 44.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
  return addr;
}
static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));

  return x;
}

static rtx
pa_internal_arg_pointer (void)
{
  /* The argument pointer and the hard frame pointer are the same in
     the 32-bit runtime, so we don't need a copy.  */
  if (TARGET_64BIT)
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.  */

static bool
pa_can_eliminate (const int from, const int to)
{
  /* The argument cannot be eliminated in the 64-bit runtime.  */
  if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
    return false;

  return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : true);
}

/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */
HOST_WIDE_INT
pa_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
      && to == STACK_POINTER_REGNUM)
    offset = -pa_compute_frame_size (get_frame_size (), 0);
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
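
/* Worked example: for a function whose pa_compute_frame_size is 128
   bytes, eliminating the frame pointer to the stack pointer yields an
   offset of -128; the PA stack grows toward higher addresses, so the
   frame lies below the stack pointer.  */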
static void
pa_conditional_register_usage (void)
{
  int i;

  if (!TARGET_64BIT && !TARGET_PA_11)
    {
      for (i = 56; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
      for (i = 33; i < 56; i += 2)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
    {
      for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Target hook for c_mode_for_suffix.  */

static enum machine_mode
pa_c_mode_for_suffix (char suffix)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      if (suffix == 'q')
	return TFmode;
    }

  return VOIDmode;
}
/* Target hook for function_section.  */

static section *
pa_function_section (tree decl, enum node_frequency freq,
		     bool startup, bool exit)
{
  /* Put functions in text section if target doesn't have named sections.  */
  if (!targetm_common.have_named_sections)
    return text_section;

  /* Force nested functions into the same section as the containing
     function.  */
  if (decl
      && DECL_SECTION_NAME (decl) == NULL_TREE
      && DECL_CONTEXT (decl) != NULL_TREE
      && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
      && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL_TREE)
    return function_section (DECL_CONTEXT (decl));

  /* Otherwise, use the default function section.  */
  return default_function_section (decl, freq, startup, exit);
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   In 64-bit mode, we reject CONST_DOUBLES.  We also reject CONST_INTS
   that need more than three instructions to load prior to reload.  This
   limit is somewhat arbitrary.  It takes three instructions to load a
   CONST_INT from memory but two are memory accesses.  It may be better
   to increase the allowed range for CONST_INTS.  We may also be able
   to handle CONST_DOUBLES.  */

static bool
pa_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
    return false;

  if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
    return false;

  /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
     legitimate constants.  The other variants can't be handled by
     the move patterns after reload starts.  */
  if (pa_tls_referenced_p (x))
    return false;

  if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (TARGET_64BIT
      && HOST_BITS_PER_WIDE_INT > 32
      && GET_CODE (x) == CONST_INT
      && !reload_in_progress
      && !reload_completed
      && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
      && !pa_cint_ok_for_move (INTVAL (x)))
    return false;

  if (function_label_operand (x, mode))
    return false;

  return true;
}
10324 static unsigned int
10325 pa_section_type_flags (tree decl
, const char *name
, int reloc
)
10327 unsigned int flags
;
10329 flags
= default_section_type_flags (decl
, name
, reloc
);
10331 /* Function labels are placed in the constant pool. This can
10332 cause a section conflict if decls are put in ".data.rel.ro"
10333 or ".data.rel.ro.local" using the __attribute__ construct. */
10334 if (strcmp (name
, ".data.rel.ro") == 0
10335 || strcmp (name
, ".data.rel.ro.local") == 0)
10336 flags
|= SECTION_WRITE
| SECTION_RELRO
;
/* pa_legitimate_address_p recognizes an RTL expression that is a
   valid memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
   REG+REG, and REG+(REG*SCALE).  The indexed address forms are only
   available with floating point loads and stores, and integer loads.
   We get better code by allowing indexed addresses in the initial
   RTL generation.

   The acceptance of indexed addresses as legitimate implies that we
   must provide patterns for doing indexed integer stores, or the move
   expanders must force the address of an indexed store to a register.
   We have adopted the latter approach.

   Another function of pa_legitimate_address_p is to ensure that
   the base register is a valid pointer for indexed instructions.
   On targets that have non-equivalent space registers, we have to
   know at the time of assembler output which register in a REG+REG
   pair is the base register.  The REG_POINTER flag is sometimes lost
   in reload and the following passes, so it can't be relied on during
   code generation.  Thus, we either have to canonicalize the order
   of the registers in REG+REG indexed addresses, or treat REG+REG
   addresses separately and provide patterns for both permutations.

   The latter approach requires several hundred additional lines of
   code in pa.md.  The downside to canonicalizing is that a PLUS
   in the wrong order can't combine to form a scaled indexed
   memory operand.  As we won't need to canonicalize the operands if
   the REG_POINTER lossage can be fixed, it seems better to canonicalize.

   We initially break out scaled indexed addresses in canonical order
   in pa_emit_move_sequence.  LEGITIMIZE_ADDRESS also canonicalizes
   scaled indexed addresses during RTL generation.  However, fold_rtx
   has its own opinion on how the operands of a PLUS should be ordered.
   If one of the operands is equivalent to a constant, it will make
   that operand the second operand.  As the base register is likely to
   be equivalent to a SYMBOL_REF, we have made it the second operand.

   pa_legitimate_address_p accepts REG+REG as legitimate when the
   operands are in the order INDEX+BASE on targets with non-equivalent
   space registers, and in any order on targets with equivalent space
   registers.  It accepts both MULT+BASE and BASE+MULT for scaled indexing.

   We treat a SYMBOL_REF as legitimate if it is part of the current
   function's constant pool, because such addresses can actually be
   output as REG+SMALLINT.  */
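/* Sketch of the accepted PLUS shapes (added for exposition; the
   register numbers are arbitrary):

	REG+SMALLINT	 (plus (reg %r26) (const_int 12))
	REG+REG		 (plus (reg index) (reg base))
	REG+(REG*SCALE)	 (plus (mult (reg index) (const_int 8)) (reg base))

   For scaled indexing, the code below requires the scale to equal
   GET_MODE_SIZE (mode) for the access.  */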
static bool
pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  if ((REG_P (x)
       && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
                  : REG_OK_FOR_BASE_P (x)))
      || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
           || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
          && REG_P (XEXP (x, 0))
          && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
                     : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
    return true;
  if (GET_CODE (x) == PLUS)
    {
      rtx base = 0, index = 0;

      /* For REG+REG, the base register should be in XEXP (x, 1),
         so check it first.  */
      if (REG_P (XEXP (x, 1))
          && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
                     : REG_OK_FOR_BASE_P (XEXP (x, 1))))
        base = XEXP (x, 1), index = XEXP (x, 0);
      else if (REG_P (XEXP (x, 0))
               && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
                          : REG_OK_FOR_BASE_P (XEXP (x, 0))))
        base = XEXP (x, 0), index = XEXP (x, 1);
      else
        return false;
      if (GET_CODE (index) == CONST_INT)
        {
          if (INT_5_BITS (index))
            return true;

          /* When INT14_OK_STRICT is false, a secondary reload is needed
             to adjust the displacement of SImode and DImode floating point
             instructions but this may fail when the register also needs
             reloading.  So, we return false when STRICT is true.  We
             also reject long displacements for float mode addresses since
             the majority of accesses will use floating point instructions
             that don't support 14-bit offsets.  */
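          /* For example (illustrative, not from the original sources):
             a displacement of 100 fails the 5-bit test above, so an
             SFmode address like (plus (reg) (const_int 100)) reaches
             the test below.  When INT14_OK_STRICT is false, it is
             rejected during initial RTL generation and under strict
             checking; only reload's non-strict rewriting can accept it
             via base14_operand, so that a secondary reload can fix up
             the displacement.  */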
          if (!INT14_OK_STRICT
              && (strict || !(reload_in_progress || reload_completed))
              && mode != QImode
              && mode != HImode)
            return false;

          return base14_operand (index, mode);
        }
      if (!TARGET_DISABLE_INDEXING
          /* Only accept the "canonical" INDEX+BASE operand order
             on targets with non-equivalent space registers.  */
          && (TARGET_NO_SPACE_REGS
              ? REG_P (index)
              : (base == XEXP (x, 1) && REG_P (index)
                 && (reload_completed
                     || (reload_in_progress && HARD_REGISTER_P (base))
                     || REG_POINTER (base))
                 && (reload_completed
                     || (reload_in_progress && HARD_REGISTER_P (index))
                     || !REG_POINTER (index))))
          && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
          && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
                     : REG_OK_FOR_INDEX_P (index))
          && borx_reg_operand (base, Pmode)
          && borx_reg_operand (index, Pmode))
        return true;
      if (!TARGET_DISABLE_INDEXING
          && GET_CODE (index) == MULT
          && MODE_OK_FOR_SCALED_INDEXING_P (mode)
          && REG_P (XEXP (index, 0))
          && GET_MODE (XEXP (index, 0)) == Pmode
          && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
                     : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
          && GET_CODE (XEXP (index, 1)) == CONST_INT
          && INTVAL (XEXP (index, 1))
             == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
          && borx_reg_operand (base, Pmode))
        return true;

      return false;
    }
  if (GET_CODE (x) == LO_SUM)
    {
      rtx y = XEXP (x, 0);

      if (GET_CODE (y) == SUBREG)
        y = SUBREG_REG (y);

      if (REG_P (y)
          && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
                     : REG_OK_FOR_BASE_P (y)))
        {
          /* Needed for -fPIC */
          if (mode == Pmode
              && GET_CODE (XEXP (x, 1)) == UNSPEC)
            return true;

          if (!INT14_OK_STRICT
              && (strict || !(reload_in_progress || reload_completed))
              && mode != QImode
              && mode != HImode)
            return false;

          if (CONSTANT_P (XEXP (x, 1)))
            return true;
        }
      return false;
    }

  if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
    return true;

  return false;
}
/* Look for machine dependent ways to make the invalid address AD a
   valid address.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This makes reload inheritance and reload_cse work better since Z
   can be reused.

   There may be more opportunities to improve code with this hook.  */
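/* Worked example (added for illustration; the numbers are not from
   the original sources).  For a float mode with !INT14_OK_STRICT,
   mask is 0x1f.  With <large int> = 0x1234:

	0x1234 & 0x1f = 0x14 >= 0x10, so round up:
	Y = (0x1234 & ~0x1f) + 0x1f + 1 = 0x1240
	0x1234 - Y = -12

   0x1240 fits in 14 bits and -12 fits in 5 bits, so the address is
   reloaded as Z = X + 0x1240 with the memory access at -12(Z).  */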
rtx
pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
                              int opnum, int type,
                              int ind_levels ATTRIBUTE_UNUSED)
{
  long offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
          && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
                                      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;

  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP ((new_rtx), 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~mask) + mask + 1;
      else
        newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
          && (GET_MODE_CLASS (mode) == MODE_FLOAT
              || (TARGET_64BIT && (mode) == DImode)))
        newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
        {
          temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
                               GEN_INT (newoffset));
          ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
          push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
                       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                       opnum, (enum reload_type) type);
          return ad;
        }
    }

  return ad;
}