/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
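/* Illustrative note (not from the original source): the bypass applies
   only when the producing insn computes a value of the same width as the
   one the FP store writes, e.g. an SFmode result feeding an SFmode store;
   an SFmode result feeding a DFmode store gets no bypass.  */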
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
                             rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        enum machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
                                                   enum machine_mode, int *,
                                                   const_tree, int);
static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;
/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
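/* Usage sketch (illustrative, not from the original source): passing
   -mfixed-range=fr4-fr31 marks fr4 through fr31 as fixed so that, e.g.,
   kernel code never uses them; if the requested ranges end up covering
   every register in FP_REG_FIRST..FP_REG_LAST, the check above also
   sets MASK_DISABLE_FPREGS and FP register use is disabled outright.  */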
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
          && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
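/* For example (illustrative values): 5000 fits in 14 bits and moves with
   a single ldo; 0x12345000 has its low 11 bits clear and moves with a
   single ldil; 0x3e0, a contiguous run of ones, moves with a single
   zdepi.  */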
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
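/* Worked example (illustrative): the mask selects the low 11 bits plus
   everything from bit 31 up.  For ival = 0x12345000, both parts are
   zero, so x == 0 and the value is accepted; for ival = 0x12345678 the
   low 11 bits are nonzero and it is rejected, since ldil can only set
   the upper 21 bits of a word.  */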
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
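/* Worked example (illustrative): for x = 0x3e0, a run of five ones,
   lsb_mask = 0x20, so t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1)
   = (0x3e + 0x20) & ~0x1f = 0x40, a power of two, and the predicate
   holds; the run is producible by depositing the sign-extended 5-bit
   value 0b11111.  A scattered value such as 0x505 fails the
   power-of-two test.  */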
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have label and not a note.  */
          gcc_assert (LABEL_P (orig));
          LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
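/* Sketch of the generated code (illustrative, in terms of the pattern
   names above): for local-exec, the sequence is a tp_load of the thread
   pointer followed by a tle_load of ADDR's offset relative to it; for
   initial-exec, the offset is first fetched with tie_load{,_pic} and
   then added to the thread pointer by the final move.  */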
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }
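  /* Worked example (illustrative): for an SImode reference to X + 70000,
     mask is 0x3fff, (70000 & 0x3fff) = 4464 is below the 8192 halfway
     point, so we round down to newoffset = 65536.  Z = X + 65536 is
     computed once and the reference becomes memory (Z + 4464), whose
     displacement fits the 14-bit ldo field; other references near
     X + 70000 can then share the same Z via CSE.  */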
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
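/* Illustrative consequence (values as in the returns above): a copy out
   of the shift-amount register SAR is priced high enough that the
   allocator essentially never generates one, an FP<->GR copy costs 16
   because it must bounce through memory, and an ordinary same-class
   copy costs 2.  */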
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case CONST:
      return 2;
    case SYMBOL_REF:
      return 4;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
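/* Worked example (illustrative): on a 32-bit target, a DImode add has
   GET_MODE_SIZE 8, so factor = 2 in the PLUS case and the add is costed
   as two insns; a DImode multiply in the MULT case gets factor * factor,
   i.e. four times the base multiply cost, reflecting the quadratic
   number of partial products.  */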
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && (MEM_P (operand1)
          || (GET_CODE (operand1) == SUBREG
              && MEM_P (XEXP (operand1, 0))))
      && !floating_point_store_memory_operand (operand1, mode))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (reg_plus_base_memory_operand (operand1, mode)
          && !(TARGET_PA_20
               && !TARGET_ELF32
               && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
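  /* For instance (illustrative numbers, not from the original source):
     a DFmode load from [%r26 + 8192] cannot encode 8192 in the short FP
     displacement field, so the block above loads 8192 into SCRATCH_REG,
     forms the address %r26 + SCRATCH_REG there, and rewrites the MEM to
     use the scratch address.  The mirror-image case, storing an FP
     register to such an address, follows.  */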
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && (MEM_P (operand0)
               || (GET_CODE (operand0) == SUBREG
                   && MEM_P (XEXP (operand0, 0))))
           && !floating_point_store_memory_operand (operand0, mode))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (reg_plus_base_memory_operand (operand0, mode)
          && !(TARGET_PA_20
               && !TARGET_ELF32
               && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand0, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
                              replace_equiv_address (operand0, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.  For the most part,
     this is only necessary for SImode and DImode.

     Use scratch_reg to hold the address of the memory location.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      if (operand1 == CONST0_RTX (mode))
        {
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      pa_emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             operand0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg,
                          replace_equiv_address (operand1, scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      /* Legitimize TLS symbol references.  This happens for references
         that aren't a legitimate constant.  */
      if (PA_SYMBOL_REF_TLS_P (operand1))
        operand1 = legitimize_tls_address (operand1);

      if (register_operand (operand1, mode)
          || (GET_CODE (operand1) == CONST_INT
              && pa_cint_ok_for_move (INTVAL (operand1)))
          || (operand1 == CONST0_RTX (mode))
          || (GET_CODE (operand1) == HIGH
              && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
          /* Only `general_operands' can come here, so MEM is ok.  */
          || GET_CODE (operand1) == MEM)
        {
          /* Various sets are created during RTL generation which don't
             have the REG_POINTER flag correctly set.  After the CSE pass,
             instruction recognition can fail if we don't consistently
             set this flag when performing register copies.  This should
             also improve the opportunities for creating insns that use
             unscaled indexing.  */
          if (REG_P (operand0) && REG_P (operand1))
            {
              if (REG_POINTER (operand1)
                  && !REG_POINTER (operand0)
                  && !HARD_REGISTER_P (operand0))
                copy_reg_pointer (operand0, operand1);
            }

          /* When MEMs are broken out, the REG_POINTER flag doesn't
             get set.  In some cases, we can set the REG_POINTER flag
             from the declaration for the MEM.  */
          if (REG_P (operand0)
              && GET_CODE (operand1) == MEM
              && !REG_POINTER (operand0))
            {
              tree decl = MEM_EXPR (operand1);

              /* Set the register pointer flag and register alignment
                 if the declaration for this memory reference is a
                 pointer type.  */
              if (decl)
                {
                  tree type;

                  /* If this is a COMPONENT_REF, use the FIELD_DECL from
                     tree operand 1.  */
                  if (TREE_CODE (decl) == COMPONENT_REF)
                    decl = TREE_OPERAND (decl, 1);

                  type = TREE_TYPE (decl);
                  type = strip_array_types (type);

                  if (POINTER_TYPE_P (type))
                    {
                      int align;

                      type = TREE_TYPE (type);
                      /* Using TYPE_ALIGN_OK is rather conservative as
                         only the ada frontend actually sets it.  */
                      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
                               : BITS_PER_UNIT);
                      mark_reg_pointer (operand0, align);
                    }
                }
            }

          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
          && !(reload_in_progress || reload_completed))
        {
          rtx temp = gen_reg_rtx (DFmode);

          emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
          emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
          return 1;
        }
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
        {
          /* Run this case quickly.  */
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
      if (! (reload_in_progress || reload_completed))
        {
          operands[0] = validize_mem (operand0);
          operands[1] = operand1 = force_reg (mode, operand1);
        }
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || (GET_CODE (operand1) == HIGH
          && symbolic_operand (XEXP (operand1, 0), mode))
      || function_label_operand (operand1, VOIDmode)
      || tls_referenced_p (operand1))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
        {
          ishighonly = 1;
          operand1 = XEXP (operand1, 0);
        }
      if (symbolic_operand (operand1, mode))
        {
          /* Argh.  The assembler and linker can't handle arithmetic
             involving plabels.

             So we force the plabel into memory, load operand0 from
             the memory location, then add in the constant part.  */
          if ((GET_CODE (operand1) == CONST
               && GET_CODE (XEXP (operand1, 0)) == PLUS
               && function_label_operand (XEXP (XEXP (operand1, 0), 0),
                                          VOIDmode))
              || function_label_operand (operand1, VOIDmode))
            {
              rtx temp, const_part;

              /* Figure out what (if any) scratch register to use.  */
              if (reload_in_progress || reload_completed)
                {
                  scratch_reg = scratch_reg ? scratch_reg : operand0;
                  /* SCRATCH_REG will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  scratch_reg = force_mode (word_mode, scratch_reg);
                }
              else if (flag_pic)
                scratch_reg = gen_reg_rtx (Pmode);

              if (GET_CODE (operand1) == CONST)
                {
                  /* Save away the constant part of the expression.  */
                  const_part = XEXP (XEXP (operand1, 0), 1);
                  gcc_assert (GET_CODE (const_part) == CONST_INT);

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
                }
              else
                {
                  /* No constant part.  */
                  const_part = NULL_RTX;

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, operand1);
                }

              /* Get the address of the memory location.  PIC-ify it if
                 necessary.  */
              temp = XEXP (temp, 0);
              if (flag_pic)
                temp = legitimize_pic_address (temp, mode, scratch_reg);

              /* Put the address of the memory location into our destination
                 register.  */
              operands[1] = temp;
              pa_emit_move_sequence (operands, mode, scratch_reg);

              /* Now load from the memory location into our destination
                 register.  */
              operands[1] = gen_rtx_MEM (Pmode, operands[0]);
              pa_emit_move_sequence (operands, mode, scratch_reg);

              /* And add back in the constant part.  */
              if (const_part != NULL_RTX)
                expand_inc (operand0, const_part);

              return 1;
            }

          if (flag_pic)
            {
              rtx temp;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (Pmode);

              /* (const (plus (symbol) (const_int))) must be forced to
                 memory during/after reload if the const_int will not fit
                 in 14 bits.  */
              if (GET_CODE (operand1) == CONST
                  && GET_CODE (XEXP (operand1, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
                  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
                  && (reload_completed || reload_in_progress)
                  && flag_pic)
                {
                  rtx const_mem = force_const_mem (mode, operand1);
                  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
                                                        mode, temp);
                  operands[1] = replace_equiv_address (const_mem, operands[1]);
                  pa_emit_move_sequence (operands, mode, temp);
                }
              else
                {
                  operands[1] = legitimize_pic_address (operand1, mode, temp);
                  if (REG_P (operand0) && REG_P (operands[1]))
                    copy_reg_pointer (operand0, operands[1]);
                  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
                }
            }
          /* On the HPPA, references to data space are supposed to use dp,
             register 27, but showing it in the RTL inhibits various cse
             and loop optimizations.  */
          else
            {
              rtx temp, set;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (mode);

              /* Loading a SYMBOL_REF into a register makes that register
                 safe to be used as the base in an indexed address.

                 Don't mark hard registers though.  That loses.  */
              if (GET_CODE (operand0) == REG
                  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (operand0, BITS_PER_UNIT);
              if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (temp, BITS_PER_UNIT);
              if (ishighonly)
                set = gen_rtx_SET (mode, operand0, temp);
              else
                set = gen_rtx_SET (VOIDmode,
                                   operand0,
                                   gen_rtx_LO_SUM (mode, temp, operand1));

              emit_insn (gen_rtx_SET (VOIDmode,
                                      temp,
                                      gen_rtx_HIGH (mode, operand1)));
              emit_insn (set);
            }
          return 1;
        }
      else if (tls_referenced_p (operand1))
        {
          rtx tmp = operand1;
          rtx addend = NULL;

          if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
            {
              addend = XEXP (XEXP (tmp, 0), 1);
              tmp = XEXP (XEXP (tmp, 0), 0);
            }

          gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
          tmp = legitimize_tls_address (tmp);
          if (addend)
            {
              tmp = gen_rtx_PLUS (mode, tmp, addend);
              tmp = force_operand (tmp, operands[0]);
            }
          operands[1] = tmp;
        }
      else if (GET_CODE (operand1) != CONST_INT
               || !pa_cint_ok_for_move (INTVAL (operand1)))
)))
2091 HOST_WIDE_INT value
= 0;
2092 HOST_WIDE_INT insv
= 0;
2095 if (GET_CODE (operand1
) == CONST_INT
)
2096 value
= INTVAL (operand1
);
2099 && GET_CODE (operand1
) == CONST_INT
2100 && HOST_BITS_PER_WIDE_INT
> 32
2101 && GET_MODE_BITSIZE (GET_MODE (operand0
)) > 32)
2105 /* Extract the low order 32 bits of the value and sign extend.
2106 If the new value is the same as the original value, we can
2107 can use the original value as-is. If the new value is
2108 different, we use it and insert the most-significant 32-bits
2109 of the original value into the final result. */
2110 nval
= ((value
& (((HOST_WIDE_INT
) 2 << 31) - 1))
2111 ^ ((HOST_WIDE_INT
) 1 << 31)) - ((HOST_WIDE_INT
) 1 << 31);
2114 #if HOST_BITS_PER_WIDE_INT > 32
2115 insv
= value
>= 0 ? value
>> 32 : ~(~value
>> 32);
2119 operand1
= GEN_INT (nval
);
2123 if (reload_in_progress
|| reload_completed
)
2124 temp
= scratch_reg
? scratch_reg
: operand0
;
2126 temp
= gen_reg_rtx (mode
);
2128 /* We don't directly split DImode constants on 32-bit targets
2129 because PLUS uses an 11-bit immediate and the insn sequence
2130 generated is not as efficient as the one using HIGH/LO_SUM. */
2131 if (GET_CODE (operand1
) == CONST_INT
2132 && GET_MODE_BITSIZE (mode
) <= BITS_PER_WORD
2133 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2136 /* Directly break constant into high and low parts. This
2137 provides better optimization opportunities because various
2138 passes recognize constants split with PLUS but not LO_SUM.
2139 We use a 14-bit signed low part except when the addition
2140 of 0x4000 to the high part might change the sign of the
2142 HOST_WIDE_INT low
= value
& 0x3fff;
2143 HOST_WIDE_INT high
= value
& ~ 0x3fff;
2147 if (high
== 0x7fffc000 || (mode
== HImode
&& high
== 0x4000))
2155 emit_insn (gen_rtx_SET (VOIDmode
, temp
, GEN_INT (high
)));
2156 operands
[1] = gen_rtx_PLUS (mode
, temp
, GEN_INT (low
));
2160 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
2161 gen_rtx_HIGH (mode
, operand1
)));
2162 operands
[1] = gen_rtx_LO_SUM (mode
, temp
, operand1
);
2165 insn
= emit_move_insn (operands
[0], operands
[1]);
2167 /* Now insert the most significant 32 bits of the value
2168 into the register. When we don't have a second register
2169 available, it could take up to nine instructions to load
2170 a 64-bit integer constant. Prior to reload, we force
2171 constants that would take more than three instructions
2172 to load to the constant pool. During and after reload,
2173 we have to handle all possible values. */
2176 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2177 register and the value to be inserted is outside the
2178 range that can be loaded with three depdi instructions. */
2179 if (temp
!= operand0
&& (insv
>= 16384 || insv
< -16384))
2181 operand1
= GEN_INT (insv
);
2183 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
2184 gen_rtx_HIGH (mode
, operand1
)));
2185 emit_move_insn (temp
, gen_rtx_LO_SUM (mode
, temp
, operand1
));
2187 emit_insn (gen_insvdi (operand0
, GEN_INT (32),
2190 emit_insn (gen_insvsi (operand0
, GEN_INT (32),
2195 int len
= 5, pos
= 27;
2197 /* Insert the bits using the depdi instruction. */
2200 HOST_WIDE_INT v5
= ((insv
& 31) ^ 16) - 16;
2201 HOST_WIDE_INT sign
= v5
< 0;
2203 /* Left extend the insertion. */
2204 insv
= (insv
>= 0 ? insv
>> len
: ~(~insv
>> len
));
2205 while (pos
> 0 && (insv
& 1) == sign
)
2207 insv
= (insv
>= 0 ? insv
>> 1 : ~(~insv
>> 1));
2213 emit_insn (gen_insvdi (operand0
, GEN_INT (len
),
2214 GEN_INT (pos
), GEN_INT (v5
)));
2216 emit_insn (gen_insvsi (operand0
, GEN_INT (len
),
2217 GEN_INT (pos
), GEN_INT (v5
)));
2219 len
= pos
> 0 && pos
< 5 ? pos
: 5;
2225 set_unique_reg_note (insn
, REG_EQUAL
, op1
);
2230 /* Now have insn-emit do whatever it normally does. */
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
pa_reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
      break;

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
        tree value;
        unsigned HOST_WIDE_INT ix;

        FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
          if (value)
            reloc |= pa_reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */

const char *
pa_singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
         bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
        return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
        return "ldil L'%1,%0";
      else if (pa_zdepi_cint_p (intval))
        return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
        return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}
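
/* Examples (illustrative): the constant 10 fits in 14 bits and gets
   "ldi 10,%0"; 0x12345800 has its low 11 bits clear and gets
   "ldil L'0x12345800,%0"; a constant such as 0x12345678 that matches
   none of the special cases needs the full two-insn sequence
   "ldil L'0x12345678,%0\n\tldo R'0x12345678(%0),%0".  */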
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32 - lsb; len++)
        {
          if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
            break;
        }

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}
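
/* Worked example (illustrative): for IMM == 0x1e0 the least
   significant set bit is at position 5 and the shifted value is 0xf.
   Bit 4 of that value is clear, so LEN is 4, giving OP[0] == 0xf,
   OP[1] == 26 and OP[2] == 4, i.e. a "zdepi 15,26,4" style deposit.  */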
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len, maxlen;

  maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < maxlen; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < maxlen - lsb; len++)
        {
          if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
            break;
        }

      /* Extend length if host is narrow and IMM is negative.  */
      if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
        len += 32;

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb;
  op[2] = len;
}
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

const char *
pa_output_move_double (rtx *operands)
{
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
    {
      if (FP_REG_P (operands[0]))
        {
          output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
          output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
          return "{fldds|fldd} -16(%%sp),%0";
        }
      else
        {
          output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
          output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
          return "{ldws|ldw} -12(%%sp),%R0";
        }
    }

  /* Handle auto decrementing and incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do it better when we learn
     this port about the general inc/dec addressing of PA.
     (This was written by tege.  Chide him if it doesn't work.)  */

  if (optype0 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
         doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

          operands[0] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[1]) == REG
                      && GET_CODE (operands[0]) == REG);

          gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

          /* No overlap between high target register and address
             register.  (We do this in a non-obvious way to
             save a register file writeback)  */
          if (GET_CODE (addr) == POST_INC)
            return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
          return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
        }
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

          operands[0] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[1]) == REG
                      && GET_CODE (operands[0]) == REG);

          gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
          /* No overlap between high target register and address
             register.  (We do this in a non-obvious way to save a
             register file writeback)  */
          if (GET_CODE (addr) == PRE_INC)
            return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
          return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
        }
    }
  if (optype1 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
         doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          operands[1] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[0]) == REG
                      && GET_CODE (operands[1]) == REG);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              /* No overlap between high target register and address
                 register.  (We do this in a non-obvious way to
                 save a register file writeback)  */
              if (GET_CODE (addr) == POST_INC)
                return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
              return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
            }
          else
            {
              /* This is an undefined situation.  We should load into the
                 address register *and* update that register.  Probably
                 we don't need to handle this at all.  */
              if (GET_CODE (addr) == POST_INC)
                return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
              return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
            }
        }
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          operands[1] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[0]) == REG
                      && GET_CODE (operands[1]) == REG);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              /* No overlap between high target register and address
                 register.  (We do this in a non-obvious way to
                 save a register file writeback)  */
              if (GET_CODE (addr) == PRE_INC)
                return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
              return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
            }
          else
            {
              /* This is an undefined situation.  We should load into the
                 address register *and* update that register.  Probably
                 we don't need to handle this at all.  */
              if (GET_CODE (addr) == PRE_INC)
                return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
              return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
            }
        }
      else if (GET_CODE (addr) == PLUS
               && GET_CODE (XEXP (addr, 0)) == MULT)
        {
          rtx xoperands[4];
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              xoperands[0] = high_reg;
              xoperands[1] = XEXP (addr, 1);
              xoperands[2] = XEXP (XEXP (addr, 0), 0);
              xoperands[3] = XEXP (XEXP (addr, 0), 1);
              output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
                               xoperands);
              return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
            }
          else
            {
              xoperands[0] = high_reg;
              xoperands[1] = XEXP (addr, 1);
              xoperands[2] = XEXP (XEXP (addr, 0), 0);
              xoperands[3] = XEXP (XEXP (addr, 0), 1);
              output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
                               xoperands);
              return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
            }
        }
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

        mem -> register where the first half of the destination register
        is the same register used in the memory's address.  Reload
        can create such insns.

        mem in this case will be either register indirect or register
        indirect plus a valid offset.

        register -> register move where REGNO(dst) == REGNO(src + 1)
        someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
                            operands[1], 0))
    {
      /* Do the late half first.  */
      if (addreg1)
        output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (pa_singlemove_string (latehalf), latehalf);

      /* Then clobber.  */
      if (addreg1)
        output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return pa_singlemove_string (operands);
    }

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    {
      output_asm_insn (pa_singlemove_string (latehalf), latehalf);
      return pa_singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (pa_singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (pa_singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo -4(%0),%0", &addreg1);

  return "";
}
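
/* Note on the ordering above (illustrative): loading the pair
   (%r4,%r5) from memory addressed by %r4 must do the late half first
   ("ldw 4(%r4),%r5" then "ldw 0(%r4),%r4"), since the normal order
   would clobber the base register before the second load.  The same
   reasoning applies to the REGNO (dst) == REGNO (src) + 1 register
   copy case.  */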
/* Output the assembler code for a doubleword floating point move
   with operands OPERANDS.  */

const char *
pa_output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
          || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
        output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
        output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
         arbitrary address here including pre/post increment/decrement.

         so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}
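
/* Example (illustrative): for the address (plus (reg 26) (const_int 8))
   find_addr_reg walks down the PLUS and returns (reg 26); the
   doubleword move above then temporarily bumps it with "ldo 4(...)"
   to reach the second word.  */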
/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

const char *
pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[5]);
  unsigned long n_bytes = INTVAL (operands[4]);

  /* We can't move more than a word at a time because the PA
     has no longer integer move insns.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 16);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("ldd,ma 8(%1),%3", operands);
        output_asm_insn ("ldd,ma 8(%1),%6", operands);
        output_asm_insn ("std,ma %3,8(%0)", operands);
        output_asm_insn ("addib,>= -16,%2,.-12", operands);
        output_asm_insn ("std,ma %6,8(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 16 != 0)
          {
            operands[4] = GEN_INT (n_bytes % 8);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("ldd,ma 8(%1),%3", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("ldd 0(%1),%6", operands);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("std,ma %3,8(%0)", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("stdby,e %6,%4(%0)", operands);
          }
        return "";

      case 4:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 8);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
        output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
        output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
        output_asm_insn ("addib,>= -8,%2,.-12", operands);
        output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 8 != 0)
          {
            operands[4] = GEN_INT (n_bytes % 4);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("ldw 0(%1),%6", operands);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
          }
        return "";

      case 2:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 4);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
        output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
        output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
        output_asm_insn ("addib,>= -4,%2,.-12", operands);
        output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 4 != 0)
          {
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("ldb 0(%1),%6", operands);
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("stb %6,0(%0)", operands);
          }
        return "";

      case 1:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 2);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
        output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
        output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
        output_asm_insn ("addib,>= -2,%2,.-12", operands);
        output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 2 != 0)
          {
            output_asm_insn ("ldb 0(%1),%3", operands);
            output_asm_insn ("stb %3,0(%0)", operands);
          }
        return "";

      default:
        gcc_unreachable ();
    }
}
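
/* Example of the generated code (illustrative, GAS syntax): a 32-byte
   copy with ALIGN == 4 on a 32-bit target emits

        ldi 24,%2
        ldw,ma 4(%1),%3
        ldw,ma 4(%1),%6
        stw,ma %3,4(%0)
        addib,>= -8,%2,.-12
        stw,ma %6,4(%0)

   The loop runs four times (counter 24, 16, 8, 0) copying eight bytes
   per iteration, and 32 is a multiple of 8 so no residual code is
   needed.  */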
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_movmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than four bytes at a time because the PA
     has no longer integer move insns.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns += 2;

      if ((n_bytes % align) != 0)
        n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
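
/* Example (illustrative): for n_bytes == 10 and align == 4 the copying
   loop costs 6 insns; 10 % 8 == 2 is less than the alignment, so only
   the sub-word residual adds 2 more, giving 8 insns == 32 bytes.  */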
/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

const char *
pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[3]);
  unsigned long n_bytes = INTVAL (operands[2]);

  /* We can't clear more than a word at a time because the PA
     has no longer integer move insns.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 16);
        output_asm_insn ("ldi %2,%1", operands);

        /* Clearing loop.  */
        output_asm_insn ("std,ma %%r0,8(%0)", operands);
        output_asm_insn ("addib,>= -16,%1,.-4", operands);
        output_asm_insn ("std,ma %%r0,8(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 16 != 0)
          {
            operands[2] = GEN_INT (n_bytes % 8);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("std,ma %%r0,8(%0)", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
          }
        return "";

      case 4:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 8);
        output_asm_insn ("ldi %2,%1", operands);

        /* Clearing loop.  */
        output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
        output_asm_insn ("addib,>= -8,%1,.-4", operands);
        output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 8 != 0)
          {
            operands[2] = GEN_INT (n_bytes % 4);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
          }
        return "";

      case 2:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 4);
        output_asm_insn ("ldi %2,%1", operands);

        /* Clearing loop.  */
        output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
        output_asm_insn ("addib,>= -4,%1,.-4", operands);
        output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 4 != 0)
          {
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("stb %%r0,0(%0)", operands);
          }
        return "";

      case 1:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 2);
        output_asm_insn ("ldi %2,%1", operands);

        /* Clearing loop.  */
        output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
        output_asm_insn ("addib,>= -2,%1,.-4", operands);
        output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 2 != 0)
          output_asm_insn ("stb %%r0,0(%0)", operands);

        return "";

      default:
        gcc_unreachable ();
    }
}
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no longer integer move insns.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns++;

      if ((n_bytes % align) != 0)
        n_insns++;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
/* Return a string to perform a bitwise-and of operands[1] with operands[2],
   storing the result in operands[0].  */

const char *
pa_output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
        if ((mask & (1 << ls0)) == 0)
          break;

      for (ls1 = ls0; ls1 < 32; ls1++)
        if ((mask & (1 << ls1)) != 0)
          break;

      for (ms0 = ls1; ms0 < 32; ms0++)
        if ((mask & (1 << ms0)) == 0)
          break;

      gcc_assert (ms0 == 32);

      if (ls1 == 32)
        {
          len = ls0;

          gcc_assert (len);

          operands[2] = GEN_INT (len);
          return "{extru|extrw,u} %1,31,%2,%0";
        }
      else
        {
          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          p = 31 - ls0;
          len = ls1 - ls0;

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "{depi|depwi} 0,%2,%3,%0";
        }
    }
  else
    return "and %1,%2,%0";
}
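
/* Worked examples (illustrative): the mask 0x7f selects the low seven
   bits (ls0 == 7, ls1 == 32), yielding "extru %1,31,7,%0"; the mask
   0xffff00ff has a single hole at bits 8..15 (ls0 == 8, ls1 == 16),
   yielding "depwi 0,23,8,%0" to clear the hole in place.  */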
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
          break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
          break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
          break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
        {
          len = ls0;

          gcc_assert (len);

          operands[2] = GEN_INT (len);
          return "extrd,u %1,63,%2,%0";
        }
      else
        {
          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          p = 63 - ls0;
          len = ls1 - ls0;

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "depdi 0,%2,%3,%0";
        }
    }
  else
    return "and %1,%2,%0";
}
/* Return a string to perform a bitwise-or of operands[1] with operands[2],
   storing the result in operands[0].  */

const char *
pa_output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}
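
/* Worked example (illustrative): the mask 0xff0 is the contiguous run
   of bits 4..11, so bs0 == 4 and bs1 == 12; with p == 27 and len == 8
   the result is "depi -1,27,8,%0" (or "depwi" in GAS syntax).  */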
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
              || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}
/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);

      /* We don't want an OPD when generating fast indirect calls.  */
      if (!TARGET_FAST_INDIRECT_CALLS)
        fputs ("P%", asm_out_file);

      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Output an ascii string.  */

void
pa_output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
        {
          register unsigned int c = (unsigned char) p[i + io];

          if (c == '\"' || c == '\\')
            partial_output[co++] = '\\';
          if (c >= ' ' && c < 0177)
            partial_output[co++] = c;
          else
            {
              unsigned int hexd;
              partial_output[co++] = '\\';
              partial_output[co++] = 'x';
              hexd = c / 16 - 0 + '0';
              if (hexd > '9')
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
              hexd = c % 16 - 0 + '0';
              if (hexd > '9')
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
            }
        }
      if (chars_output + co > 243)
        {
          fputs ("\"\n\t.STRING \"", file);
          chars_output = 0;
        }
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}
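
/* Example (illustrative): the three input characters a, " and newline
   are emitted as

	.STRING "a\"\x0a"

   printable characters pass through, quotes and backslashes are
   backslash-escaped, and everything else becomes a two-digit \x
   escape.  */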
/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD
   notes lying around.

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch
   instructions.  */

static void
remove_useless_addtr_insns (int check_notes)
{
  rtx_insn *insn;
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */
  if (optimize > 0)
    {
      int fcmp_count = 0;
      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
         instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
        {
          rtx tmp;

          /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
          if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
            continue;

          tmp = PATTERN (insn);

          /* It must be a set.  */
          if (GET_CODE (tmp) != SET)
            continue;

          /* If the destination is CCFP, then we've found an fcmp insn.  */
          tmp = SET_DEST (tmp);
          if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
            {
              fcmp_count++;
              continue;
            }

          tmp = PATTERN (insn);
          /* If this is an fbranch instruction, bump the fbranch counter.  */
          if (GET_CODE (tmp) == SET
              && SET_DEST (tmp) == pc_rtx
              && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
              && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
              && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
              && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
            {
              fbranch_count++;
              continue;
            }
        }

      /* Find all floating point compare + branch insns.  If possible,
         reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
        {
          rtx tmp;
          rtx_insn *next;

          /* Ignore anything that isn't an INSN.  */
          if (! NONJUMP_INSN_P (insn))
            continue;

          tmp = PATTERN (insn);

          /* It must be a set.  */
          if (GET_CODE (tmp) != SET)
            continue;

          /* The destination must be CCFP, which is register zero.  */
          tmp = SET_DEST (tmp);
          if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
            continue;

          /* INSN should be a set of CCFP.

             See if the result of this insn is used in a reversed FP
             conditional branch.  If so, reverse our condition and
             the branch.  Doing so avoids useless add,tr insns.  */
          next = next_insn (insn);
          while (next)
            {
              /* Jumps, calls and labels stop our search.  */
              if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
                break;

              /* As does another fcmp insn.  */
              if (NONJUMP_INSN_P (next)
                  && GET_CODE (PATTERN (next)) == SET
                  && GET_CODE (SET_DEST (PATTERN (next))) == REG
                  && REGNO (SET_DEST (PATTERN (next))) == 0)
                break;

              next = next_insn (next);
            }

          /* Is NEXT_INSN a branch?  */
          if (next && JUMP_P (next))
            {
              rtx pattern = PATTERN (next);

              /* If it a reversed fp conditional branch (e.g. uses add,tr)
                 and CCFP dies, then reverse our conditional and the branch
                 to avoid the add,tr.  */
              if (GET_CODE (pattern) == SET
                  && SET_DEST (pattern) == pc_rtx
                  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
                  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
                  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
                  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
                  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
                  && (fcmp_count == fbranch_count
                      || (check_notes
                          && find_regno_note (next, REG_DEAD, 0))))
                {
                  /* Reverse the branch.  */
                  tmp = XEXP (SET_SRC (pattern), 1);
                  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
                  XEXP (SET_SRC (pattern), 2) = tmp;
                  INSN_CODE (next) = -1;

                  /* Reverse our condition.  */
                  tmp = PATTERN (insn);
                  PUT_CODE (XEXP (tmp, 1),
                            (reverse_condition_maybe_unordered
                             (GET_CODE (XEXP (tmp, 1)))));
                }
            }
        }
    }

  pass = !pass;
}
/* You may have trouble believing this, but this is the 32 bit HP-PA
   stack layout.  Wow.

   Offset		Contents

   Variable arguments	(optional; any number may be allocated)

   SP-(4*(N+9))		arg word N
	:		    :
      SP-56		arg word 5
      SP-52		arg word 4

   Fixed arguments	(must be allocated; may remain unused)

      SP-48		arg word 3
      SP-44		arg word 2
      SP-40		arg word 1
      SP-36		arg word 0

   Frame Marker

      SP-32		External Data Pointer (DP)
      SP-28		External sr4
      SP-24		External/stub RP (RP')
      SP-20		Current RP
      SP-16		Static Link
      SP-12		Clean up
      SP-8		Calling Stub RP (RP'')
      SP-4		Previous SP

   Top of Frame

      SP-0		Stack Pointer (points to next available address)

*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   things simple.

   Top of Frame

       SP (FP')		Previous FP
       SP + 4		Alignment filler (sigh)
       SP + 8		Space for locals reserved here.
       .
       .
       .
       SP + n		All call saved register used.
       .
       .
       .
       SP + o		All call saved fp registers used.
       .
       .
       .
       SP + p (SP')	points to next available address.
*/
/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;
/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.*/

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx insn, dest, src, basereg;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
        {
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_rtx_SET (VOIDmode, tmpreg,
                                     gen_rtx_PLUS (Pmode, basereg, delta)));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (VOIDmode,
                                   gen_rtx_MEM (word_mode,
                                                gen_rtx_PLUS (word_mode,
                                                              basereg,
                                                              delta)),
                                   src));
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}
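
/* Example (illustrative): store_reg (2, -20, STACK_POINTER_REGNUM), as
   used below to save RP, has a 14-bit displacement and so emits the
   single store "stw %r2,-20(%r30)"; larger displacements go through
   %r1 via one of the two paths above.  */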
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx insn, basereg, srcreg, delta;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
         in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             plus_constant (Pmode,
                                            gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (VOIDmode, tmpreg,
                                   gen_rtx_PLUS (Pmode, basereg, delta)));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
                      gen_rtx_PLUS (Pmode, basereg,
                                    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}
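
/* Example (illustrative): set_reg_plus_d (STACK_POINTER_REGNUM,
   STACK_POINTER_REGNUM, 8128, 1) emits the single instruction
   "ldo 8128(%r30),%r30", while a displacement outside the 14-bit
   range uses the HIGH/LO_SUM (addil) path through %r1.  */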
HOST_WIDE_INT
pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in pa_expand_prologue and pa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
        continue;
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (df_regs_ever_live_p (i))
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (df_regs_ever_live_p (i)
        || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
      {
        freg_saved = 1;

        /* We always save both halves of the FP register, so always
           increment the frame size by 8 bytes.  */
        size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
        *fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += crtl->outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!crtl->is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
          & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
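
/* Worked example (illustrative; assumes the 32-bit ABI values
   STARTING_FRAME_OFFSET == 8 and PREFERRED_STACK_BOUNDARY == 64):
   a non-leaf function with 12 bytes of locals, no register saves and
   no outgoing arguments gets 12 + 8 for the frame pointer slot and
   filler, plus the 32-byte frame marker, and 52 then rounds up to a
   56-byte frame.  */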
/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is an int: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

static void
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* pa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (crtl->is_leaf)
    fputs (",NO_CALLS", file);
  else
    fputs (",CALLS", file);
  if (rp_saved)
    fputs (",SAVE_RP", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
}
3783 pa_expand_prologue (void)
3785 int merge_sp_adjust_with_store
= 0;
3786 HOST_WIDE_INT size
= get_frame_size ();
3787 HOST_WIDE_INT offset
;
3795 /* Compute total size for frame pointer, filler, locals and rounding to
3796 the next word boundary. Similar code appears in pa_compute_frame_size
3797 and must be changed in tandem with this code. */
3798 local_fsize
= (size
+ UNITS_PER_WORD
- 1) & ~(UNITS_PER_WORD
- 1);
3799 if (local_fsize
|| frame_pointer_needed
)
3800 local_fsize
+= STARTING_FRAME_OFFSET
;
3802 actual_fsize
= pa_compute_frame_size (size
, &save_fregs
);
3803 if (flag_stack_usage_info
)
3804 current_function_static_stack_size
= actual_fsize
;
3806 /* Compute a few things we will use often. */
3807 tmpreg
= gen_rtx_REG (word_mode
, 1);
3809 /* Save RP first. The calling conventions manual states RP will
3810 always be stored into the caller's frame at sp - 20 or sp - 16
3811 depending on which ABI is in use. */
3812 if (df_regs_ever_live_p (2) || crtl
->calls_eh_return
)
3814 store_reg (2, TARGET_64BIT
? -16 : -20, STACK_POINTER_REGNUM
);
3820 /* Allocate the local frame and set up the frame pointer if needed. */
3821 if (actual_fsize
!= 0)
3823 if (frame_pointer_needed
)
3825 /* Copy the old frame pointer temporarily into %r1. Set up the
3826 new stack pointer, then store away the saved old frame pointer
3827 into the stack at sp and at the same time update the stack
3828 pointer by actual_fsize bytes. Two versions, first
3829 handles small (<8k) frames. The second handles large (>=8k)
3831 insn
= emit_move_insn (tmpreg
, hard_frame_pointer_rtx
);
3833 RTX_FRAME_RELATED_P (insn
) = 1;
3835 insn
= emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
);
3837 RTX_FRAME_RELATED_P (insn
) = 1;
3839 if (VAL_14_BITS_P (actual_fsize
))
3840 store_reg_modify (STACK_POINTER_REGNUM
, 1, actual_fsize
);
3843 /* It is incorrect to store the saved frame pointer at *sp,
3844 then increment sp (writes beyond the current stack boundary).
3846 So instead use stwm to store at *sp and post-increment the
3847 stack pointer as an atomic operation. Then increment sp to
3848 finish allocating the new frame. */
3849 HOST_WIDE_INT adjust1
= 8192 - 64;
3850 HOST_WIDE_INT adjust2
= actual_fsize
- adjust1
;
3852 store_reg_modify (STACK_POINTER_REGNUM
, 1, adjust1
);
3853 set_reg_plus_d (STACK_POINTER_REGNUM
, STACK_POINTER_REGNUM
,
3857 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3858 we need to store the previous stack pointer (frame pointer)
3859 into the frame marker on targets that use the HP unwind
3860 library. This allows the HP unwind library to be used to
3861 unwind GCC frames. However, we are not fully compatible
3862 with the HP library because our frame layout differs from
3863 that specified in the HP runtime specification.
3865 We don't want a frame note on this instruction as the frame
3866 marker moves during dynamic stack allocation.
3868 This instruction also serves as a blockage to prevent
3869 register spills from being scheduled before the stack
3870 pointer is raised. This is necessary as we store
3871 registers using the frame pointer as a base register,
3872 and the frame pointer is set before sp is raised. */
3873 if (TARGET_HPUX_UNWIND_LIBRARY
)
3875 rtx addr
= gen_rtx_PLUS (word_mode
, stack_pointer_rtx
,
3876 GEN_INT (TARGET_64BIT
? -8 : -4));
3878 emit_move_insn (gen_rtx_MEM (word_mode
, addr
),
3879 hard_frame_pointer_rtx
);
3882 emit_insn (gen_blockage ());
3884 /* no frame pointer needed. */
3887 /* In some cases we can perform the first callee register save
3888 and allocating the stack frame at the same time. If so, just
3889 make a note of it and defer allocating the frame until saving
3890 the callee registers. */
3891 if (VAL_14_BITS_P (actual_fsize
) && local_fsize
== 0)
3892 merge_sp_adjust_with_store
= 1;
3893 /* Can not optimize. Adjust the stack frame by actual_fsize
3896 set_reg_plus_d (STACK_POINTER_REGNUM
, STACK_POINTER_REGNUM
,
3901 /* Normal register save.
3903 Do not save the frame pointer in the frame_pointer_needed case. It
3904 was done earlier. */
3905 if (frame_pointer_needed
)
3907 offset
= local_fsize
;
3909 /* Saving the EH return data registers in the frame is the simplest
3910 way to get the frame unwind information emitted. We put them
3911 just before the general registers. */
3912 if (DO_FRAME_NOTES
&& crtl
->calls_eh_return
)
3914 unsigned int i
, regno
;
3918 regno
= EH_RETURN_DATA_REGNO (i
);
3919 if (regno
== INVALID_REGNUM
)
3922 store_reg (regno
, offset
, HARD_FRAME_POINTER_REGNUM
);
3923 offset
+= UNITS_PER_WORD
;
      for (i = 18; i >= 4; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	    gr_saved++;
	  }

      /* Account for %r3 which is saved in a special place.  */
      gr_saved++;
    }
  /* No frame pointer needed.  */
  else
    {
      offset = local_fsize - actual_fsize;

      /* Saving the EH return data registers in the frame is the simplest
	 way to get the frame unwind information emitted.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* If merge_sp_adjust_with_store is nonzero, then we can
		 optimize the first save.  */
	      if (merge_sp_adjust_with_store)
		{
		  store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
		  merge_sp_adjust_with_store = 0;
		}
	      else
		store_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    /* If merge_sp_adjust_with_store is nonzero, then we can
	       optimize the first GR save.  */
	    if (merge_sp_adjust_with_store)
	      {
		store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
		merge_sp_adjust_with_store = 0;
	      }
	    else
	      store_reg (i, offset, STACK_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	    gr_saved++;
	  }

      /* If we wanted to merge the SP adjustment with a GR save, but we never
	 did any GR saves, then just emit the adjustment here.  */
      if (merge_sp_adjust_with_store)
	set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			actual_fsize, 1);
    }

  /* The hppa calling conventions say that %r19, the pic offset
     register, is saved at sp - 32 (in this function's frame)
     when generating PIC code.  FIXME:  What is the correct thing
     to do for functions which make no calls and allocate no
     frame?  Do we need to allocate a frame, or can we just omit
     the save?   For now we'll just omit the save.

     We don't want a note on this insn as the frame marker can
     move if there is a dynamic stack allocation.  */
  if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
    {
      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

      emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* Floating point register store.  */
  if (save_fregs)
    {
      rtx base;

      /* First get the frame or stack pointer to the start of the FP register
	 save area.  */
      if (frame_pointer_needed)
	{
	  set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
	  base = hard_frame_pointer_rtx;
	}
      else
	{
	  set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
	  base = stack_pointer_rtx;
	}

      /* Now actually save the FP registers.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	{
	  if (df_regs_ever_live_p (i)
	      || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
	    {
	      rtx addr, insn, reg;

	      addr = gen_rtx_MEM (DFmode,
				  gen_rtx_POST_INC (word_mode, tmpreg));
	      reg = gen_rtx_REG (DFmode, i);
	      insn = emit_move_insn (addr, reg);
	      if (DO_FRAME_NOTES)
		{
		  RTX_FRAME_RELATED_P (insn) = 1;
		  if (TARGET_64BIT)
		    {
		      rtx mem = gen_rtx_MEM (DFmode,
					     plus_constant (Pmode, base,
							    offset));
		      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
				    gen_rtx_SET (VOIDmode, mem, reg));
		    }
		  else
		    {
		      rtx meml = gen_rtx_MEM (SFmode,
					      plus_constant (Pmode, base,
							     offset));
		      rtx memr = gen_rtx_MEM (SFmode,
					      plus_constant (Pmode, base,
							     offset + 4));
		      rtx regl = gen_rtx_REG (SFmode, i);
		      rtx regr = gen_rtx_REG (SFmode, i + 1);
		      rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
		      rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
		      rtvec vec;

		      RTX_FRAME_RELATED_P (setl) = 1;
		      RTX_FRAME_RELATED_P (setr) = 1;
		      vec = gen_rtvec (2, setl, setr);
		      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
				    gen_rtx_SEQUENCE (VOIDmode, vec));
		    }
		}
	      offset += GET_MODE_SIZE (DFmode);
	      fr_saved++;
	    }
	}
    }
}
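/* Illustrative sketch (assumed, not part of the original source): with
   %r1 pointing at the FP save area, each iteration of the save loop above
   emits a post-increment double store, which on PA assembles to roughly

	fstds,ma %fr12,8(%r1)

   i.e. store %fr12 and advance %r1 by 8 so the next live FP register
   lands in the following slot.  The register number and exact syntax
   are examples only and depend on the assembler dialect.  */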
/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
	{
	  emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
	  src = gen_rtx_MEM (word_mode, tmpreg);
	}
      else
	src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}
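/* Illustrative sketch (assumed, not from the original source): for a
   displacement that does not fit in 14 bits, the HIGH/LO_SUM pair built
   above typically assembles to the classic PA two-instruction idiom

	addil L'disp,%r3	; %r1 = %r3 + left portion of disp
	ldw R'disp(%r1),%r4	; load using the right portion of disp

   where %r3 stands for the base register, %r4 for the destination, and
   %r1 is the fixed scratch register.  Register numbers are examples.  */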
/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (unsigned int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      unsigned int old_total = total_code_bytes;

      total_code_bytes += nbytes;

      /* Be prepared to handle overflows.  */
      if (old_total > total_code_bytes)
	total_code_bytes = UINT_MAX;
    }
}
/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

static void
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rtx_insn *insn = get_last_insn ();
  bool extra_nop;

  /* pa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end
     of a function.

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (NOTE_P (insn))
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && CALL_P (insn))
    {
      fputs ("\tnop\n", file);
      extra_nop = true;
    }
  else
    extra_nop = false;

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We are done with this subspace except possibly for some additional
	 debug information.  Forget that we are in this subspace to ensure
	 that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  /* Thunks do their own insn accounting.  */
  if (cfun->is_thunk)
    return;

  if (INSN_ADDRESSES_SET_P ())
    {
      last_address = extra_nop ? 4 : 0;
      insn = get_last_nonnote_insn ();
      last_address += INSN_ADDRESSES (INSN_UID (insn));
      if (INSN_P (insn))
	last_address += insn_default_length (insn);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
		      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
    }
  else
    last_address = UINT_MAX;

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
}
void
pa_expand_epilogue (void)
{
  rtx tmpreg;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;
  int i;
  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (rp_saved)
    {
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
	{
	  load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
	  ret_off = 0;
	}
      else
	{
	  /* No frame pointer, and stack is smaller than 8k.  */
	  if (VAL_14_BITS_P (ret_off - actual_fsize))
	    {
	      load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
	      ret_off = 0;
	    }
	}
    }

  /* General register restores.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	  }
    }
  else
    {
      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* Only for the first load.
	         merge_sp_adjust_with_load holds the register load
	         with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
	        merge_sp_adjust_with_load = regno;
	      else
		load_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	{
	  if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	    {
	      /* Only for the first load.
	         merge_sp_adjust_with_load holds the register load
	         with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
	        merge_sp_adjust_with_load = i;
	      else
		load_reg (i, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */
  if (save_fregs)
    {
      /* Adjust the register to index off of.  */
      if (frame_pointer_needed)
	set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
      else
	set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	if (df_regs_ever_live_p (i)
	    || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
	  {
	    rtx src = gen_rtx_MEM (DFmode,
				   gen_rtx_POST_INC (word_mode, tmpreg));
	    rtx dest = gen_rtx_REG (DFmode, i);
	    emit_move_insn (dest, src);
	  }
    }

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
  if (frame_pointer_needed)
    {
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (hard_frame_pointer_rtx,
			       stack_pointer_rtx, delta));
    }
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
    {
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
    }
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
		    - actual_fsize, 0);

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
  if (ret_off != 0)
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
		 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
		 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
    }
}
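/* Illustrative sketch (assumed, for orientation only): for a small
   32-bit frame with no frame pointer, the code above typically yields an
   epilogue along the lines of

	ldw -20(%r30),%r2	; early restore of the return pointer
	bv %r0(%r2)		; return
	ldwm -64(%r30),%r3	; last GR restore merged with the SP cut

   where the ldwm in the delay slot is the merge_sp_adjust_with_load
   case.  Exact registers and offsets depend on the frame layout.  */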
bool
pa_can_use_return_insn (void)
{
  if (!reload_completed)
    return false;

  if (frame_pointer_needed)
    return false;

  if (df_regs_ever_live_p (2))
    return false;

  if (crtl->profile)
    return false;

  return pa_compute_frame_size (get_frame_size (), 0) == 0;
}

rtx
hppa_pic_save_rtx (void)
{
  return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
}
#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0
#endif

/* Vector of funcdef numbers.  */
static vec<int> funcdef_nos;

/* Output deferred profile counters.  */
static void
output_deferred_profile_counters (void)
{
  unsigned int i;
  int align, n;

  if (funcdef_nos.is_empty ())
    return;

  switch_to_section (data_section);
  align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
  ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));

  for (i = 0; funcdef_nos.iterate (i, &n); i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "LP", n);
      assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
    }

  funcdef_nos.release ();
}
void
hppa_profile_hook (int label_no)
{
  /* We use SImode for the address of the function in both 32 and
     64-bit code to avoid having to provide DImode versions of the
     lcla2 and load_offset_label_address insn patterns.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx_code_label *label_rtx = gen_label_rtx ();
  rtx begin_label_rtx, call_insn;
  char begin_label_name[16];

  ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
			       label_no);
  begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));

  if (TARGET_64BIT)
    emit_move_insn (arg_pointer_rtx,
		    gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
				  GEN_INT (64)));

  emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));

  /* The address of the function is loaded into %r25 with an instruction-
     relative sequence that avoids the use of relocations.  The sequence
     is split so that the load_offset_label_address instruction can
     occupy the delay slot of the call to _mcount.  */
  if (TARGET_PA_20)
    emit_insn (gen_lcla2 (reg, label_rtx));
  else
    emit_insn (gen_lcla1 (reg, label_rtx));

  emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
					    reg, begin_label_rtx, label_rtx));

#if !NO_DEFERRED_PROFILE_COUNTERS
  {
    rtx count_label_rtx, addr, r24;
    char count_label_name[16];

    funcdef_nos.safe_push (label_no);
    ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
    count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
					  ggc_strdup (count_label_name));

    addr = force_reg (Pmode, count_label_rtx);
    r24 = gen_rtx_REG (Pmode, 24);
    emit_move_insn (r24, addr);

    call_insn =
      emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
					     gen_rtx_SYMBOL_REF (Pmode,
								 "_mcount")),
				GEN_INT (TARGET_64BIT ? 24 : 12)));

    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
  }
#else
  call_insn =
    emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
					   gen_rtx_SYMBOL_REF (Pmode,
							       "_mcount")),
			      GEN_INT (TARGET_64BIT ? 16 : 8)));
#endif

  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));

  /* Indicate the _mcount call cannot throw, nor will it execute a
     non-local goto.  */
  make_reg_eh_region_note_nothrow_nononlocal (call_insn);
}
/* Fetch the return address for the frame COUNT steps up from
   the current frame, after the prologue.  FRAMEADDR is the
   frame pointer of the COUNT frame.

   We want to ignore any export stub remnants here.  To handle this,
   we examine the code at the return address, and if it is an export
   stub, we return a memory rtx for the stub return address stored
   at frame-24.

   The value returned is used in two different ways:

	1. To find a function's caller.

	2. To change the return address for a function.

   This function handles most instances of case 1; however, it will
   fail if there are two levels of stubs to execute on the return
   path.  The only way I believe that can happen is if the return value
   needs a parameter relocation, which never happens for C code.

   This function handles most instances of case 2; however, it will
   fail if we did not originally have stub code on the return path
   but will need stub code on the new return path.  This can happen if
   the caller & callee are both in the main program, but the new
   return location is in a shared library.  */

rtx
pa_return_addr_rtx (int count, rtx frameaddr)
{
  rtx label;
  rtx rp;
  rtx saved_rp;
  rtx ins;

  /* The instruction stream at the return address of a PA1.X export stub is:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0x004010a1 | stub+12:  ldsid (sr0,rp),r1
	0x00011820 | stub+16:  mtsp r1,sr0
	0xe0400002 | stub+20:  be,n 0(sr0,rp)

     0xe0400002 must be specified as -532676606 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.

     The instruction stream at the return address of a PA2.0 export stub is:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0xe840d002 | stub+12:  bve,n (rp)

     0xe840d002 must be specified as -398405630 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.  */

  HOST_WIDE_INT insns[4];
  int i, len;

  if (count != 0)
    return NULL_RTX;

  rp = get_hard_reg_initial_val (Pmode, 2);

  if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
    return rp;

  /* If there is no export stub then just use the value saved from
     the return pointer register.  */

  saved_rp = gen_reg_rtx (Pmode);
  emit_move_insn (saved_rp, rp);

  /* Get pointer to the instruction stream.  We have to mask out the
     privilege level from the two low order bits of the return address
     pointer here so that ins will point to the start of the first
     instruction that would have been executed if we returned.  */
  ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
  label = gen_label_rtx ();

  if (TARGET_PA_20)
    {
      insns[0] = 0x4bc23fd1;
      insns[1] = -398405630;
      len = 2;
    }
  else
    {
      insns[0] = 0x4bc23fd1;
      insns[1] = 0x004010a1;
      insns[2] = 0x00011820;
      insns[3] = -532676606;
      len = 4;
    }

  /* Check the instruction stream at the normal return address for the
     export stub.  If it is an export stub, then our return address is
     really in -24[frameaddr].  */

  for (i = 0; i < len; i++)
    {
      rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
      rtx op1 = GEN_INT (insns[i]);
      emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
    }

  /* Here we know that our return address points to an export
     stub.  We don't want to return the address of the export stub,
     but rather the return address of the export stub.  That return
     address is stored at -24[frameaddr].  */

  emit_move_insn (saved_rp,
		  gen_rtx_MEM (Pmode,
			       memory_address (Pmode,
					       plus_constant (Pmode, frameaddr,
							      -24))));

  emit_label (label);

  return saved_rp;
}
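/* Worked example (illustrative): on the 32-bit runtime MASK_RETURN_ADDR
   clears the two privilege-level bits of the return address, so a saved
   %r2 value of, say, 0x40001003 yields ins = 0x40001000, the address of
   the first instruction that would execute on return.  The word
   compares above then test whether that address is the body of an
   export stub.  */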
/* Emit the comparison and branch insns for a conditional branch on a
   floating-point comparison.  OPERANDS holds the condition code, the
   two comparison operands, and the branch target label.  */

void
pa_emit_bcond_fp (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[0]);
  rtx operand0 = operands[1];
  rtx operand1 = operands[2];
  rtx label = operands[3];

  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
			  gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode,
						     gen_rtx_fmt_ee (NE,
							      VOIDmode,
							      gen_rtx_REG (CCFPmode, 0),
							      const0_rtx),
						     gen_rtx_LABEL_REF (VOIDmode, label),
						     pc_rtx)));
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
  enum attr_type attr_type;

  /* Don't adjust costs for a pa8000 chip, also do not adjust any
     true dependencies as they are described with bypasses now.  */
  if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
    return cost;

  if (! recog_memoized (insn))
    return 0;

  attr_type = get_attr_type (insn);

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */

      if (attr_type == TYPE_FPLOAD)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPALU:
		case TYPE_FPMULSGL:
		case TYPE_FPMULDBL:
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* A fpload can't be issued until one cycle before a
		     preceding arithmetic operation has finished if
		     the target of the fpload is any of the sources
		     (or destination) of the arithmetic operation.  */
		  return insn_default_latency (dep_insn) - 1;

		default:
		  return 0;
		}
	    }
	}
      else if (attr_type == TYPE_FPALU)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* An ALU flop can't be issued until two cycles before a
		     preceding divide or sqrt operation has finished if
		     the target of the ALU flop is any of the sources
		     (or destination) of the divide or sqrt operation.  */
		  return insn_default_latency (dep_insn) - 2;

		default:
		  return 0;
		}
	    }
	}

      /* For other anti dependencies, the cost is 0.  */
      return 0;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if (attr_type == TYPE_FPLOAD)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPALU:
		case TYPE_FPMULSGL:
		case TYPE_FPMULDBL:
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* A fpload can't be issued until one cycle before a
		     preceding arithmetic operation has finished if
		     the target of the fpload is the destination of the
		     arithmetic operation.

		     Exception: For PA7100LC, PA7200 and PA7300, the cost
		     is 3 cycles, unless they bundle together.   We also
		     pay the penalty if the second insn is a fpload.  */
		  return insn_default_latency (dep_insn) - 1;

		default:
		  return 0;
		}
	    }
	}
      else if (attr_type == TYPE_FPALU)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* An ALU flop can't be issued until two cycles before a
		     preceding divide or sqrt operation has finished if
		     the target of the ALU flop is also the target of
		     the divide or sqrt operation.  */
		  return insn_default_latency (dep_insn) - 2;

		default:
		  return 0;
		}
	    }
	}

      /* For other output dependencies, the cost is 0.  */
      return 0;

    default:
      gcc_unreachable ();
    }
}
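/* Illustrative example (assumed register choices, not from the original
   source): given the pair

	fdiv,dbl %fr4,%fr5,%fr6		; dep_insn, long latency
	fldds 0(%r20),%fr6		; insn, TYPE_FPLOAD

   the two insns have an output dependence on %fr6, so the REG_DEP_OUTPUT
   case above charges insn_default_latency (dep_insn) - 1 cycles instead
   of the default cost, keeping the load from issuing until the divide is
   nearly complete.  */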
/* Adjust scheduling priorities.  We use this to try and keep addil
   and the next use of %r1 close together.  */
static int
pa_adjust_priority (rtx_insn *insn, int priority)
{
  rtx set = single_set (insn);
  rtx src, dest;

  if (set)
    {
      src = SET_SRC (set);
      dest = SET_DEST (set);
      if (GET_CODE (src) == LO_SUM
	  && symbolic_operand (XEXP (src, 1), VOIDmode)
	  && ! read_only_operand (XEXP (src, 1), VOIDmode))
	priority >>= 3;

      else if (GET_CODE (src) == MEM
	       && GET_CODE (XEXP (src, 0)) == LO_SUM
	       && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
	       && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
	priority >>= 1;

      else if (GET_CODE (dest) == MEM
	       && GET_CODE (XEXP (dest, 0)) == LO_SUM
	       && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
	       && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
	priority >>= 3;
    }
  return priority;
}
/* The 700 can only issue a single insn at a time.
   The 7XXX processors can issue two insns at a time.
   The 8000 can issue 4 insns at a time.  */
static int
pa_issue_rate (void)
{
  switch (pa_cpu)
    {
    case PROCESSOR_700:		return 1;
    case PROCESSOR_7100:	return 2;
    case PROCESSOR_7100LC:	return 2;
    case PROCESSOR_7200:	return 2;
    case PROCESSOR_7300:	return 2;
    case PROCESSOR_8000:	return 4;

    default:
      gcc_unreachable ();
    }
}
/* Return any length plus adjustment needed by INSN which already has
   its length computed as LENGTH.   Return LENGTH if no adjustment is
   necessary.

   Also compute the length of an inline block move here as it is too
   complicated to express as a length attribute in pa.md.  */
int
pa_adjust_insn_length (rtx_insn *insn, int length)
{
  rtx pat = PATTERN (insn);

  /* If length is negative or undefined, provide initial length.  */
  if ((unsigned int) length >= INT_MAX)
    {
      if (GET_CODE (pat) == SEQUENCE)
	insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));

      switch (get_attr_type (insn))
	{
	case TYPE_MILLI:
	  length = pa_attr_length_millicode_call (insn);
	  break;
	case TYPE_CALL:
	  length = pa_attr_length_call (insn, 0);
	  break;
	case TYPE_SIBCALL:
	  length = pa_attr_length_call (insn, 1);
	  break;
	case TYPE_DYNCALL:
	  length = pa_attr_length_indirect_call (insn);
	  break;
	case TYPE_SH_FUNC_ADRS:
	  length = pa_attr_length_millicode_call (insn) + 20;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Block move pattern.  */
  if (NONJUMP_INSN_P (insn)
      && GET_CODE (pat) == PARALLEL
      && GET_CODE (XVECEXP (pat, 0, 0)) == SET
      && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
      && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
      && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
      && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
    length += compute_movmem_length (insn) - 4;
  /* Block clear pattern.  */
  else if (NONJUMP_INSN_P (insn)
	   && GET_CODE (pat) == PARALLEL
	   && GET_CODE (XVECEXP (pat, 0, 0)) == SET
	   && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
	   && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
	   && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
    length += compute_clrmem_length (insn) - 4;
  /* Conditional branch with an unfilled delay slot.  */
  else if (JUMP_P (insn) && ! simplejump_p (insn))
    {
      /* Adjust a short backwards conditional with an unfilled delay slot.  */
      if (GET_CODE (pat) == SET
	  && length == 4
	  && JUMP_LABEL (insn) != NULL_RTX
	  && ! forward_branch_p (insn))
	length += 4;
      else if (GET_CODE (pat) == PARALLEL
	       && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
	       && length == 4)
	length += 4;
      /* Adjust dbra insn with short backwards conditional branch with
	 unfilled delay slot -- only for case where counter is in a
	 general register.  */
      else if (GET_CODE (pat) == PARALLEL
	       && GET_CODE (XVECEXP (pat, 0, 1)) == SET
	       && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
	       && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
	       && length == 4
	       && ! forward_branch_p (insn))
	length += 4;
    }
  return length;
}
/* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook.  */

static bool
pa_print_operand_punct_valid_p (unsigned char code)
{
  if (code == '@'
      || code == '#'
      || code == '*'
      || code == '^')
    return true;

  return false;
}

/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
pa_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output a 'nop' if there's nothing for the delay slot.  */
      if (dbr_sequence_length () == 0)
	fputs ("\n\tnop", file);
      return;
    case '*':
      /* Output a nullification completer if there's nothing for the
	 delay slot or nullification is requested.  */
      if (dbr_sequence_length () == 0 ||
	  (final_sequence &&
	   INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
        fputs (",n", file);
      return;
    case 'R':
      /* Print out the second register name of a register pair.
	 I.e., R (6) => 7.  */
      fputs (reg_names[REGNO (x) + 1], file);
      return;
    case 'r':
      /* A register or zero.  */
      if (x == const0_rtx
	  || (x == CONST0_RTX (DFmode))
	  || (x == CONST0_RTX (SFmode)))
	{
	  fputs ("%r0", file);
	  return;
	}
      else
	break;
    case 'f':
      /* A register or zero (floating point).  */
      if (x == const0_rtx
	  || (x == CONST0_RTX (DFmode))
	  || (x == CONST0_RTX (SFmode)))
	{
	  fputs ("%fr0", file);
	  return;
	}
      else
	break;
    case 'A':
      {
	rtx xoperands[2];

	xoperands[0] = XEXP (XEXP (x, 0), 0);
	xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
	pa_output_global_address (file, xoperands[1], 0);
	fprintf (file, "(%s)", reg_names[REGNO (xoperands[0])]);
	return;
      }

    case 'C':			/* Plain (C)ondition */
    case 'X':
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("=", file);  break;
	case NE:
	  fputs ("<>", file);  break;
	case GT:
	  fputs (">", file);  break;
	case GE:
	  fputs (">=", file);  break;
	case GEU:
	  fputs (">>=", file);  break;
	case GTU:
	  fputs (">>", file);  break;
	case LT:
	  fputs ("<", file);  break;
	case LE:
	  fputs ("<=", file);  break;
	case LEU:
	  fputs ("<<=", file);  break;
	case LTU:
	  fputs ("<<", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'N':			/* Condition, (N)egated */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("<>", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs ("<=", file);  break;
	case GE:
	  fputs ("<", file);  break;
	case GEU:
	  fputs ("<<", file);  break;
	case GTU:
	  fputs ("<<=", file);  break;
	case LT:
	  fputs (">=", file);  break;
	case LE:
	  fputs (">", file);  break;
	case LEU:
	  fputs (">>", file);  break;
	case LTU:
	  fputs (">>=", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    /* For floating point comparisons.  Note that the output
       predicates are the complement of the desired mode.  The
       conditions for GT, GE, LT, LE and LTGT cause an invalid
       operation exception if the result is unordered and this
       exception is enabled in the floating-point status register.  */
    case 'Y':
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("!=", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs ("!>", file);  break;
	case GE:
	  fputs ("!>=", file);  break;
	case LT:
	  fputs ("!<", file);  break;
	case LE:
	  fputs ("!<=", file);  break;
	case LTGT:
	  fputs ("!<>", file);  break;
	case UNLE:
	  fputs ("!?<=", file);  break;
	case UNLT:
	  fputs ("!?<", file);  break;
	case UNGE:
	  fputs ("!?>=", file);  break;
	case UNGT:
	  fputs ("!?>", file);  break;
	case UNEQ:
	  fputs ("!?=", file);  break;
	case UNORDERED:
	  fputs ("!?", file);  break;
	case ORDERED:
	  fputs ("?", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'S':			/* Condition, operands are (S)wapped.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("=", file);  break;
	case NE:
	  fputs ("<>", file);  break;
	case GT:
	  fputs ("<", file);  break;
	case GE:
	  fputs ("<=", file);  break;
	case GEU:
	  fputs ("<<=", file);  break;
	case GTU:
	  fputs ("<<", file);  break;
	case LT:
	  fputs (">", file);  break;
	case LE:
	  fputs (">=", file);  break;
	case LEU:
	  fputs (">>=", file);  break;
	case LTU:
	  fputs (">>", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'B':			/* Condition, (B)oth swapped and negate.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("<>", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs (">=", file);  break;
	case GE:
	  fputs (">", file);  break;
	case GEU:
	  fputs (">>", file);  break;
	case GTU:
	  fputs (">>=", file);  break;
	case LT:
	  fputs ("<=", file);  break;
	case LE:
	  fputs ("<", file);  break;
	case LEU:
	  fputs ("<<", file);  break;
	case LTU:
	  fputs ("<<=", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'k':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
      return;
    case 'Q':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
      return;
    case 'L':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
      return;
    case 'O':
      gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      return;
    case 'p':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
      return;
    case 'P':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
      return;
    case 'I':
      if (GET_CODE (x) == CONST_INT)
	fputs ("i", file);
      return;
    case 'M':
    case 'F':
      switch (GET_CODE (XEXP (x, 0)))
	{
	case PRE_DEC:
	case PRE_INC:
	  if (ASSEMBLER_DIALECT == 0)
	    fputs ("s,mb", file);
	  else
	    fputs (",mb", file);
	  break;
	case POST_DEC:
	case POST_INC:
	  if (ASSEMBLER_DIALECT == 0)
	    fputs ("s,ma", file);
	  else
	    fputs (",ma", file);
	  break;
	case PLUS:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		fputs ("x", file);
	    }
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
		   || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		fputs ("x,s", file);
	      else
		fputs (",s", file);
	    }
	  else if (code == 'F' && ASSEMBLER_DIALECT == 0)
	    fputs ("s", file);
	  break;
	default:
	  if (code == 'F' && ASSEMBLER_DIALECT == 0)
	    fputs ("s", file);
	  break;
	}
      return;
    case 'G':
      pa_output_global_address (file, x, 0);
      return;
    case 'H':
      pa_output_global_address (file, x, 1);
      return;
    case 0:			/* Don't do anything special */
      break;
    case 'Z':
      {
	unsigned op[3];
	compute_zdepwi_operands (INTVAL (x), op);
	fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
	return;
      }
    case 'z':
      {
	unsigned op[3];
	compute_zdepdi_operands (INTVAL (x), op);
	fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
	return;
      }
    default:
      /* We can get here from a .vtable_inherit due to our
	 CONSTANT_ADDRESS_P rejecting perfectly good constant
	 addresses.  */
      break;
    }
  if (GET_CODE (x) == REG)
    {
      fputs (reg_names[REGNO (x)], file);
      if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
	{
	  fputs ("R", file);
	  return;
	}
      if (FP_REG_P (x)
	  && GET_MODE_SIZE (GET_MODE (x)) <= 4
	  && (REGNO (x) & 1) == 0)
	fputs ("L", file);
    }
  else if (GET_CODE (x) == MEM)
    {
      int size = GET_MODE_SIZE (GET_MODE (x));
      rtx base = NULL_RTX;
      switch (GET_CODE (XEXP (x, 0)))
	{
	case PRE_DEC:
	case POST_DEC:
	  base = XEXP (XEXP (x, 0), 0);
	  fprintf (file, "-%d(%s)", size, reg_names[REGNO (base)]);
	  break;
	case PRE_INC:
	case POST_INC:
	  base = XEXP (XEXP (x, 0), 0);
	  fprintf (file, "%d(%s)", size, reg_names[REGNO (base)]);
	  break;
	case PLUS:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
	    fprintf (file, "%s(%s)",
		     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
		     reg_names[REGNO (XEXP (XEXP (x, 0), 1))]);
	  else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
	    fprintf (file, "%s(%s)",
		     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
	    {
	      /* Because the REG_POINTER flag can get lost during reload,
		 pa_legitimate_address_p canonicalizes the order of the
		 index and base registers in the combined move patterns.  */
	      rtx base = XEXP (XEXP (x, 0), 1);
	      rtx index = XEXP (XEXP (x, 0), 0);

	      fprintf (file, "%s(%s)",
		       reg_names[REGNO (index)], reg_names[REGNO (base)]);
	    }
	  else
	    output_address (XEXP (x, 0));
	  break;
	default:
	  output_address (XEXP (x, 0));
	  break;
	}
    }
  else
    output_addr_const (file, x);
}
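/* Usage sketch (illustrative, operand numbers are examples only): for an
   rtx (lt (reg) (reg)), "%C" in a template prints "<" while "%N" prints
   the negated condition ">=".  A template such as

	"{com%I2b,|cmp%I2b,}%C3 %2,%r1,%0"

   therefore expands to something like "cmpb,< %r4,%r5,L$7" under the
   GAS dialect, with "%I2" adding the "i" completer when operand 2 is an
   immediate.  */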
/* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */

void
pa_output_global_address (FILE *file, rtx x, int round_constant)
{

  /* Imagine  (high (const (plus ...))).  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
    output_addr_const (file, x);
  else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
    {
      output_addr_const (file, x);
      fputs ("-$global$", file);
    }
  else if (GET_CODE (x) == CONST)
    {
      const char *sep = "";
      int offset = 0;		/* assembler wants -$global$ at end */
      rtx base = NULL_RTX;

      switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
	{
	case SYMBOL_REF:
	  base = XEXP (XEXP (x, 0), 0);
	  output_addr_const (file, base);
	  break;
	case CONST_INT:
	  offset = INTVAL (XEXP (XEXP (x, 0), 0));
	  break;
	default:
	  gcc_unreachable ();
	}

      switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
	{
	case SYMBOL_REF:
	  base = XEXP (XEXP (x, 0), 1);
	  output_addr_const (file, base);
	  break;
	case CONST_INT:
	  offset = INTVAL (XEXP (XEXP (x, 0), 1));
	  break;
	default:
	  gcc_unreachable ();
	}

      /* How bogus.  The compiler is apparently responsible for
	 rounding the constant if it uses an LR field selector.

	 The linker and/or assembler seem a better place since
	 they have to do this kind of thing already.

	 If we fail to do this, HP's optimizing linker may eliminate
	 an addil, but not update the ldw/stw/ldo instruction that
	 uses the result of the addil.  */
      if (round_constant)
	offset = ((offset + 0x1000) & ~0x1fff);

      switch (GET_CODE (XEXP (x, 0)))
	{
	case PLUS:
	  if (offset < 0)
	    {
	      offset = -offset;
	      sep = "-";
	    }
	  else
	    sep = "+";
	  break;

	case MINUS:
	  gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
	  sep = "-";
	  break;

	default:
	  gcc_unreachable ();
	}

      if (!read_only_operand (base, VOIDmode) && !flag_pic)
	fputs ("-$global$", file);
      if (offset)
	fprintf (file, "%s%d", sep, offset);
    }
  else
    output_addr_const (file, x);
}
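/* Example outputs (derived from the cases above; symbol names are
   invented): for non-PIC data the assembler wants the $global$-relative
   form, so a SYMBOL_REF "foo" prints as "foo-$global$", and the CONST
   expression (const (plus (symbol_ref "foo") (const_int 4))) prints as
   "foo-$global$+4".  Read-only symbols print unadorned.  */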
/* Output boilerplate text to appear at the beginning of the file.
   There are several possible versions.  */
#define aputs(x) fputs(x, asm_out_file)

static void
pa_file_start_level (void)
{
  if (TARGET_64BIT)
    aputs ("\t.LEVEL 2.0w\n");
  else if (TARGET_PA_20)
    aputs ("\t.LEVEL 2.0\n");
  else if (TARGET_PA_11)
    aputs ("\t.LEVEL 1.1\n");
  else
    aputs ("\t.LEVEL 1.0\n");
}

static void
pa_file_start_space (int sortspace)
{
  aputs ("\t.SPACE $PRIVATE$");
  if (sortspace)
    aputs (",SORT=16");
  aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
  if (flag_tm)
    aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
  aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
	 "\n\t.SPACE $TEXT$");
  if (sortspace)
    aputs (",SORT=8");
  aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
	 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
}

static void
pa_file_start_file (int want_version)
{
  if (write_symbols != NO_DEBUG)
    {
      output_file_directive (asm_out_file, main_input_filename);
      if (want_version)
	aputs ("\t.version\t\"01.01\"\n");
    }
}

static void
pa_file_start_mcount (const char *aswhat)
{
  if (profile_flag)
    fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
}

static void
pa_elf_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_mcount ("ENTRY");
  pa_file_start_file (0);
}

static void
pa_som_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (0);
  aputs ("\t.IMPORT $global$,DATA\n"
	 "\t.IMPORT $$dyncall,MILLICODE\n");
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}

static void
pa_linux_file_start (void)
{
  pa_file_start_file (1);
  pa_file_start_level ();
  pa_file_start_mcount ("CODE");
}

static void
pa_hpux64_gas_file_start (void)
{
  pa_file_start_level ();
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  if (profile_flag)
    ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
#endif
  pa_file_start_file (1);
}

static void
pa_hpux64_hpas_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (1);
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}
#undef aputs
/* Search the deferred plabel list for SYMBOL and return its internal
   label.  If an entry for SYMBOL is not found, a new entry is created.  */

rtx
pa_get_deferred_plabel (rtx symbol)
{
  const char *fname = XSTR (symbol, 0);
  size_t i;

  /* See if we have already put this function on the list of deferred
     plabels.  This list is generally small, so a linear search is not
     too ugly.  If it proves too slow replace it with something faster.  */
  for (i = 0; i < n_deferred_plabels; i++)
    if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
      break;

  /* If the deferred plabel list is empty, or this entry was not found
     on the list, create a new entry on the list.  */
  if (deferred_plabels == NULL || i == n_deferred_plabels)
    {
      tree id;

      if (deferred_plabels == 0)
	deferred_plabels = ggc_alloc<deferred_plabel> ();
      else
	deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
					  deferred_plabels,
					  n_deferred_plabels + 1);

      i = n_deferred_plabels++;
      deferred_plabels[i].internal_label = gen_label_rtx ();
      deferred_plabels[i].symbol = symbol;

      /* Gross.  We have just implicitly taken the address of this
	 function.  Mark it in the same manner as assemble_name.  */
      id = maybe_get_identifier (targetm.strip_name_encoding (fname));
      if (id)
	mark_referenced (id);
    }

  return deferred_plabels[i].internal_label;
}

static void
output_deferred_plabels (void)
{
  size_t i;

  /* If we have some deferred plabels, then we need to switch into the
     data or readonly data section, and align it to a 4 byte boundary
     before outputting the deferred plabels.  */
  if (n_deferred_plabels)
    {
      switch_to_section (flag_pic ? data_section : readonly_data_section);
      ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
    }

  /* Now output the deferred plabels.  */
  for (i = 0; i < n_deferred_plabels; i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "L",
		 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
      assemble_integer (deferred_plabels[i].symbol,
			TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
    }
}
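/* Illustrative output (label spelling depends on
   targetm.asm_out.internal_label and the assembler in use): each
   deferred plabel becomes an aligned pointer-sized word in the data
   (PIC) or read-only data section, roughly

	L$0042:
		.word	foo

   so a later plabel reference can be resolved through this slot.  */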
/* Initialize optabs to point to emulation routines.  */

static void
pa_init_libfuncs (void)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
      set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
      set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
      set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
      set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
      set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
      set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
      set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
      set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

      set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
      set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
      set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
      set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
      set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
      set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
      set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");

      set_conv_libfunc (sfix_optab, SImode, TFmode,
			TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
				     : "_U_Qfcnvfxt_quad_to_sgl");
      set_conv_libfunc (sfix_optab, DImode, TFmode,
			"_U_Qfcnvfxt_quad_to_dbl");
      set_conv_libfunc (ufix_optab, SImode, TFmode,
			"_U_Qfcnvfxt_quad_to_usgl");
      set_conv_libfunc (ufix_optab, DImode, TFmode,
			"_U_Qfcnvfxt_quad_to_udbl");

      set_conv_libfunc (sfloat_optab, TFmode, SImode,
			"_U_Qfcnvxf_sgl_to_quad");
      set_conv_libfunc (sfloat_optab, TFmode, DImode,
			"_U_Qfcnvxf_dbl_to_quad");
      set_conv_libfunc (ufloat_optab, TFmode, SImode,
			"_U_Qfcnvxf_usgl_to_quad");
      set_conv_libfunc (ufloat_optab, TFmode, DImode,
			"_U_Qfcnvxf_udbl_to_quad");
    }

  if (TARGET_SYNC_LIBCALL)
    init_sync_libfuncs (UNITS_PER_WORD);
}
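/* Usage note (illustrative): with HPUX_LONG_DOUBLE_LIBRARY, a
   source-level TFmode operation such as

	long double c = a * b;

   is not expanded inline; the optab registration above turns it into a
   call to _U_Qfmpy, with the operands passed per the quad-precision
   calling convention.  */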
/* HP's millicode routines mean something special to the assembler.
   Keep track of which ones we have used.  */

enum millicodes { remI, remU, divI, divU, mulI, end1000 };
static void import_milli (enum millicodes);
static char imported[(int) end1000];
static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
static const char import_string[] = ".IMPORT $$....,MILLICODE";
#define MILLI_START 10

static void
import_milli (enum millicodes code)
{
  char str[sizeof (import_string)];

  if (!imported[(int) code])
    {
      imported[(int) code] = 1;
      strcpy (str, import_string);
      strncpy (str + MILLI_START, milli_names[(int) code], 4);
      output_asm_insn (str, 0);
    }
}
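/* Worked example (derived from the strings above): import_milli (mulI)
   copies import_string and overwrites the four dots at offset
   MILLI_START with the routine name, emitting

	.IMPORT $$mulI,MILLICODE

   the first time the routine is used; later uses are suppressed by the
   imported[] flag.  */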
/* The register constraints have put the operands and return value in
   the proper registers.  */

const char *
pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
{
  import_milli (mulI);
  return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}
/* Emit the rtl for doing a division by a constant.  */

/* Do magic division millicodes exist for this value? */
const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};

/* We'll use an array to keep track of the magic millicodes and
   whether or not we've used them already. [n][0] is signed, [n][1] is
   unsigned.  */

static int div_milli[16][2];

int
pa_emit_hpdiv_const (rtx *operands, int unsignedp)
{
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) > 0
      && INTVAL (operands[2]) < 16
      && pa_magic_milli[INTVAL (operands[2])])
    {
      rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);

      emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
      emit
	(gen_rtx_PARALLEL
	 (VOIDmode,
	  gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
				     gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
						     SImode,
						     gen_rtx_REG (SImode, 26),
						     operands[2])),
		     gen_rtx_CLOBBER (VOIDmode, operands[4]),
		     gen_rtx_CLOBBER (VOIDmode, operands[3]),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
		     gen_rtx_CLOBBER (VOIDmode, ret))));
      emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
      return 1;
    }
  return 0;
}
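/* Example (derived from pa_magic_milli above): a division by 10
   qualifies (pa_magic_milli[10] == 1), so the dividend is moved to %r26
   and a DIV parallel is emitted; the output routine below then calls the
   magic millicode routine $$divI_10 (or $$divU_10 for unsigned), with
   the quotient returned in %r29.  */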
const char *
pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
{
  int divisor;

  /* If the divisor is a constant, try to use one of the special
     millicode routines.  */
  if (GET_CODE (operands[0]) == CONST_INT)
    {
      static char buf[100];
      divisor = INTVAL (operands[0]);
      if (!div_milli[divisor][unsignedp])
	{
	  div_milli[divisor][unsignedp] = 1;
	  if (unsignedp)
	    output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
	  else
	    output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
	}
      if (unsignedp)
	{
	  sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
      else
	{
	  sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
    }
  /* Divisor isn't a special constant.  */
  else
    {
      if (unsignedp)
	{
	  import_milli (divU);
	  return pa_output_millicode_call (insn,
					gen_rtx_SYMBOL_REF (SImode, "$$divU"));
	}
      else
	{
	  import_milli (divI);
	  return pa_output_millicode_call (insn,
					gen_rtx_SYMBOL_REF (SImode, "$$divI"));
	}
    }
}
/* Output a $$rem millicode to do mod.  */

const char *
pa_output_mod_insn (int unsignedp, rtx_insn *insn)
{
  if (unsignedp)
    {
      import_milli (remU);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remU"));
    }
  else
    {
      import_milli (remI);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remI"));
    }
}
void
pa_output_arg_descriptor (rtx call_insn)
{
  const char *arg_regs[4];
  enum machine_mode arg_mode;
  rtx link;
  int i, output_flag = 0;
  int regno;

  /* We neither need nor want argument location descriptors for the
     64bit runtime environment or the ELF32 environment.  */
  if (TARGET_64BIT || TARGET_ELF32)
    return;

  for (i = 0; i < 4; i++)
    arg_regs[i] = 0;

  /* Specify explicitly that no argument relocations should take place
     if using the portable runtime calling conventions.  */
  if (TARGET_PORTABLE_RUNTIME)
    {
      fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
	     asm_out_file);
      return;
    }

  gcc_assert (CALL_P (call_insn));
  for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
       link; link = XEXP (link, 1))
    {
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));
      if (regno >= 23 && regno <= 26)
	{
	  arg_regs[26 - regno] = "GR";
	  if (arg_mode == DImode)
	    arg_regs[25 - regno] = "GR";
	}
      else if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    arg_regs[(regno - 32) / 2] = "FR";
	  else
	    {
#ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
	      arg_regs[(regno - 34) / 2] = "FR";
	      arg_regs[(regno - 34) / 2 + 1] = "FU";
#else
	      arg_regs[(regno - 34) / 2] = "FU";
	      arg_regs[(regno - 34) / 2 + 1] = "FR";
#endif
	    }
	}
    }
  fputs ("\t.CALL ", asm_out_file);
  for (i = 0; i < 4; i++)
    {
      if (arg_regs[i])
	{
	  if (output_flag++)
	    fputc (',', asm_out_file);
	  fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
	}
    }
  fputc ('\n', asm_out_file);
}
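/* Example (derived from the loop above; function name invented): a call
   such as f (1, 2) with both ints in %r26 and %r25 produces

	.CALL ARGW0=GR,ARGW1=GR

   while a DImode argument marks two consecutive ARGW slots and FP
   arguments use the FR/FU designators.  */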
/* Inform reload about cases where moving X with a mode MODE to or from
   a register in RCLASS requires an extra scratch or immediate register.
   Return the class needed for the immediate register.  */

static reg_class_t
pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
		     enum machine_mode mode, secondary_reload_info *sri)
{
  int regno;
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Handle the easy stuff first.  */
  if (rclass == R1_REGS)
    return NO_REGS;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
	return NO_REGS;
    }
  else
    regno = -1;

  /* If we have something like (mem (mem (...)), we can safely assume the
     inner MEM will end up in a general register after reloading, so there's
     no need for a secondary reload.  */
  if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
    return NO_REGS;

  /* Trying to load a constant into a FP register during PIC code
     generation requires %r1 as a scratch register.  For float modes,
     the only legitimate constant is CONST0_RTX.  However, there are
     a few patterns that accept constant double operands.  */
  if (flag_pic
      && FP_REG_CLASS_P (rclass)
      && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      switch (mode)
	{
	case SImode:
	  sri->icode = CODE_FOR_reload_insi_r1;
	  break;

	case DImode:
	  sri->icode = CODE_FOR_reload_indi_r1;
	  break;

	case SFmode:
	  sri->icode = CODE_FOR_reload_insf_r1;
	  break;

	case DFmode:
	  sri->icode = CODE_FOR_reload_indf_r1;
	  break;

	default:
	  gcc_unreachable ();
	}
      return NO_REGS;
    }

  /* Secondary reloads of symbolic expressions require %r1 as a scratch
     register when we're generating PIC code or when the operand isn't
     readonly.  */
  if (pa_symbolic_expression_p (x))
    {
      if (GET_CODE (x) == HIGH)
	x = XEXP (x, 0);

      if (flag_pic || !read_only_operand (x, VOIDmode))
	{
	  switch (mode)
	    {
	    case SImode:
	      sri->icode = CODE_FOR_reload_insi_r1;
	      break;

	    case DImode:
	      sri->icode = CODE_FOR_reload_indi_r1;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  return NO_REGS;
	}
    }

  /* Profiling showed the PA port spends about 1.3% of its compilation
     time in true_regnum from calls inside pa_secondary_reload_class.  */
  if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* Handle reloads for floating point loads and stores.  */
  if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
      && FP_REG_CLASS_P (rclass))
    {
      if (MEM_P (x))
	{
	  x = XEXP (x, 0);

	  /* We don't need an intermediate for indexed and LO_SUM DLT
	     memory addresses.  When INT14_OK_STRICT is true, it might
	     appear that we could directly allow register indirect
	     memory addresses.  However, this doesn't work because we
	     don't support SUBREGs in floating-point register copies
	     and reload doesn't tell us when it's going to use a SUBREG.  */
	  if (IS_INDEX_ADDR_P (x)
	      || IS_LO_SUM_DLT_ADDR_P (x))
	    return NO_REGS;

	  /* Request intermediate general register.  */
	  return GENERAL_REGS;
	}

      /* Request a secondary reload with a general scratch register
	 for everything else.  ??? Could symbolic operands be handled
	 directly when generating non-pic PA 2.0 code?  */
      sri->icode = (in_p
		    ? direct_optab_handler (reload_in_optab, mode)
		    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* A SAR<->FP register copy requires an intermediate general register
     and secondary memory.  We need a secondary reload with a general
     scratch register for spills.  */
  if (rclass == SHIFT_REGS)
    {
      /* Handle spill.  */
      if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
	{
	  sri->icode = (in_p
			? direct_optab_handler (reload_in_optab, mode)
			: direct_optab_handler (reload_out_optab, mode));
	  return NO_REGS;
	}

      /* Handle FP copy.  */
      if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
	return GENERAL_REGS;
    }

  if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
      && REGNO_REG_CLASS (regno) == SHIFT_REGS
      && FP_REG_CLASS_P (rclass))
    return GENERAL_REGS;

  return NO_REGS;
}
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  The argument pointer
   is only marked as live on entry by df-scan when it is a fixed
   register.  It isn't a fixed register in the 64-bit runtime,
   so we need to mark it here.  */

static void
pa_extra_live_on_entry (bitmap regs)
{
  if (TARGET_64BIT)
    bitmap_set_bit (regs, ARG_POINTER_REGNUM);
}
/* Implement EH_RETURN_HANDLER_RTX.  The MEM needs to be volatile
   to prevent it from being deleted.  */

rtx
pa_eh_return_handler_rtx (void)
{
  rtx tmp;

  tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
		      TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
  tmp = gen_rtx_MEM (word_mode, tmp);
  tmp->volatil = 1;
  return tmp;
}
/* In the 32-bit runtime, arguments larger than eight bytes are passed
   by invisible reference.  As a GCC extension, we also pass anything
   with a zero or variable size by reference.

   The 64-bit runtime does not describe passing any types by invisible
   reference.  The internals of GCC can't currently handle passing
   empty structures, and zero or variable length arrays when they are
   not passed entirely on the stack or by reference.  Thus, as a GCC
   extension, we pass these types by reference.  The HP compiler doesn't
   support these types, so hopefully there shouldn't be any compatibility
   issues.  This may have to be revisited when HP releases a C99 compiler
   or updates the ABI.  */

static bool
pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
		      enum machine_mode mode, const_tree type,
		      bool named ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  if (TARGET_64BIT)
    return size <= 0;
  else
    return size <= 0 || size > 8;
}
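/* Example (derived from the size tests above): a 12-byte struct is
   passed by invisible reference in the 32-bit runtime (size > 8) but by
   value in the 64-bit runtime, where only zero-sized and variable-sized
   objects go by reference.  */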
enum direction
pa_function_arg_padding (enum machine_mode mode, const_tree type)
{
  if (mode == BLKmode
      || (TARGET_64BIT
	  && type
	  && (AGGREGATE_TYPE_P (type)
	      || TREE_CODE (type) == COMPLEX_TYPE
	      || TREE_CODE (type) == VECTOR_TYPE)))
    {
      /* Return none if justification is not required.  */
      if (type
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
	return none;

      /* The directions set here are ignored when a BLKmode argument larger
	 than a word is placed in a register.  Different code is used for
	 the stack and registers.  This makes it difficult to have a
	 consistent data representation for both the stack and registers.
	 For both runtimes, the justification and padding for arguments on
	 the stack and in registers should be identical.  */
      if (TARGET_64BIT)
	/* The 64-bit runtime specifies left justification for aggregates.  */
	return upward;
      else
	/* The 32-bit runtime architecture specifies right justification.
	   When the argument is passed on the stack, the argument is padded
	   with garbage on the left.  The HP compiler pads with zeros.  */
	return downward;
    }

  if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
    return downward;
  else
    return none;
}
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdargs or varargs is used and fill in an initial
   va_list.  A pointer to this constructor is returned.  */

static rtx
hppa_builtin_saveregs (void)
{
  rtx offset, dest;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  if (TARGET_64BIT)
    {
      int i, off;

      /* Adjust for varargs/stdarg differences.  */
      if (argadj)
	offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
      else
	offset = crtl->args.arg_offset_rtx;

      /* We need to save %r26 .. %r19 inclusive starting at offset -64
	 from the incoming arg pointer and growing to larger addresses.  */
      for (i = 26, off = -64; i >= 19; i--, off += 8)
	emit_move_insn (gen_rtx_MEM (word_mode,
				     plus_constant (Pmode,
						    arg_pointer_rtx, off)),
			gen_rtx_REG (word_mode, i));

      /* The incoming args pointer points just beyond the flushback area;
	 normally this is not a serious concern.  However, when we are doing
	 varargs/stdargs we want to make the arg pointer point to the start
	 of the incoming argument area.  */
      emit_move_insn (virtual_incoming_args_rtx,
		      plus_constant (Pmode, arg_pointer_rtx, -64));

      /* Now return a pointer to the first anonymous argument.  */
      return copy_to_reg (expand_binop (Pmode, add_optab,
					virtual_incoming_args_rtx,
					offset, 0, 0, OPTAB_LIB_WIDEN));
    }

  /* Store general registers on the stack.  */
  dest = gen_rtx_MEM (BLKmode,
		      plus_constant (Pmode, crtl->args.internal_arg_pointer,
				     -16));
  set_mem_alias_set (dest, get_varargs_alias_set ());
  set_mem_align (dest, BITS_PER_WORD);
  move_block_from_reg (23, dest, 4);

  /* move_block_from_reg will emit code to store the argument registers
     individually as scalar stores.

     However, other insns may later load from the same addresses for
     a structure load (passing a struct to a varargs routine).

     The alias code assumes that such aliasing can never happen, so we
     have to keep memory referencing insns from moving up beyond the
     last argument register store.  So we emit a blockage insn here.  */
  emit_insn (gen_blockage ());

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
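/* Layout sketch (derived from the 64-bit loop above): the incoming
   argument registers are flushed just below the arg pointer as

	%r26 -> ap-64,  %r25 -> ap-56,  ...,  %r19 -> ap-8

   so the register save area and the caller's memory arguments form one
   contiguous, addressable argument list.  */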
static void
hppa_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
static tree
hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			   gimple_seq *post_p)
{
  if (TARGET_64BIT)
    {
      /* Args grow upward.  We can use the generic routines.  */
      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }
  else /* !TARGET_64BIT */
    {
      tree ptr = build_pointer_type (type);
      tree valist_type;
      tree t, u;
      unsigned int size, ofs;
      bool indirect;

      indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
      if (indirect)
	{
	  type = ptr;
	  ptr = build_pointer_type (type);
	}
      size = int_size_in_bytes (type);
      valist_type = TREE_TYPE (valist);

      /* Args grow down.  Not handled by generic routines.  */

      u = fold_convert (sizetype, size_in_bytes (type));
      u = fold_build1 (NEGATE_EXPR, sizetype, u);
      t = fold_build_pointer_plus (valist, u);

      /* Align to 4 or 8 byte boundary depending on argument size.  */

      u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
      t = fold_convert (valist_type, t);

      t = build2 (MODIFY_EXPR, valist_type, valist, t);

      ofs = (8 - size) % 4;
      if (ofs != 0)
	t = fold_build_pointer_plus_hwi (t, ofs);

      t = fold_convert (ptr, t);
      t = build_va_arg_indirect_ref (t);

      if (indirect)
	t = build_va_arg_indirect_ref (t);

      return t;
    }
}
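/* Worked example (derived from the !TARGET_64BIT arithmetic above): for
   a 6-byte argument, valist is first decremented by 6, rounded down to
   an 8-byte boundary (size > 4 selects the -8 mask), and then biased by
   ofs = (8 - 6) % 4 = 2, reflecting the 32-bit runtime's right
   justification of small arguments within their slot.  */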
/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   Currently, TImode is not valid as the HP 64-bit runtime documentation
   doesn't document the alignment and calling conventions for this type.
   Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
   2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE.  */

static bool
pa_scalar_mode_supported_p (enum machine_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
	return true;
      if (precision == DOUBLE_TYPE_SIZE)
	return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
   it branches into the delay slot.  Otherwise, return FALSE.  */

static bool
branch_to_delay_slot_p (rtx insn)
{
  rtx jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (jump_insn == insn)
	return TRUE;

      /* We can't rely on the length of asms.  So, we return FALSE when
	 the branch is followed by an asm.  */
      if (!insn
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || extract_asm_operands (PATTERN (insn)) != NULL_RTX
	  || get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.

   This occurs when INSN has an unfilled delay slot and is followed
   by an asm.  Disaster can occur if the asm is empty and the jump
   branches into the delay slot.  So, we add a nop in the delay slot
   when this occurs.  */

static bool
branch_needs_nop_p (rtx insn)
{
  rtx jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (!insn || jump_insn == insn)
	return TRUE;

      if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
	  && get_attr_length (insn) > 0)
	break;
    }

  return FALSE;
}
/* Return TRUE if INSN, a forward jump insn, can use nullification
   to skip the following instruction.  This avoids an extra cycle due
   to a mis-predicted branch when we fall through.  */

static bool
use_skip_p (rtx insn)
{
  rtx jump_insn = next_active_insn (JUMP_LABEL (insn));

  while (insn)
    {
      insn = next_active_insn (insn);

      /* We can't rely on the length of asms, so we can't skip asms.  */
      if (!insn
          || GET_CODE (PATTERN (insn)) == ASM_INPUT
          || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
        break;
      if (get_attr_length (insn) == 4
          && jump_insn == next_active_insn (insn))
        return TRUE;
      if (get_attr_length (insn) > 0)
        break;
    }

  return FALSE;
}
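/* Added illustration (a sketch, not original source): use_skip_p
   enables replacing a forward branch over one 4-byte insn, e.g.

       comb,= %r26,%r25,L$0
       nop
       ldo 1(%r3),%r3
   L$0:

   with a compare-and-clear that nullifies the next insn when the
   branch condition holds:

       comclr,= %r26,%r25,%r0
       ldo 1(%r3),%r3

   pa_output_cbranch below emits the com%I2clr form when this
   predicate returns true.  The register names are hypothetical.  */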
/* This routine handles all the normal conditional branch sequences we
   might need to generate.  It handles compare immediate vs compare
   register, nullification of delay slots, varying length branches,
   negated branches, and all combinations of the above.  It returns the
   output appropriate to emit the branch corresponding to all given
   parameters.  */

const char *
pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot)
     is asking for a disaster.  This can happen when not optimizing and
     when jump optimization fails.

     While it is usually safe to emit nothing, this can fail if the
     preceding instruction is a nullified branch with an empty delay
     slot and the same branch target as this branch.  We could check
     for this but jump optimization should eliminate nop jumps.  It
     is always safe to emit a nop.  */
  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* The doubleword form of the cmpib instruction doesn't have the LEU
     and GTU conditions while the cmpb instruction does.  Since we accept
     zero for cmpb, we must ensure that we use cmpb for the comparison.  */
  if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
    operands[2] = gen_rtx_REG (DImode, 0);
  if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
    operands[1] = gen_rtx_REG (DImode, 0);

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     comclr instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
        else
          strcpy (buf, "{com%I2b,|cmp%I2b,}");
        if (GET_MODE (operands[1]) == DImode)
          strcat (buf, "*");
        if (negated)
          strcat (buf, "%B3");
        else
          strcat (buf, "%S3");
        if (useskip)
          strcat (buf, " %2,%r1,%%r0");
        else if (nullify)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %2,%r1,%0%#");
            else
              strcat (buf, ",n %2,%r1,%0");
          }
        else
          strcat (buf, " %2,%r1,%0");
        break;

      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "{com%I2b,|cmp%I2b,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%S3");
            else
              strcat (buf, "%B3");
            strcat (buf, ",n %2,%r1,.+12\n\tb %0");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a comb;nop rather than comiclr;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "{com%I2b,|cmp%I2b,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%B3 %2,%r1,%0%#");
            else
              strcat (buf, "%S3 %2,%r1,%0%#");
          }
        else
          {
            strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%S3");
            else
              strcat (buf, "%B3");
            if (nullify)
              strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
            else
              strcat (buf, " %2,%r1,%%r0\n\tb %0");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        /* Create a reversed conditional branch which branches around
           the following insns.  */
        if (GET_MODE (operands[1]) != DImode)
          {
            if (nullify)
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
              }
            else
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
              }
          }
        else
          {
            if (nullify)
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
              }
            else
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
              }
          }

        output_asm_insn (buf, operands);
        return pa_output_lbranch (operands[0], insn, xdelay);
    }
  return buf;
}
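/* Added note: the braced alternatives in the templates above, such as
   "{com%I2b,|cmp%I2b,}", are assembler-dialect selections.  The text
   before '|' uses the PA 1.x mnemonics (comb, comclr) and the text
   after it the PA 2.0 mnemonics (cmpb, cmpclr); which half is printed
   is controlled by the port's ASSEMBLER_DIALECT setting.  */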
/* This routine handles output of long unconditional branches that
   exceed the maximum range of a simple branch instruction.  Since
   we don't have a register available for the branch, we save register
   %r1 in the frame marker, load the branch destination DEST into %r1,
   execute the branch, and restore %r1 in the delay slot of the branch.

   Since long branches may have an insn in the delay slot and the
   delay slot is used to restore %r1, we in general need to extract
   this insn and execute it before the branch.  However, to facilitate
   use of this function by conditional branches, we also provide an
   option to not extract the delay insn so that it will be emitted
   after the long branch.  So, if there is an insn in the delay slot,
   it is extracted if XDELAY is nonzero.

   The lengths of the various long-branch sequences are 20, 16 and 24
   bytes for the portable runtime, non-PIC and PIC cases, respectively.  */

const char *
pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
{
  rtx xoperands[2];

  xoperands[0] = dest;

  /* First, free up the delay slot.  */
  if (xdelay && dbr_sequence_length () != 0)
    {
      /* We can't handle a jump in the delay slot.  */
      gcc_assert (! JUMP_P (NEXT_INSN (insn)));

      final_scan_insn (NEXT_INSN (insn), asm_out_file,
                       optimize, 0, NULL);

      /* Now delete the delay insn.  */
      SET_INSN_DELETED (NEXT_INSN (insn));
    }

  /* Output an insn to save %r1.  The runtime documentation doesn't
     specify whether the "Clean Up" slot in the caller's frame can
     be clobbered by the callee.  It isn't copied by HP's builtin
     alloca, so this suggests that it can be clobbered if necessary.
     The "Static Link" location is copied by HP builtin alloca, so
     we avoid using it.  Using the cleanup slot might be a problem
     if we have to interoperate with languages that pass cleanup
     information.  However, it should be possible to handle these
     situations with GCC's asm feature.

     The "Current RP" slot is reserved for the called procedure, so
     we try to use it when we don't have a frame of our own.  It's
     rather unlikely that we won't have a frame when we need to emit
     a very long branch.

     Really the way to go long term is a register scavenger; goto
     the target of the jump and find a register which we can use
     as a scratch to hold the value in %r1.  Then, we wouldn't have
     to free up the delay slot or clobber a slot that may be needed
     for other purposes.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        /* Use the return pointer slot in the frame marker.  */
        output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
      else
        /* Use the slot at -40 in the frame marker since HP builtin
           alloca doesn't copy it.  */
        output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        /* Use the return pointer slot in the frame marker.  */
        output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
      else
        /* Use the "Clean Up" slot in the frame marker.  In GCC,
           the only other use of this location is for copying a
           floating point double argument from a floating-point
           register to two general registers.  The copy is done
           as an "atomic" operation when outputting a call, so it
           won't interfere with our using the location here.  */
        output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
    }

  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
      if (TARGET_SOM || !TARGET_GAS)
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
          output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
        }
      else
        {
          output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
          output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
        }
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else
    /* Now output a very long branch to the original target.  */
    output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);

  /* Now restore the value of %r1 in the delay slot.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        return "ldd -16(%%r30),%%r1";

      return "ldd -40(%%r30),%%r1";
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        return "ldw -20(%%r30),%%r1";

      return "ldw -12(%%r30),%%r1";
    }
}
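/* Illustrative expansion (not emitter output) of the non-PIC, 32-bit
   case above when the current function has a frame:

       stw %r1,-12(%r30)          ; save %r1 in the frame marker
       ldil L'target,%r1
       be R'target(%sr4,%r1)      ; very long branch
       ldw -12(%r30),%r1          ; restore %r1 in the delay slot

   four instructions, matching the 16-byte non-PIC length quoted in
   the comment before pa_output_lbranch.  "target" is a placeholder.  */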
/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
              int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{extrs,|extrw,s,}");
        else
          strcpy (buf, "bb,");
        if (useskip && GET_MODE (operands[0]) == DImode)
          strcpy (buf, "extrd,s,*");
        else if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        if ((which == 0 && negated)
             || (which == 1 && ! negated))
          strcat (buf, ">=");
        else
          strcat (buf, "<");
        if (useskip)
          strcat (buf, " %0,%1,1,%%r0");
        else if (nullify && negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %0,%1,%3%#");
            else
              strcat (buf, ",n %0,%1,%3");
          }
        else if (nullify && ! negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %0,%1,%2%#");
            else
              strcat (buf, ",n %0,%1,%2");
          }
        else if (! nullify && negated)
          strcat (buf, " %0,%1,%3");
        else if (! nullify && ! negated)
          strcat (buf, " %0,%1,%2");
        break;

      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "bb,");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (negated)
              strcat (buf, ",n %0,%1,.+12\n\tb %3");
            else
              strcat (buf, ",n %0,%1,.+12\n\tb %2");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a bb;nop rather than extrs;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "bb,");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, ">=");
            else
              strcat (buf, "<");
            if (negated)
              strcat (buf, " %0,%1,%3%#");
            else
              strcat (buf, " %0,%1,%2%#");
          }
        else
          {
            if (GET_MODE (operands[0]) == DImode)
              strcpy (buf, "extrd,s,*");
            else
              strcpy (buf, "{extrs,|extrw,s,}");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (nullify && negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
            else if (nullify && ! negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
            else if (negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb %3");
            else
              strcat (buf, " %0,%1,1,%%r0\n\tb %2");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        else
          strcpy (buf, "bb,");
        if ((which == 0 && negated)
            || (which == 1 && !negated))
          strcat (buf, "<");
        else
          strcat (buf, ">=");
        if (nullify)
          strcat (buf, ",n %0,%1,.+%4");
        else
          strcat (buf, " %0,%1,.+%4");
        output_asm_insn (buf, operands);
        return pa_output_lbranch (negated ? operands[3] : operands[2],
                                  insn, xdelay);
    }
  return buf;
}
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
               int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{vextrs,|extrw,s,}");
        else
          strcpy (buf, "{bvb,|bb,}");
        if (useskip && GET_MODE (operands[0]) == DImode)
          strcpy (buf, "extrd,s,*");
        else if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        if ((which == 0 && negated)
             || (which == 1 && ! negated))
          strcat (buf, ">=");
        else
          strcat (buf, "<");
        if (useskip)
          strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
        else if (nullify && negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
            else
              strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
          }
        else if (nullify && ! negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
            else
              strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
          }
        else if (! nullify && negated)
          strcat (buf, "{ %0,%3| %0,%%sar,%3}");
        else if (! nullify && ! negated)
          strcat (buf, "{ %0,%2| %0,%%sar,%2}");
        break;

      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "{bvb,|bb,}");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (negated)
              strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
            else
              strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a bb;nop rather than extrs;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "{bvb,|bb,}");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, ">=");
            else
              strcat (buf, "<");
            if (negated)
              strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
            else
              strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
          }
        else
          {
            strcpy (buf, "{vextrs,|extrw,s,}");
            if (GET_MODE (operands[0]) == DImode)
              strcpy (buf, "extrd,s,*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (nullify && negated)
              strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
            else if (nullify && ! negated)
              strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
            else if (negated)
              strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
            else
              strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        else
          strcpy (buf, "{bvb,|bb,}");
        if ((which == 0 && negated)
            || (which == 1 && !negated))
          strcat (buf, "<");
        else
          strcat (buf, ">=");
        if (nullify)
          strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
        else
          strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
        output_asm_insn (buf, operands);
        return pa_output_lbranch (negated ? operands[3] : operands[2],
                                  insn, xdelay);
    }
  return buf;
}
/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */
const char *
pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
        return "ldo %1(%0),%0";
      else if (which_alternative == 1)
        {
          output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
          output_asm_insn ("ldw -16(%%r30),%4", operands);
          output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
          return "{fldws|fldw} -16(%%r30),%0";
        }
      else
        {
          output_asm_insn ("ldw %0,%4", operands);
          return "ldo %1(%4),%4\n\tstw %4,%0";
        }
    }

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
         as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
        nullify = 1;

      /* If this is a short forward conditional branch which did not get
         its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
        nullify = forward_branch_p (insn);

      switch (length)
        {
        case 4:
          if (nullify)
            {
              if (branch_needs_nop_p (insn))
                return "addib,%C2,n %1,%0,%3%#";
              else
                return "addib,%C2,n %1,%0,%3";
            }
          else
            return "addib,%C2 %1,%0,%3";

        case 8:
          /* Handle weird backwards branch with a filled delay slot
             which is nullified.  */
          if (dbr_sequence_length () != 0
              && ! forward_branch_p (insn)
              && nullify)
            return "addib,%N2,n %1,%0,.+12\n\tb %3";
          /* Handle short backwards branch with an unfilled delay slot.
             Using a addb;nop rather than addi;bl saves 1 cycle for both
             taken and untaken branches.  */
          else if (dbr_sequence_length () == 0
                   && ! forward_branch_p (insn)
                   && INSN_ADDRESSES_SET_P ()
                   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
            return "addib,%C2 %1,%0,%3%#";

          /* Handle normal cases.  */
          if (nullify)
            return "addi,%N2 %1,%0,%0\n\tb,n %3";
          else
            return "addi,%N2 %1,%0,%0\n\tb %3";

        default:
          /* The reversed conditional branch must branch over one additional
             instruction if the delay slot is filled and needs to be extracted
             by pa_output_lbranch.  If the delay slot is empty or this is a
             nullified forward branch, the instruction after the reversed
             condition branch must be nullified.  */
          if (dbr_sequence_length () == 0
              || (nullify && forward_branch_p (insn)))
            {
              nullify = 1;
              xdelay = 0;
              operands[4] = GEN_INT (length);
            }
          else
            {
              xdelay = 1;
              operands[4] = GEN_INT (length + 4);
            }

          if (nullify)
            output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
          else
            output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);

          return pa_output_lbranch (operands[3], insn, xdelay);
        }
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
         increment the GR, store the GR into MEM, and finally reload
         the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
                       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (length == 24)
        return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 28)
        return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
        {
          operands[5] = GEN_INT (length - 16);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
          output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
         happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (length == 12)
        return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else if (length == 16)
        return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
      else
        {
          operands[5] = GEN_INT (length - 4);
          output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
}
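/* Sketch of the which_alternative == 1 path above (hypothetical
   registers and step, for illustration): with the loop counter in %fr4,

       fstws %fr4,-16(%r30)       ; counter to the scratch slot
       ldw -16(%r30),%r28         ; into a general register
       ldo -1(%r28),%r28          ; apply the %1 increment
       stw %r28,-16(%r30)
       comb,<> %r0,%r28,L$loop    ; branch test
       fldws -16(%r30),%fr4       ; reload FP in the delay slot

   six instructions, i.e. the length == 24 variant.  */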
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */
const char *
pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
                int reverse_comparison)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
        return "copy %1,%0";
      else if (which_alternative == 1)
        {
          output_asm_insn ("stw %1,-16(%%r30)", operands);
          return "{fldws|fldw} -16(%%r30),%0";
        }
      else if (which_alternative == 2)
        return "stw %1,%0";
      else
        return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
         as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
        nullify = 1;

      /* If this is a short forward conditional branch which did not get
         its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
        nullify = forward_branch_p (insn);

      switch (length)
        {
        case 4:
          if (nullify)
            {
              if (branch_needs_nop_p (insn))
                return "movb,%C2,n %1,%0,%3%#";
              else
                return "movb,%C2,n %1,%0,%3";
            }
          else
            return "movb,%C2 %1,%0,%3";

        case 8:
          /* Handle weird backwards branch with a filled delay slot
             which is nullified.  */
          if (dbr_sequence_length () != 0
              && ! forward_branch_p (insn)
              && nullify)
            return "movb,%N2,n %1,%0,.+12\n\tb %3";

          /* Handle short backwards branch with an unfilled delay slot.
             Using a movb;nop rather than or;bl saves 1 cycle for both
             taken and untaken branches.  */
          else if (dbr_sequence_length () == 0
                   && ! forward_branch_p (insn)
                   && INSN_ADDRESSES_SET_P ()
                   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
            return "movb,%C2 %1,%0,%3%#";
          /* Handle normal cases.  */
          if (nullify)
            return "or,%N2 %1,%%r0,%0\n\tb,n %3";
          else
            return "or,%N2 %1,%%r0,%0\n\tb %3";

        default:
          /* The reversed conditional branch must branch over one additional
             instruction if the delay slot is filled and needs to be extracted
             by pa_output_lbranch.  If the delay slot is empty or this is a
             nullified forward branch, the instruction after the reversed
             condition branch must be nullified.  */
          if (dbr_sequence_length () == 0
              || (nullify && forward_branch_p (insn)))
            {
              nullify = 1;
              xdelay = 0;
              operands[4] = GEN_INT (length);
            }
          else
            {
              xdelay = 1;
              operands[4] = GEN_INT (length + 4);
            }

          if (nullify)
            output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
          else
            output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);

          return pa_output_lbranch (operands[3], insn, xdelay);
        }
    }
  /* Deal with gross reload for FP destination register case.  */
  else if (which_alternative == 1)
    {
      /* Move source register to MEM, perform the branch test, then
         finally load the FP register from MEM from within the branch's
         delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (length == 12)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 16)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
        {
          operands[4] = GEN_INT (length - 4);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
          output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
         happens in the branch's delay slot.  */
      if (length == 8)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else if (length == 12)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
      else
        {
          operands[4] = GEN_INT (length);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
                           operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (length == 8)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else if (length == 12)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
      else
        {
          operands[4] = GEN_INT (length);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
                           operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
}
/* Copy any FP arguments in INSN into integer registers.  */
static void
copy_fp_args (rtx insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
          && GET_CODE (XEXP (use, 0)) == REG
          && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
        {
          /* Copy the FP register into an integer register via memory.  */
          if (arg_mode == SFmode)
            {
              xoperands[0] = XEXP (use, 0);
              xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
              output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
              output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
            }
          else
            {
              xoperands[0] = XEXP (use, 0);
              xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
              output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
              output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
              output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
            }
        }
    }
}
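/* Added note: as an example of the register selection above, an SFmode
   argument living in FP register 32 is stored to the -16(%sr0,%r30)
   scratch slot and reloaded into general register 26 - (32 - 32) / 2,
   i.e. %r26; the argument relocation stub mentioned in pa_output_call
   later copies it back.  */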
/* Compute length of the FP argument copy sequence for INSN.  */
static int
length_fp_args (rtx insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
          && GET_CODE (XEXP (use, 0)) == REG
          && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
        {
          if (arg_mode == SFmode)
            length += 8;
          else
            length += 12;
        }
    }

  return length;
}
/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by pa_output_millicode_call.
   We include the delay slot in the returned length as it is better to
   over estimate the length than to under estimate it.  */

int
pa_attr_length_millicode_call (rtx_insn *insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
        return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
        return 8;

      if (!flag_pic)
        return 12;

      return 24;
    }
}
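/* Worked example (illustrative): for 32-bit code with
   !TARGET_LONG_CALLS, !TARGET_PORTABLE_RUNTIME and a millicode routine
   within MAX_PCREL17F_OFFSET, the function returns 8 -- one 4-byte
   "{bl|b,l} %0,%2" branch plus its 4-byte delay slot, the short form
   emitted by pa_output_millicode_call below.  */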
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  int distance;
  rtx seq_insn;
  rtx xoperands[3];

  xoperands[0] = call_dest;
  xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because it
     has an attribute type of sh_func_adrs, it never has a nonzero
     sequence length (i.e., the delay slot is never filled).  */
  if (!TARGET_LONG_CALLS
      && (attr_length == 8
          || (attr_length == 28
              && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
    {
      output_asm_insn ("{bl|b,l} %0,%2", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
        {
          /* It might seem that one insn could be saved by accessing
             the millicode function using the linkage table.  However,
             this doesn't work in shared libraries and other dynamically
             loaded objects.  Using a pc-relative sequence also avoids
             problems related to the implicit use of the gp register.  */
          output_asm_insn ("b,l .+8,%%r1", xoperands);

          if (TARGET_GAS)
            {
              output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
            }
          else
            {
              xoperands[1] = gen_label_rtx ();
              output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (xoperands[1]));
              output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
            }

          output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
        }
      else if (TARGET_PORTABLE_RUNTIME)
        {
          /* Pure portable runtime doesn't allow be/ble; we also don't
             have PIC support in the assembler/linker, so this sequence
             is needed.  */

          /* Get the address of our target into %r1.  */
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

          /* Get our return address into %r31.  */
          output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
          output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

          /* Jump to our target address in %r1.  */
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
        }
      else if (!flag_pic)
        {
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          if (TARGET_PA_20)
            output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
          else
            output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
        }
      else
        {
          output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
          output_asm_insn ("addi 16,%%r1,%%r31", xoperands);

          if (TARGET_SOM || !TARGET_GAS)
            {
              /* The HP assembler can generate relocations for the
                 difference of two symbols.  GAS can do this for a
                 millicode symbol but not an arbitrary external
                 symbol when generating SOM output.  */
              xoperands[1] = gen_label_rtx ();
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (xoperands[1]));
              output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
            }
          else
            {
              output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
                               xoperands);
            }

          /* Jump to our target address in %r1.  */
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
        }
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
    return "";

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  /* See if the return address can be adjusted.  Use the containing
     sequence insn's address.  */
  if (INSN_ADDRESSES_SET_P ())
    {
      seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
      distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
                  - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance))
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
        }
      else
        /* ??? This branch may not reach its target.  */
        output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    /* ??? This branch may not reach its target.  */
    output_asm_insn ("nop\n\tb,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}
/* Return the attribute length of the call instruction INSN.  The SIBCALL
   flag indicates whether INSN is a regular call or a sibling call.  The
   length returned must be longer than the code actually generated by
   pa_output_call.  Since branch shortening is done before delay branch
   sequencing, there is no way to determine whether or not the delay
   slot will be filled during branch shortening.  Even when the delay
   slot is filled, we may have to add a nop if the delay slot contains
   a branch that can't reach its target.  Thus, we always have to include
   the delay slot in the length estimate.  This used to be done in
   pa_adjust_insn_length but we do it here now as some sequences always
   fill the delay slot and we can save four bytes in the estimate for
   these sequences.  */

int
pa_attr_length_call (rtx_insn *insn, int sibcall)
{
  int local_call;
  rtx call, call_dest;
  tree call_decl;
  int length = 0;
  rtx pat = PATTERN (insn);
  unsigned long distance = -1;

  gcc_assert (CALL_P (insn));

  if (INSN_ADDRESSES_SET_P ())
    {
      unsigned long total;

      total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  gcc_assert (GET_CODE (pat) == PARALLEL);

  /* Get the call rtx.  */
  call = XVECEXP (pat, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);

  gcc_assert (GET_CODE (call) == CALL);

  /* Determine if this is a local call.  */
  call_dest = XEXP (XEXP (call, 0), 0);
  call_decl = SYMBOL_REF_DECL (call_dest);
  local_call = call_decl && targetm.binds_local_p (call_decl);

  /* pc-relative branch.  */
  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !sibcall && distance < 7600000)
          || distance < MAX_PCREL17F_OFFSET))
    length += 8;

  /* 64-bit plabel sequence.  */
  else if (TARGET_64BIT && !local_call)
    length += sibcall ? 28 : 24;

  /* non-pic long absolute branch sequence.  */
  else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
    length += 12;

  /* long pc-relative branch sequence.  */
  else if (TARGET_LONG_PIC_SDIFF_CALL
           || (TARGET_GAS && !TARGET_SOM
               && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
    {
      length += 20;

      if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
        length += 8;
    }

  /* 32-bit plabel sequence.  */
  else
    {
      length += 32;

      if (TARGET_SOM)
        length += length_fp_args (insn);

      if (flag_pic)
        length += 4;

      if (!TARGET_PA_20)
        {
          if (!sibcall)
            length += 8;

          if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
            length += 8;
        }
    }

  return length;
}
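/* Worked example (illustrative): a 32-bit PIC plabel call takes the
   final else branch above: 32 bytes of base sequence, +4 for flag_pic,
   and on !TARGET_PA_20 another +8 for the non-sibcall return-address
   setup plus +8 for the space-register load, i.e. 52 bytes before any
   TARGET_SOM FP argument copies are added.  */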
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
{
  int delay_insn_deleted = 0;
  int delay_slot_filled = 0;
  int seq_length = dbr_sequence_length ();
  tree call_decl = SYMBOL_REF_DECL (call_dest);
  int local_call = call_decl && targetm.binds_local_p (call_decl);
  rtx xoperands[2];

  xoperands[0] = call_dest;

  /* Handle the common case where we're sure that the branch will reach
     the beginning of the "$CODE$" subspace.  This is the beginning of
     the current function if we are in a named section.  */
  if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
    {
      xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT && !local_call)
        {
          /* ??? As far as I can tell, the HP linker doesn't support the
             long pc-relative sequence described in the 64-bit runtime
             architecture.  So, we use a slightly longer indirect call.  */
          xoperands[0] = pa_get_deferred_plabel (call_dest);
          xoperands[1] = gen_label_rtx ();

          /* If this isn't a sibcall, we put the load of %r27 into the
             delay slot.  We can't do this in a sibcall as we don't
             have a second call-clobbered scratch register available.  */
          if (seq_length != 0
              && ! JUMP_P (NEXT_INSN (insn))
              && !sibcall)
            {
              final_scan_insn (NEXT_INSN (insn), asm_out_file,
                               optimize, 0, NULL);

              /* Now delete the delay insn.  */
              SET_INSN_DELETED (NEXT_INSN (insn));
              delay_insn_deleted = 1;
            }

          output_asm_insn ("addil LT'%0,%%r27", xoperands);
          output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
          output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);

          if (sibcall)
            {
              output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
              output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
              output_asm_insn ("bve (%%r1)", xoperands);
            }
          else
            {
              output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
              output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
              output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
              delay_slot_filled = 1;
            }
        }
      else
        {
          int indirect_call = 0;

          /* Emit a long call.  There are several different sequences
             of increasing length and complexity.  In most cases,
             they don't allow an instruction in the delay slot.  */
          if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
              && !TARGET_LONG_PIC_SDIFF_CALL
              && !(TARGET_GAS && !TARGET_SOM
                   && (TARGET_LONG_PIC_PCREL_CALL || local_call))
              && !TARGET_64BIT)
            indirect_call = 1;

          if (seq_length != 0
              && ! JUMP_P (NEXT_INSN (insn))
              && !sibcall
              && (!TARGET_PA_20
                  || indirect_call
                  || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
            {
              /* A non-jump insn in the delay slot.  By definition we can
                 emit this insn before the call (and in fact before argument
                 relocating).  */
              final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
                               NULL);

              /* Now delete the delay insn.  */
              SET_INSN_DELETED (NEXT_INSN (insn));
              delay_insn_deleted = 1;
            }

          if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
            {
              /* This is the best sequence for making long calls in
                 non-pic code.  Unfortunately, GNU ld doesn't provide
                 the stub needed for external calls, and GAS's support
                 for this with the SOM linker is buggy.  It is safe
                 to use this for local calls.  */
              output_asm_insn ("ldil L'%0,%%r1", xoperands);
              if (sibcall)
                output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
              else
                {
                  if (TARGET_PA_20)
                    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
                                     xoperands);
                  else
                    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);

                  output_asm_insn ("copy %%r31,%%r2", xoperands);
                  delay_slot_filled = 1;
                }
            }
          else
            {
              if (TARGET_LONG_PIC_SDIFF_CALL)
                {
                  /* The HP assembler and linker can handle relocations
                     for the difference of two symbols.  The HP assembler
                     recognizes the sequence as a pc-relative call and
                     the linker provides stubs when needed.  */
                  xoperands[1] = gen_label_rtx ();
                  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
                  output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
                  targetm.asm_out.internal_label (asm_out_file, "L",
                                                  CODE_LABEL_NUMBER (xoperands[1]));
                  output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
                }
              else if (TARGET_GAS && !TARGET_SOM
                       && (TARGET_LONG_PIC_PCREL_CALL || local_call))
                {
                  /* GAS currently can't generate the relocations that
                     are needed for the SOM linker under HP-UX using this
                     sequence.  The GNU linker doesn't generate the stubs
                     that are needed for external calls on TARGET_ELF32
                     with this sequence.  For now, we have to use a
                     longer plabel sequence when using GAS.  */
                  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
                  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
                                   xoperands);
                  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
                                   xoperands);
                }
              else
                {
                  /* Emit a long plabel-based call sequence.  This is
                     essentially an inline implementation of $$dyncall.
                     We don't actually try to call $$dyncall as this is
                     as difficult as calling the function itself.  */
                  xoperands[0] = pa_get_deferred_plabel (call_dest);
                  xoperands[1] = gen_label_rtx ();

                  /* Since the call is indirect, FP arguments in registers
                     need to be copied to the general registers.  Then, the
                     argument relocation stub will copy them back.  */
                  if (TARGET_SOM)
                    copy_fp_args (insn);

                  if (flag_pic)
                    {
                      output_asm_insn ("addil LT'%0,%%r19", xoperands);
                      output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
                      output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
                    }
                  else
                    {
                      output_asm_insn ("addil LR'%0-$global$,%%r27",
                                       xoperands);
                      output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
                                       xoperands);
                    }

                  output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
                  output_asm_insn ("depi 0,31,2,%%r1", xoperands);
                  output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
                  output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);

                  if (!sibcall && !TARGET_PA_20)
                    {
                      output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
                      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                        output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
                      else
                        output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
                    }

                  if (TARGET_PA_20)
                    {
                      if (sibcall)
                        output_asm_insn ("bve (%%r1)", xoperands);
                      else
                        {
                          if (indirect_call)
                            {
                              output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
                              output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
                              delay_slot_filled = 1;
                            }
                          else
                            output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
                        }
                    }
                  else
                    {
                      if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
                        output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
                                         xoperands);

                      if (sibcall)
                        {
                          if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                            output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
                          else
                            output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
                        }
                      else
                        {
                          if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                            output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
                          else
                            output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);

                          if (indirect_call)
                            output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
                          else
                            output_asm_insn ("copy %%r31,%%r2", xoperands);
                          delay_slot_filled = 1;
                        }
                    }
                }
            }
        }
    }

  if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0
      || delay_insn_deleted
      || ! JUMP_P (NEXT_INSN (insn)))
    return "";

  /* A sibcall should never have a branch in the delay slot.  */
  gcc_assert (!sibcall);

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
    {
      /* See if the return address can be adjusted.  Use the containing
         sequence insn's address.  This would break the regular call/return
         relationship assumed by the table based eh unwinder, so only do that
         if the call is not possibly throwing.  */
      rtx seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
      int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
                      - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance)
          && !(can_throw_internal (insn) || can_throw_external (insn)))
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
        }
      else
        output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    output_asm_insn ("b,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}
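/* Sketch of the return-address adjustment above (an added note): when
   the delay slot holds an unconditional jump whose target is within
   14 bits, the jump is deleted and an "ldo target-here(%r2),%r2" is
   placed in the delay slot instead, so the callee returns straight to
   the jump's target.  This is skipped for potentially-throwing calls
   to keep the call/return pairing the EH unwinder expects.  */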
/* Return the attribute length of the indirect call instruction INSN.
   The length must match the code generated by pa_output_indirect_call.
   The returned length includes the delay slot.  Currently, the delay
   slot of an indirect call sequence is not exposed and it is used by
   the sequence itself.  */

int
pa_attr_length_indirect_call (rtx_insn *insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  if (TARGET_64BIT)
    return 12;

  if (TARGET_FAST_INDIRECT_CALLS
      || (!TARGET_LONG_CALLS
          && !TARGET_PORTABLE_RUNTIME
          && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
              || distance < MAX_PCREL17F_OFFSET)))
    return 8;

  if (flag_pic)
    return 20;

  if (TARGET_PORTABLE_RUNTIME)
    return 16;

  /* Out of reach, can use ble.  */
  return 12;
}
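/* Added note: these return values uniquely identify the sequences in
   pa_output_indirect_call below -- 8 for the reachable $$dyncall
   branch, 12 for the ble form (and the 64-bit sequence), 16 for the
   portable runtime, and 20 for the long PIC call.  */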
const char *
pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
{
  rtx xoperands[1];

  if (TARGET_64BIT)
    {
      xoperands[0] = call_dest;
      output_asm_insn ("ldd 16(%0),%%r2", xoperands);
      output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
      return "";
    }

  /* First the special case for kernels, level 0 systems, etc.  */
  if (TARGET_FAST_INDIRECT_CALLS)
    return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";

  /* Now the normal case -- we can reach $$dyncall directly or
     we're sure that we can get there via a long-branch stub.

     No need to check target flags as the length uniquely identifies
     the remaining cases.  */
  if (pa_attr_length_indirect_call (insn) == 8)
    {
      /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
         $$dyncall.  Since BLE uses %r31 as the link register, the 22-bit
         variant of the B,L instruction can't be used on the SOM target.  */
      if (TARGET_PA_20 && !TARGET_SOM)
        return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
      else
        return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
    }

  /* Long millicode call, but we are not generating PIC or portable runtime
     code.  */
  if (pa_attr_length_indirect_call (insn) == 12)
    return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";

  /* Long millicode call for portable runtime.  */
  if (pa_attr_length_indirect_call (insn) == 16)
    return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";

  /* We need a long PIC call to $$dyncall.  */
  xoperands[0] = NULL_RTX;
  output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
  if (TARGET_SOM || !TARGET_GAS)
    {
      xoperands[0] = gen_label_rtx ();
      output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
      targetm.asm_out.internal_label (asm_out_file, "L",
                                      CODE_LABEL_NUMBER (xoperands[0]));
      output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
    }
  else
    {
      output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
      output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
                       xoperands);
    }
  output_asm_insn ("bv %%r0(%%r1)", xoperands);
  output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
  return "";
}
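/* Illustrative expansion of the long PIC case above (SOM/HP-as form,
   L$n is a placeholder label):

       bl .+8,%r2                   ; pc into %r2
       addil L'$$dyncall-L$n,%r2    ; delay slot
   L$n:
       ldo R'$$dyncall-L$n(%r1),%r1
       bv %r0(%r1)                  ; enter $$dyncall
       ldo 12(%r2),%r2              ; fix the return address

   five instructions, matching the length of 20 returned by
   pa_attr_length_indirect_call for this case.  */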
/* In HPUX 8.0's shared library scheme, special relocations are needed
   for function labels if they might be passed to a function
   in a shared library (because shared libraries don't live in code
   space), and special magic is needed to construct their address.  */

void
pa_encode_label (rtx sym)
{
  const char *str = XSTR (sym, 0);
  int len = strlen (str) + 1;
  char *newstr, *p;

  p = newstr = XALLOCAVEC (char, len + 1);
  *p++ = '@';
  strcpy (p, str);

  XSTR (sym, 0) = ggc_alloc_string (newstr, len);
}
static void
pa_encode_section_info (tree decl, rtx rtl, int first)
{
  int old_referenced = 0;

  if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
    old_referenced
      = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;

  default_encode_section_info (decl, rtl, first);

  if (first && TEXT_SPACE_P (decl))
    {
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
      if (TREE_CODE (decl) == FUNCTION_DECL)
        pa_encode_label (XEXP (rtl, 0));
    }
  else if (old_referenced)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
}
/* This is sort of inverse to pa_encode_section_info.  */

static const char *
pa_strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}
/* Returns 1 if OP is a function label involved in a simple addition
   with a constant.  Used to keep certain patterns from matching
   during instruction combination.  */
int
pa_is_function_label_plus_const (rtx op)
{
  /* Strip off any CONST.  */
  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  return (GET_CODE (op) == PLUS
          && function_label_operand (XEXP (op, 0), VOIDmode)
          && GET_CODE (XEXP (op, 1)) == CONST_INT);
}
8240 /* Output assembly code for a thunk to FUNCTION. */
8243 pa_asm_output_mi_thunk (FILE *file
, tree thunk_fndecl
, HOST_WIDE_INT delta
,
8244 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
8247 static unsigned int current_thunk_number
;
8248 int val_14
= VAL_14_BITS_P (delta
);
8249 unsigned int old_last_address
= last_address
, nbytes
= 0;
8253 xoperands
[0] = XEXP (DECL_RTL (function
), 0);
8254 xoperands
[1] = XEXP (DECL_RTL (thunk_fndecl
), 0);
8255 xoperands
[2] = GEN_INT (delta
);
8257 final_start_function (emit_barrier (), file
, 1);
8259 /* Output the thunk. We know that the function is in the same
8260 translation unit (i.e., the same space) as the thunk, and that
8261 thunks are output after their method. Thus, we don't need an
8262 external branch to reach the function. With SOM and GAS,
8263 functions and thunks are effectively in different sections.
8264 Thus, we can always use a IA-relative branch and the linker
8265 will add a long branch stub if necessary.
8267 However, we have to be careful when generating PIC code on the
8268 SOM port to ensure that the sequence does not transfer to an
8269 import stub for the target function as this could clobber the
8270 return value saved at SP-24. This would also apply to the
8271 32-bit linux port if the multi-space model is implemented. */
8272 if ((!TARGET_LONG_CALLS
&& TARGET_SOM
&& !TARGET_PORTABLE_RUNTIME
8273 && !(flag_pic
&& TREE_PUBLIC (function
))
8274 && (TARGET_GAS
|| last_address
< 262132))
8275 || (!TARGET_LONG_CALLS
&& !TARGET_SOM
&& !TARGET_PORTABLE_RUNTIME
	  && ((targetm_common.have_named_sections
	       && DECL_SECTION_NAME (thunk_fndecl) != NULL
	       /* The GNU 64-bit linker has rather poor stub management.
		  So, we use a long branch from thunks that aren't in
		  the same section as the target function.  */
	       && ((!TARGET_64BIT
		    && (DECL_SECTION_NAME (thunk_fndecl)
			!= DECL_SECTION_NAME (function)))
		   || ((DECL_SECTION_NAME (thunk_fndecl)
			== DECL_SECTION_NAME (function))
		       && last_address < 262132)))
	      /* In this case, we need to be able to reach the start of
		 the stub table even though the function is likely closer
		 and can be jumped to directly.  */
	      || (targetm_common.have_named_sections
		  && DECL_SECTION_NAME (thunk_fndecl) == NULL
		  && DECL_SECTION_NAME (function) == NULL
		  && total_code_bytes < MAX_PCREL17F_OFFSET)
	      || (!targetm_common.have_named_sections
		  && total_code_bytes < MAX_PCREL17F_OFFSET))))
    {
      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("b %0", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 8;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 12;
	}
    }
  else if (TARGET_64BIT)
    {
      /* We only have one call-clobbered scratch register, so we can't
	 make use of the delay slot if delta doesn't fit in 14 bits.  */
      if (!val_14)
	{
	  output_asm_insn ("addil L'%2,%%r26", xoperands);
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	}

      output_asm_insn ("b,l .+8,%%r1", xoperands);

      if (TARGET_GAS)
	{
	  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	}
      else
	{
	  xoperands[3] = GEN_INT (val_14 ? 8 : 16);
	  output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
	}

      if (val_14)
	{
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 20;
	}
      else
	{
	  output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
	  nbytes += 24;
	}
    }
  else if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);

      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 16;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 20;
	}
    }
  else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      /* The function is accessible from outside this module.  The only
	 way to avoid an import stub between the thunk and function is to
	 call the function directly with an indirect sequence similar to
	 that used by $$dyncall.  This is possible because $$dyncall acts
	 as the import stub in an indirect call.  */
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
      xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
      output_asm_insn ("addil LT'%3,%%r19", xoperands);
      output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
      output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
      output_asm_insn ("depi 0,31,2,%%r22", xoperands);
      output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);

      if (!val_14)
	{
	  output_asm_insn ("addil L'%2,%%r26", xoperands);
	  nbytes += 4;
	}

      if (TARGET_PA_20)
	{
	  output_asm_insn ("bve (%%r22)", xoperands);
	  nbytes += 36;
	}
      else if (TARGET_NO_SPACE_REGS)
	{
	  output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
	  nbytes += 36;
	}
      else
	{
	  output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
	  output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
	  output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
	  nbytes += 44;
	}

      if (val_14)
	output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
      else
	output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);

      if (TARGET_SOM || !TARGET_GAS)
	{
	  output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
	}
      else
	{
	  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
	}

      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 20;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 24;
	}
    }
  else
    {
      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("ldil L'%0,%%r22", xoperands);
      output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 12;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 16;
	}
    }

  final_end_function ();

  if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      switch_to_section (data_section);
      output_asm_insn (".align 4", xoperands);
      ASM_OUTPUT_LABEL (file, label);
      output_asm_insn (".word P'%0", xoperands);
    }

  current_thunk_number++;
  nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
	    & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
  last_address += nbytes;
  if (old_last_address > last_address)
    last_address = UINT_MAX;
  update_total_code_bytes (nbytes);
}
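
/* As an illustration of the rounding above, assume FUNCTION_BOUNDARY
   is 32 bits (4 bytes) and the thunk used 13 bytes of code.  Then

     nbytes = (13 + 4 - 1) & ~(4 - 1) = 16 & ~3 = 16,

   i.e., NBYTES is rounded up to the next multiple of the function
   boundary before it is added to LAST_ADDRESS.  */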
/* Only direct calls to static functions are allowed to be sibling (tail)
   call optimized.

   This restriction is necessary because some linker generated stubs will
   store return pointers into rp' in some cases which might clobber a
   live value already in rp'.

   In a sibcall the current function and the target function share stack
   space.  Thus if the path to the current function and the path to the
   target function save a value in rp', they save the value into the
   same stack slot, which has undesirable consequences.

   Because of the deferred binding nature of shared libraries any function
   with external scope could be in a different load module and thus require
   rp' to be saved when calling that function.  So sibcall optimizations
   can only be safe for static functions.

   Note that GCC never needs return value relocations, so we don't have to
   worry about static calls with return value relocations (which require
   saving rp').

   It is safe to perform a sibcall optimization when the target function
   will never return.  */

static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  if (TARGET_PORTABLE_RUNTIME)
    return false;

  /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
     single subspace mode and the call is not indirect.  As far as I know,
     there is no operating system support for the multiple subspace mode.
     It might be possible to support indirect calls if we didn't use
     $$dyncall (see the indirect sequence generated in pa_output_call).  */
  if (TARGET_ELF32)
    return (decl != NULL_TREE);

  /* Sibcalls are not ok because the arg pointer register is not a fixed
     register.  This prevents the sibcall optimization from occurring.  In
     addition, there are problems with stub placement using GNU ld.  This
     is because a normal sibcall branch uses a 17-bit relocation while
     a regular call branch uses a 22-bit relocation.  As a result, more
     care needs to be taken in the placement of long-branch stubs.  */
  if (TARGET_64BIT)
    return false;

  /* Sibcalls are only ok within a translation unit.  */
  return (decl && !TREE_PUBLIC (decl));
}

/* ??? Addition is not commutative on the PA due to the weird implicit
   space register selection rules for memory addresses.  Therefore, we
   don't consider a + b == b + a, as this might be inside a MEM.  */

static bool
pa_commutative_p (const_rtx x, int outer_code)
{
  return (COMMUTATIVE_P (x)
	  && (TARGET_NO_SPACE_REGS
	      || (outer_code != UNKNOWN && outer_code != MEM)
	      || GET_CODE (x) != PLUS));
}
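
/* Illustrative note: in a memory reference, the space register is
   selected from the base operand of the address.  If (plus a b) inside
   a MEM were reassociated to (plus b a), a different operand could be
   treated as the base and a different space register could be selected
   on targets with non-equivalent space registers.  Hence the PLUS is
   only treated as commutative outside a MEM, or when space registers
   are disabled.  */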

/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpyadd instructions.  */
int
pa_fmpyaddoperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the addition.  One of the input operands must
     be the same as the output operand.  */
  if (! rtx_equal_p (operands[3], operands[4])
      && ! rtx_equal_p (operands[3], operands[5]))
    return 0;

  /* Inout operand of add cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* multiply cannot feed into addition operands.  */
  if (rtx_equal_p (operands[4], operands[0])
      || rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpyadd.  */
  return 1;
}
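
/* A hypothetical DFmode operand set that satisfies the checks above
   (register choices are purely illustrative):

     operands[0] = fr10   fmpy result
     operands[1] = fr11   fmpy input 1
     operands[2] = fr12   fmpy input 2
     operands[3] = fr13   fadd inout operand
     operands[4] = fr13   same as operands[3]
     operands[5] = fr14   fadd input

   The add has only two real operands (fr13 and fr14), the inout
   operand fr13 is disjoint from all of the multiply's operands, and
   the multiply result fr10 does not feed the addition.  */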

#if !defined(USE_COLLECT2)
static void
pa_asm_out_constructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef CTORS_SECTION_ASM_OP
  default_ctor_section_asm_out_constructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_constructor (symbol, priority);
# else
  default_stabs_asm_out_constructor (symbol, priority);
# endif
#endif
}

static void
pa_asm_out_destructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef DTORS_SECTION_ASM_OP
  default_dtor_section_asm_out_destructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_destructor (symbol, priority);
# else
  default_stabs_asm_out_destructor (symbol, priority);
# endif
#endif
}
#endif

/* This function places uninitialized global data in the bss section.
   The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
   function on the SOM port to prevent uninitialized global data from
   being placed in the data section.  */

void
pa_asm_output_aligned_bss (FILE *stream,
			   const char *name,
			   unsigned HOST_WIDE_INT size,
			   unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
  ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
#endif

  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}

/* Both the HP and GNU assemblers under HP-UX provide a .comm directive
   that doesn't allow the alignment of global common storage to be directly
   specified.  The SOM linker aligns common storage based on the rounded
   value of the NUM_BYTES parameter in the .comm directive.  It's not
   possible to use the .align directive as it doesn't affect the alignment
   of the label associated with a .comm directive.  */

void
pa_asm_output_aligned_common (FILE *stream,
			      const char *name,
			      unsigned HOST_WIDE_INT size,
			      unsigned int align)
{
  unsigned int max_common_align;

  max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
  if (align > max_common_align)
    {
      warning (0, "alignment (%u) for %s exceeds maximum alignment "
	       "for global common data.  Using %u",
	       align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
      align = max_common_align;
    }

  switch_to_section (bss_section);

  assemble_name (stream, name);
  fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED "\n",
	   MAX (size, align / BITS_PER_UNIT));
}
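
/* For example, a 2-byte common symbol requesting 8-byte alignment is
   emitted with a NUM_BYTES of MAX (2, 64 / 8) == 8; the SOM linker then
   rounds the storage and aligns the symbol to 8 bytes, which the bare
   SIZE value alone would not have guaranteed.  */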

/* We can't use .comm for local common storage as the SOM linker effectively
   treats the symbol as universal and uses the same storage for local symbols
   with the same name in different object files.  The .block directive
   reserves an uninitialized block of storage.  However, it's not common
   storage.  Fortunately, GCC never requests common storage with the same
   name in any given translation unit.  */

void
pa_asm_output_aligned_local (FILE *stream,
			     const char *name,
			     unsigned HOST_WIDE_INT size,
			     unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef LOCAL_ASM_OP
  fprintf (stream, "%s", LOCAL_ASM_OP);
  assemble_name (stream, name);
  fprintf (stream, "\n");
#endif

  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}

/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpysub instructions.  */
int
pa_fmpysuboperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the subtraction.  Subtraction is not a
     commutative operation, so operands[4] must be the same as
     operands[3].  */
  if (! rtx_equal_p (operands[3], operands[4]))
    return 0;

  /* multiply cannot feed into subtraction.  */
  if (rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* Inout operand of sub cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpysub.  */
  return 1;
}

/* Return 1 if the given constant is 2, 4, or 8.  These are the valid
   constants for shadd instructions.  */
int
pa_shadd_constant_p (int val)
{
  if (val == 2 || val == 4 || val == 8)
    return 1;
  else
    return 0;
}
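
/* These constants correspond to the sh1add, sh2add and sh3add
   instructions, which shift their first operand left by 1, 2 or 3 bits
   (scaling it by 2, 4 or 8) before the addition.  For example,
   "sh2add %r25,%r26,%r28" computes %r28 = %r25 * 4 + %r26.  */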

/* Return TRUE if INSN branches forward.  */

static bool
forward_branch_p (rtx_insn *insn)
{
  rtx lab = JUMP_LABEL (insn);

  /* The INSN must have a jump label.  */
  gcc_assert (lab != NULL_RTX);

  if (INSN_ADDRESSES_SET_P ())
    return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));

  while (insn)
    {
      if (insn == lab)
	return true;
      else
	insn = NEXT_INSN (insn);
    }

  return false;
}

/* Return 1 if INSN is in the delay slot of a call instruction.  */

int
pa_jump_in_call_delay (rtx_insn *insn)
{
  if (! JUMP_P (insn))
    return 0;

  if (PREV_INSN (insn)
      && PREV_INSN (PREV_INSN (insn))
      && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
    {
      rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));

      return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
	      && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
    }
  else
    return 0;
}

/* Output an unconditional move and branch insn.  */

const char *
pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
{
  int length = get_attr_length (insn);

  /* These are the cases in which we win.  */
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
	{
	  /* Nothing in the delay slot, fake it by putting the combined
	     insn (the copy or add) in the delay slot of a bl.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "b %2\n\tldi %1,%0";
	  else
	    return "b %2\n\tcopy %1,%0";
	}
      else
	{
	  /* Something in the delay slot, but we've got a long branch.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "ldi %1,%0\n\tb %2";
	  else
	    return "copy %1,%0\n\tb %2";
	}
    }

  if (GET_CODE (operands[1]) == CONST_INT)
    output_asm_insn ("ldi %1,%0", operands);
  else
    output_asm_insn ("copy %1,%0", operands);
  return pa_output_lbranch (operands[2], insn, 1);
}

/* Output an unconditional add and branch insn.  */

const char *
pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      /* Nothing in the delay slot, fake it by putting the combined
	 insn (the copy or add) in the delay slot of a bl.  */
      if (dbr_sequence_length () == 0)
	return "b %3\n\tadd%I1 %1,%0,%0";
      /* Something in the delay slot, but we've got a long branch.  */
      else
	return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return pa_output_lbranch (operands[3], insn, 1);
}

/* Return nonzero if INSN (a jump insn) immediately follows a call
   to a named function.  This is used to avoid filling the delay slot
   of the jump since it can usually be eliminated by modifying RP in
   the delay slot of the call.  */

static int
pa_following_call (rtx_insn *insn)
{
  if (! TARGET_JUMP_IN_DELAY)
    return 0;

  /* Find the previous real insn, skipping NOTEs.  */
  insn = PREV_INSN (insn);
  while (insn && NOTE_P (insn))
    insn = PREV_INSN (insn);

  /* Check for CALL_INSNs and millicode calls.  */
  if (insn
      && ((CALL_P (insn)
	   && get_attr_type (insn) != TYPE_DYNCALL)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != SEQUENCE
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && get_attr_type (insn) == TYPE_MILLI)))
    return 1;

  return 0;
}

/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.  */

static void
pa_reorg (void)
{
  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();
}

/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform a FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed location.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is a few insns away).  These are difficult to use due to the
   branch length restrictions.  */
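
/* For example, given the independent pair

     add %r26,%r25,%r26
     b somewhere

   this pass can emit the single insn

     addb,tr %r25,%r26,somewhere

   using the "true" condition so the branch is always taken (assuming
   "somewhere" is within the +-8k reach of addb).  */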

static void
pa_combine_instructions (void)
{
  rtx_insn *anchor;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  rtx_insn *new_rtx = make_insn_raw (par);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
	 Also ignore any special USE insns.  */
      if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
	  || GET_CODE (PATTERN (anchor)) == USE
	  || GET_CODE (PATTERN (anchor)) == CLOBBER)
	continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
	  || anchor_attr == PA_COMBINE_TYPE_FADDSUB
	  || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
	      && ! forward_branch_p (anchor)))
	{
	  rtx_insn *floater;

	  for (floater = PREV_INSN (anchor);
	       floater;
	       floater = PREV_INSN (floater))
	    {
	      if (NOTE_P (floater)
		  || (NONJUMP_INSN_P (floater)
		      && (GET_CODE (PATTERN (floater)) == USE
			  || GET_CODE (PATTERN (floater)) == CLOBBER)))
		continue;

	      /* Anything except a regular INSN will stop our search.  */
	      if (! NONJUMP_INSN_P (floater))
		{
		  floater = NULL;
		  break;
		}

	      /* See if FLOATER is suitable for combination with the
		 anchor.  */
	      floater_attr = get_attr_pa_combine_type (floater);
	      if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		      && floater_attr == PA_COMBINE_TYPE_FMPY))
		{
		  /* If ANCHOR and FLOATER can be combined, then we're
		     done with this pass.  */
		  if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					SET_DEST (PATTERN (floater)),
					XEXP (SET_SRC (PATTERN (floater)), 0),
					XEXP (SET_SRC (PATTERN (floater)), 1)))
		    break;
		}

	      else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
		       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
		{
		  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		  else
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    SET_SRC (PATTERN (floater)),
					    SET_SRC (PATTERN (floater))))
			break;
		    }
		}
	    }

	  /* If we didn't find anything on the backwards scan try forwards.  */
	  if (!floater
	      && (anchor_attr == PA_COMBINE_TYPE_FMPY
		  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
	    {
	      for (floater = anchor; floater; floater = NEXT_INSN (floater))
		{
		  if (NOTE_P (floater)
		      || (NONJUMP_INSN_P (floater)
			  && (GET_CODE (PATTERN (floater)) == USE
			      || GET_CODE (PATTERN (floater)) == CLOBBER)))
		    continue;

		  /* Anything except a regular INSN will stop our search.  */
		  if (! NONJUMP_INSN_P (floater))
		    {
		      floater = NULL;
		      break;
		    }

		  /* See if FLOATER is suitable for combination with the
		     anchor.  */
		  floater_attr = get_attr_pa_combine_type (floater);
		  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
			  && floater_attr == PA_COMBINE_TYPE_FMPY))
		    {
		      /* If ANCHOR and FLOATER can be combined, then we're
			 done with this pass.  */
		      if (pa_can_combine_p (new_rtx, anchor, floater, 1,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		}
	    }

	  /* FLOATER will be nonzero if we found a suitable floating
	     insn for combination with ANCHOR.  */
	  if (floater
	      && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		  || anchor_attr == PA_COMBINE_TYPE_FMPY))
	    {
	      /* Emit the new instruction and delete the old anchor.  */
	      emit_insn_before (gen_rtx_PARALLEL
				(VOIDmode,
				 gen_rtvec (2, PATTERN (anchor),
					    PATTERN (floater))),
				anchor);

	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	  else if (floater
		   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
	    {
	      /* Emit the new_jump instruction and delete the old anchor.  */
	      rtx_insn *temp
		= emit_jump_insn_before (gen_rtx_PARALLEL
					 (VOIDmode,
					  gen_rtvec (2, PATTERN (anchor),
						     PATTERN (floater))),
					 anchor);

	      JUMP_LABEL (temp) = JUMP_LABEL (anchor);
	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	}
    }
}

static int
pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
		  int reversed, rtx dest,
		  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx_insn *start, *end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
  INSN_CODE (new_rtx) = -1;
  insn_code_number = recog_memoized (new_rtx);
  if (insn_code_number < 0
      || (extract_insn (new_rtx), ! constrain_operands (1)))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There's up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}

/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in particular.  */

int
pa_insn_refs_are_delayed (rtx insn)
{
  return ((NONJUMP_INSN_P (insn)
	   && GET_CODE (PATTERN (insn)) != SEQUENCE
	   && GET_CODE (PATTERN (insn)) != USE
	   && GET_CODE (PATTERN (insn)) != CLOBBER
	   && get_attr_type (insn) == TYPE_MILLI));
}

/* Promote the return value, but not the arguments.  */

static enum machine_mode
pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			  enum machine_mode mode,
			  int *punsignedp ATTRIBUTE_UNUSED,
			  const_tree fntype ATTRIBUTE_UNUSED,
			  int for_return)
{
  if (for_return == 0)
    return mode;
  return promote_mode (type, mode, punsignedp);
}

/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else promoting
   return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

static rtx
pa_function_value (const_tree valtype,
		   const_tree func ATTRIBUTE_UNUSED,
		   bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      if (TARGET_64BIT)
	{
	  /* Aggregates with a size less than or equal to 128 bits are
	     returned in GR 28(-29).  They are left justified.  The pad
	     bits are undefined.  Larger aggregates are returned in
	     memory.  */
	  rtx loc[2];
	  int i, offset = 0;
	  int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;

	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, 28 + i),
					  GEN_INT (offset));
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
	}
      else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
	{
	  /* Aggregates 5 to 8 bytes in size are returned in general
	     registers r28-r29 in the same manner as other non
	     floating-point objects.  The data is right-justified and
	     zero-extended to 64 bits.  This is opposite to the normal
	     justification used on big endian targets and requires
	     special treatment.  */
	  rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (DImode, 28), const0_rtx);
	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	}
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}

/* Implement the TARGET_LIBCALL_VALUE hook.  */

static rtx
pa_libcall_value (enum machine_mode mode,
		  const_rtx fun ATTRIBUTE_UNUSED)
{
  if (! TARGET_SOFT_FLOAT
      && (mode == SFmode || mode == DFmode))
    return gen_rtx_REG (mode, 32);
  else
    return gen_rtx_REG (mode, 28);
}

/* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook.  */

static bool
pa_function_value_regno_p (const unsigned int regno)
{
  if (regno == 28
      || (! TARGET_SOFT_FLOAT && regno == 32))
    return true;
  return false;
}

/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int arg_size = FUNCTION_ARG_SIZE (mode, type);

  cum->nargs_prototype--;
  cum->words += (arg_size
		 + ((cum->words & 01)
		    && type != NULL_TREE
		    && arg_size > 1));
}
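
/* For example, with three one-word arguments already consumed
   (cum->words == 3), a two-word argument with a non-null TYPE first
   skips the odd slot: cum->words advances by 2 + 1 == 3, covering the
   padding word plus the argument itself.  */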

/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
static rtx
pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
	return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
	alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
	return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

	 Remember, gprs grow towards smaller register numbers while
	 fprs grow to higher register numbers.  Also remember that
	 although FP regs are 32-bit addressable, we pretend that
	 the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
	 treatment.  */
      if (arg_size > 1
	  || mode == BLKmode
	  || (type && (AGGREGATE_TYPE_P (type)
		       || TREE_CODE (type) == COMPLEX_TYPE
		       || TREE_CODE (type) == VECTOR_TYPE)))
	{
	  /* Double-extended precision (80-bit), quad-precision (128-bit)
	     and aggregates including complex numbers are aligned on
	     128-bit boundaries.  The first eight 64-bit argument slots
	     are associated one-to-one, with general registers r26
	     through r19, and also with floating-point registers fr4
	     through fr11.  Arguments larger than one word are always
	     passed in general registers.

	     Using a PARALLEL with a word mode register results in left
	     justified data on a big-endian target.  */

	  rtx loc[8];
	  int i, offset = 0, ub = arg_size;

	  /* Align the base register.  */
	  gpr_reg_base -= alignment;

	  ub = MIN (ub, max_arg_words - cum->words - alignment);
	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, gpr_reg_base),
					  GEN_INT (offset));
	      gpr_reg_base -= 1;
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
	}
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
	 which registers we must use.  */
      if (arg_size > 1)
	{
	  if (cum->words)
	    {
	      gpr_reg_base = 23;
	      fpr_reg_base = 38;
	    }
	  else
	    {
	      gpr_reg_base = 25;
	      fpr_reg_base = 34;
	    }

	  /* Structures 5 to 8 bytes in size are passed in the general
	     registers in the same manner as other non floating-point
	     objects.  The data is right-justified and zero-extended
	     to 64 bits.  This is opposite to the normal justification
	     used on big endian targets and requires special treatment.
	     We now define BLOCK_REG_PADDING to pad these objects.
	     Aggregates, complex and vector types are passed in the same
	     manner as structures.  */
	  if (mode == BLKmode
	      || (type && (AGGREGATE_TYPE_P (type)
			   || TREE_CODE (type) == COMPLEX_TYPE
			   || TREE_CODE (type) == VECTOR_TYPE)))
	    {
	      rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (DImode, gpr_reg_base),
					   const0_rtx);
	      return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	    }
	}
      else
	{
	  /* We have a single word (32 bits).  A simple computation
	     will get us the register #s we need.  */
	  gpr_reg_base = 26 - cum->words;
	  fpr_reg_base = 32 + 2 * cum->words;
	}
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
	  is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
	  pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
	  registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
	  sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
	 calls with the 32 bit ABI and the HP assembler since there is no
	 way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
	  && !TARGET_GAS
	  && !cum->incoming
	  && cum->indirect
	  && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
	= gen_rtx_PARALLEL
	    (mode,
	     gen_rtvec (2,
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, fpr_reg_base),
					   const0_rtx),
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, gpr_reg_base),
					   const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
	  /* Indirect calls in the normal 32bit ABI require all arguments
	     to be passed in general registers.  */
	  || (!TARGET_PORTABLE_RUNTIME
	      && !TARGET_64BIT
	      && !TARGET_ELF32
	      && cum->indirect)
	  /* If the parameter is not a scalar floating-point parameter,
	     then it belongs in GPRs.  */
	  || GET_MODE_CLASS (mode) != MODE_FLOAT
	  /* Structure with single SFmode field belongs in GPR.  */
	  || (type && AGGREGATE_TYPE_P (type)))
	retval = gen_rtx_REG (mode, gpr_reg_base);
      else
	retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}

/* Arguments larger than one word are double word aligned.  */

static unsigned int
pa_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  bool singleword = (type
		     ? (integer_zerop (TYPE_SIZE (type))
			|| !TREE_CONSTANT (TYPE_SIZE (type))
			|| int_size_in_bytes (type) <= UNITS_PER_WORD)
		     : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);

  return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
}
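
/* For example, a DImode argument in the 32-bit runtime is larger than
   UNITS_PER_WORD (4 bytes), so it is double word aligned
   (MAX_PARM_BOUNDARY), while an SImode argument only requires
   PARM_BOUNDARY alignment.  */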

/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
		      tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
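
/* For example, in the 64-bit runtime with six argument words already
   used (cum->words == 6), a four-word argument neither fits fully in
   registers (6 + 4 > 8) nor lies fully on the stack (6 < 8), so
   (8 - 6 - 0) * UNITS_PER_WORD == 16 bytes are passed in registers and
   the remaining 16 bytes go on the stack.  */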

/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
	{
	  /* We only want to emit a .nsubspa directive once at the
	     start of the function.  */
	  cfun->machine->in_nsubspa = 1;

	  /* Create a new subspace for the text.  This provides
	     better stub placement and one-only functions.  */
	  if (cfun->decl
	      && DECL_ONE_ONLY (cfun->decl)
	      && !DECL_WEAK (cfun->decl))
	    {
	      output_section_asm_op ("\t.SPACE $TEXT$\n"
				     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
				     "ACCESS=44,SORT=24,COMDAT");
	      return;
	    }
	}
      else
	{
	  /* There isn't a current function or the body of the current
	     function has been completed.  So, we are changing to the
	     text section to output debugging information.  Thus, we
	     need to forget that we are in the text section so that
	     varasm.c will call us when text_section is selected again.  */
	  gcc_assert (!cfun || !cfun->machine
		      || cfun->machine->in_nsubspa == 2);
	  in_section = NULL;
	}
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}

/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}

/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
			   "\t.SPACE $TEXT$\n"
			   "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
			   "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
			   som_output_comdat_data_section_asm_op,
			   "\t.SPACE $PRIVATE$\n"
			   "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
			   "ACCESS=31,SORT=24,COMDAT");

  if (flag_tm)
    som_tm_clone_table_section
      = get_unnamed_section (0, output_section_asm_op,
			     "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}

/* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION.  */

static section *
pa_som_tm_clone_table_section (void)
{
  return som_tm_clone_table_section;
}

/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
		   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
	  || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
	  && DECL_ONE_ONLY (exp)
	  && !DECL_WEAK (exp))
	return som_one_only_readonly_data_section;
      else
	return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
	   && TREE_CODE (exp) == VAR_DECL
	   && DECL_ONE_ONLY (exp)
	   && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}

static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
	  || int_size_in_bytes (type) <= 0);
}
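
/* For example, in the 32-bit runtime a 12-byte structure (larger than
   8 bytes) is returned in memory, while in the 64-bit runtime the same
   structure fits the 16-byte limit and is returned in registers.  A
   variable-sized object (int_size_in_bytes == -1) is always returned
   in memory.  */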

/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */

/* Vector of extern_symbol pointers.  */
static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;

#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  gcc_assert (file == asm_out_file);
  extern_symbol p = {decl, name};
  vec_safe_push (extern_symbols, p);
}

/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
	  && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
	ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  vec_free (extern_symbols);
}
#endif

/* Return true if a change from mode FROM to mode TO for a register
   in register class RCLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
			     enum reg_class rclass)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}

/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tieing these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}

/* Length in units of the trampoline instruction code.  */

#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))


/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
   and then branches to the specified routine.

   This code template is copied from text segment to stack location
   and then patched with pa_trampoline_init to contain valid values,
   and then entered as a subroutine.

   It is best to keep this as small as possible to avoid having to
   flush multiple lines in the cache.  */

static void
pa_asm_trampoline_template (FILE *f)
{
  if (!TARGET_64BIT)
    {
      fputs ("\tldw 36(%r22),%r21\n", f);
      fputs ("\tbb,>=,n %r21,30,.+16\n", f);
      if (ASSEMBLER_DIALECT == 0)
	fputs ("\tdepi 0,31,2,%r21\n", f);
      else
	fputs ("\tdepwi 0,31,2,%r21\n", f);
      fputs ("\tldw 4(%r21),%r19\n", f);
      fputs ("\tldw 0(%r21),%r21\n", f);
      if (TARGET_PA_20)
	{
	  fputs ("\tbve (%r21)\n", f);
	  fputs ("\tldw 40(%r22),%r29\n", f);
	  fputs ("\t.word 0\n", f);
	  fputs ("\t.word 0\n", f);
	}
      else
	{
	  fputs ("\tldsid (%r21),%r1\n", f);
	  fputs ("\tmtsp %r1,%sr0\n", f);
	  fputs ("\tbe 0(%sr0,%r21)\n", f);
	  fputs ("\tldw 40(%r22),%r29\n", f);
	}
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
    }
  else
    {
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\tmfia %r31\n", f);
      fputs ("\tldd 24(%r31),%r1\n", f);
      fputs ("\tldd 24(%r1),%r27\n", f);
      fputs ("\tldd 16(%r1),%r1\n", f);
      fputs ("\tbve (%r1)\n", f);
      fputs ("\tldd 32(%r31),%r31\n", f);
      fputs ("\t.dword 0  ; fptr\n", f);
      fputs ("\t.dword 0  ; static link\n", f);
    }
}

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 36.
   Move the static chain value to trampoline template at offset 40.
   Move the trampoline address to trampoline template at offset 44.
   Move r19 to trampoline template at offset 48.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */

static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 36);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 40);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 44);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_andsi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
							    r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }

#ifdef HAVE_ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
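
/* To illustrate the cache line alignment above, assume
   MIN_CACHELINE_SIZE is 32 and the trampoline starts at address
   0x1234.  Then

     start_addr = 0x1234 & -32 == 0x1220,

   and the end address is computed from the last code byte, so the
   fdc/fic flushes begin on a cache line boundary and no line
   containing trampoline code is missed.  */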

/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 44.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
  return addr;
}

static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));

  return x;
}

static rtx
pa_internal_arg_pointer (void)
{
  /* The argument pointer and the hard frame pointer are the same in
     the 32-bit runtime, so we don't need a copy.  */
  if (TARGET_64BIT)
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}

/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.  */

static bool
pa_can_eliminate (const int from, const int to)
{
  /* The argument cannot be eliminated in the 64-bit runtime.  */
  if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
    return false;

  return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : true);
}

/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
pa_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
      && to == STACK_POINTER_REGNUM)
    offset = -pa_compute_frame_size (get_frame_size (), 0);
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}

static void
pa_conditional_register_usage (void)
{
  int i;

  if (!TARGET_64BIT && !TARGET_PA_11)
    {
      for (i = 56; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
      for (i = 33; i < 56; i += 2)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
    {
      for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}

/* Target hook for c_mode_for_suffix.  */

static enum machine_mode
pa_c_mode_for_suffix (char suffix)
{
  if (HPUX_LONG_DOUBLE_LIBRARY && suffix == 'q')
    return TFmode;

  return VOIDmode;
}

/* Target hook for function_section.  */

static section *
pa_function_section (tree decl, enum node_frequency freq,
		     bool startup, bool exit)
{
  /* Put functions in text section if target doesn't have named sections.  */
  if (!targetm_common.have_named_sections)
    return text_section;

  /* Force nested functions into the same section as the containing
     function.  */
  if (decl
      && DECL_SECTION_NAME (decl) == NULL
      && DECL_CONTEXT (decl) != NULL_TREE
      && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
      && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
    return function_section (DECL_CONTEXT (decl));

  /* Otherwise, use the default function section.  */
  return default_function_section (decl, freq, startup, exit);
}

/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   In 64-bit mode, we reject CONST_DOUBLES.  We also reject CONST_INTS
   that need more than three instructions to load prior to reload.  This
   limit is somewhat arbitrary.  It takes three instructions to load a
   CONST_INT from memory but two are memory accesses.  It may be better
   to increase the allowed range for CONST_INTS.  We may also be able
   to handle CONST_DOUBLES.  */

static bool
pa_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
    return false;

  if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
    return false;

  /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
     legitimate constants.  The other variants can't be handled by
     the move patterns after reload starts.  */
  if (tls_referenced_p (x))
    return false;

  if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (TARGET_64BIT
      && HOST_BITS_PER_WIDE_INT > 32
      && GET_CODE (x) == CONST_INT
      && !reload_in_progress
      && !reload_completed
      && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
      && !pa_cint_ok_for_move (INTVAL (x)))
    return false;

  if (function_label_operand (x, mode))
    return false;

  return true;
}

/* Implement TARGET_SECTION_TYPE_FLAGS.  */

static unsigned int
pa_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags;

  flags = default_section_type_flags (decl, name, reloc);

  /* Function labels are placed in the constant pool.  This can
     cause a section conflict if decls are put in ".data.rel.ro"
     or ".data.rel.ro.local" using the __attribute__ construct.  */
  if (strcmp (name, ".data.rel.ro") == 0
      || strcmp (name, ".data.rel.ro.local") == 0)
    flags |= SECTION_WRITE | SECTION_RELRO;

  return flags;
}
/* pa_legitimate_address_p recognizes an RTL expression that is a
   valid memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
   REG+REG, and REG+(REG*SCALE).  The indexed address forms are only
   available with floating point loads and stores, and integer loads.
   We get better code by allowing indexed addresses in the initial
   RTL generation.

   The acceptance of indexed addresses as legitimate implies that we
   must provide patterns for doing indexed integer stores, or the move
   expanders must force the address of an indexed store to a register.
   We have adopted the latter approach.

   Another function of pa_legitimate_address_p is to ensure that
   the base register is a valid pointer for indexed instructions.
   On targets that have non-equivalent space registers, we have to
   know at the time of assembler output which register in a REG+REG
   pair is the base register.  The REG_POINTER flag is sometimes lost
   in reload and the following passes, so it can't be relied on during
   code generation.  Thus, we either have to canonicalize the order
   of the registers in REG+REG indexed addresses, or treat REG+REG
   addresses separately and provide patterns for both permutations.

   The latter approach requires several hundred additional lines of
   code in pa.md.  The downside to canonicalizing is that a PLUS
   in the wrong order can't combine to form a scaled indexed memory
   operand.  As we won't need to canonicalize the operands if the
   REG_POINTER lossage can be fixed, it seems better to canonicalize.

   We initially break out scaled indexed addresses in canonical order
   in pa_emit_move_sequence.  LEGITIMIZE_ADDRESS also canonicalizes
   scaled indexed addresses during RTL generation.  However, fold_rtx
   has its own opinion on how the operands of a PLUS should be ordered.
   If one of the operands is equivalent to a constant, it will make
   that operand the second operand.  As the base register is likely to
   be equivalent to a SYMBOL_REF, we have made it the second operand.

   pa_legitimate_address_p accepts REG+REG as legitimate when the
   operands are in the order INDEX+BASE on targets with non-equivalent
   space registers, and in any order on targets with equivalent space
   registers.  It accepts both MULT+BASE and BASE+MULT for scaled indexing.

   We treat a SYMBOL_REF as legitimate if it is part of the current
   function's constant pool, because such addresses can actually be
   output as REG+SMALLINT.  */
static bool
pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  if ((REG_P (x)
       && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
		  : REG_OK_FOR_BASE_P (x)))
      || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
	   || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
	  && REG_P (XEXP (x, 0))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
		     : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
    return true;

  if (GET_CODE (x) == PLUS)
    {
      rtx base = 0, index = 0;

      /* For REG+REG, the base register should be in XEXP (x, 1),
	 so check it first.  */
      if (REG_P (XEXP (x, 1))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
		     : REG_OK_FOR_BASE_P (XEXP (x, 1))))
	base = XEXP (x, 1), index = XEXP (x, 0);
      else if (REG_P (XEXP (x, 0))
	       && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
			  : REG_OK_FOR_BASE_P (XEXP (x, 0))))
	base = XEXP (x, 0), index = XEXP (x, 1);
      else
	return false;

      if (GET_CODE (index) == CONST_INT)
	{
	  if (INT_5_BITS (index))
	    return true;

	  /* When INT14_OK_STRICT is false, a secondary reload is needed
	     to adjust the displacement of SImode and DImode floating point
	     instructions but this may fail when the register also needs
	     reloading.  So, we return false when STRICT is true.  We
	     also reject long displacements for float mode addresses since
	     the majority of accesses will use floating point instructions
	     that don't support 14-bit offsets.  */
	  if (!INT14_OK_STRICT
	      && (strict || !(reload_in_progress || reload_completed))
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  return base14_operand (index, mode);
	}

      if (!TARGET_DISABLE_INDEXING
	  /* Only accept the "canonical" INDEX+BASE operand order
	     on targets with non-equivalent space registers.  */
	  && (TARGET_NO_SPACE_REGS
	      ? REG_P (index)
	      : (base == XEXP (x, 1) && REG_P (index)
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (base))
		     || REG_POINTER (base))
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (index))
		     || !REG_POINTER (index))))
	  && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
		     : REG_OK_FOR_INDEX_P (index))
	  && borx_reg_operand (base, Pmode)
	  && borx_reg_operand (index, Pmode))
	return true;

      if (!TARGET_DISABLE_INDEXING
	  && GET_CODE (index) == MULT
	  && MODE_OK_FOR_SCALED_INDEXING_P (mode)
	  && REG_P (XEXP (index, 0))
	  && GET_MODE (XEXP (index, 0)) == Pmode
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
		     : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
	  && GET_CODE (XEXP (index, 1)) == CONST_INT
	  && INTVAL (XEXP (index, 1))
	     == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
	  && borx_reg_operand (base, Pmode))
	return true;

      return false;
    }

  if (GET_CODE (x) == LO_SUM)
    {
      rtx y = XEXP (x, 0);

      if (GET_CODE (y) == SUBREG)
	y = SUBREG_REG (y);

      if (REG_P (y)
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
		     : REG_OK_FOR_BASE_P (y)))
	{
	  /* Needed for -fPIC */
	  if (mode == Pmode
	      && GET_CODE (XEXP (x, 1)) == UNSPEC)
	    return true;

	  if (!INT14_OK_STRICT
	      && (strict || !(reload_in_progress || reload_completed))
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  if (CONSTANT_P (XEXP (x, 1)))
	    return true;
	}
      return false;
    }

  if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
    return true;

  return false;
}
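/* For reference, RTL sketches of address forms the predicate above
   accepts (register numbers are hypothetical):

	(reg/f:SI 3)					REG
	(plus:SI (reg/f:SI 3) (const_int 12))		REG+SMALLINT
	(plus:SI (reg:SI 4) (reg/f:SI 3))		INDEX+BASE, unscaled
	(plus:SI (mult:SI (reg:SI 4) (const_int 4))
		 (reg/f:SI 3))				scaled index, SImode
	(lo_sum:SI (reg:SI 5) (symbol_ref:SI ("x")))	LO_SUM after a
							high-part load  */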
/* Look for machine dependent ways to make the invalid address AD a
   valid address.

   For the PA, transform:

	memory (X + <large int>)

   into:

	if ((<large int> & mask) >= (mask + 1) / 2)
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This makes reload inheritance and reload_cse work better since Z
   can be reused.

   There may be more opportunities to improve code with this hook.  */
rtx
pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
			      int opnum, int type,
			      int ind_levels ATTRIBUTE_UNUSED)
{
  long offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
				      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;

  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP ((new_rtx), 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~mask) + mask + 1;
      else
	newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
	  && (GET_MODE_CLASS (mode) == MODE_FLOAT
	      || (TARGET_64BIT && (mode) == DImode)))
	newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
	{
	  temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
			       GEN_INT (newoffset));
	  ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
	  push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  return ad;
	}
    }

  return NULL_RTX;
}
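/* A worked example of the rounding above, assuming a floating-point
   access with mask = 0x1f and offset = 0x1234: offset & mask is 0x14,
   which is >= 0x10 (halfway), so we round up to
   newoffset = (0x1234 & ~0x1f) + 0x20 = 0x1240.  The address is then
   rewritten as (X + 0x1240) + (-0xc); the short displacement -0xc
   fits the floating-point load/store format, and the base
   (X + 0x1240) can be inherited by later reloads.  */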
/* Output address vector.  */

void
pa_output_addr_vec (rtx lab, rtx body)
{
  int idx, vlen = XVECLEN (body, 0);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
	(asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
/* Output address difference vector.  */

void
pa_output_addr_diff_vec (rtx lab, rtx body)
{
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
	(asm_out_file,
	 body,
	 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
	 CODE_LABEL_NUMBER (base));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}
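/* For reference, the branch-table scaffolding emitted by the two
   routines above looks roughly like this under GAS (label numbers and
   the per-element directive are illustrative):

	L$0042:
		.begin_brtab
		.word L$0045
		.word L$0046
		.word L$0047
		.end_brtab

   The .begin_brtab/.end_brtab directives bracket the table so that
   the assembler and linker can recognize it as a branch table
   embedded in the text section.  */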