/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
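
/* A hypothetical illustration (not from the original source): the bypass
   applies to a pair such as

     fcpy,dbl fr5,fr6      ; OUT_INSN produces a 64-bit FP result
     fstd fr6,0(r20)       ; IN_INSN stores a value of the same width

   because both modes are 8 bytes wide; a 32-bit producer feeding a
   64-bit store would fail the size comparison above.  */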

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
			    const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					enum machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
						   enum machine_mode, int *,
						   const_tree, int);
static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
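
/* Usage sketch (hypothetical command line, not part of the original
   source): compiling with -mfixed-range=fr4-fr31 marks fr4 through
   fr31 as fixed and call-used; since that fixes every FP register,
   the loop above then sets MASK_DISABLE_FPREGS and the compiler stops
   allocating FP registers entirely.  */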

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  VEC(cl_deferred_option,heap) *vec
    = (VEC(cl_deferred_option,heap) *) pa_deferred_options;

  FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
    {
      switch (opt->opt_index)
	{
	case OPT_mfixed_range_:
	  fix_range (opt->arg);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
	  && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}
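
/* Worked examples (illustrative only): 0x1fff passes because it fits
   the signed 14-bit ldo immediate; 0x12345000 passes because its low
   11 bits are zero, so ldil can build it; 0x7c0 passes via zdepi; a
   value like 0x12345678 fails all three tests and needs a multi-insn
   sequence.  */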

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
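
/* Illustrative values (not from the original code): 0x12345000 is
   accepted (low 11 bits clear, bit 31 clear), as is -2048, whose low
   11 bits are clear and whose sign bits are consistent from bit 31
   up; 0x1234 is rejected because its low 11 bits are nonzero.  */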

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
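
/* A hypothetical trace of the test above for x = 0x7c0 (five ones
   starting at bit 6): lsb_mask = 0x40, so t = (0x7c + 0x40) & ~0x3f
   = 0x80, a power of two, and the value is accepted -- zdepi can
   deposit the sign-extended 5-bit value -1 at that position.  */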

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
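
/* Example masks (illustrative): pa_and_mask_p accepts 0x0000ffff
   (a single low run of ones) but rejects 0x00ff00ff, whose two
   separated runs leave more than one bit set after the add trick;
   pa_ior_mask_p accepts any single contiguous run such as 0x000000ff
   or 0x0000ff00 and rejects split patterns like 0x101.  */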

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
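
/* Roughly, for a simple data symbol this produces a sequence along
   the lines of (illustrative only; register numbers are arbitrary):

     addil LT'sym,%r19      ; tmp_reg = PIC reg + %hi of the DLT slot
     ldw RT'sym(%r1),%r9    ; reg = sym's address, loaded from the DLT

   i.e. the SYMBOL_REF is resolved indirectly through a DLT entry
   addressed relative to the PIC register, never referenced directly.  */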

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
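
/* For instance (a hypothetical sketch): a reference to a __thread
   variable under TLS_MODEL_INITIAL_EXEC expands to a thread-pointer
   load via gen_tp_load, a tie_load of the variable's offset, and an
   add, while TLS_MODEL_LOCAL_EXEC folds the known offset directly
   against the thread pointer with a single tle_load.  */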

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
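
/* A worked example of the transformation above (illustrative only):
   for memory (X + 0x4010) in a MODE_FLOAT reference with mask 0x1f,
   (0x4010 & 0x1f) = 0x10 >= 16, so Y = (0x4010 & ~0x1f) + 0x20 =
   0x4020; we emit Z = X + 0x4020 and rewrite the access as
   memory (Z + (-0x10)), whose displacement fits the 5-bit form.  */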

static rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
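
/* As a concrete illustration (not from the original source), an
   address such as (plus (mult (reg A) (const_int 8)) (reg B)) for a
   double-word array element is kept in a form that matches the
   shadd family, e.g.

     sh3add %rA,%rB,%rT    ; %rT = (%rA << 3) + %rB

   which is why 2, 4 and 8 are the shadd constants tested above.  */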

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
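
/* Example costs as returned above: any move out of SHIFT_REGS (SAR)
   costs 0x100, so the allocator avoids it except when unavoidable;
   moves between FP and non-FP classes cost 16 because they bounce
   through memory; an ordinary GR-to-GR or FR-to-FR copy costs 2.  */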

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case SYMBOL_REF:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;

      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
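
/* A hypothetical costing example: a DImode MULT on a 32-bit target
   has factor = 8/4 = 2, so it is priced at 2*2*COSTS_N_INSNS (8)
   when the FP multiplier is usable, or 2*2*COSTS_N_INSNS (20) when
   restricted to the soft multiply path.  */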

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return pa_tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change TARGET_PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      pa_emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     operand0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      /* Legitimize TLS symbol references.  This happens for references
	 that aren't a legitimate constant.  */
      if (PA_SYMBOL_REF_TLS_P (operand1))
	operand1 = legitimize_tls_address (operand1);

      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && pa_cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  */
	      if (decl)
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  type = strip_array_types (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, VOIDmode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0),
					  VOIDmode))
	      || function_label_operand (operand1, VOIDmode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      pa_emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      pa_emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;
1994 if (reload_in_progress
|| reload_completed
)
1996 temp
= scratch_reg
? scratch_reg
: operand0
;
1997 /* TEMP will hold an address and maybe the actual
1998 data. We want it in WORD_MODE regardless of what mode it
1999 was originally given to us. */
2000 temp
= force_mode (word_mode
, temp
);
2003 temp
= gen_reg_rtx (Pmode
);
2005 /* (const (plus (symbol) (const_int))) must be forced to
2006 memory during/after reload if the const_int will not fit
2008 if (GET_CODE (operand1
) == CONST
2009 && GET_CODE (XEXP (operand1
, 0)) == PLUS
2010 && GET_CODE (XEXP (XEXP (operand1
, 0), 1)) == CONST_INT
2011 && !INT_14_BITS (XEXP (XEXP (operand1
, 0), 1))
2012 && (reload_completed
|| reload_in_progress
)
2015 rtx const_mem
= force_const_mem (mode
, operand1
);
2016 operands
[1] = legitimize_pic_address (XEXP (const_mem
, 0),
2018 operands
[1] = replace_equiv_address (const_mem
, operands
[1]);
2019 pa_emit_move_sequence (operands
, mode
, temp
);
2023 operands
[1] = legitimize_pic_address (operand1
, mode
, temp
);
2024 if (REG_P (operand0
) && REG_P (operands
[1]))
2025 copy_reg_pointer (operand0
, operands
[1]);
2026 emit_insn (gen_rtx_SET (VOIDmode
, operand0
, operands
[1]));
2029 /* On the HPPA, references to data space are supposed to use dp,
2030 register 27, but showing it in the RTL inhibits various cse
2031 and loop optimizations. */
2036 if (reload_in_progress
|| reload_completed
)
2038 temp
= scratch_reg
? scratch_reg
: operand0
;
2039 /* TEMP will hold an address and maybe the actual
2040 data. We want it in WORD_MODE regardless of what mode it
2041 was originally given to us. */
2042 temp
= force_mode (word_mode
, temp
);
2045 temp
= gen_reg_rtx (mode
);
2047 /* Loading a SYMBOL_REF into a register makes that register
2048 safe to be used as the base in an indexed address.
2050 Don't mark hard registers though. That loses. */
2051 if (GET_CODE (operand0
) == REG
2052 && REGNO (operand0
) >= FIRST_PSEUDO_REGISTER
)
2053 mark_reg_pointer (operand0
, BITS_PER_UNIT
);
2054 if (REGNO (temp
) >= FIRST_PSEUDO_REGISTER
)
2055 mark_reg_pointer (temp
, BITS_PER_UNIT
);
2058 set
= gen_rtx_SET (mode
, operand0
, temp
);
2060 set
= gen_rtx_SET (VOIDmode
,
2062 gen_rtx_LO_SUM (mode
, temp
, operand1
));
2064 emit_insn (gen_rtx_SET (VOIDmode
,
2066 gen_rtx_HIGH (mode
, operand1
)));
2072 else if (pa_tls_referenced_p (operand1
))
2077 if (GET_CODE (tmp
) == CONST
&& GET_CODE (XEXP (tmp
, 0)) == PLUS
)
2079 addend
= XEXP (XEXP (tmp
, 0), 1);
2080 tmp
= XEXP (XEXP (tmp
, 0), 0);
2083 gcc_assert (GET_CODE (tmp
) == SYMBOL_REF
);
2084 tmp
= legitimize_tls_address (tmp
);
2087 tmp
= gen_rtx_PLUS (mode
, tmp
, addend
);
2088 tmp
= force_operand (tmp
, operands
[0]);
2092 else if (GET_CODE (operand1
) != CONST_INT
2093 || !pa_cint_ok_for_move (INTVAL (operand1
)))
2097 HOST_WIDE_INT value
= 0;
2098 HOST_WIDE_INT insv
= 0;
2101 if (GET_CODE (operand1
) == CONST_INT
)
2102 value
= INTVAL (operand1
);
2105 && GET_CODE (operand1
) == CONST_INT
2106 && HOST_BITS_PER_WIDE_INT
> 32
2107 && GET_MODE_BITSIZE (GET_MODE (operand0
)) > 32)
2111 /* Extract the low order 32 bits of the value and sign extend.
2112 If the new value is the same as the original value, we can
2113 can use the original value as-is. If the new value is
2114 different, we use it and insert the most-significant 32-bits
2115 of the original value into the final result. */
2116 nval
= ((value
& (((HOST_WIDE_INT
) 2 << 31) - 1))
2117 ^ ((HOST_WIDE_INT
) 1 << 31)) - ((HOST_WIDE_INT
) 1 << 31);
2120 #if HOST_BITS_PER_WIDE_INT > 32
2121 insv
= value
>= 0 ? value
>> 32 : ~(~value
>> 32);
2125 operand1
= GEN_INT (nval
);
2129 if (reload_in_progress
|| reload_completed
)
2130 temp
= scratch_reg
? scratch_reg
: operand0
;
2132 temp
= gen_reg_rtx (mode
);
2134 /* We don't directly split DImode constants on 32-bit targets
2135 because PLUS uses an 11-bit immediate and the insn sequence
2136 generated is not as efficient as the one using HIGH/LO_SUM. */
2137 if (GET_CODE (operand1
) == CONST_INT
2138 && GET_MODE_BITSIZE (mode
) <= BITS_PER_WORD
2139 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2142 /* Directly break constant into high and low parts. This
2143 provides better optimization opportunities because various
2144 passes recognize constants split with PLUS but not LO_SUM.
2145 We use a 14-bit signed low part except when the addition
2146 of 0x4000 to the high part might change the sign of the
2148 HOST_WIDE_INT low
= value
& 0x3fff;
2149 HOST_WIDE_INT high
= value
& ~ 0x3fff;
2153 if (high
== 0x7fffc000 || (mode
== HImode
&& high
== 0x4000))
2161 emit_insn (gen_rtx_SET (VOIDmode
, temp
, GEN_INT (high
)));
2162 operands
[1] = gen_rtx_PLUS (mode
, temp
, GEN_INT (low
));
2166 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
2167 gen_rtx_HIGH (mode
, operand1
)));
2168 operands
[1] = gen_rtx_LO_SUM (mode
, temp
, operand1
);
2171 insn
= emit_move_insn (operands
[0], operands
[1]);
2173 /* Now insert the most significant 32 bits of the value
2174 into the register. When we don't have a second register
2175 available, it could take up to nine instructions to load
2176 a 64-bit integer constant. Prior to reload, we force
2177 constants that would take more than three instructions
2178 to load to the constant pool. During and after reload,
2179 we have to handle all possible values. */
2182 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2183 register and the value to be inserted is outside the
2184 range that can be loaded with three depdi instructions. */
2185 if (temp
!= operand0
&& (insv
>= 16384 || insv
< -16384))
2187 operand1
= GEN_INT (insv
);
2189 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
2190 gen_rtx_HIGH (mode
, operand1
)));
2191 emit_move_insn (temp
, gen_rtx_LO_SUM (mode
, temp
, operand1
));
2192 emit_insn (gen_insv (operand0
, GEN_INT (32),
2197 int len
= 5, pos
= 27;
2199 /* Insert the bits using the depdi instruction. */
2202 HOST_WIDE_INT v5
= ((insv
& 31) ^ 16) - 16;
2203 HOST_WIDE_INT sign
= v5
< 0;
2205 /* Left extend the insertion. */
2206 insv
= (insv
>= 0 ? insv
>> len
: ~(~insv
>> len
));
2207 while (pos
> 0 && (insv
& 1) == sign
)
2209 insv
= (insv
>= 0 ? insv
>> 1 : ~(~insv
>> 1));
2214 emit_insn (gen_insv (operand0
, GEN_INT (len
),
2215 GEN_INT (pos
), GEN_INT (v5
)));
2217 len
= pos
> 0 && pos
< 5 ? pos
: 5;
2223 set_unique_reg_note (insn
, REG_EQUAL
, op1
);
2228 /* Now have insn-emit do whatever it normally does. */
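
/* Illustrative sketch (guarded out, not part of the port): the 14-bit
   high/low constant split used above, as a standalone function.  When the
   low field would read as negative (>= 0x2000), the high part is bumped
   by 0x4000 so that high + low still reconstructs VALUE; the sign-overflow
   special cases handled above (0x7fffc000 and HImode) are omitted here.  */
#if 0
static void
split_high_low_14bit (long value, long *high, long *low)
{
  *low = value & 0x3fff;
  *high = value & ~0x3fff;
  if (*low >= 0x2000)
    *high += 0x4000;		/* Compensate for the negative low part.  */
  *low = value - *high;		/* Signed 14-bit value in [-0x2000, 0x2000).  */
}
#endif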
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
pa_reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
      break;

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
	tree value;
	unsigned HOST_WIDE_INT ix;

	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
	  if (value)
	    reloc |= pa_reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */

const char *
pa_singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
	 bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
	return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
	return "ldil L'%1,%0";
      else if (pa_zdepi_cint_p (intval))
	return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
	return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32 - lsb; len++)
	{
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
	    break;
	}

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len, maxlen;

  maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < maxlen; lsb++)
    {
      if ((imm & 1) != 0)
	break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < maxlen - lsb; len++)
	{
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
	    break;
	}

      /* Extend length if host is narrow and IMM is negative.  */
      if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
	len += 32;

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb;
  op[2] = len;
}
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

const char *
pa_output_move_double (rtx *operands)
{
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
    {
      if (FP_REG_P (operands[0]))
	{
	  output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
	  output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
	  return "{fldds|fldd} -16(%%sp),%0";
	}
      else
	{
	  output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
	  output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
	  return "{ldws|ldw} -12(%%sp),%R0";
	}
    }

  /* Handle auto decrementing and incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do it better when we teach
     this port about the general inc/dec addressing of PA.
     (This was written by tege.  Chide him if it doesn't work.)  */

  if (optype0 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to
	     save a register file writeback.)  */
	  if (GET_CODE (addr) == POST_INC)
	    return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
	  return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to save a
	     register file writeback.)  */
	  if (GET_CODE (addr) == PRE_INC)
	    return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
	  return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
	}
    }
  if (optype1 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback.)  */
	      if (GET_CODE (addr) == POST_INC)
		return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
	      return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == POST_INC)
		return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
	      return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback.)  */
	      if (GET_CODE (addr) == PRE_INC)
		return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
	      return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == PRE_INC)
		return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
	      return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PLUS
	       && GET_CODE (XEXP (addr, 0)) == MULT)
	{
	  rtx xoperands[4];
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      xoperands[0] = high_reg;
	      xoperands[1] = XEXP (addr, 1);
	      xoperands[2] = XEXP (XEXP (addr, 0), 0);
	      xoperands[3] = XEXP (XEXP (addr, 0), 1);
	      output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
			       xoperands);
	      return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
	    }
	  else
	    {
	      xoperands[0] = high_reg;
	      xoperands[1] = XEXP (addr, 1);
	      xoperands[2] = XEXP (XEXP (addr, 0), 0);
	      xoperands[3] = XEXP (XEXP (addr, 0), 1);
	      output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
			       xoperands);
	      return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
	    }
	}
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address (operands[0], SImode, 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

	mem -> register where the first half of the destination register
	is the same register used in the memory's address.  Reload
	can create such insns.

	mem in this case will be either register indirect or register
	indirect plus a valid offset.

	register -> register move where REGNO(dst) == REGNO(src + 1)
	someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
			    operands[1], 0))
    {
      /* Do the late half first.  */
      if (addreg1)
	output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (pa_singlemove_string (latehalf), latehalf);

      /* Then clobber.  */
      if (addreg1)
	output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return pa_singlemove_string (operands);
    }

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    {
      output_asm_insn (pa_singlemove_string (latehalf), latehalf);
      return pa_singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (pa_singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (pa_singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo -4(%0),%0", &addreg1);

  return "";
}
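
/* Illustrative sketch (guarded out): the clobber-ordering rule above in
   miniature.  When moving a register pair {src, src+1} to {dst, dst+1}
   with dst == src + 1, copying the low word first would overwrite the
   high source word, so the late half must go first.  */
#if 0
static void
move_pair (unsigned *regs, int dst, int src)
{
  if (dst == src + 1)
    {
      regs[dst + 1] = regs[src + 1];	/* Late half first...  */
      regs[dst] = regs[src];		/* ...then the low word.  */
    }
  else
    {
      regs[dst] = regs[src];		/* Normal case: low word first.  */
      regs[dst + 1] = regs[src + 1];
    }
}
#endif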
const char *
pa_output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
	  || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
	output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
	output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
	 arbitrary address here including pre/post increment/decrement.

	 So avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}
/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT.
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

const char *
pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[5]);
  unsigned long n_bytes = INTVAL (operands[4]);

  /* We can't move more than a word at a time because the PA
     has no integer move insns longer than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
    case 8:
      /* Pre-adjust the loop counter.  */
      operands[4] = GEN_INT (n_bytes - 16);
      output_asm_insn ("ldi %4,%2", operands);

      /* Copying loop.  */
      output_asm_insn ("ldd,ma 8(%1),%3", operands);
      output_asm_insn ("ldd,ma 8(%1),%6", operands);
      output_asm_insn ("std,ma %3,8(%0)", operands);
      output_asm_insn ("addib,>= -16,%2,.-12", operands);
      output_asm_insn ("std,ma %6,8(%0)", operands);

      /* Handle the residual.  There could be up to 7 bytes of
	 residual to copy!  */
      if (n_bytes % 16 != 0)
	{
	  operands[4] = GEN_INT (n_bytes % 8);
	  if (n_bytes % 16 >= 8)
	    output_asm_insn ("ldd,ma 8(%1),%3", operands);
	  if (n_bytes % 8 != 0)
	    output_asm_insn ("ldd 0(%1),%6", operands);
	  if (n_bytes % 16 >= 8)
	    output_asm_insn ("std,ma %3,8(%0)", operands);
	  if (n_bytes % 8 != 0)
	    output_asm_insn ("stdby,e %6,%4(%0)", operands);
	}
      return "";

    case 4:
      /* Pre-adjust the loop counter.  */
      operands[4] = GEN_INT (n_bytes - 8);
      output_asm_insn ("ldi %4,%2", operands);

      /* Copying loop.  */
      output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
      output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
      output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
      output_asm_insn ("addib,>= -8,%2,.-12", operands);
      output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

      /* Handle the residual.  There could be up to 7 bytes of
	 residual to copy!  */
      if (n_bytes % 8 != 0)
	{
	  operands[4] = GEN_INT (n_bytes % 4);
	  if (n_bytes % 8 >= 4)
	    output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	  if (n_bytes % 4 != 0)
	    output_asm_insn ("ldw 0(%1),%6", operands);
	  if (n_bytes % 8 >= 4)
	    output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	  if (n_bytes % 4 != 0)
	    output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
	}
      return "";

    case 2:
      /* Pre-adjust the loop counter.  */
      operands[4] = GEN_INT (n_bytes - 4);
      output_asm_insn ("ldi %4,%2", operands);

      /* Copying loop.  */
      output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
      output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
      output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
      output_asm_insn ("addib,>= -4,%2,.-12", operands);
      output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

      /* Handle the residual.  */
      if (n_bytes % 4 != 0)
	{
	  if (n_bytes % 4 >= 2)
	    output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	  if (n_bytes % 2 != 0)
	    output_asm_insn ("ldb 0(%1),%6", operands);
	  if (n_bytes % 4 >= 2)
	    output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	  if (n_bytes % 2 != 0)
	    output_asm_insn ("stb %6,0(%0)", operands);
	}
      return "";

    case 1:
      /* Pre-adjust the loop counter.  */
      operands[4] = GEN_INT (n_bytes - 2);
      output_asm_insn ("ldi %4,%2", operands);

      /* Copying loop.  */
      output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
      output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
      output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
      output_asm_insn ("addib,>= -2,%2,.-12", operands);
      output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

      /* Handle the residual.  */
      if (n_bytes % 2 != 0)
	{
	  output_asm_insn ("ldb 0(%1),%3", operands);
	  output_asm_insn ("stb %3,0(%0)", operands);
	}
      return "";

    default:
      gcc_unreachable ();
    }
}
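
/* Illustrative sketch (guarded out): how the loops above partition a
   copy of N_BYTES.  Each loop iteration moves 2*align bytes; the
   residual of up to 2*align - 1 bytes needs at most one more full word
   plus one partial (stby/stdby) store, and the three pieces always sum
   back to N_BYTES.  */
#if 0
static void
block_move_plan (unsigned long n_bytes, int align)
{
  unsigned long loop_bytes = n_bytes - n_bytes % (2 * align);
  int extra_word = (n_bytes % (2 * align)) >= (unsigned long) align;
  unsigned long partial = n_bytes % align;

  /* loop_bytes + (extra_word ? align : 0) + partial == n_bytes.  */
}
#endif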
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_movmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than a word at a time because the PA
     has no integer move insns longer than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns += 2;

      if ((n_bytes % align) != 0)
	n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
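
/* Worked example: for align == 4 and n_bytes == 23 the loop costs 6
   insns; the residual 23 % 8 == 7 adds 2 insns for one extra word
   (7 >= 4) and 2 more for the partial store (23 % 4 == 3), giving
   (6 + 2 + 2) * 4 == 40 bytes of code.  */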
/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT.
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

const char *
pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[3]);
  unsigned long n_bytes = INTVAL (operands[2]);

  /* We can't clear more than a word at a time because the PA
     has no integer move insns longer than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the clear).  */
  switch (align)
    {
    case 8:
      /* Pre-adjust the loop counter.  */
      operands[2] = GEN_INT (n_bytes - 16);
      output_asm_insn ("ldi %2,%1", operands);

      /* Clearing loop.  */
      output_asm_insn ("std,ma %%r0,8(%0)", operands);
      output_asm_insn ("addib,>= -16,%1,.-4", operands);
      output_asm_insn ("std,ma %%r0,8(%0)", operands);

      /* Handle the residual.  There could be up to 7 bytes of
	 residual to clear!  */
      if (n_bytes % 16 != 0)
	{
	  operands[2] = GEN_INT (n_bytes % 8);
	  if (n_bytes % 16 >= 8)
	    output_asm_insn ("std,ma %%r0,8(%0)", operands);
	  if (n_bytes % 8 != 0)
	    output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
	}
      return "";

    case 4:
      /* Pre-adjust the loop counter.  */
      operands[2] = GEN_INT (n_bytes - 8);
      output_asm_insn ("ldi %2,%1", operands);

      /* Clearing loop.  */
      output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
      output_asm_insn ("addib,>= -8,%1,.-4", operands);
      output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

      /* Handle the residual.  There could be up to 7 bytes of
	 residual to clear!  */
      if (n_bytes % 8 != 0)
	{
	  operands[2] = GEN_INT (n_bytes % 4);
	  if (n_bytes % 8 >= 4)
	    output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	  if (n_bytes % 4 != 0)
	    output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
	}
      return "";

    case 2:
      /* Pre-adjust the loop counter.  */
      operands[2] = GEN_INT (n_bytes - 4);
      output_asm_insn ("ldi %2,%1", operands);

      /* Clearing loop.  */
      output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
      output_asm_insn ("addib,>= -4,%1,.-4", operands);
      output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

      /* Handle the residual.  */
      if (n_bytes % 4 != 0)
	{
	  if (n_bytes % 4 >= 2)
	    output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	  if (n_bytes % 2 != 0)
	    output_asm_insn ("stb %%r0,0(%0)", operands);
	}
      return "";

    case 1:
      /* Pre-adjust the loop counter.  */
      operands[2] = GEN_INT (n_bytes - 2);
      output_asm_insn ("ldi %2,%1", operands);

      /* Clearing loop.  */
      output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
      output_asm_insn ("addib,>= -2,%1,.-4", operands);
      output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

      /* Handle the residual.  */
      if (n_bytes % 2 != 0)
	output_asm_insn ("stb %%r0,0(%0)", operands);

      return "";

    default:
      gcc_unreachable ();
    }
}
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no integer move insns longer than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns++;

      if ((n_bytes % align) != 0)
	n_insns++;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
	if ((mask & (1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < 32; ls1++)
	if ((mask & (1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < 32; ms0++)
	if ((mask & (1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == 32);

      if (ls1 == 32)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "{extru|extrw,u} %1,31,%2,%0";
	}
      else
	{
	  /* We could use this `depi' for the case above as well, but `depi'
	     requires one more register file access than an `extru'.  */

	  p = 31 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "{depi|depwi} 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "extrd,u %1,63,%2,%0";
	}
      else
	{
	  /* We could use this `depdi' for the case above as well, but `depdi'
	     requires one more register file access than an `extrd'.  */

	  p = 63 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "depdi 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
pa_output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
	      || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}
/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Output an ascii string.  */
void
pa_output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;

      for (io = 0, co = 0; io < MIN (4, size - i); io++)
	{
	  register unsigned int c = (unsigned char) p[i + io];

	  if (c == '\"' || c == '\\')
	    partial_output[co++] = '\\';
	  if (c >= ' ' && c < 0177)
	    partial_output[co++] = c;
	  else
	    {
	      unsigned int hexd;

	      partial_output[co++] = '\\';
	      partial_output[co++] = 'x';
	      hexd = c / 16 - 0 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	      hexd = c % 16 - 0 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	    }
	}
      if (chars_output + co > 243)
	{
	  fputs ("\"\n\t.STRING \"", file);
	  chars_output = 0;
	}
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}
/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD
   notes lying around.

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch
   instructions.  */
static void
remove_useless_addtr_insns (int check_notes)
{
  rtx insn;
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */
  if (optimize > 0)
    {
      int fcmp_count = 0;
      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
	 instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp;

	  /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
	  if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* If the destination is CCFP, then we've found an fcmp insn.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
	    {
	      fcmp_count++;
	      continue;
	    }

	  tmp = PATTERN (insn);
	  /* If this is an fbranch instruction, bump the fbranch counter.  */
	  if (GET_CODE (tmp) == SET
	      && SET_DEST (tmp) == pc_rtx
	      && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
	      && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
	      && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
	      && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
	    {
	      fbranch_count++;
	      continue;
	    }
	}

      /* Find all floating point compare + branch insns.  If possible,
	 reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp, next;

	  /* Ignore anything that isn't an INSN.  */
	  if (GET_CODE (insn) != INSN)
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* The destination must be CCFP, which is register zero.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
	    continue;

	  /* INSN should be a set of CCFP.

	     See if the result of this insn is used in a reversed FP
	     conditional branch.  If so, reverse our condition and
	     the branch.  Doing so avoids useless add,tr insns.  */
	  next = next_insn (insn);
	  while (next)
	    {
	      /* Jumps, calls and labels stop our search.  */
	      if (GET_CODE (next) == JUMP_INSN
		  || GET_CODE (next) == CALL_INSN
		  || GET_CODE (next) == CODE_LABEL)
		break;

	      /* As does another fcmp insn.  */
	      if (GET_CODE (next) == INSN
		  && GET_CODE (PATTERN (next)) == SET
		  && GET_CODE (SET_DEST (PATTERN (next))) == REG
		  && REGNO (SET_DEST (PATTERN (next))) == 0)
		break;

	      next = next_insn (next);
	    }

	  /* Is NEXT_INSN a branch?  */
	  if (next
	      && GET_CODE (next) == JUMP_INSN)
	    {
	      rtx pattern = PATTERN (next);

	      /* If it is a reversed fp conditional branch (e.g. uses add,tr)
		 and CCFP dies, then reverse our conditional and the branch
		 to avoid the add,tr.  */
	      if (GET_CODE (pattern) == SET
		  && SET_DEST (pattern) == pc_rtx
		  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
		  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
		  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
		  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
		  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
		  && (fcmp_count == fbranch_count
		      || (check_notes
			  && find_regno_note (next, REG_DEAD, 0))))
		{
		  /* Reverse the branch.  */
		  tmp = XEXP (SET_SRC (pattern), 1);
		  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
		  XEXP (SET_SRC (pattern), 2) = tmp;
		  INSN_CODE (next) = -1;

		  /* Reverse our condition.  */
		  tmp = PATTERN (insn);
		  PUT_CODE (XEXP (tmp, 1),
			    (reverse_condition_maybe_unordered
			     (GET_CODE (XEXP (tmp, 1)))));
		}
	    }
	}
    }

  pass = !pass;
}
/* You may have trouble believing this, but this is the 32 bit HP-PA
   stack layout.  Wow.

   Offset		Contents

   Variable arguments	(optional; any number may be allocated)

   SP-(4*(N+9))		arg word N
   	:		    :
      SP-56		arg word 5
      SP-52		arg word 4

   Fixed arguments	(must be allocated; may remain unused)

      SP-48		arg word 3
      SP-44		arg word 2
      SP-40		arg word 1
      SP-36		arg word 0

   Frame Marker

      SP-32		External Data Pointer (DP)
      SP-28		External sr4
      SP-24		External/stub RP (RP')
      SP-20		Current RP
      SP-16		Static Link
      SP-12		Clean up
      SP-8		Calling Stub RP (RP'')
      SP-4		Previous SP

   Top of Frame

      SP-0		Stack Pointer (points to next available address)

*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   frame offsets correct.

   Offset		Contents

   SP (FP')		Previous FP
   SP + 4		Alignment filler (sigh)
   SP + 8		Space for locals reserved here.
       .
       .
       .
   SP + n		All call saved registers used.
       .
       .
       .
   SP + o		All call saved fp registers used.
       .
       .
       .
   SP + p (SP')		points to next available address.

*/
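
/* Illustrative sketch (guarded out): the variable argument slot address
   from the layout above.  Arg word N lives at SP - 4*(N+9), so arg
   word 0 is at SP-36 and each further word sits 4 bytes lower.  */
#if 0
static unsigned int
arg_word_sp_offset (unsigned int n)
{
  return 4 * (n + 9);		/* Subtract from SP to address arg word N.  */
}
#endif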
/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;
/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.  */

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx insn, dest, src, basereg;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	{
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (VOIDmode, tmpreg,
				     gen_rtx_PLUS (Pmode, basereg, delta)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
	add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		      gen_rtx_SET (VOIDmode,
				   gen_rtx_MEM (word_mode,
						gen_rtx_PLUS (word_mode,
							      basereg,
							      delta)),
				   src));
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx insn, basereg, srcreg, delta;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
	 in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     plus_constant (Pmode,
					    gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		      gen_rtx_SET (VOIDmode, tmpreg,
				   gen_rtx_PLUS (Pmode, basereg, delta)));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
		      gen_rtx_PLUS (Pmode, basereg,
				    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}
HOST_WIDE_INT
pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in pa_expand_prologue and pa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (df_regs_ever_live_p (i))
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (df_regs_ever_live_p (i)
	|| (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
      {
	freg_saved = 1;

	/* We always save both halves of the FP register, so always
	   increment the frame size by 8 bytes.  */
	size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
	*fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += crtl->outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!crtl->is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
	  & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
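
/* Worked example of the computation above, assuming the 32-bit ABI with
   STARTING_FRAME_OFFSET of 8 and a 64-byte preferred stack boundary: a
   non-leaf function with 10 bytes of locals, no saved registers and no
   outgoing argument space rounds the locals to 12, adds 8 for the frame
   pointer slot and filler (20), adds the 32-byte frame marker (52), and
   rounds up to a 64-byte frame.  */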
/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is an int: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

static void
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* pa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (crtl->is_leaf)
    fputs (",NO_CALLS", file);
  else
    fputs (",CALLS", file);
  if (rp_saved)
    fputs (",SAVE_RP", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
}
3778 pa_expand_prologue (void)
3780 int merge_sp_adjust_with_store
= 0;
3781 HOST_WIDE_INT size
= get_frame_size ();
3782 HOST_WIDE_INT offset
;
3790 /* Compute total size for frame pointer, filler, locals and rounding to
3791 the next word boundary. Similar code appears in pa_compute_frame_size
3792 and must be changed in tandem with this code. */
3793 local_fsize
= (size
+ UNITS_PER_WORD
- 1) & ~(UNITS_PER_WORD
- 1);
3794 if (local_fsize
|| frame_pointer_needed
)
3795 local_fsize
+= STARTING_FRAME_OFFSET
;
3797 actual_fsize
= pa_compute_frame_size (size
, &save_fregs
);
3798 if (flag_stack_usage_info
)
3799 current_function_static_stack_size
= actual_fsize
;
3801 /* Compute a few things we will use often. */
3802 tmpreg
= gen_rtx_REG (word_mode
, 1);
3804 /* Save RP first. The calling conventions manual states RP will
3805 always be stored into the caller's frame at sp - 20 or sp - 16
3806 depending on which ABI is in use. */
3807 if (df_regs_ever_live_p (2) || crtl
->calls_eh_return
)
3809 store_reg (2, TARGET_64BIT
? -16 : -20, STACK_POINTER_REGNUM
);
3815 /* Allocate the local frame and set up the frame pointer if needed. */
3816 if (actual_fsize
!= 0)
3818 if (frame_pointer_needed
)
3820 /* Copy the old frame pointer temporarily into %r1. Set up the
3821 new stack pointer, then store away the saved old frame pointer
3822 into the stack at sp and at the same time update the stack
3823 pointer by actual_fsize bytes. Two versions, first
3824 handles small (<8k) frames. The second handles large (>=8k)
3826 insn
= emit_move_insn (tmpreg
, hard_frame_pointer_rtx
);
3828 RTX_FRAME_RELATED_P (insn
) = 1;
3830 insn
= emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
);
3832 RTX_FRAME_RELATED_P (insn
) = 1;
3834 if (VAL_14_BITS_P (actual_fsize
))
3835 store_reg_modify (STACK_POINTER_REGNUM
, 1, actual_fsize
);
3838 /* It is incorrect to store the saved frame pointer at *sp,
3839 then increment sp (writes beyond the current stack boundary).
3841 So instead use stwm to store at *sp and post-increment the
3842 stack pointer as an atomic operation. Then increment sp to
3843 finish allocating the new frame. */
3844 HOST_WIDE_INT adjust1
= 8192 - 64;
3845 HOST_WIDE_INT adjust2
= actual_fsize
- adjust1
;
3847 store_reg_modify (STACK_POINTER_REGNUM
, 1, adjust1
);
3848 set_reg_plus_d (STACK_POINTER_REGNUM
, STACK_POINTER_REGNUM
,
3852 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3853 we need to store the previous stack pointer (frame pointer)
3854 into the frame marker on targets that use the HP unwind
3855 library. This allows the HP unwind library to be used to
3856 unwind GCC frames. However, we are not fully compatible
3857 with the HP library because our frame layout differs from
3858 that specified in the HP runtime specification.
3860 We don't want a frame note on this instruction as the frame
3861 marker moves during dynamic stack allocation.
3863 This instruction also serves as a blockage to prevent
3864 register spills from being scheduled before the stack
3865 pointer is raised. This is necessary as we store
3866 registers using the frame pointer as a base register,
3867 and the frame pointer is set before sp is raised. */
3868 if (TARGET_HPUX_UNWIND_LIBRARY
)
3870 rtx addr
= gen_rtx_PLUS (word_mode
, stack_pointer_rtx
,
3871 GEN_INT (TARGET_64BIT
? -8 : -4));
3873 emit_move_insn (gen_rtx_MEM (word_mode
, addr
),
3874 hard_frame_pointer_rtx
);
3877 emit_insn (gen_blockage ());
3879 /* no frame pointer needed. */
3882 /* In some cases we can perform the first callee register save
3883 and allocating the stack frame at the same time. If so, just
3884 make a note of it and defer allocating the frame until saving
3885 the callee registers. */
3886 if (VAL_14_BITS_P (actual_fsize
) && local_fsize
== 0)
3887 merge_sp_adjust_with_store
= 1;
3888 /* Can not optimize. Adjust the stack frame by actual_fsize
3891 set_reg_plus_d (STACK_POINTER_REGNUM
, STACK_POINTER_REGNUM
,
3896 /* Normal register save.
3898 Do not save the frame pointer in the frame_pointer_needed case. It
3899 was done earlier. */
3900 if (frame_pointer_needed
)
3902 offset
= local_fsize
;
3904 /* Saving the EH return data registers in the frame is the simplest
3905 way to get the frame unwind information emitted. We put them
3906 just before the general registers. */
3907 if (DO_FRAME_NOTES
&& crtl
->calls_eh_return
)
3909 unsigned int i
, regno
;
3913 regno
= EH_RETURN_DATA_REGNO (i
);
3914 if (regno
== INVALID_REGNUM
)
3917 store_reg (regno
, offset, HARD_FRAME_POINTER_REGNUM);
	  offset += UNITS_PER_WORD;

	  for (i = 18; i >= 4; i--)
	    if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	      {
		store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
		offset += UNITS_PER_WORD;
		gr_saved++;
	      }

	  /* Account for %r3 which is saved in a special place.  */
	  gr_saved++;
	}
      /* No frame pointer needed.  */
      else
	{
	  offset = local_fsize - actual_fsize;

	  /* Saving the EH return data registers in the frame is the simplest
	     way to get the frame unwind information emitted.  */
	  if (DO_FRAME_NOTES && crtl->calls_eh_return)
	    {
	      unsigned int i, regno;

	      for (i = 0; ; ++i)
		{
		  regno = EH_RETURN_DATA_REGNO (i);
		  if (regno == INVALID_REGNUM)
		    break;

		  /* If merge_sp_adjust_with_store is nonzero, then we can
		     optimize the first save.  */
		  if (merge_sp_adjust_with_store)
		    {
		      store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
		      merge_sp_adjust_with_store = 0;
		    }
		  else
		    store_reg (regno, offset, STACK_POINTER_REGNUM);
		  offset += UNITS_PER_WORD;
		}
	    }

	  for (i = 18; i >= 3; i--)
	    if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	      {
		/* If merge_sp_adjust_with_store is nonzero, then we can
		   optimize the first GR save.  */
		if (merge_sp_adjust_with_store)
		  {
		    store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
		    merge_sp_adjust_with_store = 0;
		  }
		else
		  store_reg (i, offset, STACK_POINTER_REGNUM);
		offset += UNITS_PER_WORD;
		gr_saved++;
	      }

	  /* If we wanted to merge the SP adjustment with a GR save, but we
	     never did any GR saves, then just emit the adjustment here.  */
	  if (merge_sp_adjust_with_store)
	    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			    actual_fsize, 1);
	}

      /* The hppa calling conventions say that %r19, the pic offset
	 register, is saved at sp - 32 (in this function's frame)
	 when generating PIC code.  FIXME:  What is the correct thing
	 to do for functions which make no calls and allocate no
	 frame?  Do we need to allocate a frame, or can we just omit
	 the save?   For now we'll just omit the save.

	 We don't want a note on this insn as the frame marker can
	 move if there is a dynamic stack allocation.  */
      if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
	{
	  rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

	  emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
	}

      /* Align pointer properly (doubleword boundary).  */
      offset = (offset + 7) & ~7;

      /* Floating point register store.  */
      if (save_fregs)
	{
	  rtx base;

	  /* First get the frame or stack pointer to the start of the FP
	     register save area.  */
	  if (frame_pointer_needed)
	    {
	      set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
	      base = hard_frame_pointer_rtx;
	    }
	  else
	    {
	      set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
	      base = stack_pointer_rtx;
	    }

	  /* Now actually save the FP registers.  */
	  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	    {
	      if (df_regs_ever_live_p (i)
		  || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
		{
		  rtx addr, insn, reg;
		  addr = gen_rtx_MEM (DFmode,
				      gen_rtx_POST_INC (DFmode, tmpreg));
		  reg = gen_rtx_REG (DFmode, i);
		  insn = emit_move_insn (addr, reg);
		  if (DO_FRAME_NOTES)
		    {
		      RTX_FRAME_RELATED_P (insn) = 1;
		      if (TARGET_64BIT)
			{
			  rtx mem = gen_rtx_MEM (DFmode,
						 plus_constant (Pmode, base,
								offset));
			  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
					gen_rtx_SET (VOIDmode, mem, reg));
			}
		      else
			{
			  rtx meml = gen_rtx_MEM (SFmode,
						  plus_constant (Pmode, base,
								 offset));
			  rtx memr = gen_rtx_MEM (SFmode,
						  plus_constant (Pmode, base,
								 offset + 4));
			  rtx regl = gen_rtx_REG (SFmode, i);
			  rtx regr = gen_rtx_REG (SFmode, i + 1);
			  rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
			  rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
			  rtvec vec;

			  RTX_FRAME_RELATED_P (setl) = 1;
			  RTX_FRAME_RELATED_P (setr) = 1;
			  vec = gen_rtvec (2, setl, setr);
			  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
					gen_rtx_SEQUENCE (VOIDmode, vec));
			}
		    }
		  offset += GET_MODE_SIZE (DFmode);
		  fr_saved++;
		}
	    }
	}
    }
}
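
/* Editorial note (a sketch added for clarity; not in the original
   source): in the 32-bit case the REG_FRAME_RELATED_EXPR note above
   describes the single DFmode store to the unwinder as a sequence of
   two SFmode stores,

	(sequence [(set (mem:SF (base+offset))   (reg:SF i))
		   (set (mem:SF (base+offset+4)) (reg:SF i+1))])

   which presumably matches how the 32-bit unwind info tracks the two
   halves of a double-precision register separately.  */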

/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
	{
	  emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
	  src = gen_rtx_MEM (word_mode, tmpreg);
	}
      else
	src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}
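
/* Illustrative sketch (not in the original source): on a 32-bit
   target the small- and large-displacement cases in load_reg
   correspond roughly to

	ldw disp(basereg),reg		; 14-bit displacement
	addil L'disp,basereg		; disp out of range: high part
	ldw R'disp(%r1),reg		;   into %r1, low part via LO_SUM

   plus the wide-displacement path for 64-bit targets; the exact
   mnemonics depend on word size and assembler dialect.  */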

/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (unsigned int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      unsigned int old_total = total_code_bytes;

      total_code_bytes += nbytes;

      /* Be prepared to handle overflows.  */
      if (old_total > total_code_bytes)
	total_code_bytes = UINT_MAX;
    }
}

/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

static void
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rtx insn = get_last_insn ();

  last_address = 0;

  /* pa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end
     of a function.

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (GET_CODE (insn) == NOTE)
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && GET_CODE (insn) == CALL_INSN)
    {
      fputs ("\tnop\n", file);
      last_address += 4;
    }

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We're done with this subspace except possibly for some additional
	 debug information.  Forget that we are in this subspace to ensure
	 that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  if (INSN_ADDRESSES_SET_P ())
    {
      insn = get_last_nonnote_insn ();
      last_address += INSN_ADDRESSES (INSN_UID (insn));
      if (INSN_P (insn))
	last_address += insn_default_length (insn);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
		      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
    }
  else
    last_address = UINT_MAX;

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
}
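
/* Worked example (editorial note, not in the original source): with
   FUNCTION_BOUNDARY / BITS_PER_UNIT == 4, the rounding above computes
   (last_address + 3) & ~3, e.g. 0x1235 -> 0x1238, so the recorded
   function size includes the alignment padding that follows it.  */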

void
pa_expand_epilogue (void)
{
  rtx tmpreg;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;
  int i;
  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (rp_saved)
    {
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
	{
	  load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
	  ret_off = 0;
	}
      else
	{
	  /* No frame pointer, and stack is smaller than 8k.  */
	  if (VAL_14_BITS_P (ret_off - actual_fsize))
	    {
	      load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
	      ret_off = 0;
	    }
	}
    }

  /* General register restores.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	  {
	    load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	  }
    }
  else
    {
      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
	 to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* Only for the first load.
		 merge_sp_adjust_with_load holds the register load
		 with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
		merge_sp_adjust_with_load = regno;
	      else
		load_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	{
	  if (df_regs_ever_live_p (i) && ! call_used_regs[i])
	    {
	      /* Only for the first load.
		 merge_sp_adjust_with_load holds the register load
		 with which we will merge the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
		merge_sp_adjust_with_load = i;
	      else
		load_reg (i, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */
  if (save_fregs)
    {
      /* Adjust the register to index off of.  */
      if (frame_pointer_needed)
	set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
      else
	set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	if (df_regs_ever_live_p (i)
	    || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
	  {
	    rtx src = gen_rtx_MEM (DFmode,
				   gen_rtx_POST_INC (DFmode, tmpreg));
	    rtx dest = gen_rtx_REG (DFmode, i);
	    emit_move_insn (dest, src);
	  }
    }

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
  if (frame_pointer_needed)
    {
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (hard_frame_pointer_rtx,
			       stack_pointer_rtx, delta));
    }
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
    {
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
    }
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
		    - actual_fsize, 0);

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
  if (ret_off != 0)
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
		 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
		 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
    }
}

bool
pa_can_use_return_insn (void)
{
  if (!reload_completed)
    return false;

  if (frame_pointer_needed)
    return false;

  if (df_regs_ever_live_p (2))
    return false;

  if (crtl->profile)
    return false;

  return pa_compute_frame_size (get_frame_size (), 0) == 0;
}

rtx
hppa_pic_save_rtx (void)
{
  return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
}

#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0
#endif

/* Vector of funcdef numbers.  */
static VEC(int,heap) *funcdef_nos;

/* Output deferred profile counters.  */
static void
output_deferred_profile_counters (void)
{
  unsigned int i;
  int align, n;

  if (VEC_empty (int, funcdef_nos))
    return;

  switch_to_section (data_section);
  align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
  ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));

  for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "LP", n);
      assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
    }

  VEC_free (int, heap, funcdef_nos);
}

void
hppa_profile_hook (int label_no)
{
  /* We use SImode for the address of the function in both 32 and
     64-bit code to avoid having to provide DImode versions of the
     lcla2 and load_offset_label_address insn patterns.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx label_rtx = gen_label_rtx ();
  rtx begin_label_rtx, call_insn;
  char begin_label_name[16];

  ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
			       label_no);
  begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));

  if (TARGET_64BIT)
    emit_move_insn (arg_pointer_rtx,
		    gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
				  GEN_INT (64)));

  emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));

  /* The address of the function is loaded into %r25 with an instruction-
     relative sequence that avoids the use of relocations.  The sequence
     is split so that the load_offset_label_address instruction can
     occupy the delay slot of the call to _mcount.  */
  if (TARGET_PA_20)
    emit_insn (gen_lcla2 (reg, label_rtx));
  else
    emit_insn (gen_lcla1 (reg, label_rtx));

  emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
					    reg, begin_label_rtx, label_rtx));

#if !NO_DEFERRED_PROFILE_COUNTERS
  {
    rtx count_label_rtx, addr, r24;
    char count_label_name[16];

    VEC_safe_push (int, heap, funcdef_nos, label_no);
    ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
    count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));

    addr = force_reg (Pmode, count_label_rtx);
    r24 = gen_rtx_REG (Pmode, 24);
    emit_move_insn (r24, addr);

    call_insn =
      emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
					     gen_rtx_SYMBOL_REF (Pmode,
								 "_mcount")),
				GEN_INT (TARGET_64BIT ? 24 : 12)));

    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
  }
#else
  call_insn =
    emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
					   gen_rtx_SYMBOL_REF (Pmode,
							       "_mcount")),
			      GEN_INT (TARGET_64BIT ? 16 : 8)));
#endif

  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));

  /* Indicate the _mcount call cannot throw, nor will it execute a
     non-local goto.  */
  make_reg_eh_region_note_nothrow_nononlocal (call_insn);
}

/* Fetch the return address for the frame COUNT steps up from
   the current frame, after the prologue.  FRAMEADDR is the
   frame pointer of the COUNT frame.

   We want to ignore any export stub remnants here.  To handle this,
   we examine the code at the return address, and if it is an export
   stub, we return a memory rtx for the stub return address stored
   at frame - 24.

   The value returned is used in two different ways:

	1. To find a function's caller.

	2. To change the return address for a function.

   This function handles most instances of case 1; however, it will
   fail if there are two levels of stubs to execute on the return
   path.  The only way I believe that can happen is if the return value
   needs a parameter relocation, which never happens for C code.

   This function handles most instances of case 2; however, it will
   fail if we did not originally have stub code on the return path
   but will need stub code on the new return path.  This can happen if
   the caller & callee are both in the main program, but the new
   return location is in a shared library.  */

rtx
pa_return_addr_rtx (int count, rtx frameaddr)
{
  rtx label;
  rtx rp;
  rtx saved_rp;
  rtx ins;

  /* The instruction stream at the return address of a PA1.X export stub is:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0x004010a1 | stub+12:  ldsid (sr0,rp),r1
	0x00011820 | stub+16:  mtsp r1,sr0
	0xe0400002 | stub+20:  be,n 0(sr0,rp)

     0xe0400002 must be specified as -532676606 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.

     The instruction stream at the return address of a PA2.0 export stub is:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0xe840d002 | stub+12:  bve,n (rp)
  */

  HOST_WIDE_INT insns[4];
  int i, len;

  if (count != 0)
    return NULL_RTX;

  rp = get_hard_reg_initial_val (Pmode, 2);

  if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
    return rp;

  /* If there is no export stub then just use the value saved from
     the return pointer register.  */

  saved_rp = gen_reg_rtx (Pmode);
  emit_move_insn (saved_rp, rp);

  /* Get pointer to the instruction stream.  We have to mask out the
     privilege level from the two low order bits of the return address
     pointer here so that ins will point to the start of the first
     instruction that would have been executed if we returned.  */
  ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
  label = gen_label_rtx ();

  if (TARGET_PA_20)
    {
      insns[0] = 0x4bc23fd1;
      insns[1] = -398405630;
      len = 2;
    }
  else
    {
      insns[0] = 0x4bc23fd1;
      insns[1] = 0x004010a1;
      insns[2] = 0x00011820;
      insns[3] = -532676606;
      len = 4;
    }

  /* Check the instruction stream at the normal return address for the
     export stub.  If it is an export stub, then our return address is
     really in -24[frameaddr].  */

  for (i = 0; i < len; i++)
    {
      rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
      rtx op1 = GEN_INT (insns[i]);
      emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
    }

  /* Here we know that our return address points to an export
     stub.  We don't want to return the address of the export stub,
     but rather the return address of the export stub.  That return
     address is stored at -24[frameaddr].  */

  emit_move_insn (saved_rp,
		  gen_rtx_MEM (Pmode,
			       memory_address (Pmode,
					       plus_constant (Pmode, frameaddr,
							      -24))));

  emit_label (label);

  return saved_rp;
}

void
pa_emit_bcond_fp (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[0]);
  rtx operand0 = operands[1];
  rtx operand1 = operands[2];
  rtx label = operands[3];

  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
			  gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode,
						     gen_rtx_fmt_ee (NE,
							      VOIDmode,
							      gen_rtx_REG (CCFPmode, 0),
							      const0_rtx),
						     gen_rtx_LABEL_REF (VOIDmode, label),
						     pc_rtx)));
}

/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  /* Don't adjust costs for a pa8000 chip, also do not adjust any
     true dependencies as they are described with bypasses now.  */
  if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
    return cost;

  if (! recog_memoized (insn))
    return 0;

  attr_type = get_attr_type (insn);

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */

      if (attr_type == TYPE_FPLOAD)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPALU:
		case TYPE_FPMULSGL:
		case TYPE_FPMULDBL:
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* A fpload can't be issued until one cycle before a
		     preceding arithmetic operation has finished if
		     the target of the fpload is any of the sources
		     (or destination) of the arithmetic operation.  */
		  return insn_default_latency (dep_insn) - 1;

		default:
		  return 0;
		}
	    }
	}
      else if (attr_type == TYPE_FPALU)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* An ALU flop can't be issued until two cycles before a
		     preceding divide or sqrt operation has finished if
		     the target of the ALU flop is any of the sources
		     (or destination) of the divide or sqrt operation.  */
		  return insn_default_latency (dep_insn) - 2;

		default:
		  return 0;
		}
	    }
	}

      /* For other anti dependencies, the cost is 0.  */
      return 0;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if (attr_type == TYPE_FPLOAD)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPALU:
		case TYPE_FPMULSGL:
		case TYPE_FPMULDBL:
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* A fpload can't be issued until one cycle before a
		     preceding arithmetic operation has finished if
		     the target of the fpload is the destination of the
		     arithmetic operation.

		     Exception: For PA7100LC, PA7200 and PA7300, the cost
		     is 3 cycles, unless they bundle together.   We also
		     pay the penalty if the second insn is a fpload.  */
		  return insn_default_latency (dep_insn) - 1;

		default:
		  return 0;
		}
	    }
	}
      else if (attr_type == TYPE_FPALU)
	{
	  rtx pat = PATTERN (insn);
	  rtx dep_pat = PATTERN (dep_insn);
	  if (GET_CODE (pat) == PARALLEL)
	    {
	      /* This happens for the fldXs,mb patterns.  */
	      pat = XVECEXP (pat, 0, 0);
	    }
	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
	    /* If this happens, we have to extend this to schedule
	       optimally.  Return 0 for now.  */
	    return 0;

	  if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
	    {
	      if (! recog_memoized (dep_insn))
		return 0;
	      switch (get_attr_type (dep_insn))
		{
		case TYPE_FPDIVSGL:
		case TYPE_FPDIVDBL:
		case TYPE_FPSQRTSGL:
		case TYPE_FPSQRTDBL:
		  /* An ALU flop can't be issued until two cycles before a
		     preceding divide or sqrt operation has finished if
		     the target of the ALU flop is also the target of
		     the divide or sqrt operation.  */
		  return insn_default_latency (dep_insn) - 2;

		default:
		  return 0;
		}
	    }
	}

      /* For other output dependencies, the cost is 0.  */
      return 0;

    default:
      gcc_unreachable ();
    }
}

/* Adjust scheduling priorities.  We use this to try and keep addil
   and the next use of %r1 close together.  */
static int
pa_adjust_priority (rtx insn, int priority)
{
  rtx set = single_set (insn);
  rtx src, dest;

  if (set)
    {
      src = SET_SRC (set);
      dest = SET_DEST (set);
      if (GET_CODE (src) == LO_SUM
	  && symbolic_operand (XEXP (src, 1), VOIDmode)
	  && ! read_only_operand (XEXP (src, 1), VOIDmode))
	priority >>= 3;

      else if (GET_CODE (src) == MEM
	       && GET_CODE (XEXP (src, 0)) == LO_SUM
	       && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
	       && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
	priority >>= 1;

      else if (GET_CODE (dest) == MEM
	       && GET_CODE (XEXP (dest, 0)) == LO_SUM
	       && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
	       && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
	priority >>= 3;
    }
  return priority;
}

/* The 700 can only issue a single insn at a time.
   The 7XXX processors can issue two insns at a time.
   The 8000 can issue 4 insns at a time.  */
static int
pa_issue_rate (void)
{
  switch (pa_cpu)
    {
    case PROCESSOR_700:		return 1;
    case PROCESSOR_7100:	return 2;
    case PROCESSOR_7100LC:	return 2;
    case PROCESSOR_7200:	return 2;
    case PROCESSOR_7300:	return 2;
    case PROCESSOR_8000:	return 4;

    default:
      gcc_unreachable ();
    }
}

/* Return any length adjustment needed by INSN which already has its length
   computed as LENGTH.   Return zero if no adjustment is necessary.

   For the PA: function calls, millicode calls, and backwards short
   conditional branches with unfilled delay slots need an adjustment by +1
   (to account for the NOP which will be inserted into the instruction stream).

   Also compute the length of an inline block move here as it is too
   complicated to express as a length attribute in pa.md.  */
int
pa_adjust_insn_length (rtx insn, int length)
{
  rtx pat = PATTERN (insn);

  /* Jumps inside switch tables which have unfilled delay slots need
     adjustment.  */
  if (GET_CODE (insn) == JUMP_INSN
      && GET_CODE (pat) == PARALLEL
      && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
    return 4;
  /* Millicode insn with an unfilled delay slot.  */
  else if (GET_CODE (insn) == INSN
	   && GET_CODE (pat) != SEQUENCE
	   && GET_CODE (pat) != USE
	   && GET_CODE (pat) != CLOBBER
	   && get_attr_type (insn) == TYPE_MILLI)
    return 4;
  /* Block move pattern.  */
  else if (GET_CODE (insn) == INSN
	   && GET_CODE (pat) == PARALLEL
	   && GET_CODE (XVECEXP (pat, 0, 0)) == SET
	   && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
	   && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
	   && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
	   && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
    return compute_movmem_length (insn) - 4;
  /* Block clear pattern.  */
  else if (GET_CODE (insn) == INSN
	   && GET_CODE (pat) == PARALLEL
	   && GET_CODE (XVECEXP (pat, 0, 0)) == SET
	   && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
	   && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
	   && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
    return compute_clrmem_length (insn) - 4;
  /* Conditional branch with an unfilled delay slot.  */
  else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
    {
      /* Adjust a short backwards conditional with an unfilled delay slot.  */
      if (GET_CODE (pat) == SET
	  && length == 4
	  && JUMP_LABEL (insn) != NULL_RTX
	  && ! forward_branch_p (insn))
	return 4;
      else if (GET_CODE (pat) == PARALLEL
	       && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
	       && length == 4)
	return 4;
      /* Adjust dbra insn with short backwards conditional branch with
	 unfilled delay slot -- only for case where counter is in a
	 general register.  */
      else if (GET_CODE (pat) == PARALLEL
	       && GET_CODE (XVECEXP (pat, 0, 1)) == SET
	       && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
	       && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
	       && length == 4
	       && ! forward_branch_p (insn))
	return 4;
      else
	return 0;
    }
  return 0;
}

/* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook.  */

static bool
pa_print_operand_punct_valid_p (unsigned char code)
{
  if (code == '@'
      || code == '#'
      || code == '*'
      || code == '^')
    return true;

  return false;
}

/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
pa_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output a 'nop' if there's nothing for the delay slot.  */
      if (dbr_sequence_length () == 0)
	fputs ("\n\tnop", file);
      return;
    case '*':
      /* Output a nullification completer if there's nothing for the
	 delay slot or nullification is requested.  */
      if (dbr_sequence_length () == 0 ||
	  (final_sequence &&
	   INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
	fputs (",n", file);
      return;
    case 'R':
      /* Print out the second register name of a register pair.
	 I.e., R (6) => 7.  */
      fputs (reg_names[REGNO (x) + 1], file);
      return;
    case 'r':
      /* A register or zero.  */
      if (x == const0_rtx
	  || (x == CONST0_RTX (DFmode))
	  || (x == CONST0_RTX (SFmode)))
	{
	  fputs ("%r0", file);
	  return;
	}
      else
	break;
    case 'f':
      /* A register or zero (floating point).  */
      if (x == const0_rtx
	  || (x == CONST0_RTX (DFmode))
	  || (x == CONST0_RTX (SFmode)))
	{
	  fputs ("%fr0", file);
	  return;
	}
      else
	break;
    case 'A':
      {
	rtx xoperands[2];

	xoperands[0] = XEXP (XEXP (x, 0), 0);
	xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
	pa_output_global_address (file, xoperands[1], 0);
	fprintf (file, "(%s)", reg_names[REGNO (xoperands[0])]);
	return;
      }

    case 'C':			/* Plain (C)ondition */
    case 'X':
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("=", file);  break;
	case NE:
	  fputs ("<>", file);  break;
	case GT:
	  fputs (">", file);  break;
	case GE:
	  fputs (">=", file);  break;
	case GEU:
	  fputs (">>=", file);  break;
	case GTU:
	  fputs (">>", file);  break;
	case LT:
	  fputs ("<", file);  break;
	case LE:
	  fputs ("<=", file);  break;
	case LEU:
	  fputs ("<<=", file);  break;
	case LTU:
	  fputs ("<<", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'N':			/* Condition, (N)egated */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("<>", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs ("<=", file);  break;
	case GE:
	  fputs ("<", file);  break;
	case GEU:
	  fputs ("<<", file);  break;
	case GTU:
	  fputs ("<<=", file);  break;
	case LT:
	  fputs (">=", file);  break;
	case LE:
	  fputs (">", file);  break;
	case LEU:
	  fputs (">>", file);  break;
	case LTU:
	  fputs (">>=", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    /* For floating point comparisons.  Note that the output
       predicates are the complement of the desired mode.  The
       conditions for GT, GE, LT, LE and LTGT cause an invalid
       operation exception if the result is unordered and this
       exception is enabled in the floating-point status register.  */
    case 'Y':
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("!=", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs ("!>", file);  break;
	case GE:
	  fputs ("!>=", file);  break;
	case LT:
	  fputs ("!<", file);  break;
	case LE:
	  fputs ("!<=", file);  break;
	case LTGT:
	  fputs ("!<>", file);  break;
	case UNLE:
	  fputs ("!?<=", file);  break;
	case UNLT:
	  fputs ("!?<", file);  break;
	case UNGE:
	  fputs ("!?>=", file);  break;
	case UNGT:
	  fputs ("!?>", file);  break;
	case UNEQ:
	  fputs ("!?=", file);  break;
	case UNORDERED:
	  fputs ("!?", file);  break;
	case ORDERED:
	  fputs ("?", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'S':			/* Condition, operands are (S)wapped.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("=", file);  break;
	case NE:
	  fputs ("<>", file);  break;
	case GT:
	  fputs ("<", file);  break;
	case GE:
	  fputs ("<=", file);  break;
	case GEU:
	  fputs ("<<=", file);  break;
	case GTU:
	  fputs ("<<", file);  break;
	case LT:
	  fputs (">", file);  break;
	case LE:
	  fputs (">=", file);  break;
	case LEU:
	  fputs (">>=", file);  break;
	case LTU:
	  fputs (">>", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'B':			/* Condition, (B)oth swapped and negate.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("<>", file);  break;
	case NE:
	  fputs ("=", file);  break;
	case GT:
	  fputs (">=", file);  break;
	case GE:
	  fputs (">", file);  break;
	case GEU:
	  fputs (">>", file);  break;
	case GTU:
	  fputs (">>=", file);  break;
	case LT:
	  fputs ("<=", file);  break;
	case LE:
	  fputs ("<", file);  break;
	case LEU:
	  fputs ("<<", file);  break;
	case LTU:
	  fputs ("<<=", file);  break;
	default:
	  gcc_unreachable ();
	}
      return;
    case 'k':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
      return;
    case 'Q':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
      return;
    case 'L':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
      return;
    case 'O':
      gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      return;
    case 'p':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
      return;
    case 'P':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
      return;
    case 'I':
      if (GET_CODE (x) == CONST_INT)
	fputs ("i", file);
      return;
    case 'M':
    case 'F':
      switch (GET_CODE (XEXP (x, 0)))
	{
	case PRE_DEC:
	case PRE_INC:
	  if (ASSEMBLER_DIALECT == 0)
	    fputs ("s,mb", file);
	  else
	    fputs (",mb", file);
	  break;
	case POST_DEC:
	case POST_INC:
	  if (ASSEMBLER_DIALECT == 0)
	    fputs ("s,ma", file);
	  else
	    fputs (",ma", file);
	  break;
	case PLUS:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		fputs ("x", file);
	    }
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
		   || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		fputs ("x,s", file);
	      else
		fputs (",s", file);
	    }
	  else if (code == 'F' && ASSEMBLER_DIALECT == 0)
	    fputs ("s", file);
	  break;
	default:
	  if (code == 'F' && ASSEMBLER_DIALECT == 0)
	    fputs ("s", file);
	  break;
	}
      return;
    case 'G':
      pa_output_global_address (file, x, 0);
      return;
    case 'H':
      pa_output_global_address (file, x, 1);
      return;
    case 0:			/* Don't do anything special */
      break;
    case 'Z':
      {
	unsigned op[3];
	compute_zdepwi_operands (INTVAL (x), op);
	fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
	return;
      }
    case 'z':
      {
	unsigned op[3];
	compute_zdepdi_operands (INTVAL (x), op);
	fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
	return;
      }
    case 'c':
      /* We can get here from a .vtable_inherit due to our
	 CONSTANT_ADDRESS_P rejecting perfectly good constant
	 addresses.  */
      break;
    default:
      gcc_unreachable ();
    }
  if (GET_CODE (x) == REG)
    {
      fputs (reg_names[REGNO (x)], file);
      if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
	{
	  fputs ("R", file);
	  return;
	}
      if (FP_REG_P (x)
	  && GET_MODE_SIZE (GET_MODE (x)) <= 4
	  && (REGNO (x) & 1) == 0)
	fputs ("L", file);
    }
  else if (GET_CODE (x) == MEM)
    {
      int size = GET_MODE_SIZE (GET_MODE (x));
      rtx base = NULL_RTX;
      switch (GET_CODE (XEXP (x, 0)))
	{
	case PRE_DEC:
	case POST_DEC:
	  base = XEXP (XEXP (x, 0), 0);
	  fprintf (file, "-%d(%s)", size, reg_names[REGNO (base)]);
	  break;
	case PRE_INC:
	case POST_INC:
	  base = XEXP (XEXP (x, 0), 0);
	  fprintf (file, "%d(%s)", size, reg_names[REGNO (base)]);
	  break;
	case PLUS:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
	    fprintf (file, "%s(%s)",
		     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
		     reg_names[REGNO (XEXP (XEXP (x, 0), 1))]);
	  else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
	    fprintf (file, "%s(%s)",
		     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
	    {
	      /* Because the REG_POINTER flag can get lost during reload,
		 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
		 index and base registers in the combined move patterns.  */
	      rtx base = XEXP (XEXP (x, 0), 1);
	      rtx index = XEXP (XEXP (x, 0), 0);

	      fprintf (file, "%s(%s)",
		       reg_names[REGNO (index)], reg_names[REGNO (base)]);
	    }
	  else
	    output_address (XEXP (x, 0));
	  break;
	default:
	  output_address (XEXP (x, 0));
	  break;
	}
    }
  else
    output_addr_const (file, x);
}

/* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */

void
pa_output_global_address (FILE *file, rtx x, int round_constant)
{

  /* Imagine  (high (const (plus ...))).  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
    output_addr_const (file, x);
  else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
    {
      output_addr_const (file, x);
      fputs ("-$global$", file);
    }
  else if (GET_CODE (x) == CONST)
    {
      const char *sep = "";
      int offset = 0;		/* assembler wants -$global$ at end */
      rtx base = NULL_RTX;

      switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
	{
	case SYMBOL_REF:
	  base = XEXP (XEXP (x, 0), 0);
	  output_addr_const (file, base);
	  break;
	case CONST_INT:
	  offset = INTVAL (XEXP (XEXP (x, 0), 0));
	  break;
	default:
	  gcc_unreachable ();
	}

      switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
	{
	case SYMBOL_REF:
	  base = XEXP (XEXP (x, 0), 1);
	  output_addr_const (file, base);
	  break;
	case CONST_INT:
	  offset = INTVAL (XEXP (XEXP (x, 0), 1));
	  break;
	default:
	  gcc_unreachable ();
	}

      /* How bogus.  The compiler is apparently responsible for
	 rounding the constant if it uses an LR field selector.

	 The linker and/or assembler seem a better place since
	 they have to do this kind of thing already.

	 If we fail to do this, HP's optimizing linker may eliminate
	 an addil, but not update the ldw/stw/ldo instruction that
	 uses the result of the addil.  */
      if (round_constant)
	offset = ((offset + 0x1000) & ~0x1fff);

      switch (GET_CODE (XEXP (x, 0)))
	{
	case PLUS:
	  if (offset < 0)
	    {
	      offset = -offset;
	      sep = "-";
	    }
	  else
	    sep = "+";
	  break;

	case MINUS:
	  gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
	  sep = "-";
	  break;

	default:
	  gcc_unreachable ();
	}

      if (!read_only_operand (base, VOIDmode) && !flag_pic)
	fputs ("-$global$", file);
      if (offset)
	fprintf (file, "%s%d", sep, offset);
    }
  else
    output_addr_const (file, x);
}
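
/* Worked example (editorial note, not in the original source): with
   round_constant set, an offset of 0x17ff becomes
   (0x17ff + 0x1000) & ~0x1fff = 0x2000, i.e. the offset is rounded to
   the nearest multiple of 8k, mirroring the rounding the LR field
   selector applies to the high part of the address.  */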

/* Output boilerplate text to appear at the beginning of the file.
   There are several possible versions.  */
#define aputs(x) fputs(x, asm_out_file)

static void
pa_file_start_level (void)
{
  if (TARGET_64BIT)
    aputs ("\t.LEVEL 2.0w\n");
  else if (TARGET_PA_20)
    aputs ("\t.LEVEL 2.0\n");
  else if (TARGET_PA_11)
    aputs ("\t.LEVEL 1.1\n");
  else
    aputs ("\t.LEVEL 1.0\n");
}

static void
pa_file_start_space (int sortspace)
{
  aputs ("\t.SPACE $PRIVATE$");
  if (sortspace)
    aputs (",SORT=16");
  aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
  if (flag_tm)
    aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
  aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
	 "\n\t.SPACE $TEXT$");
  if (sortspace)
    aputs (",SORT=8");
  aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
	 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
}

static void
pa_file_start_file (int want_version)
{
  if (write_symbols != NO_DEBUG)
    {
      output_file_directive (asm_out_file, main_input_filename);
      if (want_version)
	aputs ("\t.version\t\"01.01\"\n");
    }
}

static void
pa_file_start_mcount (const char *aswhat)
{
  if (profile_flag)
    fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
}

static void
pa_elf_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_mcount ("ENTRY");
  pa_file_start_file (0);
}

static void
pa_som_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (0);
  aputs ("\t.IMPORT $global$,DATA\n"
	 "\t.IMPORT $$dyncall,MILLICODE\n");
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}

static void
pa_linux_file_start (void)
{
  pa_file_start_file (1);
  pa_file_start_level ();
  pa_file_start_mcount ("CODE");
}

static void
pa_hpux64_gas_file_start (void)
{
  pa_file_start_level ();
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  if (profile_flag)
    ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
#endif
  pa_file_start_file (1);
}

static void
pa_hpux64_hpas_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (1);
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}
#undef aputs
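
/* Illustrative output (editorial sketch, not in the original source):
   for a 32-bit PA1.1 SOM target, pa_som_file_start emits roughly

	.LEVEL 1.1
	.SPACE $PRIVATE$
	.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
	.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
	.SPACE $TEXT$
	.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
	.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
	.IMPORT $global$,DATA
	.IMPORT $$dyncall,MILLICODE  */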

/* Search the deferred plabel list for SYMBOL and return its internal
   label.  If an entry for SYMBOL is not found, a new entry is created.  */

rtx
pa_get_deferred_plabel (rtx symbol)
{
  const char *fname = XSTR (symbol, 0);
  size_t i;

  /* See if we have already put this function on the list of deferred
     plabels.  This list is generally small, so a linear search is not
     too ugly.  If it proves too slow replace it with something faster.  */
  for (i = 0; i < n_deferred_plabels; i++)
    if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
      break;

  /* If the deferred plabel list is empty, or this entry was not found
     on the list, create a new entry on the list.  */
  if (deferred_plabels == NULL || i == n_deferred_plabels)
    {
      tree id;

      if (deferred_plabels == 0)
	deferred_plabels = ggc_alloc_deferred_plabel ();
      else
	deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
					  deferred_plabels,
					  n_deferred_plabels + 1);

      i = n_deferred_plabels++;
      deferred_plabels[i].internal_label = gen_label_rtx ();
      deferred_plabels[i].symbol = symbol;

      /* Gross.  We have just implicitly taken the address of this
	 function.  Mark it in the same manner as assemble_name.  */
      id = maybe_get_identifier (targetm.strip_name_encoding (fname));
      if (id)
	mark_referenced (id);
    }

  return deferred_plabels[i].internal_label;
}

static void
output_deferred_plabels (void)
{
  size_t i;

  /* If we have some deferred plabels, then we need to switch into the
     data or readonly data section, and align it to a 4 byte boundary
     before outputting the deferred plabels.  */
  if (n_deferred_plabels)
    {
      switch_to_section (flag_pic ? data_section : readonly_data_section);
      ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
    }

  /* Now output the deferred plabels.  */
  for (i = 0; i < n_deferred_plabels; i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "L",
		 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
      assemble_integer (deferred_plabels[i].symbol,
			TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
    }
}

/* Initialize optabs to point to emulation routines.  */

static void
pa_init_libfuncs (void)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
      set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
      set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
      set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
      set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
      set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
      set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
      set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
      set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

      set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
      set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
      set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
      set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
      set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
      set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
      set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");

      set_conv_libfunc (sfix_optab, SImode, TFmode,
			TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
				     : "_U_Qfcnvfxt_quad_to_sgl");
      set_conv_libfunc (sfix_optab, DImode, TFmode,
			"_U_Qfcnvfxt_quad_to_dbl");
      set_conv_libfunc (ufix_optab, SImode, TFmode,
			"_U_Qfcnvfxt_quad_to_usgl");
      set_conv_libfunc (ufix_optab, DImode, TFmode,
			"_U_Qfcnvfxt_quad_to_udbl");

      set_conv_libfunc (sfloat_optab, TFmode, SImode,
			"_U_Qfcnvxf_sgl_to_quad");
      set_conv_libfunc (sfloat_optab, TFmode, DImode,
			"_U_Qfcnvxf_dbl_to_quad");
      set_conv_libfunc (ufloat_optab, TFmode, SImode,
			"_U_Qfcnvxf_usgl_to_quad");
      set_conv_libfunc (ufloat_optab, TFmode, DImode,
			"_U_Qfcnvxf_udbl_to_quad");
    }

  if (TARGET_SYNC_LIBCALL)
    init_sync_libfuncs (UNITS_PER_WORD);
}

/* HP's millicode routines mean something special to the assembler.
   Keep track of which ones we have used.  */

enum millicodes { remI, remU, divI, divU, mulI, end1000 };
static void import_milli (enum millicodes);
static char imported[(int) end1000];
static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
static const char import_string[] = ".IMPORT $$....,MILLICODE";
#define MILLI_START 10

static void
import_milli (enum millicodes code)
{
  char str[sizeof (import_string)];

  if (!imported[(int) code])
    {
      imported[(int) code] = 1;
      strcpy (str, import_string);
      strncpy (str + MILLI_START, milli_names[(int) code], 4);
      output_asm_insn (str, 0);
    }
}
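
/* Editorial note (not in the original source): MILLI_START is the
   offset of the "...." placeholder in import_string, so for mulI the
   strncpy above rewrites the template into

	.IMPORT $$mulI,MILLICODE

   which is emitted once, the first time each millicode routine is
   used in the current file.  */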

/* The register constraints have put the operands and return value in
   the proper registers.  */

const char *
pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
{
  import_milli (mulI);
  return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}

/* Emit the rtl for doing a division by a constant.  */

/* Do magic division millicodes exist for this value? */
const int pa_magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};

/* We'll use an array to keep track of the magic millicodes and
   whether or not we've used them already. [n][0] is signed, [n][1] is
   unsigned.  */

static int div_milli[16][2];

int
pa_emit_hpdiv_const (rtx *operands, int unsignedp)
{
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) > 0
      && INTVAL (operands[2]) < 16
      && pa_magic_milli[INTVAL (operands[2])])
    {
      rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);

      emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
      emit
	(gen_rtx_PARALLEL
	 (VOIDmode,
	  gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
				     gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
						     SImode,
						     gen_rtx_REG (SImode, 26),
						     operands[2])),
		     gen_rtx_CLOBBER (VOIDmode, operands[4]),
		     gen_rtx_CLOBBER (VOIDmode, operands[3]),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
		     gen_rtx_CLOBBER (VOIDmode, ret))));
      emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
      return 1;
    }
  return 0;
}
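
/* Editorial note (not in the original source): the pattern above
   reflects the HP millicode convention visible in this file: the
   dividend is passed in %r26, the quotient is returned in %r29, and
   the return link uses %r31 (%r2 in the 64-bit runtime), with
   %r25/%r26 clobbered by the routine.  */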

const char *
pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
{
  int divisor;

  /* If the divisor is a constant, try to use one of the special
     millicode routines.  */
  if (GET_CODE (operands[0]) == CONST_INT)
    {
      static char buf[100];
      divisor = INTVAL (operands[0]);
      if (!div_milli[divisor][unsignedp])
	{
	  div_milli[divisor][unsignedp] = 1;
	  if (unsignedp)
	    output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
	  else
	    output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
	}
      if (unsignedp)
	{
	  sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
      else
	{
	  sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
		   INTVAL (operands[0]));
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, buf));
	}
    }
  /* Divisor isn't a special constant.  */
  else
    {
      if (unsignedp)
	{
	  import_milli (divU);
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, "$$divU"));
	}
      else
	{
	  import_milli (divI);
	  return pa_output_millicode_call (insn,
					   gen_rtx_SYMBOL_REF (SImode, "$$divI"));
	}
    }
}

/* Output a $$rem millicode to do mod.  */

const char *
pa_output_mod_insn (int unsignedp, rtx insn)
{
  if (unsignedp)
    {
      import_milli (remU);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remU"));
    }
  else
    {
      import_milli (remI);
      return pa_output_millicode_call (insn,
				       gen_rtx_SYMBOL_REF (SImode, "$$remI"));
    }
}

void
pa_output_arg_descriptor (rtx call_insn)
{
  const char *arg_regs[4];
  enum machine_mode arg_mode;
  rtx link;
  int i, output_flag = 0;
  int regno;

  /* We neither need nor want argument location descriptors for the
     64bit runtime environment or the ELF32 environment.  */
  if (TARGET_64BIT || TARGET_ELF32)
    return;

  for (i = 0; i < 4; i++)
    arg_regs[i] = 0;

  /* Specify explicitly that no argument relocations should take place
     if using the portable runtime calling conventions.  */
  if (TARGET_PORTABLE_RUNTIME)
    {
      fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
	     asm_out_file);
      return;
    }

  gcc_assert (GET_CODE (call_insn) == CALL_INSN);
  for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
       link; link = XEXP (link, 1))
    {
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	     && GET_CODE (XEXP (use, 0)) == REG
	     && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));
      if (regno >= 23 && regno <= 26)
	{
	  arg_regs[26 - regno] = "GR";
	  if (arg_mode == DImode)
	    arg_regs[25 - regno] = "GR";
	}
      else if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    arg_regs[(regno - 32) / 2] = "FR";
	  else
	    {
#ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
	      arg_regs[(regno - 34) / 2] = "FR";
	      arg_regs[(regno - 34) / 2 + 1] = "FU";
#else
	      arg_regs[(regno - 34) / 2] = "FU";
	      arg_regs[(regno - 34) / 2 + 1] = "FR";
#endif
	    }
	}
    }
  fputs ("\t.CALL ", asm_out_file);
  for (i = 0; i < 4; i++)
    {
      if (arg_regs[i])
	{
	  if (output_flag++)
	    fputc (',', asm_out_file);
	  fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
	}
    }
  fputc ('\n', asm_out_file);
}
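
/* Illustrative output (editorial sketch, not in the original source):
   a 32-bit call whose first argument word is in %r26 and whose second
   argument word is in a floating-point argument register might
   produce

	.CALL ARGW0=GR,ARGW1=FR

   which tells the SOM linker which argument words may need parameter
   relocation stubs.  */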

/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
		     enum machine_mode mode, secondary_reload_info *sri)
{
  int regno;
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Handle the easy stuff first.  */
  if (rclass == R1_REGS)
    return NO_REGS;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
	return NO_REGS;
    }
  else
    regno = -1;

  /* If we have something like (mem (mem (...)), we can safely assume the
     inner MEM will end up in a general register after reloading, so there's
     no need for a secondary reload.  */
  if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
    return NO_REGS;

  /* Trying to load a constant into a FP register during PIC code
     generation requires %r1 as a scratch register.  */
  if (flag_pic
      && (mode == SImode || mode == DImode)
      && FP_REG_CLASS_P (rclass)
      && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
		    : CODE_FOR_reload_indi_r1);
      return NO_REGS;
    }

  /* Secondary reloads of symbolic operands require %r1 as a scratch
     register when we're generating PIC code and when the operand isn't
     readonly.  */
  if (pa_symbolic_expression_p (x))
    {
      if (GET_CODE (x) == HIGH)
	x = XEXP (x, 0);

      if (flag_pic || !read_only_operand (x, VOIDmode))
	{
	  gcc_assert (mode == SImode || mode == DImode);
	  sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
			: CODE_FOR_reload_indi_r1);
	  return NO_REGS;
	}
    }

  /* Profiling showed the PA port spends about 1.3% of its compilation
     time in true_regnum from calls inside pa_secondary_reload_class.  */
  if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* In order to allow 14-bit displacements in integer loads and stores,
     we need to prevent reload from generating out of range integer mode
     loads and stores to the floating point registers.  Previously, we
     used to call for a secondary reload and have pa_emit_move_sequence()
     fix the instruction sequence.  However, reload occasionally wouldn't
     generate the reload and we would end up with an invalid REG+D memory
     address.  So, now we use an intermediate general register for most
     memory loads and stores.  */
  if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
      && GET_MODE_CLASS (mode) == MODE_INT
      && FP_REG_CLASS_P (rclass))
    {
      /* Reload passes (mem:SI (reg/f:DI 30 %r30) when it wants to check
	 the secondary reload needed for a pseudo.  It never passes a
	 REG+D address.  */
      if (GET_CODE (x) == MEM)
	{
	  x = XEXP (x, 0);

	  /* We don't need an intermediate for indexed and LO_SUM DLT
	     memory addresses.  When INT14_OK_STRICT is true, it might
	     appear that we could directly allow register indirect
	     memory addresses.  However, this doesn't work because we
	     don't support SUBREGs in floating-point register copies
	     and reload doesn't tell us when it's going to use a SUBREG.  */
	  if (IS_INDEX_ADDR_P (x)
	      || IS_LO_SUM_DLT_ADDR_P (x))
	    return NO_REGS;

	  /* Otherwise, we need an intermediate general register.  */
	  return GENERAL_REGS;
	}

      /* Request a secondary reload with a general scratch register
	 for everything else.  ??? Could symbolic operands be handled
	 directly when generating non-pic PA 2.0 code?  */
      sri->icode = (in_p
		    ? direct_optab_handler (reload_in_optab, mode)
		    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* A SAR<->FP register copy requires an intermediate general register
     and secondary memory.  We need a secondary reload with a general
     scratch register for spills.  */
  if (rclass == SHIFT_REGS)
    {
      /* Handle spill.  */
      if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
	{
	  sri->icode = (in_p
			? direct_optab_handler (reload_in_optab, mode)
			: direct_optab_handler (reload_out_optab, mode));
	  return NO_REGS;
	}

      /* Handle FP copy.  */
      if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
	return GENERAL_REGS;
    }

  if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
      && REGNO_REG_CLASS (regno) == SHIFT_REGS
      && FP_REG_CLASS_P (rclass))
    return GENERAL_REGS;

  return NO_REGS;
}

/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  The argument pointer
   is only marked as live on entry by df-scan when it is a fixed
   register.  It isn't a fixed register in the 64-bit runtime,
   so we need to mark it here.  */

static void
pa_extra_live_on_entry (bitmap regs)
{
  if (TARGET_64BIT)
    bitmap_set_bit (regs, ARG_POINTER_REGNUM);
}

/* Implement EH_RETURN_HANDLER_RTX.  The MEM needs to be volatile
   to prevent it from being deleted.  */

rtx
pa_eh_return_handler_rtx (void)
{
  rtx tmp;

  tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
		      TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
  tmp = gen_rtx_MEM (word_mode, tmp);
  tmp->volatil = 1;
  return tmp;
}

/* In the 32-bit runtime, arguments larger than eight bytes are passed
   by invisible reference.  As a GCC extension, we also pass anything
   with a zero or variable size by reference.

   The 64-bit runtime does not describe passing any types by invisible
   reference.  The internals of GCC can't currently handle passing
   empty structures, and zero or variable length arrays when they are
   not passed entirely on the stack or by reference.  Thus, as a GCC
   extension, we pass these types by reference.  The HP compiler doesn't
   support these types, so hopefully there shouldn't be any compatibility
   issues.  This may have to be revisited when HP releases a C99 compiler
   or updates the ABI.  */

static bool
pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
		      enum machine_mode mode, const_tree type,
		      bool named ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  if (TARGET_64BIT)
    return size <= 0;
  else
    return size <= 0 || size > 8;
}

enum direction
pa_function_arg_padding (enum machine_mode mode, const_tree type)
{
  if (mode == BLKmode
      || (TARGET_64BIT
	  && type
	  && (AGGREGATE_TYPE_P (type)
	      || TREE_CODE (type) == COMPLEX_TYPE
	      || TREE_CODE (type) == VECTOR_TYPE)))
    {
      /* Return none if justification is not required.  */
      if (type
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
	return none;

      /* The directions set here are ignored when a BLKmode argument larger
	 than a word is placed in a register.  Different code is used for
	 the stack and registers.  This makes it difficult to have a
	 consistent data representation for both the stack and registers.
	 For both runtimes, the justification and padding for arguments on
	 the stack and in registers should be identical.  */
      if (TARGET_64BIT)
	/* The 64-bit runtime specifies left justification for aggregates.  */
	return upward;
      else
	/* The 32-bit runtime architecture specifies right justification.
	   When the argument is passed on the stack, the argument is padded
	   with garbage on the left.  The HP compiler pads with zeros.  */
	return downward;
    }

  if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
    return downward;
  else
    return none;
}

/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdargs or varargs is used and fill in an initial
   va_list.  A pointer to this constructor is returned.  */

static rtx
hppa_builtin_saveregs (void)
{
  rtx offset, dest;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  if (TARGET_64BIT)
    {
      int i, off;

      /* Adjust for varargs/stdarg differences.  */
      if (argadj)
	offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
      else
	offset = crtl->args.arg_offset_rtx;

      /* We need to save %r26 .. %r19 inclusive starting at offset -64
	 from the incoming arg pointer and growing to larger addresses.  */
      for (i = 26, off = -64; i >= 19; i--, off += 8)
	emit_move_insn (gen_rtx_MEM (word_mode,
				     plus_constant (Pmode,
						    arg_pointer_rtx, off)),
			gen_rtx_REG (word_mode, i));

      /* The incoming args pointer points just beyond the flushback area;
	 normally this is not a serious concern.  However, when we are doing
	 varargs/stdargs we want to make the arg pointer point to the start
	 of the incoming argument area.  */
      emit_move_insn (virtual_incoming_args_rtx,
		      plus_constant (Pmode, arg_pointer_rtx, -64));

      /* Now return a pointer to the first anonymous argument.  */
      return copy_to_reg (expand_binop (Pmode, add_optab,
					virtual_incoming_args_rtx,
					offset, 0, 0, OPTAB_LIB_WIDEN));
    }

  /* Store general registers on the stack.  */
  dest = gen_rtx_MEM (BLKmode,
		      plus_constant (Pmode, crtl->args.internal_arg_pointer,
				     -16));
  set_mem_alias_set (dest, get_varargs_alias_set ());
  set_mem_align (dest, BITS_PER_WORD);
  move_block_from_reg (23, dest, 4);

  /* move_block_from_reg will emit code to store the argument registers
     individually as scalar stores.

     However, other insns may later load from the same addresses for
     a structure load (passing a struct to a varargs routine).

     The alias code assumes that such aliasing can never happen, so we
     have to keep memory referencing insns from moving up beyond the
     last argument register store.  So we emit a blockage insn here.  */
  emit_insn (gen_blockage ());

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
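
/* Editorial sketch (not in the original source): in the 64-bit runtime
   the loop above lays out the register save area as

	ap-64: %r26   ap-56: %r25   ap-48: %r24   ap-40: %r23
	ap-32: %r22   ap-24: %r21   ap-16: %r20   ap-8:  %r19

   i.e. eight 8-byte slots immediately below the incoming argument
   pointer, one per potential anonymous argument register.  */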

static void
hppa_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}

static tree
hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			   gimple_seq *post_p)
{
  if (TARGET_64BIT)
    {
      /* Args grow upward.  We can use the generic routines.  */
      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }
  else /* !TARGET_64BIT */
    {
      tree ptr = build_pointer_type (type);
      tree valist_type;
      tree t, u;
      unsigned int size, ofs;
      bool indirect;

      indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
      if (indirect)
	{
	  type = ptr;
	  ptr = build_pointer_type (type);
	}
      size = int_size_in_bytes (type);
      valist_type = TREE_TYPE (valist);

      /* Args grow down.  Not handled by generic routines.  */

      u = fold_convert (sizetype, size_in_bytes (type));
      u = fold_build1 (NEGATE_EXPR, sizetype, u);
      t = fold_build_pointer_plus (valist, u);

      /* Align to 4 or 8 byte boundary depending on argument size.  */

      u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
      t = fold_convert (valist_type, t);

      t = build2 (MODIFY_EXPR, valist_type, valist, t);

      ofs = (8 - size) % 4;
      if (ofs != 0)
	t = fold_build_pointer_plus_hwi (t, ofs);

      t = fold_convert (ptr, t);
      t = build_va_arg_indirect_ref (t);

      if (indirect)
	t = build_va_arg_indirect_ref (t);

      return t;
    }
}
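
/* Worked example (editorial note, not in the original source): for a
   2-byte argument in the 32-bit runtime the code above computes
   valist = (valist - 2) & -4 and then adds ofs = (8 - 2) % 4 = 2, so
   the value is read from the high-order end of its 4-byte slot --
   the right justification the 32-bit ABI specifies for small
   arguments.  */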

/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   Currently, TImode is not valid as the HP 64-bit runtime documentation
   doesn't document the alignment and calling conventions for this type.
   Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
   2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE.  */

static bool
pa_scalar_mode_supported_p (enum machine_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
	return true;
      if (precision == DOUBLE_TYPE_SIZE)
	return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
   it branches into the delay slot.  Otherwise, return FALSE.  */

static bool
branch_to_delay_slot_p (rtx insn)
{
  rtx jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (jump_insn == insn)
        return TRUE;

      /* We can't rely on the length of asms.  So, we return FALSE when
         the branch is followed by an asm.  */
      if (!insn
          || GET_CODE (PATTERN (insn)) == ASM_INPUT
          || extract_asm_operands (PATTERN (insn)) != NULL_RTX
          || get_attr_length (insn) > 0)
        break;
    }

  return FALSE;
}

/* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.

   This occurs when INSN has an unfilled delay slot and is followed
   by an asm.  Disaster can occur if the asm is empty and the jump
   branches into the delay slot.  So, we add a nop in the delay slot
   when this occurs.  */

static bool
branch_needs_nop_p (rtx insn)
{
  rtx jump_insn;

  if (dbr_sequence_length ())
    return FALSE;

  jump_insn = next_active_insn (JUMP_LABEL (insn));
  while (insn)
    {
      insn = next_active_insn (insn);
      if (!insn || jump_insn == insn)
        return TRUE;

      if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
           || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
          && get_attr_length (insn) > 0)
        break;
    }

  return FALSE;
}

/* Return TRUE if INSN, a forward jump insn, can use nullification
   to skip the following instruction.  This avoids an extra cycle due
   to a mis-predicted branch when we fall through.  */

static bool
use_skip_p (rtx insn)
{
  rtx jump_insn = next_active_insn (JUMP_LABEL (insn));

  while (insn)
    {
      insn = next_active_insn (insn);

      /* We can't rely on the length of asms, so we can't skip asms.  */
      if (!insn
          || GET_CODE (PATTERN (insn)) == ASM_INPUT
          || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
        break;
      if (get_attr_length (insn) == 4
          && jump_insn == next_active_insn (insn))
        return TRUE;
      if (get_attr_length (insn) > 0)
        break;
    }

  return FALSE;
}

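/* Illustrative only (not from the original source): with nullification,
   a two-insn sequence such as

       comclr,<> %r4,%r5,%r0    ; nullify the next insn when %r4 != %r5
       addi 1,%r6,%r6           ; executed only when %r4 == %r5

   replaces a short forward branch over the addi and avoids the
   mis-prediction penalty on fall through.  */
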
/* This routine handles all the normal conditional branch sequences we
   might need to generate.  It handles compare immediate vs compare
   register, nullification of delay slots, varying length branches,
   negated branches, and all combinations of the above.  It returns the
   output appropriate to emit the branch corresponding to all given
   parameters.  */

const char *
pa_output_cbranch (rtx *operands, int negated, rtx insn)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot)
     is asking for a disaster.  This can happen when not optimizing and
     when jump optimization fails.

     While it is usually safe to emit nothing, this can fail if the
     preceding instruction is a nullified branch with an empty delay
     slot and the same branch target as this branch.  We could check
     for this but jump optimization should eliminate nop jumps.  It
     is always safe to emit a nop.  */
  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* The doubleword form of the cmpib instruction doesn't have the LEU
     and GTU conditions while the cmpb instruction does.  Since we accept
     zero for cmpb, we must ensure that we use cmpb for the comparison.  */
  if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
    operands[2] = gen_rtx_REG (DImode, 0);
  if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
    operands[1] = gen_rtx_REG (DImode, 0);

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     comclr instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
        else
          strcpy (buf, "{com%I2b,|cmp%I2b,}");
        if (GET_MODE (operands[1]) == DImode)
          strcat (buf, "*");
        if (negated)
          strcat (buf, "%B3");
        else
          strcat (buf, "%S3");
        if (useskip)
          strcat (buf, " %2,%r1,%%r0");
        else if (nullify)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %2,%r1,%0%#");
            else
              strcat (buf, ",n %2,%r1,%0");
          }
        else
          strcat (buf, " %2,%r1,%0");
        break;

      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "{com%I2b,|cmp%I2b,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%S3");
            else
              strcat (buf, "%B3");
            strcat (buf, ",n %2,%r1,.+12\n\tb %0");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a comb;nop rather than comiclr;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                   - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "{com%I2b,|cmp%I2b,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%B3 %2,%r1,%0%#");
            else
              strcat (buf, "%S3 %2,%r1,%0%#");
          }
        else
          {
            strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
            if (GET_MODE (operands[1]) == DImode)
              strcat (buf, "*");
            if (negated)
              strcat (buf, "%S3");
            else
              strcat (buf, "%B3");
            if (nullify)
              strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
            else
              strcat (buf, " %2,%r1,%%r0\n\tb %0");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        /* Create a reversed conditional branch which branches around
           the following insns.  */
        if (GET_MODE (operands[1]) != DImode)
          {
            if (nullify)
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
              }
            else
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
              }
          }
        else
          {
            if (nullify)
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
              }
            else
              {
                if (negated)
                  strcpy (buf,
                    "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
                else
                  strcpy (buf,
                    "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
              }
          }

        output_asm_insn (buf, operands);
        return pa_output_lbranch (operands[0], insn, xdelay);
    }
  return buf;
}

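/* For illustration (not part of the original source): a short forward
   equality test with a nullified, unfilled delay slot comes out as
   something like

       cmpb,=,n %r25,%r26,L$0005

   while lengths beyond 8 bytes emit the reversed comparison around the
   long branch produced by pa_output_lbranch below.  */
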
/* This routine handles output of long unconditional branches that
   exceed the maximum range of a simple branch instruction.  Since
   we don't have a register available for the branch, we save register
   %r1 in the frame marker, load the branch destination DEST into %r1,
   execute the branch, and restore %r1 in the delay slot of the branch.

   Since long branches may have an insn in the delay slot and the
   delay slot is used to restore %r1, we in general need to extract
   this insn and execute it before the branch.  However, to facilitate
   use of this function by conditional branches, we also provide an
   option to not extract the delay insn so that it will be emitted
   after the long branch.  So, if there is an insn in the delay slot,
   it is extracted if XDELAY is nonzero.

   The lengths of the various long-branch sequences are 20, 16 and 24
   bytes for the portable runtime, non-PIC and PIC cases, respectively.  */

const char *
pa_output_lbranch (rtx dest, rtx insn, int xdelay)
{
  rtx xoperands[2];

  xoperands[0] = dest;

  /* First, free up the delay slot.  */
  if (xdelay && dbr_sequence_length () != 0)
    {
      /* We can't handle a jump in the delay slot.  */
      gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);

      final_scan_insn (NEXT_INSN (insn), asm_out_file,
                       optimize, 0, NULL);

      /* Now delete the delay insn.  */
      SET_INSN_DELETED (NEXT_INSN (insn));
    }

  /* Output an insn to save %r1.  The runtime documentation doesn't
     specify whether the "Clean Up" slot in the callers frame can
     be clobbered by the callee.  It isn't copied by HP's builtin
     alloca, so this suggests that it can be clobbered if necessary.
     The "Static Link" location is copied by HP builtin alloca, so
     we avoid using it.  Using the cleanup slot might be a problem
     if we have to interoperate with languages that pass cleanup
     information.  However, it should be possible to handle these
     situations with GCC's asm feature.

     The "Current RP" slot is reserved for the called procedure, so
     we try to use it when we don't have a frame of our own.  It's
     rather unlikely that we won't have a frame when we need to emit
     a very long branch.

     Really the way to go long term is a register scavenger; goto
     the target of the jump and find a register which we can use
     as a scratch to hold the value in %r1.  Then, we wouldn't have
     to free up the delay slot or clobber a slot that may be needed
     for other purposes.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        /* Use the return pointer slot in the frame marker.  */
        output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
      else
        /* Use the slot at -40 in the frame marker since HP builtin
           alloca doesn't copy it.  */
        output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        /* Use the return pointer slot in the frame marker.  */
        output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
      else
        /* Use the "Clean Up" slot in the frame marker.  In GCC,
           the only other use of this location is for copying a
           floating point double argument from a floating-point
           register to two general registers.  The copy is done
           as an "atomic" operation when outputting a call, so it
           won't interfere with our using the location here.  */
        output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
    }

  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
      if (TARGET_SOM || !TARGET_GAS)
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
          output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
        }
      else
        {
          output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
          output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
        }
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else
    /* Now output a very long branch to the original target.  */
    output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);

  /* Now restore the value of %r1 in the delay slot.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        return "ldd -16(%%r30),%%r1";
      return "ldd -40(%%r30),%%r1";
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        return "ldw -20(%%r30),%%r1";
      return "ldw -12(%%r30),%%r1";
    }
}

/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{extrs,|extrw,s,}");
        else
          strcpy (buf, "bb,");
        if (useskip && GET_MODE (operands[0]) == DImode)
          strcpy (buf, "extrd,s,*");
        else if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        if ((which == 0 && negated)
             || (which == 1 && ! negated))
          strcat (buf, ">=");
        else
          strcat (buf, "<");
        if (useskip)
          strcat (buf, " %0,%1,1,%%r0");
        else if (nullify && negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %0,%1,%3%#");
            else
              strcat (buf, ",n %0,%1,%3");
          }
        else if (nullify && ! negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %0,%1,%2%#");
            else
              strcat (buf, ",n %0,%1,%2");
          }
        else if (! nullify && negated)
          strcat (buf, " %0,%1,%3");
        else if (! nullify && ! negated)
          strcat (buf, " %0,%1,%2");
        break;

      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "bb,");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (negated)
              strcat (buf, ",n %0,%1,.+12\n\tb %3");
            else
              strcat (buf, ",n %0,%1,.+12\n\tb %2");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a bb;nop rather than extrs;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                   - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "bb,");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, ">=");
            else
              strcat (buf, "<");
            if (negated)
              strcat (buf, " %0,%1,%3%#");
            else
              strcat (buf, " %0,%1,%2%#");
          }
        else
          {
            if (GET_MODE (operands[0]) == DImode)
              strcpy (buf, "extrd,s,*");
            else
              strcpy (buf, "{extrs,|extrw,s,}");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (nullify && negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
            else if (nullify && ! negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
            else if (negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb %3");
            else
              strcat (buf, " %0,%1,1,%%r0\n\tb %2");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        else
          strcpy (buf, "bb,");
        if ((which == 0 && negated)
            || (which == 1 && !negated))
          strcat (buf, "<");
        else
          strcat (buf, ">=");
        if (nullify)
          strcat (buf, ",n %0,%1,.+%4");
        else
          strcat (buf, " %0,%1,.+%4");
        output_asm_insn (buf, operands);
        return pa_output_lbranch (negated ? operands[3] : operands[2],
                                  insn, xdelay);
    }
  return buf;
}

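/* Illustrative only (not from the original source): a 4-byte
   branch-on-bit such as

       bb,< %r4,5,L$0010        ; branch when the selected bit of %r4 is set

   tests a fixed bit position; pa_output_bvb below is the analogous
   routine for a variable position held in %sar.  */
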
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
               int which)
{
  static char buf[100];
  bool useskip;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
        if (useskip)
          strcpy (buf, "{vextrs,|extrw,s,}");
        else
          strcpy (buf, "{bvb,|bb,}");
        if (useskip && GET_MODE (operands[0]) == DImode)
          strcpy (buf, "extrd,s,*");
        else if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        if ((which == 0 && negated)
             || (which == 1 && ! negated))
          strcat (buf, ">=");
        else
          strcat (buf, "<");
        if (useskip)
          strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
        else if (nullify && negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
            else
              strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
          }
        else if (nullify && ! negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
            else
              strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
          }
        else if (! nullify && negated)
          strcat (buf, "{ %0,%3| %0,%%sar,%3}");
        else if (! nullify && ! negated)
          strcat (buf, "{ %0,%2| %0,%%sar,%2}");
        break;

      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
      case 8:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "{bvb,|bb,}");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (negated)
              strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
            else
              strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a bb;nop rather than extrs;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                   - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "{bvb,|bb,}");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, ">=");
            else
              strcat (buf, "<");
            if (negated)
              strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
            else
              strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
          }
        else
          {
            strcpy (buf, "{vextrs,|extrw,s,}");
            if (GET_MODE (operands[0]) == DImode)
              strcpy (buf, "extrd,s,*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (nullify && negated)
              strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
            else if (nullify && ! negated)
              strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
            else if (negated)
              strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
            else
              strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
          }
        break;

      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by pa_output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        else
          strcpy (buf, "{bvb,|bb,}");
        if ((which == 0 && negated)
            || (which == 1 && !negated))
          strcat (buf, "<");
        else
          strcat (buf, ">=");
        if (nullify)
          strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
        else
          strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
        output_asm_insn (buf, operands);
        return pa_output_lbranch (negated ? operands[3] : operands[2],
                                  insn, xdelay);
    }
  return buf;
}

/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
        return "ldo %1(%0),%0";
      else if (which_alternative == 1)
        {
          output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
          output_asm_insn ("ldw -16(%%r30),%4", operands);
          output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
          return "{fldws|fldw} -16(%%r30),%0";
        }
      else
        {
          output_asm_insn ("ldw %0,%4", operands);
          return "ldo %1(%4),%4\n\tstw %4,%0";
        }
    }

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
         as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
        nullify = 1;

      /* If this is a short forward conditional branch which did not get
         its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
        nullify = forward_branch_p (insn);

      switch (length)
        {
        case 4:
          if (nullify)
            {
              if (branch_needs_nop_p (insn))
                return "addib,%C2,n %1,%0,%3%#";
              else
                return "addib,%C2,n %1,%0,%3";
            }
          else
            return "addib,%C2 %1,%0,%3";

        case 8:
          /* Handle weird backwards branch with a filled delay slot
             which is nullified.  */
          if (dbr_sequence_length () != 0
              && ! forward_branch_p (insn)
              && nullify)
            return "addib,%N2,n %1,%0,.+12\n\tb %3";
          /* Handle short backwards branch with an unfilled delay slot.
             Using a addb;nop rather than addi;bl saves 1 cycle for both
             taken and untaken branches.  */
          else if (dbr_sequence_length () == 0
                   && ! forward_branch_p (insn)
                   && INSN_ADDRESSES_SET_P ()
                   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                     - INSN_ADDRESSES (INSN_UID (insn)) - 8))
            return "addib,%C2 %1,%0,%3%#";

          /* Handle normal cases.  */
          if (nullify)
            return "addi,%N2 %1,%0,%0\n\tb,n %3";
          else
            return "addi,%N2 %1,%0,%0\n\tb %3";

        default:
          /* The reversed conditional branch must branch over one additional
             instruction if the delay slot is filled and needs to be extracted
             by pa_output_lbranch.  If the delay slot is empty or this is a
             nullified forward branch, the instruction after the reversed
             condition branch must be nullified.  */
          if (dbr_sequence_length () == 0
              || (nullify && forward_branch_p (insn)))
            {
              nullify = 1;
              xdelay = 0;
              operands[4] = GEN_INT (length);
            }
          else
            {
              xdelay = 1;
              operands[4] = GEN_INT (length + 4);
            }

          if (nullify)
            output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
          else
            output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);

          return pa_output_lbranch (operands[3], insn, xdelay);
        }
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
         increment the GR, store the GR into MEM, and finally reload
         the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
                       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (length == 24)
        return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 28)
        return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
        {
          operands[5] = GEN_INT (length - 16);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
          output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
         happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (length == 12)
        return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else if (length == 16)
        return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
      else
        {
          operands[5] = GEN_INT (length - 4);
          output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
}

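/* Illustrative sketch (not part of the original source): for a simple
   counted loop, the register alternative above reduces to a single

       addib,> -1,%r3,L$loop    ; decrement %r3, branch while > 0

   with the increment %1, condition %C2 and target %3 supplied by the
   insn's operands.  */
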
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
pa_output_movb (rtx *operands, rtx insn, int which_alternative,
                int reverse_comparison)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
        return "copy %1,%0";
      else if (which_alternative == 1)
        {
          output_asm_insn ("stw %1,-16(%%r30)", operands);
          return "{fldws|fldw} -16(%%r30),%0";
        }
      else if (which_alternative == 2)
        return "stw %1,%0";
      else
        return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
         as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
        nullify = 1;

      /* If this is a short forward conditional branch which did not get
         its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
        nullify = forward_branch_p (insn);

      switch (length)
        {
        case 4:
          if (nullify)
            {
              if (branch_needs_nop_p (insn))
                return "movb,%C2,n %1,%0,%3%#";
              else
                return "movb,%C2,n %1,%0,%3";
            }
          else
            return "movb,%C2 %1,%0,%3";

        case 8:
          /* Handle weird backwards branch with a filled delay slot
             which is nullified.  */
          if (dbr_sequence_length () != 0
              && ! forward_branch_p (insn)
              && nullify)
            return "movb,%N2,n %1,%0,.+12\n\tb %3";

          /* Handle short backwards branch with an unfilled delay slot.
             Using a movb;nop rather than or;bl saves 1 cycle for both
             taken and untaken branches.  */
          else if (dbr_sequence_length () == 0
                   && ! forward_branch_p (insn)
                   && INSN_ADDRESSES_SET_P ()
                   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                     - INSN_ADDRESSES (INSN_UID (insn)) - 8))
            return "movb,%C2 %1,%0,%3%#";
          /* Handle normal cases.  */
          if (nullify)
            return "or,%N2 %1,%%r0,%0\n\tb,n %3";
          else
            return "or,%N2 %1,%%r0,%0\n\tb %3";

        default:
          /* The reversed conditional branch must branch over one additional
             instruction if the delay slot is filled and needs to be extracted
             by pa_output_lbranch.  If the delay slot is empty or this is a
             nullified forward branch, the instruction after the reversed
             condition branch must be nullified.  */
          if (dbr_sequence_length () == 0
              || (nullify && forward_branch_p (insn)))
            {
              nullify = 1;
              xdelay = 0;
              operands[4] = GEN_INT (length);
            }
          else
            {
              xdelay = 1;
              operands[4] = GEN_INT (length + 4);
            }

          if (nullify)
            output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
          else
            output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);

          return pa_output_lbranch (operands[3], insn, xdelay);
        }
    }
  /* Deal with gross reload for FP destination register case.  */
  else if (which_alternative == 1)
    {
      /* Move source register to MEM, perform the branch test, then
         finally load the FP register from MEM from within the branch's
         delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (length == 12)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 16)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
        {
          operands[4] = GEN_INT (length - 4);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
          output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
         happens in the branch's delay slot.  */
      if (length == 8)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else if (length == 12)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
      else
        {
          operands[4] = GEN_INT (length);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
                           operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (length == 8)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else if (length == 12)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
      else
        {
          operands[4] = GEN_INT (length);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
                           operands);
          return pa_output_lbranch (operands[3], insn, 0);
        }
    }
}

/* Copy any FP arguments in INSN into integer registers.  */

static void
copy_fp_args (rtx insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
          && GET_CODE (XEXP (use, 0)) == REG
          && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
        {
          /* Copy the FP register into an integer register via memory.  */
          if (arg_mode == SFmode)
            {
              xoperands[0] = XEXP (use, 0);
              xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
              output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
              output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
            }
          else
            {
              xoperands[0] = XEXP (use, 0);
              xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
              output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
              output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
              output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
            }
        }
    }
}

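/* For illustration (not in the original source): under the 32-bit SOM
   convention the mapping above sends an SFmode value in fr4 (regno 32)
   to %r26 and the next FP argument register to %r25, bouncing each
   value through the -16(%sr0,%r30) slot because there is no direct
   FP-to-GR copy instruction here.  */
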
/* Compute length of the FP argument copy sequence for INSN.  */

static int
length_fp_args (rtx insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
          && GET_CODE (XEXP (use, 0)) == REG
          && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
        {
          if (arg_mode == SFmode)
            length += 8;
          else
            length += 12;
        }
    }

  return length;
}

/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by pa_output_millicode_call.
   We include the delay slot in the returned length as it is better to
   over estimate the length than to under estimate it.  */

int
pa_attr_length_millicode_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
        return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
        return 8;

      if (TARGET_LONG_ABS_CALL && !flag_pic)
        return 12;

      return 24;
    }
}

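/* For example (illustrative, not from the original source): the common
   in-range case is a single "{bl|b,l} $$mulI,%r31" plus its delay slot,
   hence 8 bytes, while the portable runtime sequence needs six words
   (24 bytes).  */
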
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_millicode_call (rtx insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  int distance;
  rtx seq_insn;
  rtx xoperands[3];

  xoperands[0] = call_dest;
  xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because
     it has an attribute type of multi, it never has a nonzero
     sequence length.  The length of the $$sh_func_adrs is the same
     as certain out of reach PIC calls to other routines.  */
  if (!TARGET_LONG_CALLS
      && ((seq_length == 0
           && (attr_length == 12
               || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
          || (seq_length != 0 && attr_length == 8)))
    {
      output_asm_insn ("{bl|b,l} %0,%2", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
        {
          /* It might seem that one insn could be saved by accessing
             the millicode function using the linkage table.  However,
             this doesn't work in shared libraries and other dynamically
             loaded objects.  Using a pc-relative sequence also avoids
             problems related to the implicit use of the gp register.  */
          output_asm_insn ("b,l .+8,%%r1", xoperands);
          if (TARGET_GAS)
            {
              output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
            }
          else
            {
              xoperands[1] = gen_label_rtx ();
              output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (xoperands[1]));
              output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
            }

          output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
        }
      else if (TARGET_PORTABLE_RUNTIME)
        {
          /* Pure portable runtime doesn't allow be/ble; we also don't
             have PIC support in the assembler/linker, so this sequence
             is needed.  */

          /* Get the address of our target into %r1.  */
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

          /* Get our return address into %r31.  */
          output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
          output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

          /* Jump to our target address in %r1.  */
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
        }
      else if (!flag_pic)
        {
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          if (TARGET_PA_20)
            output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
          else
            output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
        }
      else
        {
          output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
          output_asm_insn ("addi 16,%%r1,%%r31", xoperands);

          if (TARGET_SOM || !TARGET_GAS)
            {
              /* The HP assembler can generate relocations for the
                 difference of two symbols.  GAS can do this for a
                 millicode symbol but not an arbitrary external
                 symbol when generating SOM output.  */
              xoperands[1] = gen_label_rtx ();
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (xoperands[1]));
              output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
            }
          else
            {
              output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
                               xoperands);
            }

          /* Jump to our target address in %r1.  */
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
        }
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
    return "";

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  /* See if the return address can be adjusted.  Use the containing
     sequence insn's address.  */
  if (INSN_ADDRESSES_SET_P ())
    {
      seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
                  - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance))
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
        }
      else
        /* ??? This branch may not reach its target.  */
        output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    /* ??? This branch may not reach its target.  */
    output_asm_insn ("nop\n\tb,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}

/* Return the attribute length of the call instruction INSN.  The SIBCALL
   flag indicates whether INSN is a regular call or a sibling call.  The
   length returned must be longer than the code actually generated by
   pa_output_call.  Since branch shortening is done before delay branch
   sequencing, there is no way to determine whether or not the delay
   slot will be filled during branch shortening.  Even when the delay
   slot is filled, we may have to add a nop if the delay slot contains
   a branch that can't reach its target.  Thus, we always have to include
   the delay slot in the length estimate.  This used to be done in
   pa_adjust_insn_length but we do it here now as some sequences always
   fill the delay slot and we can save four bytes in the estimate for
   these sequences.  */

int
pa_attr_length_call (rtx insn, int sibcall)
{
  int local_call;
  rtx call, call_dest;
  tree call_decl;
  int length = 0;
  rtx pat = PATTERN (insn);
  unsigned long distance = -1;

  gcc_assert (GET_CODE (insn) == CALL_INSN);

  if (INSN_ADDRESSES_SET_P ())
    {
      unsigned long total;

      total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  gcc_assert (GET_CODE (pat) == PARALLEL);

  /* Get the call rtx.  */
  call = XVECEXP (pat, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);

  gcc_assert (GET_CODE (call) == CALL);

  /* Determine if this is a local call.  */
  call_dest = XEXP (XEXP (call, 0), 0);
  call_decl = SYMBOL_REF_DECL (call_dest);
  local_call = call_decl && targetm.binds_local_p (call_decl);

  /* pc-relative branch.  */
  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !sibcall && distance < 7600000)
          || distance < MAX_PCREL17F_OFFSET))
    length += 8;

  /* 64-bit plabel sequence.  */
  else if (TARGET_64BIT && !local_call)
    length += sibcall ? 28 : 24;

  /* non-pic long absolute branch sequence.  */
  else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
    length += 12;

  /* long pc-relative branch sequence.  */
  else if (TARGET_LONG_PIC_SDIFF_CALL
           || (TARGET_GAS && !TARGET_SOM
               && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
    {
      length += 20;

      if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
        length += 8;
    }

  /* 32-bit plabel sequence.  */
  else
    {
      length += 32;

      if (TARGET_SOM)
        length += length_fp_args (insn);

      if (flag_pic)
        length += 4;

      if (!TARGET_PA_20)
        {
          if (!sibcall)
            length += 8;

          if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
            length += 8;
        }
    }

  return length;
}

/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
pa_output_call (rtx insn, rtx call_dest, int sibcall)
{
  int delay_insn_deleted = 0;
  int delay_slot_filled = 0;
  int seq_length = dbr_sequence_length ();
  tree call_decl = SYMBOL_REF_DECL (call_dest);
  int local_call = call_decl && targetm.binds_local_p (call_decl);
  rtx xoperands[2];

  xoperands[0] = call_dest;

  /* Handle the common case where we're sure that the branch will reach
     the beginning of the "$CODE$" subspace.  This is the beginning of
     the current function if we are in a named section.  */
  if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
    {
      xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT && !local_call)
        {
          /* ??? As far as I can tell, the HP linker doesn't support the
             long pc-relative sequence described in the 64-bit runtime
             architecture.  So, we use a slightly longer indirect call.  */
          xoperands[0] = pa_get_deferred_plabel (call_dest);
          xoperands[1] = gen_label_rtx ();

          /* If this isn't a sibcall, we put the load of %r27 into the
             delay slot.  We can't do this in a sibcall as we don't
             have a second call-clobbered scratch register available.  */
          if (seq_length != 0
              && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
              && !sibcall)
            {
              final_scan_insn (NEXT_INSN (insn), asm_out_file,
                               optimize, 0, NULL);

              /* Now delete the delay insn.  */
              SET_INSN_DELETED (NEXT_INSN (insn));
              delay_insn_deleted = 1;
            }

          output_asm_insn ("addil LT'%0,%%r27", xoperands);
          output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
          output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);

          if (sibcall)
            {
              output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
              output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
              output_asm_insn ("bve (%%r1)", xoperands);
            }
          else
            {
              output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
              output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
              output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
              delay_slot_filled = 1;
            }
        }
      else
        {
          int indirect_call = 0;

          /* Emit a long call.  There are several different sequences
             of increasing length and complexity.  In most cases,
             they don't allow an instruction in the delay slot.  */
          if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
              && !TARGET_LONG_PIC_SDIFF_CALL
              && !(TARGET_GAS && !TARGET_SOM
                   && (TARGET_LONG_PIC_PCREL_CALL || local_call))
              && !TARGET_64BIT)
            indirect_call = 1;

          if (seq_length != 0
              && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
              && !sibcall
              && (!TARGET_PA_20
                  || indirect_call
                  || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
            {
              /* A non-jump insn in the delay slot.  By definition we can
                 emit this insn before the call (and in fact before argument
                 relocation).  */
              final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
                               NULL);

              /* Now delete the delay insn.  */
              SET_INSN_DELETED (NEXT_INSN (insn));
              delay_insn_deleted = 1;
            }

          if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
            {
              /* This is the best sequence for making long calls in
                 non-pic code.  Unfortunately, GNU ld doesn't provide
                 the stub needed for external calls, and GAS's support
                 for this with the SOM linker is buggy.  It is safe
                 to use this for local calls.  */
              output_asm_insn ("ldil L'%0,%%r1", xoperands);
              if (sibcall)
                output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
              else
                {
                  if (TARGET_PA_20)
                    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
                                     xoperands);
                  else
                    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);

                  output_asm_insn ("copy %%r31,%%r2", xoperands);
                  delay_slot_filled = 1;
                }
            }
          else
            {
              if (TARGET_LONG_PIC_SDIFF_CALL)
                {
                  /* The HP assembler and linker can handle relocations
                     for the difference of two symbols.  The HP assembler
                     recognizes the sequence as a pc-relative call and
                     the linker provides stubs when needed.  */
                  xoperands[1] = gen_label_rtx ();
                  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
                  output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
                  targetm.asm_out.internal_label (asm_out_file, "L",
                                             CODE_LABEL_NUMBER (xoperands[1]));
                  output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
                }
              else if (TARGET_GAS && !TARGET_SOM
                       && (TARGET_LONG_PIC_PCREL_CALL || local_call))
                {
                  /* GAS currently can't generate the relocations that
                     are needed for the SOM linker under HP-UX using this
                     sequence.  The GNU linker doesn't generate the stubs
                     that are needed for external calls on TARGET_ELF32
                     with this sequence.  For now, we have to use a
                     longer plabel sequence when using GAS.  */
                  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
                  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
                                   xoperands);
                  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
                                   xoperands);
                }
              else
                {
                  /* Emit a long plabel-based call sequence.  This is
                     essentially an inline implementation of $$dyncall.
                     We don't actually try to call $$dyncall as this is
                     as difficult as calling the function itself.  */
                  xoperands[0] = pa_get_deferred_plabel (call_dest);
                  xoperands[1] = gen_label_rtx ();

                  /* Since the call is indirect, FP arguments in registers
                     need to be copied to the general registers.  Then, the
                     argument relocation stub will copy them back.  */
                  if (TARGET_SOM)
                    copy_fp_args (insn);

                  if (flag_pic)
                    {
                      output_asm_insn ("addil LT'%0,%%r19", xoperands);
                      output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
                      output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
                    }
                  else
                    {
                      output_asm_insn ("addil LR'%0-$global$,%%r27",
                                       xoperands);
                      output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
                                       xoperands);
                    }

                  output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
                  output_asm_insn ("depi 0,31,2,%%r1", xoperands);
                  output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
                  output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);

                  if (!sibcall && !TARGET_PA_20)
                    {
                      output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
                      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                        output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
                      else
                        output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
                    }

                  if (TARGET_PA_20)
                    {
                      if (sibcall)
                        output_asm_insn ("bve (%%r1)", xoperands);
                      else
                        {
                          if (indirect_call)
                            {
                              output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
                              output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
                              delay_slot_filled = 1;
                            }
                          else
                            output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
                        }
                    }
                  else
                    {
                      if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
                        output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
                                         xoperands);

                      if (sibcall)
                        {
                          if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                            output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
                          else
                            output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
                        }
                      else
                        {
                          if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                            output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
                          else
                            output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);

                          if (indirect_call)
                            output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
                          else
                            output_asm_insn ("copy %%r31,%%r2", xoperands);
                          delay_slot_filled = 1;
                        }
                    }
                }
            }
        }
    }

  if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0
      || delay_insn_deleted
      || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
    return "";

  /* A sibcall should never have a branch in the delay slot.  */
  gcc_assert (!sibcall);

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
    {
      /* See if the return address can be adjusted.  Use the containing
         sequence insn's address.  This would break the regular call/return
         relationship assumed by the table based eh unwinder, so only do that
         if the call is not possibly throwing.  */
      rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
                      - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance)
          && !(can_throw_internal (insn) || can_throw_external (insn)))
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
        }
      else
        output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    output_asm_insn ("b,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}

/* Return the attribute length of the indirect call instruction INSN.
   The length must match the code generated by pa_output_indirect_call.
   The returned length includes the delay slot.  Currently, the delay
   slot of an indirect call sequence is not exposed and it is used by
   the sequence itself.  */

int
pa_attr_length_indirect_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  if (TARGET_64BIT)
    return 12;

  if (TARGET_FAST_INDIRECT_CALLS
      || (!TARGET_PORTABLE_RUNTIME
          && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
              || distance < MAX_PCREL17F_OFFSET)))
    return 8;

  if (flag_pic)
    return 24;

  if (TARGET_PORTABLE_RUNTIME)
    return 20;

  /* Out of reach, can use ble.  */
  return 12;
}

const char *
pa_output_indirect_call (rtx insn, rtx call_dest)
{
  rtx xoperands[1];

  if (TARGET_64BIT)
    {
      xoperands[0] = call_dest;
      output_asm_insn ("ldd 16(%0),%%r2", xoperands);
      output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
      return "";
    }

  /* First the special case for kernels, level 0 systems, etc.  */
  if (TARGET_FAST_INDIRECT_CALLS)
    return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";

  /* Now the normal case -- we can reach $$dyncall directly or
     we're sure that we can get there via a long-branch stub.

     No need to check target flags as the length uniquely identifies
     the remaining cases.  */
  if (pa_attr_length_indirect_call (insn) == 8)
    {
      /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
         $$dyncall.  Since BLE uses %r31 as the link register, the 22-bit
         variant of the B,L instruction can't be used on the SOM target.  */
      if (TARGET_PA_20 && !TARGET_SOM)
        return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
      else
        return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
    }

  /* Long millicode call, but we are not generating PIC or portable runtime
     code.  */
  if (pa_attr_length_indirect_call (insn) == 12)
    return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";

  /* Long millicode call for portable runtime.  */
  if (pa_attr_length_indirect_call (insn) == 20)
    return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";

  /* We need a long PIC call to $$dyncall.  */
  xoperands[0] = NULL_RTX;
  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
  if (TARGET_SOM || !TARGET_GAS)
    {
      xoperands[0] = gen_label_rtx ();
      output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
      targetm.asm_out.internal_label (asm_out_file, "L",
                                      CODE_LABEL_NUMBER (xoperands[0]));
      output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
    }
  else
    {
      output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
      output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
                       xoperands);
    }
  output_asm_insn ("blr %%r0,%%r2", xoperands);
  output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
  return "";
}

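/* Illustrative only (not part of the original source): the
   TARGET_FAST_INDIRECT_CALLS case above assumes the callee is in the
   same space, so

       ble 0(%sr4,%r22)
       copy %r31,%r2            ; delay slot: move return link into %r2

   suffices; the remaining cases route through $$dyncall.  */
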
/* In HPUX 8.0's shared library scheme, special relocations are needed
   for function labels if they might be passed to a function
   in a shared library (because shared libraries don't live in code
   space), and special magic is needed to construct their address.  */

void
pa_encode_label (rtx sym)
{
  const char *str = XSTR (sym, 0);
  int len = strlen (str) + 1;
  char *newstr, *p;

  p = newstr = XALLOCAVEC (char, len + 1);
  *p++ = '@';
  strcpy (p, str);

  XSTR (sym, 0) = ggc_alloc_string (newstr, len);
}

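/* For example (illustrative): a code-space function symbol "foo"
   becomes "@foo" here; pa_strip_name_encoding below removes the '@'
   (and any '*' user-label prefix) before the name is printed.  */
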
static void
pa_encode_section_info (tree decl, rtx rtl, int first)
{
  int old_referenced = 0;

  if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
    old_referenced
      = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;

  default_encode_section_info (decl, rtl, first);

  if (first && TEXT_SPACE_P (decl))
    {
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
      if (TREE_CODE (decl) == FUNCTION_DECL)
        pa_encode_label (XEXP (rtl, 0));
    }
  else if (old_referenced)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
}

/* This is sort of inverse to pa_encode_section_info.  */

static const char *
pa_strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}

/* Returns 1 if OP is a function label involved in a simple addition
   with a constant.  Used to keep certain patterns from matching
   during instruction combination.  */

int
pa_is_function_label_plus_const (rtx op)
{
  /* Strip off any CONST.  */
  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  return (GET_CODE (op) == PLUS
          && function_label_operand (XEXP (op, 0), VOIDmode)
          && GET_CODE (XEXP (op, 1)) == CONST_INT);
}

8199 /* Output assembly code for a thunk to FUNCTION. */
8202 pa_asm_output_mi_thunk (FILE *file
, tree thunk_fndecl
, HOST_WIDE_INT delta
,
8203 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
8206 static unsigned int current_thunk_number
;
8207 int val_14
= VAL_14_BITS_P (delta
);
8208 unsigned int old_last_address
= last_address
, nbytes
= 0;
8212 xoperands
[0] = XEXP (DECL_RTL (function
), 0);
8213 xoperands
[1] = XEXP (DECL_RTL (thunk_fndecl
), 0);
8214 xoperands
[2] = GEN_INT (delta
);
8216 ASM_OUTPUT_LABEL (file
, XSTR (xoperands
[1], 0));
8217 fprintf (file
, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
8219 /* Output the thunk. We know that the function is in the same
8220 translation unit (i.e., the same space) as the thunk, and that
8221 thunks are output after their method. Thus, we don't need an
8222 external branch to reach the function. With SOM and GAS,
8223 functions and thunks are effectively in different sections.
8224 Thus, we can always use a IA-relative branch and the linker
8225 will add a long branch stub if necessary.
8227 However, we have to be careful when generating PIC code on the
8228 SOM port to ensure that the sequence does not transfer to an
8229 import stub for the target function as this could clobber the
8230 return value saved at SP-24. This would also apply to the
8231 32-bit linux port if the multi-space model is implemented. */
8232 if ((!TARGET_LONG_CALLS
&& TARGET_SOM
&& !TARGET_PORTABLE_RUNTIME
8233 && !(flag_pic
&& TREE_PUBLIC (function
))
8234 && (TARGET_GAS
|| last_address
< 262132))
8235 || (!TARGET_LONG_CALLS
&& !TARGET_SOM
&& !TARGET_PORTABLE_RUNTIME
8236 && ((targetm_common
.have_named_sections
8237 && DECL_SECTION_NAME (thunk_fndecl
) != NULL
8238 /* The GNU 64-bit linker has rather poor stub management.
8239 So, we use a long branch from thunks that aren't in
8240 the same section as the target function. */
8242 && (DECL_SECTION_NAME (thunk_fndecl
)
8243 != DECL_SECTION_NAME (function
)))
8244 || ((DECL_SECTION_NAME (thunk_fndecl
)
8245 == DECL_SECTION_NAME (function
))
8246 && last_address
< 262132)))
8247 || (targetm_common
.have_named_sections
8248 && DECL_SECTION_NAME (thunk_fndecl
) == NULL
8249 && DECL_SECTION_NAME (function
) == NULL
8250 && last_address
< 262132)
8251 || (!targetm_common
.have_named_sections
8252 && last_address
< 262132))))
8255 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8257 output_asm_insn ("b %0", xoperands
);
8261 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8266 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8270 else if (TARGET_64BIT
)
8272 /* We only have one call-clobbered scratch register, so we can't
8273 make use of the delay slot if delta doesn't fit in 14 bits. */
8276 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8277 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8280 output_asm_insn ("b,l .+8,%%r1", xoperands
);
8284 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands
);
8285 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands
);
8289 xoperands
[3] = GEN_INT (val_14
? 8 : 16);
8290 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands
);
8295 output_asm_insn ("bv %%r0(%%r1)", xoperands
);
8296 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8301 output_asm_insn ("bv,n %%r0(%%r1)", xoperands
);
8305 else if (TARGET_PORTABLE_RUNTIME
)
8307 output_asm_insn ("ldil L'%0,%%r1", xoperands
);
8308 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands
);
8311 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8313 output_asm_insn ("bv %%r0(%%r22)", xoperands
);
8317 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8322 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8326 else if (TARGET_SOM
&& flag_pic
&& TREE_PUBLIC (function
))
8328 /* The function is accessible from outside this module. The only
8329 way to avoid an import stub between the thunk and function is to
8330 call the function directly with an indirect sequence similar to
8331 that used by $$dyncall. This is possible because $$dyncall acts
8332 as the import stub in an indirect call. */
8333 ASM_GENERATE_INTERNAL_LABEL (label
, "LTHN", current_thunk_number
);
8334 xoperands
[3] = gen_rtx_SYMBOL_REF (Pmode
, label
);
8335 output_asm_insn ("addil LT'%3,%%r19", xoperands
);
8336 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands
);
8337 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands
);
8338 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands
);
8339 output_asm_insn ("depi 0,31,2,%%r22", xoperands
);
8340 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands
);
8341 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands
);
8345 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8351 output_asm_insn ("bve (%%r22)", xoperands
);
8354 else if (TARGET_NO_SPACE_REGS
)
8356 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands
);
8361 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands
);
8362 output_asm_insn ("mtsp %%r21,%%sr0", xoperands
);
8363 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands
);
8368 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8370 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8374 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands
);
8376 if (TARGET_SOM
|| !TARGET_GAS
)
8378 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands
);
8379 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands
);
8383 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands
);
8384 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands
);
8388 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8390 output_asm_insn ("bv %%r0(%%r22)", xoperands
);
8394 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8399 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8406 output_asm_insn ("addil L'%2,%%r26", xoperands
);
8408 output_asm_insn ("ldil L'%0,%%r22", xoperands
);
8409 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands
);
8413 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands
);
8418 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands
);
8423 fprintf (file
, "\t.EXIT\n\t.PROCEND\n");
8425 if (TARGET_SOM
&& TARGET_GAS
)
8427 /* We done with this subspace except possibly for some additional
8428 debug information. Forget that we are in this subspace to ensure
8429 that the next function is output in its own subspace. */
8431 cfun
->machine
->in_nsubspa
= 2;
8434 if (TARGET_SOM
&& flag_pic
&& TREE_PUBLIC (function
))
8436 switch_to_section (data_section
);
8437 output_asm_insn (".align 4", xoperands
);
8438 ASM_OUTPUT_LABEL (file
, label
);
8439 output_asm_insn (".word P'%0", xoperands
);
8442 current_thunk_number
++;
8443 nbytes
= ((nbytes
+ FUNCTION_BOUNDARY
/ BITS_PER_UNIT
- 1)
8444 & ~(FUNCTION_BOUNDARY
/ BITS_PER_UNIT
- 1));
8445 last_address
+= nbytes
;
8446 if (old_last_address
> last_address
)
8447 last_address
= UINT_MAX
;
8448 update_total_code_bytes (nbytes
);
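
/* Editorial illustration, not part of the original sources: the rounding
   above pads the thunk size up to the next multiple of the function
   alignment.  Assuming FUNCTION_BOUNDARY is 32 bits (4 bytes), a thunk of
   nbytes == 22 rounds to (22 + 4 - 1) & ~3 == 24, keeping last_address in
   step with the addresses the assembler will actually assign.  */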
/* Only direct calls to static functions are allowed to be sibling (tail)
   call optimized.

   This restriction is necessary because some linker generated stubs will
   store return pointers into rp' in some cases which might clobber a
   live value already in rp'.

   In a sibcall the current function and the target function share stack
   space.  Thus if the path to the current function and the path to the
   target function save a value in rp', they save the value into the
   same stack slot, which has undesirable consequences.

   Because of the deferred binding nature of shared libraries any function
   with external scope could be in a different load module and thus require
   rp' to be saved when calling that function.  So sibcall optimizations
   can only be safe for static functions.

   Note that GCC never needs return value relocations, so we don't have to
   worry about static calls with return value relocations (which require
   saving rp').

   It is safe to perform a sibcall optimization when the target function
   will never return.  */

static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  if (TARGET_PORTABLE_RUNTIME)
    return false;

  /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
     single subspace mode and the call is not indirect.  As far as I know,
     there is no operating system support for the multiple subspace mode.
     It might be possible to support indirect calls if we didn't use
     $$dyncall (see the indirect sequence generated in pa_output_call).  */
  if (TARGET_ELF32)
    return (decl != NULL_TREE);

  /* Sibcalls are not ok because the arg pointer register is not a fixed
     register.  This prevents the sibcall optimization from occurring.  In
     addition, there are problems with stub placement using GNU ld.  This
     is because a normal sibcall branch uses a 17-bit relocation while
     a regular call branch uses a 22-bit relocation.  As a result, more
     care needs to be taken in the placement of long-branch stubs.  */
  if (TARGET_64BIT)
    return false;

  /* Sibcalls are only ok within a translation unit.  */
  return (decl && !TREE_PUBLIC (decl));
}
/* ??? Addition is not commutative on the PA due to the weird implicit
   space register selection rules for memory addresses.  Therefore, we
   don't consider a + b == b + a, as this might be inside a MEM.  */

static bool
pa_commutative_p (const_rtx x, int outer_code)
{
  return (COMMUTATIVE_P (x)
	  && (TARGET_NO_SPACE_REGS
	      || (outer_code != UNKNOWN && outer_code != MEM)
	      || GET_CODE (x) != PLUS));
}
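
/* Editorial illustration, not part of the original sources: inside a MEM,
   (plus (reg %r25) (reg %r26)) and (plus (reg %r26) (reg %r25)) are not
   interchangeable, because the hardware derives the implicit space
   register from the base operand of the address.  Swapping the operands
   can therefore change which space register is used for the access.  */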
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpyadd instructions.  */

int
pa_fmpyaddoperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the addition.  One of the input operands must
     be the same as the output operand.  */
  if (! rtx_equal_p (operands[3], operands[4])
      && ! rtx_equal_p (operands[3], operands[5]))
    return 0;

  /* Inout operand of add cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* multiply cannot feed into addition operands.  */
  if (rtx_equal_p (operands[4], operands[0])
      || rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpyadd.  */
  return 1;
}
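
/* Editorial example, not part of the original sources: the checks above
   accept an operand set such as

       fr22 = fr23 * fr24       (the fmpy; operands 0, 1, 2)
       fr25 = fr25 + fr26       (the add; operands 3, 4, 5)

   because the add's inout register fr25 matches operand 4, is distinct
   from fr22/fr23/fr24, and neither add input is the multiply output.
   Such a pair can then be emitted as one fmpyadd instruction.  */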
#if !defined(USE_COLLECT2)
static void
pa_asm_out_constructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef CTORS_SECTION_ASM_OP
  default_ctor_section_asm_out_constructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_constructor (symbol, priority);
# else
  default_stabs_asm_out_constructor (symbol, priority);
# endif
#endif
}

static void
pa_asm_out_destructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    pa_encode_label (symbol);

#ifdef DTORS_SECTION_ASM_OP
  default_dtor_section_asm_out_destructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_destructor (symbol, priority);
# else
  default_stabs_asm_out_destructor (symbol, priority);
# endif
#endif
}
#endif
/* This function places uninitialized global data in the bss section.
   The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
   function on the SOM port to prevent uninitialized global data from
   being placed in the data section.  */

void
pa_asm_output_aligned_bss (FILE *stream,
			   const char *name,
			   unsigned HOST_WIDE_INT size,
			   unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
  ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
#endif

  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}
/* Both the HP and GNU assemblers under HP-UX provide a .comm directive
   that doesn't allow the alignment of global common storage to be directly
   specified.  The SOM linker aligns common storage based on the rounded
   value of the NUM_BYTES parameter in the .comm directive.  It's not
   possible to use the .align directive as it doesn't affect the alignment
   of the label associated with a .comm directive.  */

void
pa_asm_output_aligned_common (FILE *stream,
			      const char *name,
			      unsigned HOST_WIDE_INT size,
			      unsigned int align)
{
  unsigned int max_common_align;

  max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
  if (align > max_common_align)
    {
      warning (0, "alignment (%u) for %s exceeds maximum alignment "
	       "for global common data.  Using %u",
	       align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
      align = max_common_align;
    }

  switch_to_section (bss_section);

  assemble_name (stream, name);
  fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED "\n",
	   MAX (size, align / BITS_PER_UNIT));
}
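
/* Editorial worked example, not part of the original sources: to obtain
   8-byte alignment for a 6-byte common symbol with align == 64 bits, the
   code above emits ".comm 8" (MAX (6, 64 / BITS_PER_UNIT)).  The SOM
   linker rounds that size and aligns the storage accordingly, which a
   separate .align directive could not achieve for a .comm symbol.  */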
/* We can't use .comm for local common storage as the SOM linker effectively
   treats the symbol as universal and uses the same storage for local symbols
   with the same name in different object files.  The .block directive
   reserves an uninitialized block of storage.  However, it's not common
   storage.  Fortunately, GCC never requests common storage with the same
   name in any given translation unit.  */

void
pa_asm_output_aligned_local (FILE *stream,
			     const char *name,
			     unsigned HOST_WIDE_INT size,
			     unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef LOCAL_ASM_OP
  fprintf (stream, "%s", LOCAL_ASM_OP);
  assemble_name (stream, name);
  fprintf (stream, "\n");
#endif

  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpysub instructions.  */

int
pa_fmpysuboperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the subtraction.  Subtraction is not a
     commutative operation, so operands[4] must be the same as
     operands[3].  */
  if (! rtx_equal_p (operands[3], operands[4]))
    return 0;

  /* multiply cannot feed into subtraction.  */
  if (rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* Inout operand of sub cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpysub.  */
  return 1;
}
/* Return 1 if the given constant is 2, 4, or 8.  These are the valid
   constants for shadd instructions.  */

int
pa_shadd_constant_p (int val)
{
  if (val == 2 || val == 4 || val == 8)
    return 1;
  else
    return 0;
}
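
/* Editorial illustration, not part of the original sources: shadd
   instructions scale one operand by these constants, e.g.

       sh2add %r4,%r5,%r6       ; %r6 = (%r4 << 2) + %r5

   so a multiply-by-4-and-add collapses into a single instruction; the
   constants 2, 4 and 8 correspond to sh1add, sh2add and sh3add.  */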
/* Return TRUE if INSN branches forward.  */

static bool
forward_branch_p (rtx insn)
{
  rtx lab = JUMP_LABEL (insn);

  /* The INSN must have a jump label.  */
  gcc_assert (lab != NULL_RTX);

  if (INSN_ADDRESSES_SET_P ())
    return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));

  while (insn)
    {
      if (insn == lab)
	return true;
      else
	insn = NEXT_INSN (insn);
    }

  return false;
}
/* Return 1 if INSN is in the delay slot of a call instruction.  */

int
pa_jump_in_call_delay (rtx insn)
{
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;

  if (PREV_INSN (insn)
      && PREV_INSN (PREV_INSN (insn))
      && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
    {
      rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));

      return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
	      && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
    }
  else
    return 0;
}
/* Output an unconditional move and branch insn.  */

const char *
pa_output_parallel_movb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* These are the cases in which we win.  */
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
	{
	  /* Nothing in the delay slot, fake it by putting the combined
	     insn (the copy or add) in the delay slot of a bl.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "b %2\n\tldi %1,%0";
	  else
	    return "b %2\n\tcopy %1,%0";
	}
      else
	{
	  /* Something in the delay slot, but we've got a long branch.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "ldi %1,%0\n\tb %2";
	  else
	    return "copy %1,%0\n\tb %2";
	}
    }

  if (GET_CODE (operands[1]) == CONST_INT)
    output_asm_insn ("ldi %1,%0", operands);
  else
    output_asm_insn ("copy %1,%0", operands);
  return pa_output_lbranch (operands[2], insn, 1);
}
/* Output an unconditional add and branch insn.  */

const char *
pa_output_parallel_addb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      /* Nothing in the delay slot, fake it by putting the combined
	 insn (the copy or add) in the delay slot of a bl.  */
      if (dbr_sequence_length () == 0)
	return "b %3\n\tadd%I1 %1,%0,%0";
      /* Something in the delay slot, but we've got a long branch.  */
      else
	return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return pa_output_lbranch (operands[3], insn, 1);
}
/* Return nonzero if INSN (a jump insn) immediately follows a call
   to a named function.  This is used to avoid filling the delay slot
   of the jump since it can usually be eliminated by modifying RP in
   the delay slot of the call.  */

int
pa_following_call (rtx insn)
{
  if (! TARGET_JUMP_IN_DELAY)
    return 0;

  /* Find the previous real insn, skipping NOTEs.  */
  insn = PREV_INSN (insn);
  while (insn && GET_CODE (insn) == NOTE)
    insn = PREV_INSN (insn);

  /* Check for CALL_INSNs and millicode calls.  */
  if (insn
      && ((GET_CODE (insn) == CALL_INSN
	   && get_attr_type (insn) != TYPE_DYNCALL)
	  || (GET_CODE (insn) == INSN
	      && GET_CODE (PATTERN (insn)) != SEQUENCE
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && get_attr_type (insn) == TYPE_MILLI)))
    return 1;

  return 0;
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.

   We want the delay slots of branches within jump tables to be filled.
   None of the compiler passes at the moment even has the notion that a
   PA jump table doesn't contain addresses, but instead contains actual
   instructions!

   Because we actually jump into the table, the addresses of each entry
   must stay constant in relation to the beginning of the table (which
   itself must stay constant relative to the instruction to jump into
   it).  I don't believe we can guarantee earlier passes of the compiler
   will adhere to those rules.

   So, late in the compilation process we find all the jump tables, and
   expand them into real code -- e.g. each entry in the jump table vector
   will get an appropriate label followed by a jump to the final target.

   Reorg and the final jump pass can then optimize these branches and
   fill their delay slots.  We end up with smaller, more efficient code.

   The jump instructions within the table are special; we must be able
   to identify them during assembly output (if the jumps don't get filled
   we need to emit a nop rather than nullifying the delay slot).  We
   identify jumps in switch tables by using insns with the attribute
   type TYPE_BTABLE_BRANCH.

   We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
   insns.  This serves two purposes, first it prevents jump.c from
   noticing that the last N entries in the table jump to the instruction
   immediately after the table and deleting the jumps.  Second, those
   insns mark where we should emit .begin_brtab and .end_brtab directives
   when using GAS (allows for better link time optimizations).  */

static void
pa_reorg (void)
{
  rtx insn;

  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();

  /* This is fairly cheap, so always run it if optimizing.  */
  if (optimize > 0 && !TARGET_BIG_SWITCH)
    {
      /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	{
	  rtx pattern, tmp, location, label;
	  unsigned int length, i;

	  /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode.  */
	  if (GET_CODE (insn) != JUMP_INSN
	      || (GET_CODE (PATTERN (insn)) != ADDR_VEC
		  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
	    continue;

	  /* Emit marker for the beginning of the branch table.  */
	  emit_insn_before (gen_begin_brtab (), insn);

	  pattern = PATTERN (insn);
	  location = PREV_INSN (insn);
	  length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);

	  for (i = 0; i < length; i++)
	    {
	      /* Emit a label before each jump to keep jump.c from
		 removing this code.  */
	      tmp = gen_label_rtx ();
	      LABEL_NUSES (tmp) = 1;
	      emit_label_after (tmp, location);
	      location = NEXT_INSN (location);

	      if (GET_CODE (pattern) == ADDR_VEC)
		label = XEXP (XVECEXP (pattern, 0, i), 0);
	      else
		label = XEXP (XVECEXP (pattern, 1, i), 0);

	      tmp = gen_short_jump (label);

	      /* Emit the jump itself.  */
	      tmp = emit_jump_insn_after (tmp, location);
	      JUMP_LABEL (tmp) = label;
	      LABEL_NUSES (label)++;
	      location = NEXT_INSN (location);

	      /* Emit a BARRIER after the jump.  */
	      emit_barrier_after (location);
	      location = NEXT_INSN (location);
	    }

	  /* Emit marker for the end of the branch table.  */
	  emit_insn_before (gen_end_brtab (), location);
	  location = NEXT_INSN (location);
	  emit_barrier_after (location);

	  /* Delete the ADDR_VEC or ADDR_DIFF_VEC.  */
	  delete_insn (insn);
	}
    }
  else
    {
      /* Still need brtab marker insns.  FIXME: the presence of these
	 markers disables output of the branch table to readonly memory,
	 and any alignment directives that might be needed.  Possibly,
	 the begin_brtab insn should be output before the label for the
	 table.  This doesn't matter at the moment since the tables are
	 always output in the text section.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	{
	  /* Find an ADDR_VEC insn.  */
	  if (GET_CODE (insn) != JUMP_INSN
	      || (GET_CODE (PATTERN (insn)) != ADDR_VEC
		  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
	    continue;

	  /* Now generate markers for the beginning and end of the
	     branch table.  */
	  emit_insn_before (gen_begin_brtab (), insn);
	  emit_insn_after (gen_end_brtab (), insn);
	}
    }
}
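
/* Editorial sketch of the transformation above, not part of the original
   sources: a two-entry ADDR_VEC jumping to L10 and L20 becomes, roughly,

       begin_brtab
   L100:  b L10        ; delay slot now available for filling
	  nop
   L101:  b L20
	  nop
       end_brtab

   and the original ADDR_VEC insn itself is deleted.  */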
/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform a FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed point.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is an insn away).  These are difficult to use due to the
   branch length restrictions.  */
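
/* Editorial sketch, not part of the original sources: for the addb case,
   an add followed later by an unconditional backward branch, e.g.

       add %r5,%r4,%r4
       ...
       b L$0002

   can, when the operands are independent of the intervening insns, be
   rewritten as a single "addb,tr %r5,%r4,L$0002", where the "true"
   condition makes the branch unconditional.  */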
static void
pa_combine_instructions (void)
{
  rtx anchor, new_rtx;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  new_rtx = make_insn_raw (new_rtx);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
	 Also ignore any special USE insns.  */
      if ((GET_CODE (anchor) != INSN
	   && GET_CODE (anchor) != JUMP_INSN
	   && GET_CODE (anchor) != CALL_INSN)
	  || GET_CODE (PATTERN (anchor)) == USE
	  || GET_CODE (PATTERN (anchor)) == CLOBBER
	  || GET_CODE (PATTERN (anchor)) == ADDR_VEC
	  || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
	continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
	  || anchor_attr == PA_COMBINE_TYPE_FADDSUB
	  || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
	      && ! forward_branch_p (anchor)))
	{
	  rtx floater;

	  for (floater = PREV_INSN (anchor);
	       floater;
	       floater = PREV_INSN (floater))
	    {
	      if (GET_CODE (floater) == NOTE
		  || (GET_CODE (floater) == INSN
		      && (GET_CODE (PATTERN (floater)) == USE
			  || GET_CODE (PATTERN (floater)) == CLOBBER)))
		continue;

	      /* Anything except a regular INSN will stop our search.  */
	      if (GET_CODE (floater) != INSN
		  || GET_CODE (PATTERN (floater)) == ADDR_VEC
		  || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
		{
		  floater = NULL_RTX;
		  break;
		}

	      /* See if FLOATER is suitable for combination with the
		 anchor.  */
	      floater_attr = get_attr_pa_combine_type (floater);
	      if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		      && floater_attr == PA_COMBINE_TYPE_FMPY))
		{
		  /* If ANCHOR and FLOATER can be combined, then we're
		     done with this pass.  */
		  if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					SET_DEST (PATTERN (floater)),
					XEXP (SET_SRC (PATTERN (floater)), 0),
					XEXP (SET_SRC (PATTERN (floater)), 1)))
		    break;
		}

	      else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
		       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
		{
		  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					XEXP (SET_SRC (PATTERN (floater)), 0),
					XEXP (SET_SRC (PATTERN (floater)), 1)))
			break;
		    }
		  else
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    SET_SRC (PATTERN (floater)),
					    SET_SRC (PATTERN (floater))))
			break;
		    }
		}
	    }

	  /* If we didn't find anything on the backwards scan try forwards.  */
	  if (!floater
	      && (anchor_attr == PA_COMBINE_TYPE_FMPY
		  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
	    {
	      for (floater = anchor; floater; floater = NEXT_INSN (floater))
		{
		  if (GET_CODE (floater) == NOTE
		      || (GET_CODE (floater) == INSN
			  && (GET_CODE (PATTERN (floater)) == USE
			      || GET_CODE (PATTERN (floater)) == CLOBBER)))
		    continue;

		  /* Anything except a regular INSN will stop our search.  */
		  if (GET_CODE (floater) != INSN
		      || GET_CODE (PATTERN (floater)) == ADDR_VEC
		      || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
		    {
		      floater = NULL_RTX;
		      break;
		    }

		  /* See if FLOATER is suitable for combination with the
		     anchor.  */
		  floater_attr = get_attr_pa_combine_type (floater);
		  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
			  && floater_attr == PA_COMBINE_TYPE_FMPY))
		    {
		      /* If ANCHOR and FLOATER can be combined, then we're
			 done with this pass.  */
		      if (pa_can_combine_p (new_rtx, anchor, floater, 1,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		}
	    }

	  /* FLOATER will be nonzero if we found a suitable floating
	     insn for combination with ANCHOR.  */
	  if (floater
	      && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		  || anchor_attr == PA_COMBINE_TYPE_FMPY))
	    {
	      /* Emit the new instruction and delete the old anchor.  */
	      emit_insn_before (gen_rtx_PARALLEL
				(VOIDmode,
				 gen_rtvec (2, PATTERN (anchor),
					    PATTERN (floater))),
				anchor);

	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);

	      continue;
	    }
	  else if (floater
		   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
	    {
	      rtx temp;

	      /* Emit the new_jump instruction and delete the old anchor.  */
	      temp
		= emit_jump_insn_before (gen_rtx_PARALLEL
					 (VOIDmode,
					  gen_rtvec (2, PATTERN (anchor),
						     PATTERN (floater))),
					 anchor);

	      JUMP_LABEL (temp) = JUMP_LABEL (anchor);
	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	}
    }
}
static int
pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
		  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx start, end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
  INSN_CODE (new_rtx) = -1;
  insn_code_number = recog_memoized (new_rtx);
  if (insn_code_number < 0
      || (extract_insn (new_rtx), ! constrain_operands (1)))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There's up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in particular.  */

int
pa_insn_refs_are_delayed (rtx insn)
{
  return ((GET_CODE (insn) == INSN
	   && GET_CODE (PATTERN (insn)) != SEQUENCE
	   && GET_CODE (PATTERN (insn)) != USE
	   && GET_CODE (PATTERN (insn)) != CLOBBER
	   && get_attr_type (insn) == TYPE_MILLI));
}
/* Promote the return value, but not the arguments.  */

static enum machine_mode
pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			  enum machine_mode mode,
			  int *punsignedp ATTRIBUTE_UNUSED,
			  const_tree fntype ATTRIBUTE_UNUSED,
			  int for_return)
{
  if (for_return == 0)
    return mode;
  return promote_mode (type, mode, punsignedp);
}
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else promoting
   return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

static rtx
pa_function_value (const_tree valtype,
		   const_tree func ATTRIBUTE_UNUSED,
		   bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      if (TARGET_64BIT)
	{
	  /* Aggregates with a size less than or equal to 128 bits are
	     returned in GR 28(-29).  They are left justified.  The pad
	     bits are undefined.  Larger aggregates are returned in
	     memory.  */
	  rtx loc[2];
	  int i, offset = 0;
	  int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;

	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, 28 + i),
					  GEN_INT (offset));
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
	}
      else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
	{
	  /* Aggregates 5 to 8 bytes in size are returned in general
	     registers r28-r29 in the same manner as other non
	     floating-point objects.  The data is right-justified and
	     zero-extended to 64 bits.  This is opposite to the normal
	     justification used on big endian targets and requires
	     special treatment.  */
	  rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (DImode, 28), const0_rtx);
	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	}
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
/* Implement the TARGET_LIBCALL_VALUE hook.  */

static rtx
pa_libcall_value (enum machine_mode mode,
		  const_rtx fun ATTRIBUTE_UNUSED)
{
  if (! TARGET_SOFT_FLOAT
      && (mode == SFmode || mode == DFmode))
    return gen_rtx_REG (mode, 32);
  else
    return gen_rtx_REG (mode, 28);
}
/* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook.  */

static bool
pa_function_value_regno_p (const unsigned int regno)
{
  if (regno == 28
      || (! TARGET_SOFT_FLOAT && regno == 32))
    return true;

  return false;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int arg_size = FUNCTION_ARG_SIZE (mode, type);

  cum->nargs_prototype--;
  cum->words += (arg_size
		 + ((cum->words & 01)
		    && type != NULL_TREE
		    && arg_size > 1));
}
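
/* Editorial worked example, not part of the original sources: with
   cum->words == 1, a typed two-word argument advances cum->words by 3,
   one word of padding plus the two argument words, so that multi-word
   arguments always start on an even (double-word aligned) slot.  */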
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */

static rtx
pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
	return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
	alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
	return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

	 Remember, gprs grow towards smaller register numbers while
	 fprs grow to higher register numbers.  Also remember that
	 although FP regs are 32-bit addressable, we pretend that
	 the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
	 treatment.  */
      if (arg_size > 1
	  || mode == BLKmode
	  || (type && (AGGREGATE_TYPE_P (type)
		       || TREE_CODE (type) == COMPLEX_TYPE
		       || TREE_CODE (type) == VECTOR_TYPE)))
	{
	  /* Double-extended precision (80-bit), quad-precision (128-bit)
	     and aggregates including complex numbers are aligned on
	     128-bit boundaries.  The first eight 64-bit argument slots
	     are associated one-to-one, with general registers r26
	     through r19, and also with floating-point registers fr4
	     through fr11.  Arguments larger than one word are always
	     passed in general registers.

	     Using a PARALLEL with a word mode register results in left
	     justified data on a big-endian target.  */

	  rtx loc[8];
	  int i, offset = 0, ub = arg_size;

	  /* Align the base register.  */
	  gpr_reg_base -= alignment;

	  ub = MIN (ub, max_arg_words - cum->words - alignment);
	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, gpr_reg_base),
					  GEN_INT (offset));
	      gpr_reg_base -= 1;
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
	}
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
	 which registers we must use.  */
      if (arg_size > 1)
	{
	  if (cum->words)
	    {
	      gpr_reg_base = 23;
	      fpr_reg_base = 38;
	    }
	  else
	    {
	      gpr_reg_base = 25;
	      fpr_reg_base = 34;
	    }

	  /* Structures 5 to 8 bytes in size are passed in the general
	     registers in the same manner as other non floating-point
	     objects.  The data is right-justified and zero-extended
	     to 64 bits.  This is opposite to the normal justification
	     used on big endian targets and requires special treatment.
	     We now define BLOCK_REG_PADDING to pad these objects.
	     Aggregates, complex and vector types are passed in the same
	     manner as structures.  */
	  if (mode == BLKmode
	      || (type && (AGGREGATE_TYPE_P (type)
			   || TREE_CODE (type) == COMPLEX_TYPE
			   || TREE_CODE (type) == VECTOR_TYPE)))
	    {
	      rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (DImode, gpr_reg_base),
					   const0_rtx);
	      return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	    }
	}
      else
	{
	  /* We have a single word (32 bits).  A simple computation
	     will get us the register #s we need.  */
	  gpr_reg_base = 26 - cum->words;
	  fpr_reg_base = 32 + 2 * cum->words;
	}
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
	  is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
	  pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
	  registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
	  sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
	 calls with the 32 bit ABI and the HP assembler since there is no
	 way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
	  && !TARGET_GAS
	  && !cum->incoming
	  && cum->indirect
	  && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
	= gen_rtx_PARALLEL
	    (mode,
	     gen_rtvec (2,
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, fpr_reg_base),
					   const0_rtx),
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, gpr_reg_base),
					   const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
	  /* Indirect calls in the normal 32bit ABI require all arguments
	     to be passed in general registers.  */
	  || (!TARGET_PORTABLE_RUNTIME
	      && !TARGET_64BIT
	      && !TARGET_GAS
	      && cum->indirect)
	  /* If the parameter is not a scalar floating-point parameter,
	     then it belongs in GPRs.  */
	  || GET_MODE_CLASS (mode) != MODE_FLOAT
	  /* Structure with single SFmode field belongs in GPR.  */
	  || (type && AGGREGATE_TYPE_P (type)))
	retval = gen_rtx_REG (mode, gpr_reg_base);
      else
	retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
/* Arguments larger than one word are double word aligned.  */

static unsigned int
pa_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  bool singleword = (type
		     ? (integer_zerop (TYPE_SIZE (type))
			|| !TREE_CONSTANT (TYPE_SIZE (type))
			|| int_size_in_bytes (type) <= UNITS_PER_WORD)
		     : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);

  return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
}
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
		      tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
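
/* Editorial worked example, not part of the original sources: on the
   64-bit target with cum->words == 6, a four-word argument neither fits
   fully in the eight argument registers (6 + 4 > 8) nor starts on the
   stack (6 < 8), so (8 - 6) * UNITS_PER_WORD == 16 bytes go in registers
   and the remainder is passed on the stack.  */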
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
	{
	  /* We only want to emit a .nsubspa directive once at the
	     start of the function.  */
	  cfun->machine->in_nsubspa = 1;

	  /* Create a new subspace for the text.  This provides
	     better stub placement and one-only functions.  */
	  if (cfun->decl
	      && DECL_ONE_ONLY (cfun->decl)
	      && !DECL_WEAK (cfun->decl))
	    {
	      output_section_asm_op ("\t.SPACE $TEXT$\n"
				     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
				     "ACCESS=44,SORT=24,COMDAT");
	      return;
	    }
	}
      else
	{
	  /* There isn't a current function or the body of the current
	     function has been completed.  So, we are changing to the
	     text section to output debugging information.  Thus, we
	     need to forget that we are in the text section so that
	     varasm.c will call us when text_section is selected again.  */
	  gcc_assert (!cfun || !cfun->machine
		      || cfun->machine->in_nsubspa == 2);
	  in_section = NULL;
	}
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
			   "\t.SPACE $TEXT$\n"
			   "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
			   "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
			   som_output_comdat_data_section_asm_op,
			   "\t.SPACE $PRIVATE$\n"
			   "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
			   "ACCESS=31,SORT=24,COMDAT");

  som_tm_clone_table_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
/* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION.  */

static section *
pa_som_tm_clone_table_section (void)
{
  return som_tm_clone_table_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
		   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
	  || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
	  && DECL_ONE_ONLY (exp)
	  && !DECL_WEAK (exp))
	return som_one_only_readonly_data_section;
      else
	return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
	   && TREE_CODE (exp) == VAR_DECL
	   && DECL_ONE_ONLY (exp)
	   && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
	  || int_size_in_bytes (type) <= 0);
}
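
/* Editorial examples, not part of the original sources: under the PA64
   ABI a 16-byte structure is returned in registers (16 <= 16), while a
   17-byte structure goes in memory; a variable-sized type makes
   int_size_in_bytes return -1, which also forces a memory return.  */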
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol pointers.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol p = {decl, name};

  gcc_assert (file == asm_out_file);
  VEC_safe_push (extern_symbol, gc, extern_symbols, &p);
}
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
	  && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
	ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif
/* Return true if a change from mode FROM to mode TO for a register
   in register class RCLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
			     enum reg_class rclass)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
/* Length in units of the trampoline instruction code.  */

#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))


/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
   and then branches to the specified routine.

   This code template is copied from text segment to stack location
   and then patched with pa_trampoline_init to contain valid values,
   and then entered as a subroutine.

   It is best to keep this as small as possible to avoid having to
   flush multiple lines in the cache.  */

static void
pa_asm_trampoline_template (FILE *f)
{
  if (!TARGET_64BIT)
    {
      fputs ("\tldw 36(%r22),%r21\n", f);
      fputs ("\tbb,>=,n %r21,30,.+16\n", f);
      if (ASSEMBLER_DIALECT == 0)
	fputs ("\tdepi 0,31,2,%r21\n", f);
      else
	fputs ("\tdepwi 0,31,2,%r21\n", f);
      fputs ("\tldw 4(%r21),%r19\n", f);
      fputs ("\tldw 0(%r21),%r21\n", f);
      if (TARGET_PA_20)
	{
	  fputs ("\tbve (%r21)\n", f);
	  fputs ("\tldw 40(%r22),%r29\n", f);
	  fputs ("\t.word 0\n", f);
	  fputs ("\t.word 0\n", f);
	}
      else
	{
	  fputs ("\tldsid (%r21),%r1\n", f);
	  fputs ("\tmtsp %r1,%sr0\n", f);
	  fputs ("\tbe 0(%sr0,%r21)\n", f);
	  fputs ("\tldw 40(%r22),%r29\n", f);
	}
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
      fputs ("\t.word 0\n", f);
    }
  else
    {
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\tmfia %r31\n", f);
      fputs ("\tldd 24(%r31),%r1\n", f);
      fputs ("\tldd 24(%r1),%r27\n", f);
      fputs ("\tldd 16(%r1),%r1\n", f);
      fputs ("\tbve (%r1)\n", f);
      fputs ("\tldd 32(%r31),%r31\n", f);
      fputs ("\t.dword 0 ; fptr\n", f);
      fputs ("\t.dword 0 ; static link\n", f);
    }
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 36.
   Move the static chain value to trampoline template at offset 40.
   Move the trampoline address to trampoline template at offset 44.
   Move r19 to trampoline template at offset 48.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */

static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 36);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 40);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 44);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_andsi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
							    r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
}
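
/* Editorial worked example, not part of the original sources: assuming
   MIN_CACHELINE_SIZE == 32, a 32-bit trampoline at address 0x1014 with
   40 bytes of code gives

       start_addr = 0x1014 & -32	 == 0x1000
       end_addr   = (0x1014 + 39) & -32  == 0x1020

   so the flush loop touches exactly the two cache lines holding the
   trampoline, and no more.  */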
/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 44.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
  return addr;
}
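/* The 32-bit adjustment above is 44 + 2: offset 44 is where
   pa_trampoline_init stored the trampoline's own address (the first
   word of the fat pointer), and the extra 2 sets the low-order bit
   that identifies the pointer to the $$dyncall millicode as a plabel
   rather than a plain code address.  A minimal sketch of the address
   computation; the helper name is purely illustrative.  */

#if 0 /* Illustrative only, not part of the build.  */
static unsigned int
plabel_address_example (unsigned int tramp_base)
{
  /* tramp_base + 44 is word-aligned, so the + 2 only tags the
     pointer; it does not change which word is addressed.  */
  return tramp_base + 44 + 2;
}
#endif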
static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
  return x;
}
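/* The PIC form undone above is the DLT-indirect load emitted for
   -fpic code; in RTL it looks roughly like

     (lo_sum (reg) (unspec [(symbol_ref "x")] UNSPEC_DLTIND14R))

   and delegitimizing it back to a plain constant memory reference to
   "x" lets debug output and RTL dumps name the original symbol.  */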
static rtx
pa_internal_arg_pointer (void)
{
  /* The argument pointer and the hard frame pointer are the same in
     the 32-bit runtime, so we don't need a copy.  */
  if (TARGET_64BIT)
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.  */

static bool
pa_can_eliminate (const int from, const int to)
{
  /* The argument pointer cannot be eliminated in the 64-bit runtime.  */
  if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
    return false;

  return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
pa_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
      && to == STACK_POINTER_REGNUM)
    offset = -pa_compute_frame_size (get_frame_size (), 0);
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
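/* The negative sign above reflects PA's upward-growing stack: the
   frame pointer sits at the low end of the frame, below the stack
   pointer, so eliminating it means subtracting the full frame size.
   A minimal numeric sketch, assuming a hypothetical 128-byte frame;
   the helper name and constant are illustrative only.  */

#if 0 /* Illustrative only, not part of the build.  */
static long
eliminated_offset_example (long fp_relative_offset)
{
  const long frame_size = 128;	/* hypothetical frame size */

  /* An access at fp + 8 is rewritten during elimination as
     sp + (-128) + 8, i.e. sp - 120.  */
  return -frame_size + fp_relative_offset;
}
#endif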
static void
pa_conditional_register_usage (void)
{
  int i;

  if (!TARGET_64BIT && !TARGET_PA_11)
    {
      for (i = 56; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
      for (i = 33; i < 56; i += 2)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
    {
      for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
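/* Marking a register both fixed and call-used is how these loops remove
   it from allocation entirely.  A minimal generic sketch of the
   pattern, with hypothetical array and bound names standing in for
   fixed_regs, call_used_regs, and the FP register range.  */

#if 0 /* Illustrative only, not part of the build.  */
static void
disable_reg_range (char *fixed, char *call_used,
		   int first, int last, int step)
{
  int i;

  /* Every register in [first, last] with the given stride becomes
     unavailable to the register allocator.  */
  for (i = first; i <= last; i += step)
    fixed[i] = call_used[i] = 1;
}
#endif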
/* Target hook for c_mode_for_suffix.  */

static enum machine_mode
pa_c_mode_for_suffix (char suffix)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      if (suffix == 'q')
	return TFmode;
    }

  return VOIDmode;
}
/* Target hook for function_section.  */

static section *
pa_function_section (tree decl, enum node_frequency freq,
		     bool startup, bool exit)
{
  /* Put functions in text section if target doesn't have named sections.  */
  if (!targetm_common.have_named_sections)
    return text_section;

  /* Force nested functions into the same section as the containing
     function.  */
  if (decl
      && DECL_SECTION_NAME (decl) == NULL_TREE
      && DECL_CONTEXT (decl) != NULL_TREE
      && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
      && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL_TREE)
    return function_section (DECL_CONTEXT (decl));

  /* Otherwise, use the default function section.  */
  return default_function_section (decl, freq, startup, exit);
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   In 64-bit mode, we reject CONST_DOUBLEs.  We also reject CONST_INTs
   that need more than three instructions to load prior to reload.  This
   limit is somewhat arbitrary.  It takes three instructions to load a
   CONST_INT from memory but two are memory accesses.  It may be better
   to increase the allowed range for CONST_INTs.  We may also be able
   to handle CONST_DOUBLEs.  */

static bool
pa_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
    return false;

  if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
    return false;

  /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
     legitimate constants.  */
  if (PA_SYMBOL_REF_TLS_P (x))
    {
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);

      if (model == TLS_MODEL_GLOBAL_DYNAMIC
	  || model == TLS_MODEL_LOCAL_DYNAMIC)
	return false;
    }

  if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (TARGET_64BIT
      && HOST_BITS_PER_WIDE_INT > 32
      && GET_CODE (x) == CONST_INT
      && !reload_in_progress
      && !reload_completed
      && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
      && !pa_cint_ok_for_move (INTVAL (x)))
    return false;

  if (function_label_operand (x, mode))
    return false;

  return true;
}
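/* The CONST_INT tests above track how PA materializes integer
   constants: one ldo for a 14-bit signed value, one ldil when only the
   high 21 bits of a 32-bit value are set.  A minimal sketch of two of
   the cheap cases -- pa_cint_ok_for_move also accepts zdepi-formable
   values, which this sketch omits; the helper name is illustrative.  */

#if 0 /* Illustrative only, not part of the build.  */
static int
cheap_cint_example (long long ival)
{
  return (ival >= -8192 && ival < 8192)	/* 14-bit signed: one ldo */
	 || (ival & 0x7ff) == 0;	/* low 11 bits zero (32-bit
					   value): one ldil */
}
#endif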
/* Implement TARGET_SECTION_TYPE_FLAGS.  */

static unsigned int
pa_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags;

  flags = default_section_type_flags (decl, name, reloc);

  /* Function labels are placed in the constant pool.  This can
     cause a section conflict if decls are put in ".data.rel.ro"
     or ".data.rel.ro.local" using the __attribute__ construct.  */
  if (strcmp (name, ".data.rel.ro") == 0
      || strcmp (name, ".data.rel.ro.local") == 0)
    flags |= SECTION_WRITE | SECTION_RELRO;

  return flags;
}
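/* A sketch of the kind of declaration the comment above is about --
   purely illustrative, with a hypothetical function foo:

     extern void foo (void);
     void (*fp) (void) __attribute__ ((section (".data.rel.ro"))) = foo;

   Taking foo's address on this target goes through the constant pool
   (a function label), so forcing the write and relro flags here keeps
   the user's section flags consistent with those the pool entry
   requires, avoiding the section conflict.  */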