/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "diagnostic-core.h"
#include "target.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, bool);
static bool hppa_rtx_costs (rtx, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static void pa_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
			    const_tree, bool);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					enum machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
						   enum machine_mode, int *,
						   const_tree, int);
static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mpa_risc_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_mpa_risc_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm.except_unwind_info () == UI_DWARF2 && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX_11
  if (built_in_decls[BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls[BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls[BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls[BUILT_IN_FINITEF], "_Isfinitef");
#endif
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || ldil_cint_p (ival)
	  || zdepi_cint_p (ival));
}
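
/* Illustrative examples (not part of the original sources): 42 and
   -8192 satisfy VAL_14_BITS_P and load with a single ldo; 0x12345000
   has its low 11 bits clear and does not change sign when widened,
   so ldil can load it; 0x00ff0000 is a contiguous run of ones, so
   zdepi can build it.  */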
/* Return truth value of whether OP can be used as an operand in a
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff the operand OP can be used as the destination operand of
   an integer store.  This also implies the operand could be used as
   the source operand of an integer load.  Symbolic, lo_sum and indexed
   memory operands are not allowed.  We accept reloading pseudos and
   other memory operands.  */
int
integer_store_memory_operand (rtx op, enum machine_mode mode)
{
  return ((reload_in_progress
	   && REG_P (op)
	   && REGNO (op) >= FIRST_PSEUDO_REGISTER
	   && reg_renumber[REGNO (op)] < 0)
	  || (GET_CODE (op) == MEM
	      && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
	      && !symbolic_memory_operand (op, VOIDmode)
	      && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
	      && !IS_INDEX_ADDR_P (XEXP (op, 0))));
}
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
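
/* Worked trace (illustrative, not from the original sources): for
   x = 0xf0 (binary 11110000), lsb_mask = x & -x = 0x10 and
   t = ((0xf0 >> 4) + 0x10) & ~(0x10 - 1) = (0x0f + 0x10) & ~0x0f = 0x10.
   Since 0x10 & 0x0f == 0, t is a power of two and 0xf0 is accepted:
   it is one contiguous run of ones, which zdepi can deposit by
   sign extending the 5-bit constant -1 over a 4-bit field.  */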
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
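
/* Illustrative trace (not from the original sources): ior_mask_p
   accepts mask = 0x1c (binary 11100): mask & -mask = 0x04 and
   0x1c + 0x04 = 0x20 is a power of two, so the ones are contiguous.
   mask = 0x14 (binary 10100) yields 0x18, not a power of two, and
   is rejected.  and_mask_p applies the same test to ~mask, accepting
   masks whose zeros form one contiguous run.  */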
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, mode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
      case TLS_MODEL_GLOBAL_DYNAMIC:
	tmp = gen_reg_rtx (Pmode);
	if (flag_pic)
	  emit_insn (gen_tgd_load_pic (tmp, addr));
	else
	  emit_insn (gen_tgd_load (tmp, addr));
	ret = hppa_tls_call (tmp);
	break;

      case TLS_MODEL_LOCAL_DYNAMIC:
	ret = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	start_sequence ();
	if (flag_pic)
	  emit_insn (gen_tld_load_pic (tmp, addr));
	else
	  emit_insn (gen_tld_load (tmp, addr));
	t1 = hppa_tls_call (tmp);
	insn = get_insns ();
	end_sequence ();
	t2 = gen_reg_rtx (Pmode);
	emit_libcall_block (insn, t2, t1,
			    gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					    UNSPEC_TLSLDBASE));
	emit_insn (gen_tld_offset_load (ret, addr, t2));
	break;

      case TLS_MODEL_INITIAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	if (flag_pic)
	  emit_insn (gen_tie_load_pic (tmp, addr));
	else
	  emit_insn (gen_tie_load (tmp, addr));
	emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
	break;

      case TLS_MODEL_LOCAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	emit_insn (gen_tle_load (ret, addr, tp));
	break;

      default:
	gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
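
/* Illustrative instance of the transformation above (not part of the
   original sources): for a word access to memory (X + 100000) with the
   MODE_INT mask 0x3fff, 100000 & 0x3fff = 0x06a0, which is below the
   halfway point 0x2000, so we round down: Y = 100000 & ~0x3fff = 98304.
   Z = X + 98304 goes into a register and the reference becomes
   memory (Z + 1696), where the residual 1696 fits in the 14-bit
   displacement of an ldo/ldw.  */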
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }
  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case CONST:
      return 2;
    case SYMBOL_REF:
      return 4;
    default:
      return 2;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem[REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem[REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     operand0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  */
	      if (decl)
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  type = strip_array_types (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }
	  if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress))
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
	    }
	  return 1;
	}
2005 else if (pa_tls_referenced_p (operand1
))
2010 if (GET_CODE (tmp
) == CONST
&& GET_CODE (XEXP (tmp
, 0)) == PLUS
)
2012 addend
= XEXP (XEXP (tmp
, 0), 1);
2013 tmp
= XEXP (XEXP (tmp
, 0), 0);
2016 gcc_assert (GET_CODE (tmp
) == SYMBOL_REF
);
2017 tmp
= legitimize_tls_address (tmp
);
2020 tmp
= gen_rtx_PLUS (mode
, tmp
, addend
);
2021 tmp
= force_operand (tmp
, operands
[0]);
2025 else if (GET_CODE (operand1
) != CONST_INT
2026 || !cint_ok_for_move (INTVAL (operand1
)))
2030 HOST_WIDE_INT value
= 0;
2031 HOST_WIDE_INT insv
= 0;
2034 if (GET_CODE (operand1
) == CONST_INT
)
2035 value
= INTVAL (operand1
);
2038 && GET_CODE (operand1
) == CONST_INT
2039 && HOST_BITS_PER_WIDE_INT
> 32
2040 && GET_MODE_BITSIZE (GET_MODE (operand0
)) > 32)
2044 /* Extract the low order 32 bits of the value and sign extend.
2045 If the new value is the same as the original value, we can
2046 can use the original value as-is. If the new value is
2047 different, we use it and insert the most-significant 32-bits
2048 of the original value into the final result. */
2049 nval
= ((value
& (((HOST_WIDE_INT
) 2 << 31) - 1))
2050 ^ ((HOST_WIDE_INT
) 1 << 31)) - ((HOST_WIDE_INT
) 1 << 31);
2053 #if HOST_BITS_PER_WIDE_INT > 32
2054 insv
= value
>= 0 ? value
>> 32 : ~(~value
>> 32);
2058 operand1
= GEN_INT (nval
);
2062 if (reload_in_progress
|| reload_completed
)
2063 temp
= scratch_reg
? scratch_reg
: operand0
;
2065 temp
= gen_reg_rtx (mode
);
2067 /* We don't directly split DImode constants on 32-bit targets
2068 because PLUS uses an 11-bit immediate and the insn sequence
2069 generated is not as efficient as the one using HIGH/LO_SUM. */
2070 if (GET_CODE (operand1
) == CONST_INT
2071 && GET_MODE_BITSIZE (mode
) <= BITS_PER_WORD
2072 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2075 /* Directly break constant into high and low parts. This
2076 provides better optimization opportunities because various
2077 passes recognize constants split with PLUS but not LO_SUM.
2078 We use a 14-bit signed low part except when the addition
2079 of 0x4000 to the high part might change the sign of the
2081 HOST_WIDE_INT low
= value
& 0x3fff;
2082 HOST_WIDE_INT high
= value
& ~ 0x3fff;
2086 if (high
== 0x7fffc000 || (mode
== HImode
&& high
== 0x4000))
2094 emit_insn (gen_rtx_SET (VOIDmode
, temp
, GEN_INT (high
)));
2095 operands
[1] = gen_rtx_PLUS (mode
, temp
, GEN_INT (low
));
2099 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
2100 gen_rtx_HIGH (mode
, operand1
)));
2101 operands
[1] = gen_rtx_LO_SUM (mode
, temp
, operand1
);
2104 insn
= emit_move_insn (operands
[0], operands
[1]);
2106 /* Now insert the most significant 32 bits of the value
2107 into the register. When we don't have a second register
2108 available, it could take up to nine instructions to load
2109 a 64-bit integer constant. Prior to reload, we force
2110 constants that would take more than three instructions
2111 to load to the constant pool. During and after reload,
2112 we have to handle all possible values. */
2115 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2116 register and the value to be inserted is outside the
2117 range that can be loaded with three depdi instructions. */
2118 if (temp
!= operand0
&& (insv
>= 16384 || insv
< -16384))
2120 operand1
= GEN_INT (insv
);
2122 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
2123 gen_rtx_HIGH (mode
, operand1
)));
2124 emit_move_insn (temp
, gen_rtx_LO_SUM (mode
, temp
, operand1
));
2125 emit_insn (gen_insv (operand0
, GEN_INT (32),
2130 int len
= 5, pos
= 27;
2132 /* Insert the bits using the depdi instruction. */
2135 HOST_WIDE_INT v5
= ((insv
& 31) ^ 16) - 16;
2136 HOST_WIDE_INT sign
= v5
< 0;
2138 /* Left extend the insertion. */
2139 insv
= (insv
>= 0 ? insv
>> len
: ~(~insv
>> len
));
2140 while (pos
> 0 && (insv
& 1) == sign
)
2142 insv
= (insv
>= 0 ? insv
>> 1 : ~(~insv
>> 1));
2147 emit_insn (gen_insv (operand0
, GEN_INT (len
),
2148 GEN_INT (pos
), GEN_INT (v5
)));
2150 len
= pos
> 0 && pos
< 5 ? pos
: 5;
2156 set_unique_reg_note (insn
, REG_EQUAL
, op1
);
2161 /* Now have insn-emit do whatever it normally does. */
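/* Worked example for the 64-bit constant handling in emit_move_sequence
   above (illustrative values, not from the original sources): for
   VALUE = 0x123456789, the low-order 32 bits sign extend to
   NVAL = 0x23456789.  Since NVAL differs from VALUE, the high-order
   bits INSV = VALUE >> 32 = 0x1 must be inserted separately.  NVAL is
   materialized with a HIGH/LO_SUM (ldil/ldo) pair, and because INSV
   lies inside [-16384, 16384), the insertion is done with the depdi
   loop rather than a second HIGH/LO_SUM/INSV sequence.  */
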
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= reloc_needed (TREE_OPERAND (exp, 1));
      break;

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
        tree value;
        unsigned HOST_WIDE_INT ix;

        FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
          if (value)
            reloc |= reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}

/* Does operand (which is a symbolic_operand) live in text space?
   If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
   will be true.  */

int
read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (operand) == CONST)
    operand = XEXP (XEXP (operand, 0), 0);
  if (flag_pic)
    {
      if (GET_CODE (operand) == SYMBOL_REF)
        return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
    }
  else
    {
      if (GET_CODE (operand) == SYMBOL_REF)
        return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
    }
  return 1;
}

/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */

const char *
singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
         bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
        return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
        return "ldil L'%1,%0";
      else if (zdepi_cint_p (intval))
        return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
        return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}

/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instruction.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32 - lsb; len++)
        {
          if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
            break;
        }

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}

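/* Worked example (illustrative): for IMM = 0x00fff000 the loops above
   find LSB = 12 and a 12-bit wide bitstring, so OP[0] = -1 (the field
   sign extended to 5 bits), OP[1] = 31 - 12 = 19 and OP[2] = 12.
   A possible use:

     unsigned op[3];
     compute_zdepwi_operands (0x00fff000, op);

   corresponding to "zdepi -1,19,12,%rN", which deposits twelve one
   bits at little-endian bit positions 12..23 of the target.  */
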
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instruction.  Store the immediate value to insert in OP[0].  */

static void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len, maxlen;

  maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < maxlen; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < maxlen - lsb; len++)
        {
          if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
            break;
        }

      /* Extend length if host is narrow and IMM is negative.  */
      if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
        len += 32;

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb;
  op[2] = len;
}

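/* Worked example (illustrative): for IMM = (HOST_WIDE_INT) 0xff << 32
   the code above finds LSB = 32 and an 8-bit bitstring, giving
   OP[0] = -1, OP[1] = 63 - 32 = 31 and OP[2] = 8, i.e. a
   "depdi,z -1,31,8,%rN" that sets bits 32..39 of the target.  */
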
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

const char *
output_move_double (rtx *operands)
{
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
    {
      if (FP_REG_P (operands[0]))
        {
          output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
          output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
          return "{fldds|fldd} -16(%%sp),%0";
        }
      else
        {
          output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
          output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
          return "{ldws|ldw} -12(%%sp),%R0";
        }
    }

  /* Handle auto decrementing and incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do it better when we teach
     this port about the general inc/dec addressing of PA.
     (This was written by tege.  Chide him if it doesn't work.)  */

  if (optype0 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
         doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

          operands[0] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[1]) == REG
                      && GET_CODE (operands[0]) == REG);

          gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

          /* No overlap between high target register and address
             register.  (We do this in a non-obvious way to
             save a register file writeback)  */
          if (GET_CODE (addr) == POST_INC)
            return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
          return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
        }
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

          operands[0] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[1]) == REG
                      && GET_CODE (operands[0]) == REG);

          gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

          /* No overlap between high target register and address
             register.  (We do this in a non-obvious way to save a
             register file writeback)  */
          if (GET_CODE (addr) == PRE_INC)
            return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
          return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
        }
    }
  if (optype1 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
         doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          operands[1] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[0]) == REG
                      && GET_CODE (operands[1]) == REG);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              /* No overlap between high target register and address
                 register.  (We do this in a non-obvious way to
                 save a register file writeback)  */
              if (GET_CODE (addr) == POST_INC)
                return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
              return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
            }
          else
            {
              /* This is an undefined situation.  We should load into the
                 address register *and* update that register.  Probably
                 we don't need to handle this at all.  */
              if (GET_CODE (addr) == POST_INC)
                return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
              return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
            }
        }
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          operands[1] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[0]) == REG
                      && GET_CODE (operands[1]) == REG);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              /* No overlap between high target register and address
                 register.  (We do this in a non-obvious way to
                 save a register file writeback)  */
              if (GET_CODE (addr) == PRE_INC)
                return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
              return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
            }
          else
            {
              /* This is an undefined situation.  We should load into the
                 address register *and* update that register.  Probably
                 we don't need to handle this at all.  */
              if (GET_CODE (addr) == PRE_INC)
                return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
              return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
            }
        }
      else if (GET_CODE (addr) == PLUS
               && GET_CODE (XEXP (addr, 0)) == MULT)
        {
          rtx xoperands[4];
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              xoperands[0] = high_reg;
              xoperands[1] = XEXP (addr, 1);
              xoperands[2] = XEXP (XEXP (addr, 0), 0);
              xoperands[3] = XEXP (XEXP (addr, 0), 1);
              output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
                               xoperands);
              return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
            }
          else
            {
              xoperands[0] = high_reg;
              xoperands[1] = XEXP (addr, 1);
              xoperands[2] = XEXP (XEXP (addr, 0), 0);
              xoperands[3] = XEXP (XEXP (addr, 0), 1);
              output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
                               xoperands);
              return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
            }
        }
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address (operands[0], SImode, 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

        mem -> register where the first half of the destination register
        is the same register used in the memory's address.  Reload
        can create such insns.

        mem in this case will be either register indirect or register
        indirect plus a valid offset.

        register -> register move where REGNO(dst) == REGNO(src + 1)
        someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
                            operands[1], 0))
    {
      /* Do the late half first.  */
      if (addreg1)
        output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (singlemove_string (latehalf), latehalf);

      /* Then clobber.  */
      if (addreg1)
        output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return singlemove_string (operands);
    }

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    {
      output_asm_insn (singlemove_string (latehalf), latehalf);
      return singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo -4(%0),%0", &addreg1);

  return "";
}

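/* Illustrative sequences (assuming 32-bit doubleword moves and GAS
   syntax; register numbers are hypothetical): copying general
   registers %r4/%r5 into floating register %fr12 bounces through the
   frame marker:

        stw %r4,-16(%sp)
        stw %r5,-12(%sp)
        fldd -16(%sp),%fr12

   while a DImode load from 0(%r4) into %r4/%r5, where the destination
   overlaps the address register, is done late half first:

        ldw 4(%r4),%r5
        ldw 0(%r4),%r4  */
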
const char *
output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
          || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
        output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
        output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
         arbitrary address here including pre/post increment/decrement.

         so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}

/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}

/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

const char *
output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[5]);
  unsigned long n_bytes = INTVAL (operands[4]);

  /* We can't move more than a word at a time because the PA
     has no integer move insns wider than a word.  (Could use
     fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 16);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("ldd,ma 8(%1),%3", operands);
        output_asm_insn ("ldd,ma 8(%1),%6", operands);
        output_asm_insn ("std,ma %3,8(%0)", operands);
        output_asm_insn ("addib,>= -16,%2,.-12", operands);
        output_asm_insn ("std,ma %6,8(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 16 != 0)
          {
            operands[4] = GEN_INT (n_bytes % 8);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("ldd,ma 8(%1),%3", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("ldd 0(%1),%6", operands);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("std,ma %3,8(%0)", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("stdby,e %6,%4(%0)", operands);
          }
        return "";

      case 4:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 8);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
        output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
        output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
        output_asm_insn ("addib,>= -8,%2,.-12", operands);
        output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 8 != 0)
          {
            operands[4] = GEN_INT (n_bytes % 4);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("ldw 0(%1),%6", operands);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
          }
        return "";

      case 2:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 4);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
        output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
        output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
        output_asm_insn ("addib,>= -4,%2,.-12", operands);
        output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 4 != 0)
          {
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("ldb 0(%1),%6", operands);
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("stb %6,0(%0)", operands);
          }
        return "";

      case 1:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 2);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
        output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
        output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
        output_asm_insn ("addib,>= -2,%2,.-12", operands);
        output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 2 != 0)
          {
            output_asm_insn ("ldb 0(%1),%3", operands);
            output_asm_insn ("stb %3,0(%0)", operands);
          }
        return "";

      default:
        gcc_unreachable ();
    }
}

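/* Illustrative expansion (word alignment, N_BYTES = 23; register
   numbers depend on the actual operands):

        ldi 15,%r2              ; 23 - 8, pre-adjusted counter
        ldw,ma 4(%r26),%r3      ; copying loop, two words per pass
        ldw,ma 4(%r26),%r31
        stw,ma %r3,4(%r25)
        addib,>= -8,%r2,.-12
        stw,ma %r31,4(%r25)
        ldw,ma 4(%r26),%r3      ; residual: 23 % 8 = 7 bytes
        ldw 0(%r26),%r31
        stw,ma %r3,4(%r25)
        stby,e %r31,3(%r25)  */
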
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as output_block_move, except that we
   count insns rather than emit them.  */

static int
compute_movmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than a word at a time because the PA
     has no integer move insns wider than a word.  (Could use
     fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns += 2;

      if ((n_bytes % align) != 0)
        n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}

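/* Worked example (illustrative), matching the expansion sketched after
   output_block_move: with ALIGN = 4 and N_BYTES = 23, the base loop
   costs 6 insns; 23 % 8 = 7 >= 4 adds 2 more, and 23 % 4 != 0 adds
   another 2, for 10 insns, so the returned length is 40 bytes.  */
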
/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

const char *
output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[3]);
  unsigned long n_bytes = INTVAL (operands[2]);

  /* We can't clear more than a word at a time because the PA
     has no integer move insns wider than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 16);
        output_asm_insn ("ldi %2,%1", operands);

        /* Clearing loop.  */
        output_asm_insn ("std,ma %%r0,8(%0)", operands);
        output_asm_insn ("addib,>= -16,%1,.-4", operands);
        output_asm_insn ("std,ma %%r0,8(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 16 != 0)
          {
            operands[2] = GEN_INT (n_bytes % 8);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("std,ma %%r0,8(%0)", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
          }
        return "";

      case 4:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 8);
        output_asm_insn ("ldi %2,%1", operands);

        /* Clearing loop.  */
        output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
        output_asm_insn ("addib,>= -8,%1,.-4", operands);
        output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 8 != 0)
          {
            operands[2] = GEN_INT (n_bytes % 4);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
          }
        return "";

      case 2:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 4);
        output_asm_insn ("ldi %2,%1", operands);

        /* Clearing loop.  */
        output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
        output_asm_insn ("addib,>= -4,%1,.-4", operands);
        output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 4 != 0)
          {
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("stb %%r0,0(%0)", operands);
          }
        return "";

      case 1:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 2);
        output_asm_insn ("ldi %2,%1", operands);

        /* Clearing loop.  */
        output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
        output_asm_insn ("addib,>= -2,%1,.-4", operands);
        output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 2 != 0)
          output_asm_insn ("stb %%r0,0(%0)", operands);

        return "";

      default:
        gcc_unreachable ();
    }
}

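/* Illustrative expansion (halfword alignment, N_BYTES = 10; the
   counter register shown is hypothetical):

        ldi 6,%r19              ; 10 - 4, pre-adjusted counter
        sth,ma %r0,2(%r26)      ; clearing loop
        addib,>= -4,%r19,.-4
        sth,ma %r0,2(%r26)
        sth,ma %r0,2(%r26)      ; residual: 10 % 4 = 2 bytes  */
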
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as output_block_clear, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no integer move insns wider than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns++;

      if ((n_bytes % align) != 0)
        n_insns++;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}

/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
        if ((mask & (1 << ls0)) == 0)
          break;

      for (ls1 = ls0; ls1 < 32; ls1++)
        if ((mask & (1 << ls1)) != 0)
          break;

      for (ms0 = ls1; ms0 < 32; ms0++)
        if ((mask & (1 << ms0)) == 0)
          break;

      gcc_assert (ms0 == 32);

      if (ls1 == 32)
        {
          len = ls0;

          gcc_assert (len);

          operands[2] = GEN_INT (len);
          return "{extru|extrw,u} %1,31,%2,%0";
        }
      else
        {
          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          p = 31 - ls0;
          len = ls1 - ls0;

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "{depi|depwi} 0,%2,%3,%0";
        }
    }
  else
    return "and %1,%2,%0";
}

/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
          break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
          break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
          break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
        {
          len = ls0;

          gcc_assert (len);

          operands[2] = GEN_INT (len);
          return "extrd,u %1,63,%2,%0";
        }
      else
        {
          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          p = 63 - ls0;
          len = ls1 - ls0;

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "depdi 0,%2,%3,%0";
        }
    }
  else
    return "and %1,%2,%0";
}

const char *
output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}

/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */

const char *
output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
              || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}

/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}

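/* For example (illustrative), a word-sized reference to a function FOO
   emitted through this hook comes out as

        .word   P%foo

   so the linker can substitute a plabel for the raw code address.  */
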
/* Output an ascii string.  */

void
output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
        {
          register unsigned int c = (unsigned char) p[i + io];

          if (c == '\"' || c == '\\')
            partial_output[co++] = '\\';
          if (c >= ' ' && c < 0177)
            partial_output[co++] = c;
          else
            {
              unsigned int hexd;
              partial_output[co++] = '\\';
              partial_output[co++] = 'x';
              hexd = c / 16 + '0';
              if (hexd > '9')
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
              hexd = c % 16 + '0';
              if (hexd > '9')
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
            }
        }
      if (chars_output + co > 243)
        {
          fputs ("\"\n\t.STRING \"", file);
          chars_output = 0;
        }
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}

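/* For example (illustrative), the 5-character string "ab\"c\n" is
   emitted as

        .STRING "ab\"c\x0a"

   with a fresh .STRING directive started whenever an output line
   would exceed the assembler's input line limit.  */
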
/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD
   notes lying around.

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch
   instructions.  */

static void
remove_useless_addtr_insns (int check_notes)
{
  rtx insn;
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */
  if (optimize > 0)
    {
      int fcmp_count = 0;
      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
         instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
        {
          rtx tmp;

          /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
          if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
            continue;

          tmp = PATTERN (insn);

          /* It must be a set.  */
          if (GET_CODE (tmp) != SET)
            continue;

          /* If the destination is CCFP, then we've found an fcmp insn.  */
          tmp = SET_DEST (tmp);
          if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
            {
              fcmp_count++;
              continue;
            }

          tmp = PATTERN (insn);
          /* If this is an fbranch instruction, bump the fbranch counter.  */
          if (GET_CODE (tmp) == SET
              && SET_DEST (tmp) == pc_rtx
              && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
              && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
              && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
              && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
            {
              fbranch_count++;
              continue;
            }
        }

      /* Find all floating point compare + branch insns.  If possible,
         reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
        {
          rtx tmp, next;

          /* Ignore anything that isn't an INSN.  */
          if (GET_CODE (insn) != INSN)
            continue;

          tmp = PATTERN (insn);

          /* It must be a set.  */
          if (GET_CODE (tmp) != SET)
            continue;

          /* The destination must be CCFP, which is register zero.  */
          tmp = SET_DEST (tmp);
          if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
            continue;

          /* INSN should be a set of CCFP.

             See if the result of this insn is used in a reversed FP
             conditional branch.  If so, reverse our condition and
             the branch.  Doing so avoids useless add,tr insns.  */
          next = next_insn (insn);
          while (next)
            {
              /* Jumps, calls and labels stop our search.  */
              if (GET_CODE (next) == JUMP_INSN
                  || GET_CODE (next) == CALL_INSN
                  || GET_CODE (next) == CODE_LABEL)
                break;

              /* As does another fcmp insn.  */
              if (GET_CODE (next) == INSN
                  && GET_CODE (PATTERN (next)) == SET
                  && GET_CODE (SET_DEST (PATTERN (next))) == REG
                  && REGNO (SET_DEST (PATTERN (next))) == 0)
                break;

              next = next_insn (next);
            }

          /* Is NEXT_INSN a branch?  */
          if (next
              && GET_CODE (next) == JUMP_INSN)
            {
              rtx pattern = PATTERN (next);

              /* If it is a reversed fp conditional branch (e.g. uses
                 add,tr) and CCFP dies, then reverse our conditional and
                 the branch to avoid the add,tr.  */
              if (GET_CODE (pattern) == SET
                  && SET_DEST (pattern) == pc_rtx
                  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
                  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
                  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
                  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
                  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
                  && (fcmp_count == fbranch_count
                      || (check_notes
                          && find_regno_note (next, REG_DEAD, 0))))
                {
                  /* Reverse the branch.  */
                  tmp = XEXP (SET_SRC (pattern), 1);
                  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
                  XEXP (SET_SRC (pattern), 2) = tmp;
                  INSN_CODE (next) = -1;

                  /* Reverse our condition.  */
                  tmp = PATTERN (insn);
                  PUT_CODE (XEXP (tmp, 1),
                            (reverse_condition_maybe_unordered
                             (GET_CODE (XEXP (tmp, 1)))));
                }
            }
        }
    }

  pass = !pass;
}

/* You may have trouble believing this, but this is the 32 bit HP-PA
   stack layout.  Wow.

   Offset               Contents

   Variable arguments   (optional; any number may be allocated)

   SP-(4*(N+9))         arg word N
        :                   :
   SP-56                arg word 5
   SP-52                arg word 4

   Fixed arguments      (must be allocated; may remain unused)

   SP-48                arg word 3
   SP-44                arg word 2
   SP-40                arg word 1
   SP-36                arg word 0

   Frame Marker

   SP-32                External Data Pointer (DP)
   SP-28                External sr4
   SP-24                External/stub RP (RP')
   SP-20                Current RP
   SP-16                Static Link
   SP-12                Clean up
   SP-8                 Calling Stub RP (RP'')
   SP-4                 Previous SP

   Top of Frame

   SP-0                 Stack Pointer (points to next available address)

*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   things simple.

   Top of Frame

       SP (FP')         Previous FP
       SP + 4           Alignment filler (sigh)
       SP + 8           Space for locals reserved here.
       .
       .
       .
       SP + n           All call saved register used.
       .
       .
       .
       SP + o           All call saved fp registers used.
       .
       .
       .
       SP + p (SP')     points to next available address.

*/

/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;

/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.  */

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx insn, dest, src, basereg;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
        {
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_rtx_SET (VOIDmode, tmpreg,
                                     gen_rtx_PLUS (Pmode, basereg, delta)));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (VOIDmode,
                                   gen_rtx_MEM (word_mode,
                                                gen_rtx_PLUS (word_mode,
                                                              basereg,
                                                              delta)),
                                   src));
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}

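/* Illustrative output (assuming a 32-bit target): store_reg (3, 8, 30)
   produces "stw %r3,8(%r30)", while a displacement that does not fit
   in 14 bits, say store_reg (3, 16384, 30), comes out as

        addil L'16384,%r30
        stw %r3,R'16384(%r1)

   leaving the high part of the address in %r1 as noted above.  */
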
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx insn, basereg, srcreg, delta;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
         in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}

/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             plus_constant (gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (VOIDmode, tmpreg,
                                   gen_rtx_PLUS (Pmode, basereg, delta)));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
                      gen_rtx_PLUS (Pmode, basereg,
                                    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}

HOST_WIDE_INT
compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in hppa_expand_prologue and hppa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
        continue;
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (df_regs_ever_live_p (i))
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (df_regs_ever_live_p (i)
        || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
      {
        freg_saved = 1;

        /* We always save both halves of the FP register, so always
           increment the frame size by 8 bytes.  */
        size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
        *fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += crtl->outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!current_function_is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
          & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}

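/* Worked example (illustrative, assuming the 32-bit ABI with a
   STARTING_FRAME_OFFSET of 8 and a 64-byte preferred stack boundary):
   a non-leaf function with 40 bytes of locals and two callee general
   register saves computes 40 + 8 + 2*4 = 56 bytes; adding 32 bytes
   for the fixed frame marker gives 88, which rounds up to a frame of
   128 bytes.  */
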
/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is an int: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

static void
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* hppa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (current_function_is_leaf)
    fputs (",NO_CALLS", file);
  else
    fputs (",CALLS", file);
  if (rp_saved)
    fputs (",SAVE_RP", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
}

void
hppa_expand_prologue (void)
{
  int merge_sp_adjust_with_store = 0;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;
  int i;
  rtx insn, tmpreg;

  gr_saved = 0;
  fr_saved = 0;
  save_fregs = 0;

  /* Compute total size for frame pointer, filler, locals and rounding to
     the next word boundary.  Similar code appears in compute_frame_size
     and must be changed in tandem with this code.  */
  local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
  if (local_fsize || frame_pointer_needed)
    local_fsize += STARTING_FRAME_OFFSET;

  actual_fsize = compute_frame_size (size, &save_fregs);
  if (flag_stack_usage)
    current_function_static_stack_size = actual_fsize;

  /* Compute a few things we will use often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Save RP first.  The calling conventions manual states RP will
     always be stored into the caller's frame at sp - 20 or sp - 16
     depending on which ABI is in use.  */
  if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
    {
      store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
      rp_saved = true;
    }
  else
    rp_saved = false;

  /* Allocate the local frame and set up the frame pointer if needed.  */
  if (actual_fsize != 0)
    {
      if (frame_pointer_needed)
        {
          /* Copy the old frame pointer temporarily into %r1.  Set up the
             new stack pointer, then store away the saved old frame pointer
             into the stack at sp and at the same time update the stack
             pointer by actual_fsize bytes.  Two versions, first
             handles small (<8k) frames.  The second handles large (>=8k)
             frames.  */
          insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
          if (DO_FRAME_NOTES)
            RTX_FRAME_RELATED_P (insn) = 1;

          insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
          if (DO_FRAME_NOTES)
            RTX_FRAME_RELATED_P (insn) = 1;

          if (VAL_14_BITS_P (actual_fsize))
            store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
          else
            {
              /* It is incorrect to store the saved frame pointer at *sp,
                 then increment sp (writes beyond the current stack boundary).

                 So instead use stwm to store at *sp and post-increment the
                 stack pointer as an atomic operation.  Then increment sp to
                 finish allocating the new frame.  */
              HOST_WIDE_INT adjust1 = 8192 - 64;
              HOST_WIDE_INT adjust2 = actual_fsize - adjust1;

              store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
              set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                              adjust2, 1);
            }

          /* We set SAVE_SP in frames that need a frame pointer.  Thus,
             we need to store the previous stack pointer (frame pointer)
             into the frame marker on targets that use the HP unwind
             library.  This allows the HP unwind library to be used to
             unwind GCC frames.  However, we are not fully compatible
             with the HP library because our frame layout differs from
             that specified in the HP runtime specification.

             We don't want a frame note on this instruction as the frame
             marker moves during dynamic stack allocation.

             This instruction also serves as a blockage to prevent
             register spills from being scheduled before the stack
             pointer is raised.  This is necessary as we store
             registers using the frame pointer as a base register,
             and the frame pointer is set before sp is raised.  */
          if (TARGET_HPUX_UNWIND_LIBRARY)
            {
              rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
                                       GEN_INT (TARGET_64BIT ? -8 : -4));

              emit_move_insn (gen_rtx_MEM (word_mode, addr),
                              hard_frame_pointer_rtx);
            }
          else
            emit_insn (gen_blockage ());
        }
      /* no frame pointer needed.  */
      else
        {
          /* In some cases we can perform the first callee register save
             and allocating the stack frame at the same time.   If so, just
             make a note of it and defer allocating the frame until saving
             the callee registers.  */
          if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
            merge_sp_adjust_with_store = 1;
          /* Can not optimize.  Adjust the stack frame by actual_fsize
             bytes.  */
          else
            set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                            actual_fsize, 1);
        }
    }

  /* Normal register save.

     Do not save the frame pointer in the frame_pointer_needed case.  It
     was done earlier.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* Saving the EH return data registers in the frame is the simplest
         way to get the frame unwind information emitted.  We put them
         just before the general registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
        {
          unsigned int i, regno;

          for (i = 0; ; ++i)
            {
              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;

              store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }

      for (i = 18; i >= 4; i--)
        if (df_regs_ever_live_p (i) && ! call_used_regs[i])
          {
            store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
            offset += UNITS_PER_WORD;
            gr_saved++;
          }
      /* Account for %r3 which is saved in a special place.  */
      gr_saved++;
    }
  /* No frame pointer needed.  */
  else
    {
      offset = local_fsize - actual_fsize;

      /* Saving the EH return data registers in the frame is the simplest
         way to get the frame unwind information emitted.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
        {
          unsigned int i, regno;

          for (i = 0; ; ++i)
            {
              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;

              /* If merge_sp_adjust_with_store is nonzero, then we can
                 optimize the first save.  */
              if (merge_sp_adjust_with_store)
                {
                  store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
                  merge_sp_adjust_with_store = 0;
                }
              else
                store_reg (regno, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }

      for (i = 18; i >= 3; i--)
        if (df_regs_ever_live_p (i) && ! call_used_regs[i])
          {
            /* If merge_sp_adjust_with_store is nonzero, then we can
               optimize the first GR save.  */
            if (merge_sp_adjust_with_store)
              {
                store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
                merge_sp_adjust_with_store = 0;
              }
            else
              store_reg (i, offset, STACK_POINTER_REGNUM);
            offset += UNITS_PER_WORD;
            gr_saved++;
          }

      /* If we wanted to merge the SP adjustment with a GR save, but we never
         did any GR saves, then just emit the adjustment here.  */
      if (merge_sp_adjust_with_store)
        set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                        actual_fsize, 1);
    }

  /* The hppa calling conventions say that %r19, the pic offset
     register, is saved at sp - 32 (in this function's frame)
     when generating PIC code.  FIXME:  What is the correct thing
     to do for functions which make no calls and allocate no
     frame?  Do we need to allocate a frame, or can we just omit
     the save?   For now we'll just omit the save.

     We don't want a note on this insn as the frame marker can
     move if there is a dynamic stack allocation.  */
  if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
    {
      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

      emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* Floating point register store.  */
  if (save_fregs)
    {
      rtx base;

      /* First get the frame or stack pointer to the start of the FP register
         save area.  */
      if (frame_pointer_needed)
        {
          set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
          base = hard_frame_pointer_rtx;
        }
      else
        {
          set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
          base = stack_pointer_rtx;
        }

      /* Now actually save the FP registers.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
        {
          if (df_regs_ever_live_p (i)
              || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
            {
              rtx addr, insn, reg;
              addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
              reg = gen_rtx_REG (DFmode, i);
              insn = emit_move_insn (addr, reg);
              if (DO_FRAME_NOTES)
                {
                  RTX_FRAME_RELATED_P (insn) = 1;
                  if (TARGET_64BIT)
                    {
                      rtx mem = gen_rtx_MEM (DFmode,
                                             plus_constant (base, offset));
                      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                                    gen_rtx_SET (VOIDmode, mem, reg));
                    }
                  else
                    {
                      rtx meml = gen_rtx_MEM (SFmode,
                                              plus_constant (base, offset));
                      rtx memr = gen_rtx_MEM (SFmode,
                                              plus_constant (base,
                                                             offset + 4));
                      rtx regl = gen_rtx_REG (SFmode, i);
                      rtx regr = gen_rtx_REG (SFmode, i + 1);
                      rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
                      rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
                      rtvec vec;

                      RTX_FRAME_RELATED_P (setl) = 1;
                      RTX_FRAME_RELATED_P (setr) = 1;
                      vec = gen_rtvec (2, setl, setr);
                      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                                    gen_rtx_SEQUENCE (VOIDmode, vec));
                    }
                }
              offset += GET_MODE_SIZE (DFmode);
              fr_saved++;
            }
        }
    }
}

/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
        {
          emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
          src = gen_rtx_MEM (word_mode, tmpreg);
        }
      else
        src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}

/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (unsigned int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      unsigned int old_total = total_code_bytes;

      total_code_bytes += nbytes;

      /* Be prepared to handle overflows.  */
      if (old_total > total_code_bytes)
        total_code_bytes = UINT_MAX;
    }
}

/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

static void
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rtx insn = get_last_insn ();

  last_address = 0;

  /* hppa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end
     of a function.

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (GET_CODE (insn) == NOTE)
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && GET_CODE (insn) == CALL_INSN)
    {
      fputs ("\tnop\n", file);
      last_address += 4;
    }

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We're done with this subspace except possibly for some additional
         debug information.  Forget that we are in this subspace to ensure
         that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  if (INSN_ADDRESSES_SET_P ())
    {
      insn = get_last_nonnote_insn ();
      last_address += INSN_ADDRESSES (INSN_UID (insn));
      if (INSN_P (insn))
        last_address += insn_default_length (insn);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
                      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
    }
  else
    last_address = UINT_MAX;

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
}
void
hppa_expand_epilogue (void)
{
  rtx tmpreg;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;
  int i;
  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (rp_saved)
    {
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
        {
          load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
          ret_off = 0;
        }
      else
        {
          /* No frame pointer, and stack is smaller than 8k.  */
          if (VAL_14_BITS_P (ret_off - actual_fsize))
            {
              load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
              ret_off = 0;
            }
        }
    }

  /* General register restores.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
         to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
        {
          unsigned int i, regno;

          for (i = 0; ; ++i)
            {
              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;

              load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }

      for (i = 18; i >= 4; i--)
        if (df_regs_ever_live_p (i) && ! call_used_regs[i])
          {
            load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
            offset += UNITS_PER_WORD;
          }
    }
  else
    {
      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
         to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
        {
          unsigned int i, regno;

          for (i = 0; ; ++i)
            {
              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;

              /* Only for the first load.
                 merge_sp_adjust_with_load holds the register load
                 with which we will merge the sp adjustment.  */
              if (merge_sp_adjust_with_load == 0
                  && local_fsize == 0
                  && VAL_14_BITS_P (-actual_fsize))
                merge_sp_adjust_with_load = regno;
              else
                load_reg (regno, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }

      for (i = 18; i >= 3; i--)
        {
          if (df_regs_ever_live_p (i) && ! call_used_regs[i])
            {
              /* Only for the first load.
                 merge_sp_adjust_with_load holds the register load
                 with which we will merge the sp adjustment.  */
              if (merge_sp_adjust_with_load == 0
                  && local_fsize == 0
                  && VAL_14_BITS_P (-actual_fsize))
                merge_sp_adjust_with_load = i;
              else
                load_reg (i, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */
  if (save_fregs)
    {
      /* Adjust the register to index off of.  */
      if (frame_pointer_needed)
        set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
      else
        set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
        if (df_regs_ever_live_p (i)
            || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
          {
            rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
            rtx dest = gen_rtx_REG (DFmode, i);
            emit_move_insn (dest, src);
          }
    }

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
  if (frame_pointer_needed)
    {
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (hard_frame_pointer_rtx,
                               stack_pointer_rtx, delta));
    }
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
    {
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
    }
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                    - actual_fsize, 0);

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
  if (ret_off != 0)
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
                 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
                 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
    }
}
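/* Illustrative note (an assumption, not from the original source): with
   a frame pointer, the two insns emitted in the frame-pointer branch
   above typically assemble to

        ldo 64(%r3),%sp         ; sp = fp + 64
        ldw,mb -64(%sp),%r3     ; restore old fp, sp -= 64

   the pre-modify load being the gen_pre_load pattern, so the old frame
   pointer is recovered as the stack is cut back.  */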
rtx
hppa_pic_save_rtx (void)
{
  return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
}
#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0
#endif

/* Vector of funcdef numbers.  */
static VEC(int,heap) *funcdef_nos;

/* Output deferred profile counters.  */
static void
output_deferred_profile_counters (void)
{
  unsigned int i;
  int align, n;

  if (VEC_empty (int, funcdef_nos))
    return;

  switch_to_section (data_section);
  align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
  ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));

  for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "LP", n);
      assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
    }

  VEC_free (int, heap, funcdef_nos);
}
void
hppa_profile_hook (int label_no)
{
  /* We use SImode for the address of the function in both 32 and
     64-bit code to avoid having to provide DImode versions of the
     lcla2 and load_offset_label_address insn patterns.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx label_rtx = gen_label_rtx ();
  rtx begin_label_rtx, call_insn;
  char begin_label_name[16];

  ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
                               label_no);
  begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));

  if (TARGET_64BIT)
    emit_move_insn (arg_pointer_rtx,
                    gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
                                  GEN_INT (64)));

  emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));

  /* The address of the function is loaded into %r25 with an instruction-
     relative sequence that avoids the use of relocations.  The sequence
     is split so that the load_offset_label_address instruction can
     occupy the delay slot of the call to _mcount.  */
  if (TARGET_PA_20)
    emit_insn (gen_lcla2 (reg, label_rtx));
  else
    emit_insn (gen_lcla1 (reg, label_rtx));

  emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
                                            reg, begin_label_rtx, label_rtx));

#if !NO_DEFERRED_PROFILE_COUNTERS
  {
    rtx count_label_rtx, addr, r24;
    char count_label_name[16];

    VEC_safe_push (int, heap, funcdef_nos, label_no);
    ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
    count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));

    addr = force_reg (Pmode, count_label_rtx);
    r24 = gen_rtx_REG (Pmode, 24);
    emit_move_insn (r24, addr);

    call_insn =
      emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
                                             gen_rtx_SYMBOL_REF (Pmode,
                                                                 "_mcount")),
                                GEN_INT (TARGET_64BIT ? 24 : 12)));

    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
  }
#else
  call_insn =
    emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
                                           gen_rtx_SYMBOL_REF (Pmode,
                                                               "_mcount")),
                              GEN_INT (TARGET_64BIT ? 16 : 8)));
#endif

  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));

  /* Indicate the _mcount call cannot throw, nor will it execute a
     non-local goto.  */
  make_reg_eh_region_note_nothrow_nononlocal (call_insn);
}
/* Fetch the return address for the frame COUNT steps up from
   the current frame, after the prologue.  FRAMEADDR is the
   frame pointer of the COUNT frame.

   We want to ignore any export stub remnants here.  To handle this,
   we examine the code at the return address, and if it is an export
   stub, we return a memory rtx for the stub return address stored
   at frame-24.

   The value returned is used in two different ways:

        1. To find a function's caller.

        2. To change the return address for a function.

   This function handles most instances of case 1; however, it will
   fail if there are two levels of stubs to execute on the return
   path.  The only way I believe that can happen is if the return value
   needs a parameter relocation, which never happens for C code.

   This function handles most instances of case 2; however, it will
   fail if we did not originally have stub code on the return path
   but will need stub code on the new return path.  This can happen if
   the caller & callee are both in the main program, but the new
   return location is in a shared library.  */

rtx
return_addr_rtx (int count, rtx frameaddr)
{
  rtx label;
  rtx rp;
  rtx saved_rp;
  rtx ins;

  /* Instruction stream at the normal return address for the export stub:

        0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
        0x004010a1 | stub+12:  ldsid (sr0,rp),r1
        0x00011820 | stub+16:  mtsp r1,sr0
        0xe0400002 | stub+20:  be,n 0(sr0,rp)

     0xe0400002 must be specified as -532676606 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.  */

  HOST_WIDE_INT insns[4] = {0x4bc23fd1, 0x004010a1, 0x00011820, -532676606};
  int i;

  if (count != 0)
    return NULL_RTX;

  rp = get_hard_reg_initial_val (Pmode, 2);

  if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
    return rp;

  /* If there is no export stub then just use the value saved from
     the return pointer register.  */

  saved_rp = gen_reg_rtx (Pmode);
  emit_move_insn (saved_rp, rp);

  /* Get pointer to the instruction stream.  We have to mask out the
     privilege level from the two low order bits of the return address
     pointer here so that ins will point to the start of the first
     instruction that would have been executed if we returned.  */
  ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
  label = gen_label_rtx ();

  /* Check the instruction stream at the normal return address for the
     export stub.  If it is an export stub, then our return address is
     really in -24[frameaddr].  */

  for (i = 0; i < 3; i++)
    {
      rtx op0 = gen_rtx_MEM (SImode, plus_constant (ins, i * 4));
      rtx op1 = GEN_INT (insns[i]);
      emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
    }

  /* Here we know that our return address points to an export
     stub.  We don't want to return the address of the export stub,
     but rather the return address of the export stub.  That return
     address is stored at -24[frameaddr].  */

  emit_move_insn (saved_rp,
                  gen_rtx_MEM (Pmode,
                               memory_address (Pmode,
                                               plus_constant (frameaddr,
                                                              -24))));

  emit_label (label);

  return saved_rp;
}
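/* Worked example (added for illustration): MASK_RETURN_ADDR clears the
   two low-order privilege-level bits of the return pointer, so an RP
   value of, say, 0x40001053 yields INS = 0x40001050, the address of the
   first instruction that would execute on return.  */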
void
emit_bcond_fp (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[0]);
  rtx operand0 = operands[1];
  rtx operand1 = operands[2];
  rtx label = operands[3];

  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
                          gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                               gen_rtx_IF_THEN_ELSE (VOIDmode,
                                                     gen_rtx_fmt_ee (NE,
                                                              VOIDmode,
                                                              gen_rtx_REG (CCFPmode, 0),
                                                              const0_rtx),
                                                     gen_rtx_LABEL_REF (VOIDmode, label),
                                                     pc_rtx)));
}
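/* Sketch of the RTL built above (added for illustration; the register
   numbers are hypothetical):

        (set (reg:CCFP 0) (lt:CCFP (reg:DF fr4) (reg:DF fr5)))
        (set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
                                (label_ref L) (pc)))

   i.e. a floating-point compare into the CCFP condition register
   followed by a conditional jump on its result.  */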
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  /* Don't adjust costs for a pa8000 chip, also do not adjust any
     true dependencies as they are described with bypasses now.  */
  if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
    return cost;

  if (! recog_memoized (insn))
    return 0;

  attr_type = get_attr_type (insn);

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
         cycles later.  */

      if (attr_type == TYPE_FPLOAD)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);
          if (GET_CODE (pat) == PARALLEL)
            {
              /* This happens for the fldXs,mb patterns.  */
              pat = XVECEXP (pat, 0, 0);
            }
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return 0 for now.  */
            return 0;

          if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
            {
              if (! recog_memoized (dep_insn))
                return 0;
              switch (get_attr_type (dep_insn))
                {
                case TYPE_FPALU:
                case TYPE_FPMULSGL:
                case TYPE_FPMULDBL:
                case TYPE_FPDIVSGL:
                case TYPE_FPDIVDBL:
                case TYPE_FPSQRTSGL:
                case TYPE_FPSQRTDBL:
                  /* A fpload can't be issued until one cycle before a
                     preceding arithmetic operation has finished if
                     the target of the fpload is any of the sources
                     (or destination) of the arithmetic operation.  */
                  return insn_default_latency (dep_insn) - 1;

                default:
                  return 0;
                }
            }
        }
      else if (attr_type == TYPE_FPALU)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);
          if (GET_CODE (pat) == PARALLEL)
            {
              /* This happens for the fldXs,mb patterns.  */
              pat = XVECEXP (pat, 0, 0);
            }
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return 0 for now.  */
            return 0;

          if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
            {
              if (! recog_memoized (dep_insn))
                return 0;
              switch (get_attr_type (dep_insn))
                {
                case TYPE_FPDIVSGL:
                case TYPE_FPDIVDBL:
                case TYPE_FPSQRTSGL:
                case TYPE_FPSQRTDBL:
                  /* An ALU flop can't be issued until two cycles before a
                     preceding divide or sqrt operation has finished if
                     the target of the ALU flop is any of the sources
                     (or destination) of the divide or sqrt operation.  */
                  return insn_default_latency (dep_insn) - 2;

                default:
                  return 0;
                }
            }
        }

      /* For other anti dependencies, the cost is 0.  */
      return 0;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
         cycles later.  */
      if (attr_type == TYPE_FPLOAD)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);
          if (GET_CODE (pat) == PARALLEL)
            {
              /* This happens for the fldXs,mb patterns.  */
              pat = XVECEXP (pat, 0, 0);
            }
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return 0 for now.  */
            return 0;

          if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
            {
              if (! recog_memoized (dep_insn))
                return 0;
              switch (get_attr_type (dep_insn))
                {
                case TYPE_FPALU:
                case TYPE_FPMULSGL:
                case TYPE_FPMULDBL:
                case TYPE_FPDIVSGL:
                case TYPE_FPDIVDBL:
                case TYPE_FPSQRTSGL:
                case TYPE_FPSQRTDBL:
                  /* A fpload can't be issued until one cycle before a
                     preceding arithmetic operation has finished if
                     the target of the fpload is the destination of the
                     arithmetic operation.

                     Exception: For PA7100LC, PA7200 and PA7300, the cost
                     is 3 cycles, unless they bundle together.  We also
                     pay the penalty if the second insn is a fpload.  */
                  return insn_default_latency (dep_insn) - 1;

                default:
                  return 0;
                }
            }
        }
      else if (attr_type == TYPE_FPALU)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);
          if (GET_CODE (pat) == PARALLEL)
            {
              /* This happens for the fldXs,mb patterns.  */
              pat = XVECEXP (pat, 0, 0);
            }
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return 0 for now.  */
            return 0;

          if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
            {
              if (! recog_memoized (dep_insn))
                return 0;
              switch (get_attr_type (dep_insn))
                {
                case TYPE_FPDIVSGL:
                case TYPE_FPDIVDBL:
                case TYPE_FPSQRTSGL:
                case TYPE_FPSQRTDBL:
                  /* An ALU flop can't be issued until two cycles before a
                     preceding divide or sqrt operation has finished if
                     the target of the ALU flop is also the target of
                     the divide or sqrt operation.  */
                  return insn_default_latency (dep_insn) - 2;

                default:
                  return 0;
                }
            }
        }

      /* For other output dependencies, the cost is 0.  */
      return 0;

    default:
      gcc_unreachable ();
    }
}
/* Adjust scheduling priorities.  We use this to try and keep addil
   and the next use of %r1 close together.  */
static int
pa_adjust_priority (rtx insn, int priority)
{
  rtx set = single_set (insn);
  rtx src, dest;
  if (set)
    {
      src = SET_SRC (set);
      dest = SET_DEST (set);
      if (GET_CODE (src) == LO_SUM
          && symbolic_operand (XEXP (src, 1), VOIDmode)
          && ! read_only_operand (XEXP (src, 1), VOIDmode))
        priority >>= 3;

      else if (GET_CODE (src) == MEM
               && GET_CODE (XEXP (src, 0)) == LO_SUM
               && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
               && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
        priority >>= 1;

      else if (GET_CODE (dest) == MEM
               && GET_CODE (XEXP (dest, 0)) == LO_SUM
               && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
               && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
        priority >>= 3;
    }
  return priority;
}
/* The 700 can only issue a single insn at a time.
   The 7XXX processors can issue two insns at a time.
   The 8000 can issue 4 insns at a time.  */
static int
pa_issue_rate (void)
{
  switch (pa_cpu)
    {
    case PROCESSOR_700:		return 1;
    case PROCESSOR_7100:	return 2;
    case PROCESSOR_7100LC:	return 2;
    case PROCESSOR_7200:	return 2;
    case PROCESSOR_7300:	return 2;
    case PROCESSOR_8000:	return 4;

    default:
      gcc_unreachable ();
    }
}
/* Return any length adjustment needed by INSN which already has its length
   computed as LENGTH.  Return zero if no adjustment is necessary.

   For the PA: function calls, millicode calls, and backwards short
   conditional branches with unfilled delay slots need an adjustment by +1
   (to account for the NOP which will be inserted into the instruction stream).

   Also compute the length of an inline block move here as it is too
   complicated to express as a length attribute in pa.md.  */
int
pa_adjust_insn_length (rtx insn, int length)
{
  rtx pat = PATTERN (insn);

  /* Jumps inside switch tables which have unfilled delay slots need
     adjustment.  */
  if (GET_CODE (insn) == JUMP_INSN
      && GET_CODE (pat) == PARALLEL
      && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
    return 4;
  /* Millicode insn with an unfilled delay slot.  */
  else if (GET_CODE (insn) == INSN
           && GET_CODE (pat) != SEQUENCE
           && GET_CODE (pat) != USE
           && GET_CODE (pat) != CLOBBER
           && get_attr_type (insn) == TYPE_MILLI)
    return 4;
  /* Block move pattern.  */
  else if (GET_CODE (insn) == INSN
           && GET_CODE (pat) == PARALLEL
           && GET_CODE (XVECEXP (pat, 0, 0)) == SET
           && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
           && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
           && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
           && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
    return compute_movmem_length (insn) - 4;
  /* Block clear pattern.  */
  else if (GET_CODE (insn) == INSN
           && GET_CODE (pat) == PARALLEL
           && GET_CODE (XVECEXP (pat, 0, 0)) == SET
           && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
           && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
           && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
    return compute_clrmem_length (insn) - 4;
  /* Conditional branch with an unfilled delay slot.  */
  else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
    {
      /* Adjust a short backwards conditional with an unfilled delay slot.  */
      if (GET_CODE (pat) == SET
          && length == 4
          && JUMP_LABEL (insn) != NULL_RTX
          && ! forward_branch_p (insn))
        return 4;
      else if (GET_CODE (pat) == PARALLEL
               && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
               && length == 4)
        return 4;
      /* Adjust dbra insn with short backwards conditional branch with
         unfilled delay slot -- only for case where counter is in a
         general register.  */
      else if (GET_CODE (pat) == PARALLEL
               && GET_CODE (XVECEXP (pat, 0, 1)) == SET
               && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
               && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
               && length == 4
               && ! forward_branch_p (insn))
        return 4;
      else
        return 0;
    }
  return 0;
}
/* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook.  */

static bool
pa_print_operand_punct_valid_p (unsigned char code)
{
  if (code == '@'
      || code == '#'
      || code == '*'
      || code == '^')
    return true;

  return false;
}
/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output a 'nop' if there's nothing for the delay slot.  */
      if (dbr_sequence_length () == 0)
        fputs ("\n\tnop", file);
      return;
    case '*':
      /* Output a nullification completer if there's nothing for the
         delay slot or nullification is requested.  */
      if (dbr_sequence_length () == 0 ||
          (final_sequence &&
           INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
        fputs (",n", file);
      return;
    case 'R':
      /* Print out the second register name of a register pair.
         I.e., R (6) => 7.  */
      fputs (reg_names[REGNO (x) + 1], file);
      return;
    case 'r':
      /* A register or zero.  */
      if (x == const0_rtx
          || (x == CONST0_RTX (DFmode))
          || (x == CONST0_RTX (SFmode)))
        {
          fputs ("%r0", file);
          return;
        }
      else
        break;
    case 'f':
      /* A register or zero (floating point).  */
      if (x == const0_rtx
          || (x == CONST0_RTX (DFmode))
          || (x == CONST0_RTX (SFmode)))
        {
          fputs ("%fr0", file);
          return;
        }
      else
        break;
    case 'A':
      {
        rtx xoperands[2];

        xoperands[0] = XEXP (XEXP (x, 0), 0);
        xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
        output_global_address (file, xoperands[1], 0);
        fprintf (file, "(%s)", reg_names[REGNO (xoperands[0])]);
        return;
      }

    case 'C':			/* Plain (C)ondition */
    case 'X':
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("=", file);  break;
        case NE:
          fputs ("<>", file);  break;
        case GT:
          fputs (">", file);  break;
        case GE:
          fputs (">=", file);  break;
        case GEU:
          fputs (">>=", file);  break;
        case GTU:
          fputs (">>", file);  break;
        case LT:
          fputs ("<", file);  break;
        case LE:
          fputs ("<=", file);  break;
        case LEU:
          fputs ("<<=", file);  break;
        case LTU:
          fputs ("<<", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'N':			/* Condition, (N)egated */
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("<>", file);  break;
        case NE:
          fputs ("=", file);  break;
        case GT:
          fputs ("<=", file);  break;
        case GE:
          fputs ("<", file);  break;
        case GEU:
          fputs ("<<", file);  break;
        case GTU:
          fputs ("<<=", file);  break;
        case LT:
          fputs (">=", file);  break;
        case LE:
          fputs (">", file);  break;
        case LEU:
          fputs (">>", file);  break;
        case LTU:
          fputs (">>=", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'Y':
      /* For floating point comparisons.  Note that the output
         predicates are the complement of the desired mode.  The
         conditions for GT, GE, LT, LE and LTGT cause an invalid
         operation exception if the result is unordered and this
         exception is enabled in the floating-point status register.  */
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("!=", file);  break;
        case NE:
          fputs ("=", file);  break;
        case GT:
          fputs ("!>", file);  break;
        case GE:
          fputs ("!>=", file);  break;
        case LT:
          fputs ("!<", file);  break;
        case LE:
          fputs ("!<=", file);  break;
        case LTGT:
          fputs ("!<>", file);  break;
        case UNLE:
          fputs ("!?<=", file);  break;
        case UNLT:
          fputs ("!?<", file);  break;
        case UNGE:
          fputs ("!?>=", file);  break;
        case UNGT:
          fputs ("!?>", file);  break;
        case UNEQ:
          fputs ("!?=", file);  break;
        case UNORDERED:
          fputs ("!?", file);  break;
        case ORDERED:
          fputs ("?", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'S':			/* Condition, operands are (S)wapped.  */
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("=", file);  break;
        case NE:
          fputs ("<>", file);  break;
        case GT:
          fputs ("<", file);  break;
        case GE:
          fputs ("<=", file);  break;
        case GEU:
          fputs ("<<=", file);  break;
        case GTU:
          fputs ("<<", file);  break;
        case LT:
          fputs (">", file);  break;
        case LE:
          fputs (">=", file);  break;
        case LEU:
          fputs (">>=", file);  break;
        case LTU:
          fputs (">>", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'B':			/* Condition, (B)oth swapped and negate.  */
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("<>", file);  break;
        case NE:
          fputs ("=", file);  break;
        case GT:
          fputs (">=", file);  break;
        case GE:
          fputs (">", file);  break;
        case GEU:
          fputs (">>", file);  break;
        case GTU:
          fputs (">>=", file);  break;
        case LT:
          fputs ("<=", file);  break;
        case LE:
          fputs ("<", file);  break;
        case LEU:
          fputs ("<<", file);  break;
        case LTU:
          fputs ("<<=", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'k':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
      return;
    case 'Q':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
      return;
    case 'L':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
      return;
    case 'O':
      gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      return;
    case 'p':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
      return;
    case 'P':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
      return;
    case 'I':
      if (GET_CODE (x) == CONST_INT)
        fputs ("i", file);
      return;
    case 'M':
    case 'F':
      switch (GET_CODE (XEXP (x, 0)))
        {
        case PRE_DEC:
        case PRE_INC:
          if (ASSEMBLER_DIALECT == 0)
            fputs ("s,mb", file);
          else
            fputs (",mb", file);
          break;
        case POST_DEC:
        case POST_INC:
          if (ASSEMBLER_DIALECT == 0)
            fputs ("s,ma", file);
          else
            fputs (",ma", file);
          break;
        case PLUS:
          if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
            {
              if (ASSEMBLER_DIALECT == 0)
                fputs ("x", file);
            }
          else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
                   || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
            {
              if (ASSEMBLER_DIALECT == 0)
                fputs ("x,s", file);
              else
                fputs (",s", file);
            }
          else if (code == 'F' && ASSEMBLER_DIALECT == 0)
            fputs ("s", file);
          break;
        default:
          if (code == 'F' && ASSEMBLER_DIALECT == 0)
            fputs ("s", file);
          break;
        }
      return;
    case 'G':
      output_global_address (file, x, 0);
      return;
    case 'H':
      output_global_address (file, x, 1);
      return;
    case 0:			/* Don't do anything special */
      break;
    case 'Z':
      {
        unsigned op[3];
        compute_zdepwi_operands (INTVAL (x), op);
        fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
        return;
      }
    case 'z':
      {
        unsigned op[3];
        compute_zdepdi_operands (INTVAL (x), op);
        fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
        return;
      }
    case 'c':
      /* We can get here from a .vtable_inherit due to our
         CONSTANT_ADDRESS_P rejecting perfectly good constant
         addresses.  */
      break;
    default:
      gcc_unreachable ();
    }
  if (GET_CODE (x) == REG)
    {
      fputs (reg_names[REGNO (x)], file);
      if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
        {
          fputs ("R", file);
          return;
        }
      if (FP_REG_P (x)
          && GET_MODE_SIZE (GET_MODE (x)) <= 4
          && (REGNO (x) & 1) == 0)
        fputs ("L", file);
    }
  else if (GET_CODE (x) == MEM)
    {
      int size = GET_MODE_SIZE (GET_MODE (x));
      rtx base = NULL_RTX;
      switch (GET_CODE (XEXP (x, 0)))
        {
        case PRE_DEC:
        case POST_DEC:
          base = XEXP (XEXP (x, 0), 0);
          fprintf (file, "-%d(%s)", size, reg_names[REGNO (base)]);
          break;
        case PRE_INC:
        case POST_INC:
          base = XEXP (XEXP (x, 0), 0);
          fprintf (file, "%d(%s)", size, reg_names[REGNO (base)]);
          break;
        case PLUS:
          if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
            fprintf (file, "%s(%s)",
                     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
                     reg_names[REGNO (XEXP (XEXP (x, 0), 1))]);
          else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
            fprintf (file, "%s(%s)",
                     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
                     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
          else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
            {
              /* Because the REG_POINTER flag can get lost during reload,
                 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
                 index and base registers in the combined move patterns.  */
              rtx base = XEXP (XEXP (x, 0), 1);
              rtx index = XEXP (XEXP (x, 0), 0);

              fprintf (file, "%s(%s)",
                       reg_names[REGNO (index)], reg_names[REGNO (base)]);
            }
          else
            output_address (XEXP (x, 0));
          break;
        default:
          output_address (XEXP (x, 0));
          break;
        }
    }
  else
    output_addr_const (file, x);
}
/* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */

void
output_global_address (FILE *file, rtx x, int round_constant)
{
  /* Imagine  (high (const (plus ...))).  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
    output_addr_const (file, x);
  else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
    {
      output_addr_const (file, x);
      fputs ("-$global$", file);
    }
  else if (GET_CODE (x) == CONST)
    {
      const char *sep = "";
      int offset = 0;		/* assembler wants -$global$ at end */
      rtx base = NULL_RTX;

      switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
        {
        case SYMBOL_REF:
          base = XEXP (XEXP (x, 0), 0);
          output_addr_const (file, base);
          break;
        case CONST_INT:
          offset = INTVAL (XEXP (XEXP (x, 0), 0));
          break;
        default:
          gcc_unreachable ();
        }

      switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
        {
        case SYMBOL_REF:
          base = XEXP (XEXP (x, 0), 1);
          output_addr_const (file, base);
          break;
        case CONST_INT:
          offset = INTVAL (XEXP (XEXP (x, 0), 1));
          break;
        default:
          gcc_unreachable ();
        }

      /* How bogus.  The compiler is apparently responsible for
         rounding the constant if it uses an LR field selector.

         The linker and/or assembler seem a better place since
         they have to do this kind of thing already.

         If we fail to do this, HP's optimizing linker may eliminate
         an addil, but not update the ldw/stw/ldo instruction that
         uses the result of the addil.  */
      if (round_constant)
        offset = ((offset + 0x1000) & ~0x1fff);

      switch (GET_CODE (XEXP (x, 0)))
        {
        case PLUS:
          if (offset < 0)
            {
              offset = -offset;
              sep = "-";
            }
          else
            sep = "+";
          break;

        case MINUS:
          gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
          sep = "-";
          break;

        default:
          gcc_unreachable ();
        }

      if (!read_only_operand (base, VOIDmode) && !flag_pic)
        fputs ("-$global$", file);
      if (offset)
        fprintf (file, "%s%d", sep, offset);
    }
  else
    output_addr_const (file, x);
}
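/* Worked example (added for illustration): with round_constant set, an
   offset of 0x17ff becomes (0x17ff + 0x1000) & ~0x1fff == 0x2000, i.e.
   the offset is rounded to the nearest multiple of 0x2000 to match the
   assembler's LR field selector rounding.  */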
/* Output boilerplate text to appear at the beginning of the file.
   There are several possible versions.  */
#define aputs(x) fputs(x, asm_out_file)
static inline void
pa_file_start_level (void)
{
  if (TARGET_64BIT)
    aputs ("\t.LEVEL 2.0w\n");
  else if (TARGET_PA_20)
    aputs ("\t.LEVEL 2.0\n");
  else if (TARGET_PA_11)
    aputs ("\t.LEVEL 1.1\n");
  else
    aputs ("\t.LEVEL 1.0\n");
}

static inline void
pa_file_start_space (int sortspace)
{
  aputs ("\t.SPACE $PRIVATE$");
  if (sortspace)
    aputs (",SORT=16");
  aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
         "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
         "\n\t.SPACE $TEXT$");
  if (sortspace)
    aputs (",SORT=8");
  aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
         "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
}

static inline void
pa_file_start_file (int want_version)
{
  if (write_symbols != NO_DEBUG)
    {
      output_file_directive (asm_out_file, main_input_filename);
      if (want_version)
        aputs ("\t.version\t\"01.01\"\n");
    }
}

static inline void
pa_file_start_mcount (const char *aswhat)
{
  if (profile_flag)
    fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
}

static void
pa_elf_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_mcount ("ENTRY");
  pa_file_start_file (0);
}

static void
pa_som_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (0);
  aputs ("\t.IMPORT $global$,DATA\n"
         "\t.IMPORT $$dyncall,MILLICODE\n");
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}

static void
pa_linux_file_start (void)
{
  pa_file_start_file (1);
  pa_file_start_level ();
  pa_file_start_mcount ("CODE");
}

static void
pa_hpux64_gas_file_start (void)
{
  pa_file_start_level ();
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  if (profile_flag)
    ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
#endif
  pa_file_start_file (1);
}

static void
pa_hpux64_hpas_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (1);
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}
#undef aputs
/* Search the deferred plabel list for SYMBOL and return its internal
   label.  If an entry for SYMBOL is not found, a new entry is created.  */

rtx
get_deferred_plabel (rtx symbol)
{
  const char *fname = XSTR (symbol, 0);
  size_t i;

  /* See if we have already put this function on the list of deferred
     plabels.  This list is generally small, so a linear search is not
     too ugly.  If it proves too slow replace it with something faster.  */
  for (i = 0; i < n_deferred_plabels; i++)
    if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
      break;

  /* If the deferred plabel list is empty, or this entry was not found
     on the list, create a new entry on the list.  */
  if (deferred_plabels == NULL || i == n_deferred_plabels)
    {
      tree id;

      if (deferred_plabels == 0)
        deferred_plabels = ggc_alloc_deferred_plabel ();
      else
        deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
                                          deferred_plabels,
                                          n_deferred_plabels + 1);

      i = n_deferred_plabels++;
      deferred_plabels[i].internal_label = gen_label_rtx ();
      deferred_plabels[i].symbol = symbol;

      /* Gross.  We have just implicitly taken the address of this
         function.  Mark it in the same manner as assemble_name.  */
      id = maybe_get_identifier (targetm.strip_name_encoding (fname));
      if (id)
        mark_referenced (id);
    }

  return deferred_plabels[i].internal_label;
}

static void
output_deferred_plabels (void)
{
  size_t i;

  /* If we have some deferred plabels, then we need to switch into the
     data or readonly data section, and align it to a 4 byte boundary
     before outputting the deferred plabels.  */
  if (n_deferred_plabels)
    {
      switch_to_section (flag_pic ? data_section : readonly_data_section);
      ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
    }

  /* Now output the deferred plabels.  */
  for (i = 0; i < n_deferred_plabels; i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "L",
                 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
      assemble_integer (deferred_plabels[i].symbol,
                        TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
    }
}
#ifdef HPUX_LONG_DOUBLE_LIBRARY
/* Initialize optabs to point to HPUX long double emulation routines.  */
static void
pa_hpux_init_libfuncs (void)
{
  set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
  set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
  set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
  set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
  set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
  set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
  set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
  set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
  set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

  set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
  set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
  set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
  set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
  set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
  set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
  set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");

  set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
  set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
  set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");

  set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
                    ? "__U_Qfcnvfxt_quad_to_sgl"
                    : "_U_Qfcnvfxt_quad_to_sgl");
  set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
  set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
  set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");

  set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
  set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
  set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
}
#endif
/* HP's millicode routines mean something special to the assembler.
   Keep track of which ones we have used.  */

enum millicodes { remI, remU, divI, divU, mulI, end1000 };
static void import_milli (enum millicodes);
static char imported[(int) end1000];
static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
static const char import_string[] = ".IMPORT $$....,MILLICODE";
#define MILLI_START 10

static void
import_milli (enum millicodes code)
{
  char str[sizeof (import_string)];

  if (!imported[(int) code])
    {
      imported[(int) code] = 1;
      strcpy (str, import_string);
      strncpy (str + MILLI_START, milli_names[(int) code], 4);
      output_asm_insn (str, 0);
    }
}
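/* Worked example (added for illustration): import_milli (mulI) copies
   IMPORT_STRING into STR and overwrites the four dots at MILLI_START
   (offset 10, just past ".IMPORT $$") with "mulI", emitting

        .IMPORT $$mulI,MILLICODE

   exactly once per millicode routine used in the file.  */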
/* The register constraints have put the operands and return value in
   the proper registers.  */

const char *
output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
{
  import_milli (mulI);
  return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}
/* Emit the rtl for doing a division by a constant.  */

/* Do magic division millicodes exist for this value? */
const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};

/* We'll use an array to keep track of the magic millicodes and
   whether or not we've used them already. [n][0] is signed, [n][1] is
   unsigned.  */

static int div_milli[16][2];

int
emit_hpdiv_const (rtx *operands, int unsignedp)
{
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) > 0
      && INTVAL (operands[2]) < 16
      && magic_milli[INTVAL (operands[2])])
    {
      rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);

      emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
      emit_insn
        (gen_rtx_PARALLEL
         (VOIDmode,
          gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
                                     gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                                     SImode,
                                                     gen_rtx_REG (SImode, 26),
                                                     operands[2])),
                     gen_rtx_CLOBBER (VOIDmode, operands[4]),
                     gen_rtx_CLOBBER (VOIDmode, operands[3]),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
                     gen_rtx_CLOBBER (VOIDmode, ret))));
      emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
      return 1;
    }
  return 0;
}
const char *
output_div_insn (rtx *operands, int unsignedp, rtx insn)
{
  int divisor;

  /* If the divisor is a constant, try to use one of the special
     millicode routines.  */
  if (GET_CODE (operands[0]) == CONST_INT)
    {
      static char buf[100];
      divisor = INTVAL (operands[0]);
      if (!div_milli[divisor][unsignedp])
        {
          div_milli[divisor][unsignedp] = 1;
          if (unsignedp)
            output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
          else
            output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
        }
      if (unsignedp)
        {
          sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
                   INTVAL (operands[0]));
          return output_millicode_call (insn,
                                        gen_rtx_SYMBOL_REF (SImode, buf));
        }
      else
        {
          sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
                   INTVAL (operands[0]));
          return output_millicode_call (insn,
                                        gen_rtx_SYMBOL_REF (SImode, buf));
        }
    }
  /* Divisor isn't a special constant.  */
  else
    {
      if (unsignedp)
        {
          import_milli (divU);
          return output_millicode_call (insn,
                                        gen_rtx_SYMBOL_REF (SImode, "$$divU"));
        }
      else
        {
          import_milli (divI);
          return output_millicode_call (insn,
                                        gen_rtx_SYMBOL_REF (SImode, "$$divI"));
        }
    }
}

/* Output a $$rem millicode to do mod.  */

const char *
output_mod_insn (int unsignedp, rtx insn)
{
  if (unsignedp)
    {
      import_milli (remU);
      return output_millicode_call (insn,
                                    gen_rtx_SYMBOL_REF (SImode, "$$remU"));
    }
  else
    {
      import_milli (remI);
      return output_millicode_call (insn,
                                    gen_rtx_SYMBOL_REF (SImode, "$$remI"));
    }
}
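/* Usage note (an illustration, not from the original source): for a
   signed division by 10 (magic_milli[10] is set), the first use emits

        .IMPORT $$divI_10,MILLICODE

   and the division becomes a millicode call to $$divI_10 with the
   dividend in %r26 and the quotient returned in %r29.  */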
void
output_arg_descriptor (rtx call_insn)
{
  const char *arg_regs[4];
  enum machine_mode arg_mode;
  rtx link;
  int i, output_flag = 0;
  int regno;

  /* We neither need nor want argument location descriptors for the
     64bit runtime environment or the ELF32 environment.  */
  if (TARGET_64BIT || TARGET_ELF32)
    return;

  for (i = 0; i < 4; i++)
    arg_regs[i] = 0;

  /* Specify explicitly that no argument relocations should take place
     if using the portable runtime calling conventions.  */
  if (TARGET_PORTABLE_RUNTIME)
    {
      fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
             asm_out_file);
      return;
    }

  gcc_assert (GET_CODE (call_insn) == CALL_INSN);
  for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
       link; link = XEXP (link, 1))
    {
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
             && GET_CODE (XEXP (use, 0)) == REG
             && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));
      if (regno >= 23 && regno <= 26)
        {
          arg_regs[26 - regno] = "GR";
          if (arg_mode == DImode)
            arg_regs[25 - regno] = "GR";
        }
      else if (regno >= 32 && regno <= 39)
        {
          if (arg_mode == SFmode)
            arg_regs[(regno - 32) / 2] = "FR";
          else
            {
#ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
              arg_regs[(regno - 34) / 2] = "FR";
              arg_regs[(regno - 34) / 2 + 1] = "FU";
#else
              arg_regs[(regno - 34) / 2] = "FU";
              arg_regs[(regno - 34) / 2 + 1] = "FR";
#endif
            }
        }
    }
  fputs ("\t.CALL ", asm_out_file);
  for (i = 0; i < 4; i++)
    {
      if (arg_regs[i])
        {
          if (output_flag++)
            fputc (',', asm_out_file);
          fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
        }
    }
  fputc ('\n', asm_out_file);
}
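/* Usage note (a hypothetical example, not from the original source): a
   call whose first argument word carries an integer and whose next two
   words carry a double would get a descriptor such as

        .CALL ARGW0=GR,ARGW1=FR,ARGW2=FU

   telling the SOM linker which argument words hold general-register
   vs. floating-point values for parameter relocation.  */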
static reg_class_t
pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                     enum machine_mode mode, secondary_reload_info *sri)
{
  int regno;
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Handle the easy stuff first.  */
  if (rclass == R1_REGS)
    return NO_REGS;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
        return NO_REGS;
    }
  else
    regno = -1;

  /* If we have something like (mem (mem (...)), we can safely assume the
     inner MEM will end up in a general register after reloading, so there's
     no need for a secondary reload.  */
  if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
    return NO_REGS;

  /* Trying to load a constant into a FP register during PIC code
     generation requires %r1 as a scratch register.  */
  if (flag_pic
      && (mode == SImode || mode == DImode)
      && FP_REG_CLASS_P (rclass)
      && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
                    : CODE_FOR_reload_indi_r1);
      return NO_REGS;
    }

  /* Secondary reloads of symbolic operands require %r1 as a scratch
     register when we're generating PIC code and when the operand isn't
     readonly.  */
  if (symbolic_expression_p (x))
    {
      if (GET_CODE (x) == HIGH)
        x = XEXP (x, 0);

      if (flag_pic || !read_only_operand (x, VOIDmode))
        {
          gcc_assert (mode == SImode || mode == DImode);
          sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
                        : CODE_FOR_reload_indi_r1);
          return NO_REGS;
        }
    }

  /* Profiling showed the PA port spends about 1.3% of its compilation
     time in true_regnum from calls inside pa_secondary_reload_class.  */
  if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* In order to allow 14-bit displacements in integer loads and stores,
     we need to prevent reload from generating out of range integer mode
     loads and stores to the floating point registers.  Previously, we
     used to call for a secondary reload and have emit_move_sequence()
     fix the instruction sequence.  However, reload occasionally wouldn't
     generate the reload and we would end up with an invalid REG+D memory
     address.  So, now we use an intermediate general register for most
     memory loads and stores.  */
  if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
      && GET_MODE_CLASS (mode) == MODE_INT
      && FP_REG_CLASS_P (rclass))
    {
      /* Reload passes (mem:SI (reg/f:DI 30 %r30) when it wants to check
         the secondary reload needed for a pseudo.  It never passes a
         REG+D address.  */
      if (GET_CODE (x) == MEM)
        {
          x = XEXP (x, 0);

          /* We don't need an intermediate for indexed and LO_SUM DLT
             memory addresses.  When INT14_OK_STRICT is true, it might
             appear that we could directly allow register indirect
             memory addresses.  However, this doesn't work because we
             don't support SUBREGs in floating-point register copies
             and reload doesn't tell us when it's going to use a SUBREG.  */
          if (IS_INDEX_ADDR_P (x)
              || IS_LO_SUM_DLT_ADDR_P (x))
            return NO_REGS;

          /* Otherwise, we need an intermediate general register.  */
          return GENERAL_REGS;
        }

      /* Request a secondary reload with a general scratch register
         for everything else.  ??? Could symbolic operands be handled
         directly when generating non-pic PA 2.0 code?  */
      sri->icode = (in_p
                    ? direct_optab_handler (reload_in_optab, mode)
                    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* We need a secondary register (GPR) for copies between the SAR
     and anything other than a general register.  */
  if (rclass == SHIFT_REGS && (regno <= 0 || regno >= 32))
    {
      sri->icode = (in_p
                    ? direct_optab_handler (reload_in_optab, mode)
                    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* A SAR<->FP register copy requires a secondary register (GPR) as
     well as secondary memory.  */
  if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
      && (REGNO_REG_CLASS (regno) == SHIFT_REGS
      && FP_REG_CLASS_P (rclass)))
    {
      sri->icode = (in_p
                    ? direct_optab_handler (reload_in_optab, mode)
                    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  return NO_REGS;
}
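/* Illustrative note (an assumption, not from the original source): the
   MODE_INT/FP_REG_CLASS_P path above is what turns, e.g., an SImode
   reload of (mem:SI (plus (reg %r30) (const_int 8192))) targeting a
   floating-point register into a copy through a GENERAL_REGS
   intermediate: the GPR load can use a 14-bit displacement, while the
   corresponding FP load could not.  */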
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  The argument pointer
   is only marked as live on entry by df-scan when it is a fixed
   register.  It isn't a fixed register in the 64-bit runtime,
   so we need to mark it here.  */

static void
pa_extra_live_on_entry (bitmap regs)
{
  if (TARGET_64BIT)
    bitmap_set_bit (regs, ARG_POINTER_REGNUM);
}
/* Implement EH_RETURN_HANDLER_RTX.  The MEM needs to be volatile
   to prevent it from being deleted.  */

rtx
pa_eh_return_handler_rtx (void)
{
  rtx tmp;

  tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
                      TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
  tmp = gen_rtx_MEM (word_mode, tmp);
  MEM_VOLATILE_P (tmp) = 1;
  return tmp;
}
/* In the 32-bit runtime, arguments larger than eight bytes are passed
   by invisible reference.  As a GCC extension, we also pass anything
   with a zero or variable size by reference.

   The 64-bit runtime does not describe passing any types by invisible
   reference.  The internals of GCC can't currently handle passing
   empty structures, and zero or variable length arrays when they are
   not passed entirely on the stack or by reference.  Thus, as a GCC
   extension, we pass these types by reference.  The HP compiler doesn't
   support these types, so hopefully there shouldn't be any compatibility
   issues.  This may have to be revisited when HP releases a C99 compiler
   or updates the ABI.  */

static bool
pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
                      enum machine_mode mode, const_tree type,
                      bool named ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  if (TARGET_64BIT)
    return size <= 0;
  else
    return size <= 0 || size > 8;
}
enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
  if (mode == BLKmode
      || (TARGET_64BIT
          && type
          && (AGGREGATE_TYPE_P (type)
              || TREE_CODE (type) == COMPLEX_TYPE
              || TREE_CODE (type) == VECTOR_TYPE)))
    {
      /* Return none if justification is not required.  */
      if (type
          && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
          && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
        return none;

      /* The directions set here are ignored when a BLKmode argument larger
         than a word is placed in a register.  Different code is used for
         the stack and registers.  This makes it difficult to have a
         consistent data representation for both the stack and registers.
         For both runtimes, the justification and padding for arguments on
         the stack and in registers should be identical.  */
      if (TARGET_64BIT)
        /* The 64-bit runtime specifies left justification for aggregates.  */
        return upward;
      else
        /* The 32-bit runtime architecture specifies right justification.
           When the argument is passed on the stack, the argument is padded
           with garbage on the left.  The HP compiler pads with zeros.  */
        return downward;
    }

  if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
    return downward;
  else
    return none;
}
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdargs or varargs is used and fill in an initial
   va_list.  A pointer to this constructor is returned.  */

static rtx
hppa_builtin_saveregs (void)
{
  rtx offset, dest;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
                ? UNITS_PER_WORD : 0);

  if (argadj)
    offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  if (TARGET_64BIT)
    {
      int i, off;

      /* Adjust for varargs/stdarg differences.  */
      if (argadj)
        offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
      else
        offset = crtl->args.arg_offset_rtx;

      /* We need to save %r26 .. %r19 inclusive starting at offset -64
         from the incoming arg pointer and growing to larger addresses.  */
      for (i = 26, off = -64; i >= 19; i--, off += 8)
        emit_move_insn (gen_rtx_MEM (word_mode,
                                     plus_constant (arg_pointer_rtx, off)),
                        gen_rtx_REG (word_mode, i));

      /* The incoming args pointer points just beyond the flushback area;
         normally this is not a serious concern.  However, when we are doing
         varargs/stdargs we want to make the arg pointer point to the start
         of the incoming argument area.  */
      emit_move_insn (virtual_incoming_args_rtx,
                      plus_constant (arg_pointer_rtx, -64));

      /* Now return a pointer to the first anonymous argument.  */
      return copy_to_reg (expand_binop (Pmode, add_optab,
                                        virtual_incoming_args_rtx,
                                        offset, 0, 0, OPTAB_LIB_WIDEN));
    }

  /* Store general registers on the stack.  */
  dest = gen_rtx_MEM (BLKmode,
                      plus_constant (crtl->args.internal_arg_pointer,
                                     -16));
  set_mem_alias_set (dest, get_varargs_alias_set ());
  set_mem_align (dest, BITS_PER_WORD);
  move_block_from_reg (23, dest, 4);

  /* move_block_from_reg will emit code to store the argument registers
     individually as scalar stores.

     However, other insns may later load from the same addresses for
     a structure load (passing a struct to a varargs routine).

     The alias code assumes that such aliasing can never happen, so we
     have to keep memory referencing insns from moving up beyond the
     last argument register store.  So we emit a blockage insn here.  */
  emit_insn (gen_blockage ());

  return copy_to_reg (expand_binop (Pmode, add_optab,
                                    crtl->args.internal_arg_pointer,
                                    offset, 0, 0, OPTAB_LIB_WIDEN));
}
static void
hppa_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
static tree
hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
                           gimple_seq *post_p)
{
  if (TARGET_64BIT)
    {
      /* Args grow upward.  We can use the generic routines.  */
      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }
  else /* !TARGET_64BIT */
    {
      tree ptr = build_pointer_type (type);
      tree valist_type;
      tree t, u;
      unsigned int size, ofs;
      bool indirect;

      indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
      if (indirect)
        {
          type = ptr;
          ptr = build_pointer_type (type);
        }
      size = int_size_in_bytes (type);
      valist_type = TREE_TYPE (valist);

      /* Args grow down.  Not handled by generic routines.  */

      u = fold_convert (sizetype, size_in_bytes (type));
      u = fold_build1 (NEGATE_EXPR, sizetype, u);
      t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);

      /* Align to 4 or 8 byte boundary depending on argument size.  */

      u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
      t = fold_convert (valist_type, t);

      t = build2 (MODIFY_EXPR, valist_type, valist, t);

      ofs = (8 - size) % 4;
      if (ofs != 0)
        {
          u = size_int (ofs);
          t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
        }

      t = fold_convert (ptr, t);
      t = build_va_arg_indirect_ref (t);

      if (indirect)
        t = build_va_arg_indirect_ref (t);

      return t;
    }
}
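/* Worked example (added for illustration): for a 2-byte short in the
   32-bit runtime, valist is first dropped by 2, masked with -4 to a
   word boundary, and then ofs = (8 - 2) % 4 == 2 is added back, so the
   short is fetched right-justified within its argument word.  */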
/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   Currently, TImode is not valid as the HP 64-bit runtime documentation
   doesn't document the alignment and calling conventions for this type.
   Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
   2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE.  */

static bool
pa_scalar_mode_supported_p (enum machine_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
        return true;
      if (precision == SHORT_TYPE_SIZE)
        return true;
      if (precision == INT_TYPE_SIZE)
        return true;
      if (precision == LONG_TYPE_SIZE)
        return true;
      if (precision == LONG_LONG_TYPE_SIZE)
        return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
        return true;
      if (precision == DOUBLE_TYPE_SIZE)
        return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
        return true;
      return false;

    case MODE_DECIMAL_FLOAT:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
   it branches to the next real instruction.  Otherwise, return FALSE.  */

static bool
branch_to_delay_slot_p (rtx insn)
{
  if (dbr_sequence_length ())
    return FALSE;

  return next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn);
}

/* Return TRUE if INSN, a jump insn, needs a nop in its delay slot.

   This occurs when INSN has an unfilled delay slot and is followed
   by an ASM_INPUT.  Disaster can occur if the ASM_INPUT is empty and
   the jump branches into the delay slot.  So, we add a nop in the delay
   slot just to be safe.  This messes up our instruction count, but we
   don't know how big the ASM_INPUT insn is anyway.  */

static bool
branch_needs_nop_p (rtx insn)
{
  rtx next_insn;

  if (dbr_sequence_length ())
    return FALSE;

  next_insn = next_real_insn (insn);
  return GET_CODE (PATTERN (next_insn)) == ASM_INPUT;
}
/* This routine handles all the normal conditional branch sequences we
   might need to generate.  It handles compare immediate vs compare
   register, nullification of delay slots, varying length branches,
   negated branches, and all combinations of the above.  It returns the
   output appropriate to emit the branch corresponding to all given
   parameters.  */

const char *
output_cbranch (rtx *operands, int negated, rtx insn)
{
  static char buf[100];
  int useskip = 0;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot)
     is asking for a disaster.  This can happen when not optimizing and
     when jump optimization fails.

     While it is usually safe to emit nothing, this can fail if the
     preceding instruction is a nullified branch with an empty delay
     slot and the same branch target as this branch.  We could check
     for this but jump optimization should eliminate nop jumps.  It
     is always safe to emit a nop.  */
  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* The doubleword form of the cmpib instruction doesn't have the LEU
     and GTU conditions while the cmpb instruction does.  Since we accept
     zero for cmpb, we must ensure that we use cmpb for the comparison.  */
  if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
    operands[2] = gen_rtx_REG (DImode, 0);
  if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
    operands[1] = gen_rtx_REG (DImode, 0);

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     comclr instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  if (length == 4
      && next_real_insn (insn) != 0
      && get_attr_length (next_real_insn (insn)) == 4
      && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
      && nullify)
    useskip = 1;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
      case 4:
	if (useskip)
	  strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
	else
	  strcpy (buf, "{com%I2b,|cmp%I2b,}");
	if (GET_MODE (operands[1]) == DImode)
	  strcat (buf, "*");
	if (negated)
	  strcat (buf, "%B3");
	else
	  strcat (buf, "%S3");
	if (useskip)
	  strcat (buf, " %2,%r1,%%r0");
	else if (nullify)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, ",n %2,%r1,%0%#");
	    else
	      strcat (buf, ",n %2,%r1,%0");
	  }
	else
	  strcat (buf, " %2,%r1,%0");
	break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
      case 8:
	/* Handle weird backwards branch with a filled delay slot
	   which is nullified.  */
	if (dbr_sequence_length () != 0
	    && ! forward_branch_p (insn)
	    && nullify)
	  {
	    strcpy (buf, "{com%I2b,|cmp%I2b,}");
	    if (GET_MODE (operands[1]) == DImode)
	      strcat (buf, "*");
	    if (negated)
	      strcat (buf, "%S3");
	    else
	      strcat (buf, "%B3");
	    strcat (buf, ",n %2,%r1,.+12\n\tb %0");
	  }
	/* Handle short backwards branch with an unfilled delay slot.
	   Using a comb;nop rather than comiclr;bl saves 1 cycle for both
	   taken and untaken branches.  */
	else if (dbr_sequence_length () == 0
		 && ! forward_branch_p (insn)
		 && INSN_ADDRESSES_SET_P ()
		 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	  {
	    strcpy (buf, "{com%I2b,|cmp%I2b,}");
	    if (GET_MODE (operands[1]) == DImode)
	      strcat (buf, "*");
	    if (negated)
	      strcat (buf, "%B3 %2,%r1,%0%#");
	    else
	      strcat (buf, "%S3 %2,%r1,%0%#");
	  }
	else
	  {
	    strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
	    if (GET_MODE (operands[1]) == DImode)
	      strcat (buf, "*");
	    if (negated)
	      strcat (buf, "%S3");
	    else
	      strcat (buf, "%B3");
	    if (nullify)
	      strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
	    else
	      strcat (buf, " %2,%r1,%%r0\n\tb %0");
	  }
	break;

      default:
	/* The reversed conditional branch must branch over one additional
	   instruction if the delay slot is filled and needs to be extracted
	   by output_lbranch.  If the delay slot is empty or this is a
	   nullified forward branch, the instruction after the reversed
	   condition branch must be nullified.  */
	if (dbr_sequence_length () == 0
	    || (nullify && forward_branch_p (insn)))
	  {
	    nullify = 1;
	    xdelay = 0;
	    operands[4] = GEN_INT (length);
	  }
	else
	  {
	    xdelay = 1;
	    operands[4] = GEN_INT (length + 4);
	  }

	/* Create a reversed conditional branch which branches around
	   the following insns.  */
	if (GET_MODE (operands[1]) != DImode)
	  {
	    if (nullify)
	      {
		if (negated)
		  strcpy (buf,
		    "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
		else
		  strcpy (buf,
		    "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
	      }
	    else
	      {
		if (negated)
		  strcpy (buf,
		    "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
		else
		  strcpy (buf,
		    "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
	      }
	  }
	else
	  {
	    if (nullify)
	      {
		if (negated)
		  strcpy (buf,
		    "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
		else
		  strcpy (buf,
		    "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
	      }
	    else
	      {
		if (negated)
		  strcpy (buf,
		    "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
		else
		  strcpy (buf,
		    "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
	      }
	  }

	output_asm_insn (buf, operands);
	return output_lbranch (operands[0], insn, xdelay);
    }
  return buf;
}
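/* Note: the "{...|...}" braces in the templates above select between two
   assembler dialects via ASSEMBLER_DIALECT; the first alternative is the
   older PA 1.x mnemonic (e.g. "comb") and the second the PA 2.0 form
   (e.g. "cmpb").  */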
/* This routine handles output of long unconditional branches that
   exceed the maximum range of a simple branch instruction.  Since
   we don't have a register available for the branch, we save register
   %r1 in the frame marker, load the branch destination DEST into %r1,
   execute the branch, and restore %r1 in the delay slot of the branch.

   Since long branches may have an insn in the delay slot and the
   delay slot is used to restore %r1, we in general need to extract
   this insn and execute it before the branch.  However, to facilitate
   use of this function by conditional branches, we also provide an
   option to not extract the delay insn so that it will be emitted
   after the long branch.  So, if there is an insn in the delay slot,
   it is extracted if XDELAY is nonzero.

   The lengths of the various long-branch sequences are 20, 16 and 24
   bytes for the portable runtime, non-PIC and PIC cases, respectively.  */

const char *
output_lbranch (rtx dest, rtx insn, int xdelay)
{
  rtx xoperands[2];

  xoperands[0] = dest;

  /* First, free up the delay slot.  */
  if (xdelay && dbr_sequence_length () != 0)
    {
      /* We can't handle a jump in the delay slot.  */
      gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);

      final_scan_insn (NEXT_INSN (insn), asm_out_file,
		       optimize, 0, NULL);

      /* Now delete the delay insn.  */
      SET_INSN_DELETED (NEXT_INSN (insn));
    }

  /* Output an insn to save %r1.  The runtime documentation doesn't
     specify whether the "Clean Up" slot in the callers frame can
     be clobbered by the callee.  It isn't copied by HP's builtin
     alloca, so this suggests that it can be clobbered if necessary.
     The "Static Link" location is copied by HP builtin alloca, so
     we avoid using it.  Using the cleanup slot might be a problem
     if we have to interoperate with languages that pass cleanup
     information.  However, it should be possible to handle these
     situations with GCC's asm feature.

     The "Current RP" slot is reserved for the called procedure, so
     we try to use it when we don't have a frame of our own.  It's
     rather unlikely that we won't have a frame when we need to emit
     a very long branch.

     Really the way to go long term is a register scavenger; goto
     the target of the jump and find a register which we can use
     as a scratch to hold the value in %r1.  Then, we wouldn't have
     to free up the delay slot or clobber a slot that may be needed
     for other purposes.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	/* Use the return pointer slot in the frame marker.  */
	output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
      else
	/* Use the slot at -40 in the frame marker since HP builtin
	   alloca doesn't copy it.  */
	output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	/* Use the return pointer slot in the frame marker.  */
	output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
      else
	/* Use the "Clean Up" slot in the frame marker.  In GCC,
	   the only other use of this location is for copying a
	   floating point double argument from a floating-point
	   register to two general registers.  The copy is done
	   as an "atomic" operation when outputting a call, so it
	   won't interfere with our using the location here.  */
	output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
    }

  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
      if (TARGET_SOM || !TARGET_GAS)
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (xoperands[1]));
	  output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
	}
      else
	{
	  output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	}
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else
    /* Now output a very long branch to the original target.  */
    output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);

  /* Now restore the value of %r1 in the delay slot.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	return "ldd -16(%%r30),%%r1";

      return "ldd -40(%%r30),%%r1";
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
	return "ldw -20(%%r30),%%r1";

      return "ldw -12(%%r30),%%r1";
    }
}
/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
{
  static char buf[100];
  int useskip = 0;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  if (length == 4
      && next_real_insn (insn) != 0
      && get_attr_length (next_real_insn (insn)) == 4
      && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
      && nullify)
    useskip = 1;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
      case 4:
	if (useskip)
	  strcpy (buf, "{extrs,|extrw,s,}");
	else
	  strcpy (buf, "bb,");
	if (useskip && GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "extrd,s,*");
	else if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	if ((which == 0 && negated)
	     || (which == 1 && ! negated))
	  strcat (buf, ">=");
	else
	  strcat (buf, "<");
	if (useskip)
	  strcat (buf, " %0,%1,1,%%r0");
	else if (nullify && negated)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, ",n %0,%1,%3%#");
	    else
	      strcat (buf, ",n %0,%1,%3");
	  }
	else if (nullify && ! negated)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, ",n %0,%1,%2%#");
	    else
	      strcat (buf, ",n %0,%1,%2");
	  }
	else if (! nullify && negated)
	  strcat (buf, " %0,%1,%3");
	else if (! nullify && ! negated)
	  strcat (buf, " %0,%1,%2");
	break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
      case 8:
	/* Handle weird backwards branch with a filled delay slot
	   which is nullified.  */
	if (dbr_sequence_length () != 0
	    && ! forward_branch_p (insn)
	    && nullify)
	  {
	    strcpy (buf, "bb,");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (negated)
	      strcat (buf, ",n %0,%1,.+12\n\tb %3");
	    else
	      strcat (buf, ",n %0,%1,.+12\n\tb %2");
	  }
	/* Handle short backwards branch with an unfilled delay slot.
	   Using a bb;nop rather than extrs;bl saves 1 cycle for both
	   taken and untaken branches.  */
	else if (dbr_sequence_length () == 0
		 && ! forward_branch_p (insn)
		 && INSN_ADDRESSES_SET_P ()
		 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	  {
	    strcpy (buf, "bb,");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, ">=");
	    else
	      strcat (buf, "<");
	    if (negated)
	      strcat (buf, " %0,%1,%3%#");
	    else
	      strcat (buf, " %0,%1,%2%#");
	  }
	else
	  {
	    if (GET_MODE (operands[0]) == DImode)
	      strcpy (buf, "extrd,s,*");
	    else
	      strcpy (buf, "{extrs,|extrw,s,}");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (nullify && negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
	    else if (nullify && ! negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
	    else if (negated)
	      strcat (buf, " %0,%1,1,%%r0\n\tb %3");
	    else
	      strcat (buf, " %0,%1,1,%%r0\n\tb %2");
	  }
	break;

      default:
	/* The reversed conditional branch must branch over one additional
	   instruction if the delay slot is filled and needs to be extracted
	   by output_lbranch.  If the delay slot is empty or this is a
	   nullified forward branch, the instruction after the reversed
	   condition branch must be nullified.  */
	if (dbr_sequence_length () == 0
	    || (nullify && forward_branch_p (insn)))
	  {
	    nullify = 1;
	    xdelay = 0;
	    operands[4] = GEN_INT (length);
	  }
	else
	  {
	    xdelay = 1;
	    operands[4] = GEN_INT (length + 4);
	  }

	if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	else
	  strcpy (buf, "bb,");
	if ((which == 0 && negated)
	    || (which == 1 && !negated))
	  strcat (buf, "<");
	else
	  strcat (buf, ">=");
	if (nullify)
	  strcat (buf, ",n %0,%1,.+%4");
	else
	  strcat (buf, " %0,%1,.+%4");
	output_asm_insn (buf, operands);
	return output_lbranch (negated ? operands[3] : operands[2],
			       insn, xdelay);
    }
  return buf;
}
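/* Note on output_bb/output_bvb: WHICH together with NEGATED selects the
   branch target, operands[2] or operands[3], and flips the printed bit
   condition between "<" and ">=".  On the PA, bb,< appears to branch when
   the selected bit is 1 and bb,>= when it is 0, the bit being copied to
   the sign position for the test.  */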
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
{
  static char buf[100];
  int useskip = 0;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     extrs instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  if (length == 4
      && next_real_insn (insn) != 0
      && get_attr_length (next_real_insn (insn)) == 4
      && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
      && nullify)
    useskip = 1;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
	 delay slot.  */
      case 4:
	if (useskip)
	  strcpy (buf, "{vextrs,|extrw,s,}");
	else
	  strcpy (buf, "{bvb,|bb,}");
	if (useskip && GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "extrd,s,*");
	else if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	if ((which == 0 && negated)
	     || (which == 1 && ! negated))
	  strcat (buf, ">=");
	else
	  strcat (buf, "<");
	if (useskip)
	  strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
	else if (nullify && negated)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
	    else
	      strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
	  }
	else if (nullify && ! negated)
	  {
	    if (branch_needs_nop_p (insn))
	      strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
	    else
	      strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
	  }
	else if (! nullify && negated)
	  strcat (buf, "{ %0,%3| %0,%%sar,%3}");
	else if (! nullify && ! negated)
	  strcat (buf, "{ %0,%2| %0,%%sar,%2}");
	break;

      /* All long conditionals.  Note a short backward branch with an
	 unfilled delay slot is treated just like a long backward branch
	 with an unfilled delay slot.  */
      case 8:
	/* Handle weird backwards branch with a filled delay slot
	   which is nullified.  */
	if (dbr_sequence_length () != 0
	    && ! forward_branch_p (insn)
	    && nullify)
	  {
	    strcpy (buf, "{bvb,|bb,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (negated)
	      strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
	    else
	      strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
	  }
	/* Handle short backwards branch with an unfilled delay slot.
	   Using a bb;nop rather than extrs;bl saves 1 cycle for both
	   taken and untaken branches.  */
	else if (dbr_sequence_length () == 0
		 && ! forward_branch_p (insn)
		 && INSN_ADDRESSES_SET_P ()
		 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	  {
	    strcpy (buf, "{bvb,|bb,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcat (buf, "*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, ">=");
	    else
	      strcat (buf, "<");
	    if (negated)
	      strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
	    else
	      strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
	  }
	else
	  {
	    strcpy (buf, "{vextrs,|extrw,s,}");
	    if (GET_MODE (operands[0]) == DImode)
	      strcpy (buf, "extrd,s,*");
	    if ((which == 0 && negated)
		|| (which == 1 && ! negated))
	      strcat (buf, "<");
	    else
	      strcat (buf, ">=");
	    if (nullify && negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
	    else if (nullify && ! negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
	    else if (negated)
	      strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
	    else
	      strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
	  }
	break;

      default:
	/* The reversed conditional branch must branch over one additional
	   instruction if the delay slot is filled and needs to be extracted
	   by output_lbranch.  If the delay slot is empty or this is a
	   nullified forward branch, the instruction after the reversed
	   condition branch must be nullified.  */
	if (dbr_sequence_length () == 0
	    || (nullify && forward_branch_p (insn)))
	  {
	    nullify = 1;
	    xdelay = 0;
	    operands[4] = GEN_INT (length);
	  }
	else
	  {
	    xdelay = 1;
	    operands[4] = GEN_INT (length + 4);
	  }

	if (GET_MODE (operands[0]) == DImode)
	  strcpy (buf, "bb,*");
	else
	  strcpy (buf, "{bvb,|bb,}");
	if ((which == 0 && negated)
	    || (which == 1 && !negated))
	  strcat (buf, "<");
	else
	  strcat (buf, ">=");
	if (nullify)
	  strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
	else
	  strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
	output_asm_insn (buf, operands);
	return output_lbranch (negated ? operands[3] : operands[2],
			       insn, xdelay);
    }
  return buf;
}
/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */
const char *
output_dbra (rtx *operands, rtx insn, int which_alternative)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
	return "ldo %1(%0),%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
	  output_asm_insn ("ldw -16(%%r30),%4", operands);
	  output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else
	{
	  output_asm_insn ("ldw %0,%4", operands);
	  return "ldo %1(%4),%4\n\tstw %4,%0";
	}
    }

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	  if (nullify)
	    {
	      if (branch_needs_nop_p (insn))
		return "addib,%C2,n %1,%0,%3%#";
	      else
		return "addib,%C2,n %1,%0,%3";
	    }
	  else
	    return "addib,%C2 %1,%0,%3";

	case 8:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "addib,%N2,n %1,%0,.+12\n\tb %3";
	  /* Handle short backwards branch with an unfilled delay slot.
	     Using a addb;nop rather than addi;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "addib,%C2 %1,%0,%3%#";

	  /* Handle normal cases.  */
	  if (nullify)
	    return "addi,%N2 %1,%0,%0\n\tb,n %3";
	  else
	    return "addi,%N2 %1,%0,%0\n\tb %3";

	default:
	  /* The reversed conditional branch must branch over one additional
	     instruction if the delay slot is filled and needs to be extracted
	     by output_lbranch.  If the delay slot is empty or this is a
	     nullified forward branch, the instruction after the reversed
	     condition branch must be nullified.  */
	  if (dbr_sequence_length () == 0
	      || (nullify && forward_branch_p (insn)))
	    {
	      nullify = 1;
	      xdelay = 0;
	      operands[4] = GEN_INT (length);
	    }
	  else
	    {
	      xdelay = 1;
	      operands[4] = GEN_INT (length + 4);
	    }

	  if (nullify)
	    output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
	  else
	    output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);

	  return output_lbranch (operands[3], insn, xdelay);
	}
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
	 increment the GR, store the GR into MEM, and finally reload
	 the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
		       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (length == 24)
	return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 28)
	return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	{
	  operands[5] = GEN_INT (length - 16);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
	  output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
	  return output_lbranch (operands[3], insn, 0);
	}
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (length == 12)
	return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else if (length == 16)
	return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
      else
	{
	  operands[5] = GEN_INT (length - 4);
	  output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
	  return output_lbranch (operands[3], insn, 0);
	}
    }
}
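/* Note: "addib,cond" adds the immediate %1 into %0 and branches on the
   result, giving the one-insn decrement-and-branch form; judging from
   their use above, %C2 prints the condition in operands[2] while %N2
   prints its negation so the long forms can branch around a plain b.  */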
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */
const char *
output_movb (rtx *operands, rtx insn, int which_alternative,
	     int reverse_comparison)
{
  int length = get_attr_length (insn);

  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
	return "copy %1,%0";
      else if (which_alternative == 1)
	{
	  output_asm_insn ("stw %1,-16(%%r30)", operands);
	  return "{fldws|fldw} -16(%%r30),%0";
	}
      else if (which_alternative == 2)
	return "stw %1,%0";
      else
	return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
	 as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
	nullify = 1;

      /* If this is a short forward conditional branch which did not get
	 its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
	nullify = forward_branch_p (insn);

      switch (length)
	{
	case 4:
	  if (nullify)
	    {
	      if (branch_needs_nop_p (insn))
		return "movb,%C2,n %1,%0,%3%#";
	      else
		return "movb,%C2,n %1,%0,%3";
	    }
	  else
	    return "movb,%C2 %1,%0,%3";

	case 8:
	  /* Handle weird backwards branch with a filled delay slot
	     which is nullified.  */
	  if (dbr_sequence_length () != 0
	      && ! forward_branch_p (insn)
	      && nullify)
	    return "movb,%N2,n %1,%0,.+12\n\tb %3";

	  /* Handle short backwards branch with an unfilled delay slot.
	     Using a movb;nop rather than or;bl saves 1 cycle for both
	     taken and untaken branches.  */
	  else if (dbr_sequence_length () == 0
		   && ! forward_branch_p (insn)
		   && INSN_ADDRESSES_SET_P ()
		   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
				      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
	    return "movb,%C2 %1,%0,%3%#";
	  /* Handle normal cases.  */
	  if (nullify)
	    return "or,%N2 %1,%%r0,%0\n\tb,n %3";
	  else
	    return "or,%N2 %1,%%r0,%0\n\tb %3";

	default:
	  /* The reversed conditional branch must branch over one additional
	     instruction if the delay slot is filled and needs to be extracted
	     by output_lbranch.  If the delay slot is empty or this is a
	     nullified forward branch, the instruction after the reversed
	     condition branch must be nullified.  */
	  if (dbr_sequence_length () == 0
	      || (nullify && forward_branch_p (insn)))
	    {
	      nullify = 1;
	      xdelay = 0;
	      operands[4] = GEN_INT (length);
	    }
	  else
	    {
	      xdelay = 1;
	      operands[4] = GEN_INT (length + 4);
	    }

	  if (nullify)
	    output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
	  else
	    output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);

	  return output_lbranch (operands[3], insn, xdelay);
	}
    }
  /* Deal with gross reload for FP destination register case.  */
  else if (which_alternative == 1)
    {
      /* Move source register to MEM, perform the branch test, then
	 finally load the FP register from MEM from within the branch's
	 delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (length == 12)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 16)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
	{
	  operands[4] = GEN_INT (length - 4);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
	  output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
	  return output_lbranch (operands[3], insn, 0);
	}
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
	 happens in the branch's delay slot.  */
      if (length == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else if (length == 12)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
      else
	{
	  operands[4] = GEN_INT (length);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
			   operands);
	  return output_lbranch (operands[3], insn, 0);
	}
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (length == 8)
	return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else if (length == 12)
	return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
      else
	{
	  operands[4] = GEN_INT (length);
	  output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
			   operands);
	  return output_lbranch (operands[3], insn, 0);
	}
    }
}
/* Copy any FP arguments in INSN into integer registers.  */
static void
copy_fp_args (rtx insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	  && GET_CODE (XEXP (use, 0)) == REG
	  && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  /* Copy the FP register into an integer register via memory.  */
	  if (arg_mode == SFmode)
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
	      output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	  else
	    {
	      xoperands[0] = XEXP (use, 0);
	      xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
	      output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
	      output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
	      output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
	    }
	}
    }
}
/* Compute length of the FP argument copy sequence for INSN.  */
static int
length_fp_args (rtx insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
	  && GET_CODE (XEXP (use, 0)) == REG
	  && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
	continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
	{
	  if (arg_mode == SFmode)
	    length += 8;
	  else
	    length += 12;
	}
    }

  return length;
}
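/* Length accounting: copy_fp_args emits two insns for an SFmode argument
   (fstw + ldw) and three for DFmode (fstd + two ldw), so at four bytes
   per insn the increments above are 8 and 12 bytes respectively.  */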
/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by output_millicode_call.
   We include the delay slot in the returned length as it is better to
   overestimate the length than to underestimate it.  */

int
attr_length_millicode_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
	return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < 240000)
	return 8;

      if (TARGET_LONG_ABS_CALL && !flag_pic)
	return 12;

      return 24;
    }
}
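/* Note: DISTANCE is unsigned, so the initial value of -1 is the maximum
   distance; when insn addresses are unavailable, or the address sum
   wraps, every range test fails and the conservative long sequence
   length is returned.  */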
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
output_millicode_call (rtx insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  int distance;
  rtx seq_insn;
  rtx xoperands[3];

  xoperands[0] = call_dest;
  xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because
     it has an attribute type of multi, it never has a nonzero
     sequence length.  The length of the $$sh_func_adrs is the same
     as certain out of reach PIC calls to other routines.  */
  if (!TARGET_LONG_CALLS
      && ((seq_length == 0
	   && (attr_length == 12
	       || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
	  || (seq_length != 0 && attr_length == 8)))
    {
      output_asm_insn ("{bl|b,l} %0,%2", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
	{
	  /* It might seem that one insn could be saved by accessing
	     the millicode function using the linkage table.  However,
	     this doesn't work in shared libraries and other dynamically
	     loaded objects.  Using a pc-relative sequence also avoids
	     problems related to the implicit use of the gp register.  */
	  output_asm_insn ("b,l .+8,%%r1", xoperands);

	  if (TARGET_GAS)
	    {
	      output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	    }
	  else
	    {
	      xoperands[1] = gen_label_rtx ();
	      output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
	      targetm.asm_out.internal_label (asm_out_file, "L",
					      CODE_LABEL_NUMBER (xoperands[1]));
	      output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
	    }

	  output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
	}
      else if (TARGET_PORTABLE_RUNTIME)
	{
	  /* Pure portable runtime doesn't allow be/ble; we also don't
	     have PIC support in the assembler/linker, so this sequence
	     is needed.  */

	  /* Get the address of our target into %r1.  */
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

	  /* Get our return address into %r31.  */
	  output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
	  output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
      else if (!flag_pic)
	{
	  output_asm_insn ("ldil L'%0,%%r1", xoperands);
	  if (TARGET_PA_20)
	    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
	  else
	    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
	}
      else
	{
	  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
	  output_asm_insn ("addi 16,%%r1,%%r31", xoperands);

	  if (TARGET_SOM || !TARGET_GAS)
	    {
	      /* The HP assembler can generate relocations for the
		 difference of two symbols.  GAS can do this for a
		 millicode symbol but not an arbitrary external
		 symbol when generating SOM output.  */
	      xoperands[1] = gen_label_rtx ();
	      targetm.asm_out.internal_label (asm_out_file, "L",
					      CODE_LABEL_NUMBER (xoperands[1]));
	      output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
	    }
	  else
	    {
	      output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
	      output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
			       xoperands);
	    }

	  /* Jump to our target address in %r1.  */
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	}
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
    return "";

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  /* See if the return address can be adjusted.  Use the containing
     sequence insn's address.  */
  if (INSN_ADDRESSES_SET_P ())
    {
      seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
		  - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance))
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (xoperands[1]));
	}
      else
	/* ??? This branch may not reach its target.  */
	output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    /* ??? This branch may not reach its target.  */
    output_asm_insn ("nop\n\tb,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}
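/* Note: every insn in the sequences above, including any delay-slot
   adjustment, is emitted directly with output_asm_insn; the empty string
   returned tells final that nothing is left to print for this insn.  */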
/* Return the attribute length of the call instruction INSN.  The SIBCALL
   flag indicates whether INSN is a regular call or a sibling call.  The
   length returned must be longer than the code actually generated by
   output_call.  Since branch shortening is done before delay branch
   sequencing, there is no way to determine whether or not the delay
   slot will be filled during branch shortening.  Even when the delay
   slot is filled, we may have to add a nop if the delay slot contains
   a branch that can't reach its target.  Thus, we always have to include
   the delay slot in the length estimate.  This used to be done in
   pa_adjust_insn_length but we do it here now as some sequences always
   fill the delay slot and we can save four bytes in the estimate for
   these sequences.  */

int
attr_length_call (rtx insn, int sibcall)
{
  int local_call;
  rtx call, call_dest;
  tree call_decl;
  int length = 0;
  rtx pat = PATTERN (insn);
  unsigned long distance = -1;

  gcc_assert (GET_CODE (insn) == CALL_INSN);

  if (INSN_ADDRESSES_SET_P ())
    {
      unsigned long total;

      total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  gcc_assert (GET_CODE (pat) == PARALLEL);

  /* Get the call rtx.  */
  call = XVECEXP (pat, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);

  gcc_assert (GET_CODE (call) == CALL);

  /* Determine if this is a local call.  */
  call_dest = XEXP (XEXP (call, 0), 0);
  call_decl = SYMBOL_REF_DECL (call_dest);
  local_call = call_decl && targetm.binds_local_p (call_decl);

  /* pc-relative branch.  */
  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !sibcall && distance < 7600000)
	  || distance < 240000))
    length += 8;

  /* 64-bit plabel sequence.  */
  else if (TARGET_64BIT && !local_call)
    length += sibcall ? 28 : 24;

  /* non-pic long absolute branch sequence.  */
  else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
    length += 12;

  /* long pc-relative branch sequence.  */
  else if (TARGET_LONG_PIC_SDIFF_CALL
	   || (TARGET_GAS && !TARGET_SOM
	       && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
    {
      length += 20;

      if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
	length += 8;
    }

  /* 32-bit plabel sequence.  */
  else
    {
      length += 32;

      if (TARGET_SOM)
	length += length_fp_args (insn);

      if (flag_pic)
	length += 4;

      if (!TARGET_PA_20)
	{
	  if (!sibcall)
	    length += 8;

	  if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
	    length += 8;
	}
    }

  return length;
}
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
output_call (rtx insn, rtx call_dest, int sibcall)
{
  int delay_insn_deleted = 0;
  int delay_slot_filled = 0;
  int seq_length = dbr_sequence_length ();
  tree call_decl = SYMBOL_REF_DECL (call_dest);
  int local_call = call_decl && targetm.binds_local_p (call_decl);
  rtx xoperands[2];

  xoperands[0] = call_dest;

  /* Handle the common case where we're sure that the branch will reach
     the beginning of the "$CODE$" subspace.  This is the beginning of
     the current function if we are in a named section.  */
  if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
    {
      xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT && !local_call)
	{
	  /* ??? As far as I can tell, the HP linker doesn't support the
	     long pc-relative sequence described in the 64-bit runtime
	     architecture.  So, we use a slightly longer indirect call.  */
	  xoperands[0] = get_deferred_plabel (call_dest);
	  xoperands[1] = gen_label_rtx ();

	  /* If this isn't a sibcall, we put the load of %r27 into the
	     delay slot.  We can't do this in a sibcall as we don't
	     have a second call-clobbered scratch register available.  */
	  if (seq_length != 0
	      && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
	      && !sibcall)
	    {
	      final_scan_insn (NEXT_INSN (insn), asm_out_file,
			       optimize, 0, NULL);

	      /* Now delete the delay insn.  */
	      SET_INSN_DELETED (NEXT_INSN (insn));
	      delay_insn_deleted = 1;
	    }

	  output_asm_insn ("addil LT'%0,%%r27", xoperands);
	  output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
	  output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);

	  if (sibcall)
	    {
	      output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
	      output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
	      output_asm_insn ("bve (%%r1)", xoperands);
	    }
	  else
	    {
	      output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
	      output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
	      output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
	      delay_slot_filled = 1;
	    }
	}
      else
	{
	  int indirect_call = 0;

	  /* Emit a long call.  There are several different sequences
	     of increasing length and complexity.  In most cases,
	     they don't allow an instruction in the delay slot.  */
	  if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
	      && !TARGET_LONG_PIC_SDIFF_CALL
	      && !(TARGET_GAS && !TARGET_SOM
		   && (TARGET_LONG_PIC_PCREL_CALL || local_call))
	      && !TARGET_64BIT)
	    indirect_call = 1;

	  if (seq_length != 0
	      && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
	      && !sibcall
	      && (!TARGET_PA_20
		  || indirect_call
		  || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
	    {
	      /* A non-jump insn in the delay slot.  By definition we can
		 emit this insn before the call (and in fact before argument
		 relocating).  */
	      final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
			       NULL);

	      /* Now delete the delay insn.  */
	      SET_INSN_DELETED (NEXT_INSN (insn));
	      delay_insn_deleted = 1;
	    }

	  if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
	    {
	      /* This is the best sequence for making long calls in
		 non-pic code.  Unfortunately, GNU ld doesn't provide
		 the stub needed for external calls, and GAS's support
		 for this with the SOM linker is buggy.  It is safe
		 to use this for local calls.  */
	      output_asm_insn ("ldil L'%0,%%r1", xoperands);
	      if (sibcall)
		output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
	      else
		{
		  if (TARGET_PA_20)
		    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
				     xoperands);
		  else
		    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);

		  output_asm_insn ("copy %%r31,%%r2", xoperands);
		  delay_slot_filled = 1;
		}
	    }
	  else
	    {
	      if (TARGET_LONG_PIC_SDIFF_CALL)
		{
		  /* The HP assembler and linker can handle relocations
		     for the difference of two symbols.  The HP assembler
		     recognizes the sequence as a pc-relative call and
		     the linker provides stubs when needed.  */
		  xoperands[1] = gen_label_rtx ();
		  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
		  output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
		  targetm.asm_out.internal_label (asm_out_file, "L",
						  CODE_LABEL_NUMBER (xoperands[1]));
		  output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
		}
	      else if (TARGET_GAS && !TARGET_SOM
		       && (TARGET_LONG_PIC_PCREL_CALL || local_call))
		{
		  /* GAS currently can't generate the relocations that
		     are needed for the SOM linker under HP-UX using this
		     sequence.  The GNU linker doesn't generate the stubs
		     that are needed for external calls on TARGET_ELF32
		     with this sequence.  For now, we have to use a
		     longer plabel sequence when using GAS.  */
		  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
		  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
				   xoperands);
		  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
				   xoperands);
		}
	      else
		{
		  /* Emit a long plabel-based call sequence.  This is
		     essentially an inline implementation of $$dyncall.
		     We don't actually try to call $$dyncall as this is
		     as difficult as calling the function itself.  */
		  xoperands[0] = get_deferred_plabel (call_dest);
		  xoperands[1] = gen_label_rtx ();

		  /* Since the call is indirect, FP arguments in registers
		     need to be copied to the general registers.  Then, the
		     argument relocation stub will copy them back.  */
		  if (TARGET_SOM)
		    copy_fp_args (insn);

		  if (flag_pic)
		    {
		      output_asm_insn ("addil LT'%0,%%r19", xoperands);
		      output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
		      output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
		    }
		  else
		    {
		      output_asm_insn ("addil LR'%0-$global$,%%r27",
				       xoperands);
		      output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
				       xoperands);
		    }

		  output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
		  output_asm_insn ("depi 0,31,2,%%r1", xoperands);
		  output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
		  output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);

		  if (!sibcall && !TARGET_PA_20)
		    {
		      output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
		      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
			output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
		      else
			output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
		    }
		}

	      if (TARGET_PA_20)
		{
		  if (sibcall)
		    output_asm_insn ("bve (%%r1)", xoperands);
		  else
		    {
		      if (indirect_call)
			{
			  output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
			  output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
			  delay_slot_filled = 1;
			}
		      else
			output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
		    }
		}
	      else
		{
		  if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
		    output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
				     xoperands);

		  if (sibcall)
		    {
		      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
			output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
		      else
			output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
		    }
		  else
		    {
		      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
			output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
		      else
			output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);

		      if (indirect_call)
			output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
		      else
			output_asm_insn ("copy %%r31,%%r2", xoperands);
		      delay_slot_filled = 1;
		    }
		}
	    }
	}
    }

  if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
    output_asm_insn ("nop", xoperands);

  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0
      || delay_insn_deleted
      || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
    return "";

  /* A sibcall should never have a branch in the delay slot.  */
  gcc_assert (!sibcall);

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
    {
      /* See if the return address can be adjusted.  Use the containing
	 sequence insn's address.  This would break the regular call/return
	 relationship assumed by the table based eh unwinder, so only do that
	 if the call is not possibly throwing.  */
      rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
		      - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance)
	  && !(can_throw_internal (insn) || can_throw_external (insn)))
	{
	  xoperands[1] = gen_label_rtx ();
	  output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (xoperands[1]));
	}
      else
	output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    output_asm_insn ("b,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}
/* Return the attribute length of the indirect call instruction INSN.
   The length must match the code generated by output_indirect call.
   The returned length includes the delay slot.  Currently, the delay
   slot of an indirect call sequence is not exposed and it is used by
   the sequence itself.  */

int
attr_length_indirect_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
	distance = -1;
    }

  if (TARGET_64BIT)
    return 12;

  if (TARGET_FAST_INDIRECT_CALLS
      || (!TARGET_PORTABLE_RUNTIME
	  && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
	      || distance < 240000)))
    return 8;

  if (flag_pic)
    return 24;

  if (TARGET_PORTABLE_RUNTIME)
    return 20;

  /* Out of reach, can use ble.  */
  return 12;
}
const char *
output_indirect_call (rtx insn, rtx call_dest)
{
  rtx xoperands[1];

  if (TARGET_64BIT)
    {
      xoperands[0] = call_dest;
      output_asm_insn ("ldd 16(%0),%%r2", xoperands);
      output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
      return "";
    }

  /* First the special case for kernels, level 0 systems, etc.  */
  if (TARGET_FAST_INDIRECT_CALLS)
    return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";

  /* Now the normal case -- we can reach $$dyncall directly or
     we're sure that we can get there via a long-branch stub.

     No need to check target flags as the length uniquely identifies
     the remaining cases.  */
  if (attr_length_indirect_call (insn) == 8)
    {
      /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
	 $$dyncall.  Since BLE uses %r31 as the link register, the 22-bit
	 variant of the B,L instruction can't be used on the SOM target.  */
      if (TARGET_PA_20 && !TARGET_SOM)
	return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
      else
	return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
    }

  /* Long millicode call, but we are not generating PIC or portable runtime
     code.  */
  if (attr_length_indirect_call (insn) == 12)
    return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";

  /* Long millicode call for portable runtime.  */
  if (attr_length_indirect_call (insn) == 20)
    return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";

  /* We need a long PIC call to $$dyncall.  */
  xoperands[0] = NULL_RTX;
  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
  if (TARGET_SOM || !TARGET_GAS)
    {
      xoperands[0] = gen_label_rtx ();
      output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
      targetm.asm_out.internal_label (asm_out_file, "L",
				      CODE_LABEL_NUMBER (xoperands[0]));
      output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
    }
  else
    {
      output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
      output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
		       xoperands);
    }
  output_asm_insn ("blr %%r0,%%r2", xoperands);
  output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
  return "";
}
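/* Note: $$dyncall is the millicode routine used for indirect calls; it
   is expected to check bit 30 of the target word to tell a plabel from
   a plain code address and to load %r19 in the plabel case, mirroring
   the inline sequence emitted in output_call above.  */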
/* Return the total length of the save and restore instructions needed for
   the data linkage table pointer (i.e., the PIC register) across the call
   instruction INSN.  No-return calls do not require a save and restore.
   In addition, we may be able to avoid the save and restore for calls
   within the same translation unit.  */

int
attr_length_save_restore_dltp (rtx insn)
{
  if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
    return 0;

  return 8;
}
/* In HPUX 8.0's shared library scheme, special relocations are needed
   for function labels if they might be passed to a function
   in a shared library (because shared libraries don't live in code
   space), and special magic is needed to construct their address.  */

void
hppa_encode_label (rtx sym)
{
  const char *str = XSTR (sym, 0);
  int len = strlen (str) + 1;
  char *newstr, *p;

  p = newstr = XALLOCAVEC (char, len + 1);
  *p++ = '@';
  strcpy (p, str);

  XSTR (sym, 0) = ggc_alloc_string (newstr, len);
}
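/* For example, the symbol "foo" is rewritten as "@foo"; the '@' prefix
   is what FUNCTION_NAME_P tests for and what pa_strip_name_encoding
   removes below.  */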
static void
pa_encode_section_info (tree decl, rtx rtl, int first)
{
  int old_referenced = 0;

  if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
    old_referenced
      = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;

  default_encode_section_info (decl, rtl, first);

  if (first && TEXT_SPACE_P (decl))
    {
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
      if (TREE_CODE (decl) == FUNCTION_DECL)
	hppa_encode_label (XEXP (rtl, 0));
    }
  else if (old_referenced)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
}
/* This is sort of inverse to pa_encode_section_info.  */

static const char *
pa_strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}
int
function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
}
/* Returns 1 if OP is a function label involved in a simple addition
   with a constant.  Used to keep certain patterns from matching
   during instruction combination.  */
int
is_function_label_plus_const (rtx op)
{
  /* Strip off any CONST.  */
  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  return (GET_CODE (op) == PLUS
	  && function_label_operand (XEXP (op, 0), Pmode)
	  && GET_CODE (XEXP (op, 1)) == CONST_INT);
}
/* Output assembly code for a thunk to FUNCTION.  */

static void
pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
			HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			tree function)
{
  static unsigned int current_thunk_number;
  int val_14 = VAL_14_BITS_P (delta);
  unsigned int old_last_address = last_address, nbytes = 0;
  char label[16];
  rtx xoperands[4];

  xoperands[0] = XEXP (DECL_RTL (function), 0);
  xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
  xoperands[2] = GEN_INT (delta);

  ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
  fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");

  /* Output the thunk.  We know that the function is in the same
     translation unit (i.e., the same space) as the thunk, and that
     thunks are output after their method.  Thus, we don't need an
     external branch to reach the function.  With SOM and GAS,
     functions and thunks are effectively in different sections.
     Thus, we can always use a IA-relative branch and the linker
     will add a long branch stub if necessary.

     However, we have to be careful when generating PIC code on the
     SOM port to ensure that the sequence does not transfer to an
     import stub for the target function as this could clobber the
     return value saved at SP-24.  This would also apply to the
     32-bit linux port if the multi-space model is implemented.  */
  if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
       && !(flag_pic && TREE_PUBLIC (function))
       && (TARGET_GAS || last_address < 262132))
      || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
	  && ((targetm.have_named_sections
	       && DECL_SECTION_NAME (thunk_fndecl) != NULL
	       /* The GNU 64-bit linker has rather poor stub management.
		  So, we use a long branch from thunks that aren't in
		  the same section as the target function.  */
	       && ((!TARGET_64BIT
		    && (DECL_SECTION_NAME (thunk_fndecl)
			!= DECL_SECTION_NAME (function)))
		   || ((DECL_SECTION_NAME (thunk_fndecl)
			== DECL_SECTION_NAME (function))
		       && last_address < 262132)))
	      || (targetm.have_named_sections
		  && DECL_SECTION_NAME (thunk_fndecl) == NULL
		  && DECL_SECTION_NAME (function) == NULL
		  && last_address < 262132)
	      || (!targetm.have_named_sections
		  && last_address < 262132))))
    {
      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("b %0", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 8;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 12;
	}
    }
  else if (TARGET_64BIT)
    {
      /* We only have one call-clobbered scratch register, so we can't
	 make use of the delay slot if delta doesn't fit in 14 bits.  */
      if (!val_14)
	{
	  output_asm_insn ("addil L'%2,%%r26", xoperands);
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	}

      output_asm_insn ("b,l .+8,%%r1", xoperands);

      if (TARGET_GAS)
	{
	  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
	}
      else
	{
	  xoperands[3] = GEN_INT (val_14 ? 8 : 16);
	  output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
	}

      if (val_14)
	{
	  output_asm_insn ("bv %%r0(%%r1)", xoperands);
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 20;
	}
      else
	{
	  output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
	  nbytes += 24;
	}
    }
  else if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);

      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 16;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 20;
	}
    }
  else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      /* The function is accessible from outside this module.  The only
	 way to avoid an import stub between the thunk and function is to
	 call the function directly with an indirect sequence similar to
	 that used by $$dyncall.  This is possible because $$dyncall acts
	 as the import stub in an indirect call.  */
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
      xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
      output_asm_insn ("addil LT'%3,%%r19", xoperands);
      output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
      output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
      output_asm_insn ("depi 0,31,2,%%r22", xoperands);
      output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);

      if (!val_14)
	{
	  output_asm_insn ("addil L'%2,%%r26", xoperands);
	  nbytes += 4;
	}

      if (TARGET_PA_20)
	{
	  output_asm_insn ("bve (%%r22)", xoperands);
	  nbytes += 36;
	}
      else if (TARGET_NO_SPACE_REGS)
	{
	  output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
	  nbytes += 36;
	}
      else
	{
	  output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
	  output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
	  output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
	  nbytes += 44;
	}

      if (val_14)
	output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
      else
	output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);

      if (TARGET_SOM || !TARGET_GAS)
	{
	  output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
	}
      else
	{
	  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
	  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
	}

      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 20;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 24;
	}
    }
  else
    {
      if (!val_14)
	output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("ldil L'%0,%%r22", xoperands);
      output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);

      if (val_14)
	{
	  output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
	  nbytes += 12;
	}
      else
	{
	  output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
	  nbytes += 16;
	}
    }

  fprintf (file, "\t.EXIT\n\t.PROCEND\n");

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We're done with this subspace except possibly for some additional
	 debug information.  Forget that we are in this subspace to ensure
	 that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      switch_to_section (data_section);
      output_asm_insn (".align 4", xoperands);
      ASM_OUTPUT_LABEL (file, label);
      output_asm_insn (".word P'%0", xoperands);
    }

  current_thunk_number++;
  nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
	    & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
  last_address += nbytes;
  if (old_last_address > last_address)
    last_address = UINT_MAX;
  update_total_code_bytes (nbytes);
}
/* Only direct calls to static functions are allowed to be sibling (tail)
   call optimized.

   This restriction is necessary because some linker generated stubs will
   store return pointers into rp' in some cases which might clobber a
   live value already in rp'.

   In a sibcall the current function and the target function share stack
   space.  Thus if the path to the current function and the path to the
   target function save a value in rp', they save the value into the
   same stack slot, which has undesirable consequences.

   Because of the deferred binding nature of shared libraries any function
   with external scope could be in a different load module and thus require
   rp' to be saved when calling that function.  So sibcall optimizations
   can only be safe for static functions.

   Note that GCC never needs return value relocations, so we don't have to
   worry about static calls with return value relocations (which require
   saving rp').

   It is safe to perform a sibcall optimization when the target function
   will never return.  */
static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  if (TARGET_PORTABLE_RUNTIME)
    return false;

  /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
     single subspace mode and the call is not indirect.  As far as I know,
     there is no operating system support for the multiple subspace mode.
     It might be possible to support indirect calls if we didn't use
     $$dyncall (see the indirect sequence generated in output_call).  */
  if (TARGET_ELF32)
    return (decl != NULL_TREE);

  /* Sibcalls are not ok because the arg pointer register is not a fixed
     register.  This prevents the sibcall optimization from occurring.  In
     addition, there are problems with stub placement using GNU ld.  This
     is because a normal sibcall branch uses a 17-bit relocation while
     a regular call branch uses a 22-bit relocation.  As a result, more
     care needs to be taken in the placement of long-branch stubs.  */
  if (TARGET_64BIT)
    return false;

  /* Sibcalls are only ok within a translation unit.  */
  return (decl && !TREE_PUBLIC (decl));
}
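
/* Example (illustrative, not from the original sources): given

     static int helper (int x) { return x + 1; }
     int wrapper (int x) { return helper (x); }

   the call to helper may be tail-call optimized because helper has
   static scope, while a call to an extern function would be rejected
   by the TREE_PUBLIC test above.  */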
/* ??? Addition is not commutative on the PA due to the weird implicit
   space register selection rules for memory addresses.  Therefore, we
   don't consider a + b == b + a, as this might be inside a MEM.  */
static bool
pa_commutative_p (const_rtx x, int outer_code)
{
  return (COMMUTATIVE_P (x)
	  && (TARGET_NO_SPACE_REGS
	      || (outer_code != UNKNOWN && outer_code != MEM)
	      || GET_CODE (x) != PLUS));
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpyadd instructions.  */
int
fmpyaddoperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the addition.  One of the input operands must
     be the same as the output operand.  */
  if (! rtx_equal_p (operands[3], operands[4])
      && ! rtx_equal_p (operands[3], operands[5]))
    return 0;

  /* Inout operand of add cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* Multiply cannot feed into addition operands.  */
  if (rtx_equal_p (operands[4], operands[0])
      || rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpyadd.  */
  return 1;
}
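
/* Illustrative operand layout (an interpretation, not from the original
   comments): the multiply half computes operands[0] = operands[1] *
   operands[2], while the add half computes operands[3] = operands[4] +
   operands[5] with operands[3] also appearing as one of the two add
   sources.  The checks above ensure the two halves are independent
   enough to execute as a single fmpyadd.  */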
#if !defined(USE_COLLECT2)
static void
pa_asm_out_constructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    hppa_encode_label (symbol);

#ifdef CTORS_SECTION_ASM_OP
  default_ctor_section_asm_out_constructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_constructor (symbol, priority);
# else
  default_stabs_asm_out_constructor (symbol, priority);
# endif
#endif
}

static void
pa_asm_out_destructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    hppa_encode_label (symbol);

#ifdef DTORS_SECTION_ASM_OP
  default_dtor_section_asm_out_destructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_destructor (symbol, priority);
# else
  default_stabs_asm_out_destructor (symbol, priority);
# endif
#endif
}
#endif
/* This function places uninitialized global data in the bss section.
   The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
   function on the SOM port to prevent uninitialized global data from
   being placed in the data section.  */

void
pa_asm_output_aligned_bss (FILE *stream,
			   const char *name,
			   unsigned HOST_WIDE_INT size,
			   unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
  ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
#endif

  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}
/* Both the HP and GNU assemblers under HP-UX provide a .comm directive
   that doesn't allow the alignment of global common storage to be directly
   specified.  The SOM linker aligns common storage based on the rounded
   value of the NUM_BYTES parameter in the .comm directive.  It's not
   possible to use the .align directive as it doesn't affect the alignment
   of the label associated with a .comm directive.  */

void
pa_asm_output_aligned_common (FILE *stream,
			      const char *name,
			      unsigned HOST_WIDE_INT size,
			      unsigned int align)
{
  unsigned int max_common_align;

  max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
  if (align > max_common_align)
    {
      warning (0, "alignment (%u) for %s exceeds maximum alignment "
	       "for global common data.  Using %u",
	       align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
      align = max_common_align;
    }

  switch_to_section (bss_section);

  assemble_name (stream, name);
  fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED "\n",
	   MAX (size, align / BITS_PER_UNIT));
}
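
/* Example of the output (illustrative, not from the original sources):
   for name "buf", size 6 and a requested 64-bit alignment,
   align / BITS_PER_UNIT is 8 and MAX (6, 8) is 8, so this emits

     buf	.comm 8

   and the SOM linker derives the alignment from the rounded size.  */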
/* We can't use .comm for local common storage as the SOM linker effectively
   treats the symbol as universal and uses the same storage for local symbols
   with the same name in different object files.  The .block directive
   reserves an uninitialized block of storage.  However, it's not common
   storage.  Fortunately, GCC never requests common storage with the same
   name in any given translation unit.  */

void
pa_asm_output_aligned_local (FILE *stream,
			     const char *name,
			     unsigned HOST_WIDE_INT size,
			     unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef LOCAL_ASM_OP
  fprintf (stream, "%s", LOCAL_ASM_OP);
  assemble_name (stream, name);
  fprintf (stream, "\n");
#endif

  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpysub instructions.  */
int
fmpysuboperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
	 && mode == GET_MODE (operands[2])
	 && mode == GET_MODE (operands[3])
	 && mode == GET_MODE (operands[4])
	 && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
	 && GET_CODE (operands[2]) == REG
	 && GET_CODE (operands[3]) == REG
	 && GET_CODE (operands[4]) == REG
	 && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the subtraction.  Subtraction is not a
     commutative operation, so operands[4] must be the same as
     operands[3].  */
  if (! rtx_equal_p (operands[3], operands[4]))
    return 0;

  /* Multiply cannot feed into subtraction.  */
  if (rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* Inout operand of sub cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
	  || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpysub.  */
  return 1;
}
/* Return 1 if the given constant is 2, 4, or 8.  These are the valid
   constants for shadd instructions.  */
int
shadd_constant_p (int val)
{
  if (val == 2 || val == 4 || val == 8)
    return 1;
  else
    return 0;
}
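
/* Illustrative note (not from the original sources): the constants 2, 4
   and 8 correspond to the scale factors of the sh1add, sh2add and sh3add
   instructions; e.g. sh2add computes r1 * 4 + r2 in a single insn.  */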
/* Return 1 if OP is valid as a base or index register in a
   REG+REG address.  */

int
borx_reg_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != REG)
    return 0;

  /* We must reject virtual registers as the only expressions that
     can be instantiated are REG and REG+CONST.  */
  if (op == virtual_incoming_args_rtx
      || op == virtual_stack_vars_rtx
      || op == virtual_stack_dynamic_rtx
      || op == virtual_outgoing_args_rtx
      || op == virtual_cfa_rtx)
    return 0;

  /* While it's always safe to index off the frame pointer, it's not
     profitable to do so when the frame pointer is being eliminated.  */
  if (!reload_completed
      && flag_omit_frame_pointer
      && !cfun->calls_alloca
      && op == frame_pointer_rtx)
    return 0;

  return register_operand (op, mode);
}
/* Return 1 if this operand is anything other than a hard register.  */

int
non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
}
/* Return TRUE if INSN branches forward.  */

static bool
forward_branch_p (rtx insn)
{
  rtx lab = JUMP_LABEL (insn);

  /* The INSN must have a jump label.  */
  gcc_assert (lab != NULL_RTX);

  if (INSN_ADDRESSES_SET_P ())
    return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));

  while (insn)
    {
      if (insn == lab)
	return true;
      else
	insn = NEXT_INSN (insn);
    }

  return false;
}
/* Return 1 if OP is an equality comparison, else return 0.  */
int
eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
}
/* Return 1 if INSN is in the delay slot of a call instruction.  */
int
jump_in_call_delay (rtx insn)
{
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;

  if (PREV_INSN (insn)
      && PREV_INSN (PREV_INSN (insn))
      && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
    {
      rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));

      return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
	      && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
    }
  else
    return 0;
}
/* Output an unconditional move and branch insn.  */

const char *
output_parallel_movb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* These are the cases in which we win.  */
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
	{
	  /* Nothing in the delay slot, fake it by putting the combined
	     insn (the copy or add) in the delay slot of a bl.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "b %2\n\tldi %1,%0";
	  else
	    return "b %2\n\tcopy %1,%0";
	}
      else
	{
	  /* Something in the delay slot, but we've got a long branch.  */
	  if (GET_CODE (operands[1]) == CONST_INT)
	    return "ldi %1,%0\n\tb %2";
	  else
	    return "copy %1,%0\n\tb %2";
	}
    }

  if (GET_CODE (operands[1]) == CONST_INT)
    output_asm_insn ("ldi %1,%0", operands);
  else
    output_asm_insn ("copy %1,%0", operands);
  return output_lbranch (operands[2], insn, 1);
}
/* Output an unconditional add and branch insn.  */

const char *
output_parallel_addb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
	/* Nothing in the delay slot, fake it by putting the combined
	   insn (the copy or add) in the delay slot of a bl.  */
	return "b %3\n\tadd%I1 %1,%0,%0";
      else
	/* Something in the delay slot, but we've got a long branch.  */
	return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return output_lbranch (operands[3], insn, 1);
}
/* Return nonzero if INSN (a jump insn) immediately follows a call
   to a named function.  This is used to avoid filling the delay slot
   of the jump since it can usually be eliminated by modifying RP in
   the delay slot of the call.  */

int
following_call (rtx insn)
{
  if (! TARGET_JUMP_IN_DELAY)
    return 0;

  /* Find the previous real insn, skipping NOTEs.  */
  insn = PREV_INSN (insn);
  while (insn && GET_CODE (insn) == NOTE)
    insn = PREV_INSN (insn);

  /* Check for CALL_INSNs and millicode calls.  */
  if (insn
      && ((GET_CODE (insn) == CALL_INSN
	   && get_attr_type (insn) != TYPE_DYNCALL)
	  || (GET_CODE (insn) == INSN
	      && GET_CODE (PATTERN (insn)) != SEQUENCE
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && get_attr_type (insn) == TYPE_MILLI)))
    return 1;

  return 0;
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.

   We want the delay slots of branches within jump tables to be filled.
   None of the compiler passes at the moment even has the notion that a
   PA jump table doesn't contain addresses, but instead contains actual
   instructions!

   Because we actually jump into the table, the addresses of each entry
   must stay constant in relation to the beginning of the table (which
   itself must stay constant relative to the instruction to jump into
   it).  I don't believe we can guarantee earlier passes of the compiler
   will adhere to those rules.

   So, late in the compilation process we find all the jump tables, and
   expand them into real code -- e.g. each entry in the jump table vector
   will get an appropriate label followed by a jump to the final target.

   Reorg and the final jump pass can then optimize these branches and
   fill their delay slots.  We end up with smaller, more efficient code.

   The jump instructions within the table are special; we must be able
   to identify them during assembly output (if the jumps don't get filled
   we need to emit a nop rather than nullifying the delay slot).  We
   identify jumps in switch tables by using insns with the attribute
   type TYPE_BTABLE_BRANCH.

   We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
   insns.  This serves two purposes, first it prevents jump.c from
   noticing that the last N entries in the table jump to the instruction
   immediately after the table and deleting the jumps.  Second, those
   insns mark where we should emit .begin_brtab and .end_brtab directives
   when using GAS (allows for better link time optimizations).  */
static void
pa_reorg (void)
{
  rtx insn;

  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();

  /* This is fairly cheap, so always run it if optimizing.  */
  if (optimize > 0 && !TARGET_BIG_SWITCH)
    {
      /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	{
	  rtx pattern, tmp, location, label;
	  unsigned int length, i;

	  /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode.  */
	  if (GET_CODE (insn) != JUMP_INSN
	      || (GET_CODE (PATTERN (insn)) != ADDR_VEC
		  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
	    continue;

	  /* Emit marker for the beginning of the branch table.  */
	  emit_insn_before (gen_begin_brtab (), insn);

	  pattern = PATTERN (insn);
	  location = PREV_INSN (insn);
	  length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);

	  for (i = 0; i < length; i++)
	    {
	      /* Emit a label before each jump to keep jump.c from
		 removing this code.  */
	      tmp = gen_label_rtx ();
	      LABEL_NUSES (tmp) = 1;
	      emit_label_after (tmp, location);
	      location = NEXT_INSN (location);

	      if (GET_CODE (pattern) == ADDR_VEC)
		label = XEXP (XVECEXP (pattern, 0, i), 0);
	      else
		label = XEXP (XVECEXP (pattern, 1, i), 0);

	      tmp = gen_short_jump (label);

	      /* Emit the jump itself.  */
	      tmp = emit_jump_insn_after (tmp, location);
	      JUMP_LABEL (tmp) = label;
	      LABEL_NUSES (label)++;
	      location = NEXT_INSN (location);

	      /* Emit a BARRIER after the jump.  */
	      emit_barrier_after (location);
	      location = NEXT_INSN (location);
	    }

	  /* Emit marker for the end of the branch table.  */
	  emit_insn_before (gen_end_brtab (), location);
	  location = NEXT_INSN (location);
	  emit_barrier_after (location);

	  /* Delete the ADDR_VEC or ADDR_DIFF_VEC.  */
	  delete_insn (insn);
	}
    }
  else
    {
      /* Still need brtab marker insns.  FIXME: the presence of these
	 markers disables output of the branch table to readonly memory,
	 and any alignment directives that might be needed.  Possibly,
	 the begin_brtab insn should be output before the label for the
	 table.  This doesn't matter at the moment since the tables are
	 always output in the text section.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	{
	  /* Find an ADDR_VEC insn.  */
	  if (GET_CODE (insn) != JUMP_INSN
	      || (GET_CODE (PATTERN (insn)) != ADDR_VEC
		  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
	    continue;

	  /* Now generate markers for the beginning and end of the
	     branch table.  */
	  emit_insn_before (gen_begin_brtab (), insn);
	  emit_insn_after (gen_end_brtab (), insn);
	}
    }
}
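
/* Illustrative sketch (not from the original sources) of the jump table
   expansion above.  A table that starts as

     ADDR_VEC: L$10, L$11

   is rewritten into real instructions, roughly:

     L$100:  b L$10
     L$101:  b L$11

   so reorg can later fill the delay slot of each branch.  */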
/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform a FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed point.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is a few insns away).  These are difficult to use due to the
   branch length restrictions.  */
static void
pa_combine_instructions (void)
{
  rtx anchor, new_rtx;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  new_rtx = make_insn_raw (new_rtx);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
	 Also ignore any special USE insns.  */
      if ((GET_CODE (anchor) != INSN
	   && GET_CODE (anchor) != JUMP_INSN
	   && GET_CODE (anchor) != CALL_INSN)
	  || GET_CODE (PATTERN (anchor)) == USE
	  || GET_CODE (PATTERN (anchor)) == CLOBBER
	  || GET_CODE (PATTERN (anchor)) == ADDR_VEC
	  || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
	continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
	  || anchor_attr == PA_COMBINE_TYPE_FADDSUB
	  || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
	      && ! forward_branch_p (anchor)))
	{
	  rtx floater;

	  for (floater = PREV_INSN (anchor);
	       floater;
	       floater = PREV_INSN (floater))
	    {
	      if (GET_CODE (floater) == NOTE
		  || (GET_CODE (floater) == INSN
		      && (GET_CODE (PATTERN (floater)) == USE
			  || GET_CODE (PATTERN (floater)) == CLOBBER)))
		continue;

	      /* Anything except a regular INSN will stop our search.  */
	      if (GET_CODE (floater) != INSN
		  || GET_CODE (PATTERN (floater)) == ADDR_VEC
		  || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
		{
		  floater = NULL_RTX;
		  break;
		}

	      /* See if FLOATER is suitable for combination with the
		 anchor.  */
	      floater_attr = get_attr_pa_combine_type (floater);
	      if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		      && floater_attr == PA_COMBINE_TYPE_FMPY))
		{
		  /* If ANCHOR and FLOATER can be combined, then we're
		     done with this pass.  */
		  if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					SET_DEST (PATTERN (floater)),
					XEXP (SET_SRC (PATTERN (floater)), 0),
					XEXP (SET_SRC (PATTERN (floater)), 1)))
		    break;
		}

	      else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
		       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
		{
		  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		  else
		    {
		      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    SET_SRC (PATTERN (floater)),
					    SET_SRC (PATTERN (floater))))
			break;
		    }
		}
	    }

	  /* If we didn't find anything on the backwards scan try forwards.  */
	  if (!floater
	      && (anchor_attr == PA_COMBINE_TYPE_FMPY
		  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
	    {
	      for (floater = anchor; floater; floater = NEXT_INSN (floater))
		{
		  if (GET_CODE (floater) == NOTE
		      || (GET_CODE (floater) == INSN
			  && (GET_CODE (PATTERN (floater)) == USE
			      || GET_CODE (PATTERN (floater)) == CLOBBER)))
		    continue;

		  /* Anything except a regular INSN will stop our search.  */
		  if (GET_CODE (floater) != INSN
		      || GET_CODE (PATTERN (floater)) == ADDR_VEC
		      || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
		    {
		      floater = NULL_RTX;
		      break;
		    }

		  /* See if FLOATER is suitable for combination with the
		     anchor.  */
		  floater_attr = get_attr_pa_combine_type (floater);
		  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
			  && floater_attr == PA_COMBINE_TYPE_FMPY))
		    {
		      /* If ANCHOR and FLOATER can be combined, then we're
			 done with this pass.  */
		      if (pa_can_combine_p (new_rtx, anchor, floater, 1,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		}
	    }

	  /* FLOATER will be nonzero if we found a suitable floating
	     insn for combination with ANCHOR.  */
	  if (floater
	      && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		  || anchor_attr == PA_COMBINE_TYPE_FMPY))
	    {
	      /* Emit the new instruction and delete the old anchor.  */
	      emit_insn_before (gen_rtx_PARALLEL
				(VOIDmode,
				 gen_rtvec (2, PATTERN (anchor),
					    PATTERN (floater))),
				anchor);

	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	  else if (floater
		   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
	    {
	      rtx temp;

	      /* Emit the new_jump instruction and delete the old anchor.  */
	      temp
		= emit_jump_insn_before (gen_rtx_PARALLEL
					 (VOIDmode,
					  gen_rtvec (2, PATTERN (anchor),
						     PATTERN (floater))),
					 anchor);

	      JUMP_LABEL (temp) = JUMP_LABEL (anchor);
	      SET_INSN_DELETED (anchor);

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	}
    }
}
static int
pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed,
		  rtx dest, rtx src1, rtx src2)
{
  int insn_code_number;
  rtx start, end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
  INSN_CODE (new_rtx) = -1;
  insn_code_number = recog_memoized (new_rtx);
  if (insn_code_number < 0
      || (extract_insn (new_rtx), ! constrain_operands (1)))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There are up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in particular.  */
static int
insn_refs_are_delayed (rtx insn)
{
  return ((GET_CODE (insn) == INSN
	   && GET_CODE (PATTERN (insn)) != SEQUENCE
	   && GET_CODE (PATTERN (insn)) != USE
	   && GET_CODE (PATTERN (insn)) != CLOBBER
	   && get_attr_type (insn) == TYPE_MILLI));
}
/* Promote the return value, but not the arguments.  */

static enum machine_mode
pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			  enum machine_mode mode,
			  int *punsignedp ATTRIBUTE_UNUSED,
			  const_tree fntype ATTRIBUTE_UNUSED,
			  int for_return)
{
  if (for_return == 0)
    return mode;
  return promote_mode (type, mode, punsignedp);
}
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else promoting
   return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

static rtx
pa_function_value (const_tree valtype,
		   const_tree func ATTRIBUTE_UNUSED,
		   bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      if (TARGET_64BIT)
	{
	  /* Aggregates with a size less than or equal to 128 bits are
	     returned in GR 28(-29).  They are left justified.  The pad
	     bits are undefined.  Larger aggregates are returned in
	     memory.  */
	  rtx loc[2];
	  int i, offset = 0;
	  int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;

	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, 28 + i),
					  GEN_INT (offset));
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
	}
      else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
	{
	  /* Aggregates 5 to 8 bytes in size are returned in general
	     registers r28-r29 in the same manner as other non
	     floating-point objects.  The data is right-justified and
	     zero-extended to 64 bits.  This is opposite to the normal
	     justification used on big endian targets and requires
	     special treatment.  */
	  rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (DImode, 28), const0_rtx);
	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	}
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
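
/* Examples (illustrative): with the checks above, a plain int is
   returned in %r28 (promoted to word_mode), a double comes back in fr4
   (register 32) unless soft float is in effect, and on TARGET_64BIT a
   16-byte struct is returned in the GR 28-29 PARALLEL built above.  */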
/* Implement the TARGET_LIBCALL_VALUE hook.  */

static rtx
pa_libcall_value (enum machine_mode mode,
		  const_rtx fun ATTRIBUTE_UNUSED)
{
  if (! TARGET_SOFT_FLOAT
      && (mode == SFmode || mode == DFmode))
    return gen_rtx_REG (mode, 32);
  else
    return gen_rtx_REG (mode, 28);
}
/* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook.  */

static bool
pa_function_value_regno_p (const unsigned int regno)
{
  if (regno == 28
      || (! TARGET_SOFT_FLOAT && regno == 32))
    return true;

  return false;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
pa_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  int arg_size = FUNCTION_ARG_SIZE (mode, type);

  cum->nargs_prototype--;
  cum->words += (arg_size
		 + ((cum->words & 01)
		    && type != NULL_TREE
		    && arg_size > 1));
}
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
static rtx
pa_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
		 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
	return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
	alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
	return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

	 Remember, gprs grow towards smaller register numbers while
	 fprs grow to higher register numbers.  Also remember that
	 although FP regs are 32-bit addressable, we pretend that
	 the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
	 treatment.  */
      if (arg_size > 1
	  || mode == BLKmode
	  || (type && (AGGREGATE_TYPE_P (type)
		       || TREE_CODE (type) == COMPLEX_TYPE
		       || TREE_CODE (type) == VECTOR_TYPE)))
	{
	  /* Double-extended precision (80-bit), quad-precision (128-bit)
	     and aggregates including complex numbers are aligned on
	     128-bit boundaries.  The first eight 64-bit argument slots
	     are associated one-to-one, with general registers r26
	     through r19, and also with floating-point registers fr4
	     through fr11.  Arguments larger than one word are always
	     passed in general registers.

	     Using a PARALLEL with a word mode register results in left
	     justified data on a big-endian target.  */

	  rtx loc[8];
	  int i, offset = 0, ub = arg_size;

	  /* Align the base register.  */
	  gpr_reg_base -= alignment;

	  ub = MIN (ub, max_arg_words - cum->words - alignment);
	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, gpr_reg_base),
					  GEN_INT (offset));
	      gpr_reg_base -= 1;
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
	}
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
	 which registers we must use.  */
      if (arg_size > 1)
	{
	  if (cum->words)
	    {
	      gpr_reg_base = 23;
	      fpr_reg_base = 38;
	    }
	  else
	    {
	      gpr_reg_base = 25;
	      fpr_reg_base = 34;
	    }

	  /* Structures 5 to 8 bytes in size are passed in the general
	     registers in the same manner as other non floating-point
	     objects.  The data is right-justified and zero-extended
	     to 64 bits.  This is opposite to the normal justification
	     used on big endian targets and requires special treatment.
	     We now define BLOCK_REG_PADDING to pad these objects.
	     Aggregates, complex and vector types are passed in the same
	     manner as structures.  */
	  if (mode == BLKmode
	      || (type && (AGGREGATE_TYPE_P (type)
			   || TREE_CODE (type) == COMPLEX_TYPE
			   || TREE_CODE (type) == VECTOR_TYPE)))
	    {
	      rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (DImode, gpr_reg_base),
					   const0_rtx);
	      return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	    }
	}
      else
	{
	  /* We have a single word (32 bits).  A simple computation
	     will get us the register #s we need.  */
	  gpr_reg_base = 26 - cum->words;
	  fpr_reg_base = 32 + 2 * cum->words;
	}
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
	  is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
	  pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
	  registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
	  sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
	 calls with the 32 bit ABI and the HP assembler since there is no
	 way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
	  && !TARGET_GAS
	  && !cum->incoming
	  && cum->indirect
	  && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
	= gen_rtx_PARALLEL
	    (mode,
	     gen_rtvec (2,
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, fpr_reg_base),
					   const0_rtx),
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, gpr_reg_base),
					   const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
	  /* Indirect calls in the normal 32bit ABI require all arguments
	     to be passed in general registers.  */
	  || (!TARGET_PORTABLE_RUNTIME
	      && !TARGET_64BIT
	      && !TARGET_GAS
	      && cum->indirect)
	  /* If the parameter is not a scalar floating-point parameter,
	     then it belongs in GPRs.  */
	  || GET_MODE_CLASS (mode) != MODE_FLOAT
	  /* Structure with single SFmode field belongs in GPR.  */
	  || (type && AGGREGATE_TYPE_P (type)))
	retval = gen_rtx_REG (mode, gpr_reg_base);
      else
	retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
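
/* Worked example (illustrative): for the second word-sized argument of a
   32-bit call, cum->words is 1, so the code above selects %r25
   (gpr_reg_base = 26 - 1) or fr34 (fpr_reg_base = 32 + 2 * 1) depending
   on the mode and the FP tests above.  */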
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
		      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
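
/* Worked example (illustrative): on TARGET_64BIT with cum->words == 6
   and a 4-word argument, offset is 0, the argument neither fits fully in
   the 8 argument words nor starts on the stack, so (8 - 6 - 0) *
   UNITS_PER_WORD == 16 bytes go in registers and the rest on the stack.  */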
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
	{
	  /* We only want to emit a .nsubspa directive once at the
	     start of the function.  */
	  cfun->machine->in_nsubspa = 1;

	  /* Create a new subspace for the text.  This provides
	     better stub placement and one-only functions.  */
	  if (cfun->decl
	      && DECL_ONE_ONLY (cfun->decl)
	      && !DECL_WEAK (cfun->decl))
	    {
	      output_section_asm_op ("\t.SPACE $TEXT$\n"
				     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
				     "ACCESS=44,SORT=24,COMDAT");
	      return;
	    }
	}
      else
	{
	  /* There isn't a current function or the body of the current
	     function has been completed.  So, we are changing to the
	     text section to output debugging information.  Thus, we
	     need to forget that we are in the text section so that
	     varasm.c will call us when text_section is selected again.  */
	  gcc_assert (!cfun || !cfun->machine
		      || cfun->machine->in_nsubspa == 2);
	  in_section = NULL;
	}
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
			   "\t.SPACE $TEXT$\n"
			   "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
			   "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
			   som_output_comdat_data_section_asm_op,
			   "\t.SPACE $PRIVATE$\n"
			   "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
			   "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
		   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
	  || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
	  && DECL_ONE_ONLY (exp)
	  && !DECL_WEAK (exp))
	return som_one_only_readonly_data_section;
      else
	return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
	   && TREE_CODE (exp) == VAR_DECL
	   && DECL_ONE_ONLY (exp)
	   && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
	  || int_size_in_bytes (type) <= 0);
}
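
/* Examples (illustrative): on the 32-bit SOM target an 8-byte struct is
   returned in registers (8 is not greater than 8), while a 12-byte
   struct goes to memory; on TARGET_64BIT the threshold is 16 bytes.  */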
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol pointers.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol *p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
	  && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
	ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif
/* Return true if a change from mode FROM to mode TO for a register
   in register class RCLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
			     enum reg_class rclass)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
/* Length in units of the trampoline instruction code.  */

#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))


/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
   and then branches to the specified routine.

   This code template is copied from text segment to stack location
   and then patched with pa_trampoline_init to contain valid values,
   and then entered as a subroutine.

   It is best to keep this as small as possible to avoid having to
   flush multiple lines in the cache.  */
static void
pa_asm_trampoline_template (FILE *f)
{
  if (!TARGET_64BIT)
    {
      fputs ("\tldw	36(%r22),%r21\n", f);
      fputs ("\tbb,>=,n	%r21,30,.+16\n", f);
      if (ASSEMBLER_DIALECT == 0)
	fputs ("\tdepi	0,31,2,%r21\n", f);
      else
	fputs ("\tdepwi	0,31,2,%r21\n", f);
      fputs ("\tldw	4(%r21),%r19\n", f);
      fputs ("\tldw	0(%r21),%r21\n", f);
      if (TARGET_PA_20)
	{
	  fputs ("\tbve	(%r21)\n", f);
	  fputs ("\tldw	40(%r22),%r29\n", f);
	  fputs ("\t.word	0\n", f);
	  fputs ("\t.word	0\n", f);
	}
      else
	{
	  fputs ("\tldsid	(%r21),%r1\n", f);
	  fputs ("\tmtsp	%r1,%sr0\n", f);
	  fputs ("\tbe	0(%sr0,%r21)\n", f);
	  fputs ("\tldw	40(%r22),%r29\n", f);
	}
      fputs ("\t.word	0\n", f);
      fputs ("\t.word	0\n", f);
      fputs ("\t.word	0\n", f);
      fputs ("\t.word	0\n", f);
    }
  else
    {
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\tmfia	%r31\n", f);
      fputs ("\tldd	24(%r31),%r1\n", f);
      fputs ("\tldd	24(%r1),%r27\n", f);
      fputs ("\tldd	16(%r1),%r1\n", f);
      fputs ("\tbve	(%r1)\n", f);
      fputs ("\tldd	32(%r31),%r31\n", f);
      fputs ("\t.dword 0  ; fptr\n", f);
      fputs ("\t.dword 0  ; static link\n", f);
    }
}
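
/* Illustrative layout note (derived from pa_trampoline_init below): in
   the 32-bit template the words at offsets 36 and 40 are patched with
   the function address and static chain, and offsets 44/48 hold the
   plabel (trampoline address and %r19) used for the indirect call.  */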
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 36.
   Move the static chain value to trampoline template at offset 40.
   Move the trampoline address to trampoline template at offset 44.
   Move r19 to trampoline template at offset 48.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */
static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 36);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 40);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 44);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (r_tramp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_andsi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (tmp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
}
/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 44.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (addr, 46));
  return addr;
}
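
/* Illustrative note (an interpretation, not from the original comments):
   46 is offset 44 plus 2, which sets the bit tested by the
   "bb,>=,n %r21,30,.+16" sequence in the template above, marking the
   address as a plabel rather than a plain code address.  */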
static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));

  return x;
}
static rtx
pa_internal_arg_pointer (void)
{
  /* The argument pointer and the hard frame pointer are the same in
     the 32-bit runtime, so we don't need a copy.  */
  if (TARGET_64BIT)
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.  */

static bool
pa_can_eliminate (const int from, const int to)
{
  /* The argument pointer cannot be eliminated in the 64-bit runtime.  */
  if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
    return false;

  return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
pa_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
      && to == STACK_POINTER_REGNUM)
    offset = -compute_frame_size (get_frame_size (), 0);
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}