/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
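
/* Illustrative note (an assumption, not from the original source): the
   bypass only applies when the stored value has the same width as the
   result produced by OUT_INSN, e.g. an SFmode result feeding an SFmode
   fpstore qualifies, while an SFmode result feeding a DFmode store
   does not.  */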

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx, bool);
static bool hppa_rtx_costs (rtx, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					enum machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
						   enum machine_mode, int *,
						   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
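
/* Usage sketch (illustrative, not from the original source):
   -mfixed-range=fr4-fr31 marks fr4 through fr31 as fixed and
   call-used; if the given ranges end up covering every FP register
   checked above, MASK_DISABLE_FPREGS is set as well.  */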

/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_m32:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_m64:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}

void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX
  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || ldil_cint_p (ival)
	  || zdepi_cint_p (ival));
}
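
/* Illustrative examples (not from the original source): 0x1fff fits in
   14 bits, so ldo works; 0x12345000 has its low 11 bits clear and keeps
   its sign when widened from 32 to 64 bits, so ldil works; 0x01f00000
   is a contiguous run of ones, so zdepi works; 0x12345678 satisfies
   none of the tests and needs more than one instruction.  */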

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff the operand OP can be used as the destination operand of
   an integer store.  This also implies the operand could be used as
   the source operand of an integer load.  Symbolic, lo_sum and indexed
   memory operands are not allowed.  We accept reloading pseudos and
   other memory operands.  */
int
integer_store_memory_operand (rtx op, enum machine_mode mode)
{
  return ((reload_in_progress
	   && REG_P (op)
	   && REGNO (op) >= FIRST_PSEUDO_REGISTER
	   && reg_renumber[REGNO (op)] < 0)
	  || (GET_CODE (op) == MEM
	      && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
	      && !symbolic_memory_operand (op, VOIDmode)
	      && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
	      && !IS_INDEX_ADDR_P (XEXP (op, 0))));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
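
/* Worked example (illustrative): ldil loads the upper 21 bits of a
   word, so the test masks bits 11..30 plus the sign-extension bits.
   ival = 0x12345800 fails because bit 11 is set, while
   ival = 0x12345000 leaves x == 0 and passes.  A negative 32-bit value
   must appear fully sign-extended (x == (HOST_WIDE_INT) -1 << 31) to
   pass.  */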

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
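
/* How the test works (illustrative): the 5-bit immediate may be placed
   at any position, optionally sign-extended through higher bits.  For
   x = 0x01d00000 (binary 11101 placed at bit 20), lsb_mask is
   0x00100000; (x >> 4) + lsb_mask collapses the significant bits into
   a single carry bit, and masking with ~(lsb_mask - 1) clears the
   shifted-in low bits, giving t = 0x00200000, a power of two.  For
   x = 0x41 (binary 1000001, seven significant bits), t = 5, which is
   not a power of two, so the test fails.  */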

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
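
/* The mask += mask & -mask trick (illustrative): adding the lowest set
   bit to a value whose set bits form one contiguous run carries them
   all away, e.g. 0x0ff0 + 0x0010 = 0x1000, which the power-of-two test
   accepts; 0x0f0f + 0x0001 = 0x0f10 still has several bits set and is
   rejected.  and_mask_p applies the same test to the complement of the
   mask.  */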

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, mode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
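
/* Resulting assembly, roughly (an illustrative assumption): a symbol
   load becomes an addil LT'sym,%r19 / ldw RT'sym(%r1),reg pair, i.e. a
   HIGH of the DLT offset added to the PIC register followed by a
   LO_SUM memory load of the DLT entry.  */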

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);
  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
      case TLS_MODEL_GLOBAL_DYNAMIC:
	tmp = gen_reg_rtx (Pmode);
	if (flag_pic)
	  emit_insn (gen_tgd_load_pic (tmp, addr));
	else
	  emit_insn (gen_tgd_load (tmp, addr));
	ret = hppa_tls_call (tmp);
	break;

      case TLS_MODEL_LOCAL_DYNAMIC:
	ret = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	start_sequence ();
	if (flag_pic)
	  emit_insn (gen_tld_load_pic (tmp, addr));
	else
	  emit_insn (gen_tld_load (tmp, addr));
	t1 = hppa_tls_call (tmp);
	insn = get_insns ();
	end_sequence ();
	t2 = gen_reg_rtx (Pmode);
	emit_libcall_block (insn, t2, t1,
			    gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					    UNSPEC_TLSLDBASE));
	emit_insn (gen_tld_offset_load (ret, addr, t2));
	break;

      case TLS_MODEL_INITIAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	if (flag_pic)
	  emit_insn (gen_tie_load_pic (tmp, addr));
	else
	  emit_insn (gen_tie_load (tmp, addr));
	emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
	break;

      case TLS_MODEL_LOCAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	emit_insn (gen_tle_load (ret, addr, tp));
	break;

      default:
	gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
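
/* Worked example (illustrative): with a MODE_FLOAT reference and
   mask = 0x1f, an address X + 0x123 has (0x123 & 0x1f) = 3, below the
   halfway point 0x10, so Y = 0x120 and we emit
   memory ((X + 0x120) + 3).  A nearby reference X + 0x125 rounds to
   the same Y, letting CSE share the single base Z = X + 0x120, with
   both residual displacements fitting in the 5-bit field.  */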

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case CONST_INT:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */

static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we loose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
							     0),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     operand0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  */
	      if (decl)
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  type = strip_array_types (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		       && GET_CODE (XEXP (operand1, 0)) == PLUS
		       && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		       && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		       && (reload_completed || reload_in_progress)
		       && flag_pic)
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
	    }
	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
      else if (GET_CODE (operand1) != CONST_INT
	       || !cint_ok_for_move (INTVAL (operand1)))
	{
	  rtx insn, temp;
	  rtx op1 = operand1;
	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;
	  int insert = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	  if (TARGET_64BIT
	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	    {
	      HOST_WIDE_INT nval;

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we can
		 can use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32-bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
	      if (value != nval)
		{
#if HOST_BITS_PER_WIDE_INT > 32
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
#endif
		  insert = 1;
		  value = nval;
		  operand1 = GEN_INT (nval);
		}
	    }

	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	  else
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !insert)
	    {
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
		 high part.  */
	      HOST_WIDE_INT low = value & 0x3fff;
	      HOST_WIDE_INT high = value & ~ 0x3fff;
2026 if (high
== 0x7fffc000 || (mode
== HImode
&& high
== 0x4000))
2034 emit_insn (gen_rtx_SET (VOIDmode
, temp
, GEN_INT (high
)));
2035 operands
[1] = gen_rtx_PLUS (mode
, temp
, GEN_INT (low
));
2039 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
2040 gen_rtx_HIGH (mode
, operand1
)));
2041 operands
[1] = gen_rtx_LO_SUM (mode
, temp
, operand1
);
2044 insn
= emit_move_insn (operands
[0], operands
[1]);
2046 /* Now insert the most significant 32 bits of the value
2047 into the register. When we don't have a second register
2048 available, it could take up to nine instructions to load
2049 a 64-bit integer constant. Prior to reload, we force
2050 constants that would take more than three instructions
2051 to load to the constant pool. During and after reload,
2052 we have to handle all possible values. */
2055 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2056 register and the value to be inserted is outside the
2057 range that can be loaded with three depdi instructions. */
2058 if (temp
!= operand0
&& (insv
>= 16384 || insv
< -16384))
2060 operand1
= GEN_INT (insv
);
2062 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
2063 gen_rtx_HIGH (mode
, operand1
)));
2064 emit_move_insn (temp
, gen_rtx_LO_SUM (mode
, temp
, operand1
));
2065 emit_insn (gen_insv (operand0
, GEN_INT (32),
2070 int len
= 5, pos
= 27;
2072 /* Insert the bits using the depdi instruction. */
2075 HOST_WIDE_INT v5
= ((insv
& 31) ^ 16) - 16;
2076 HOST_WIDE_INT sign
= v5
< 0;
2078 /* Left extend the insertion. */
2079 insv
= (insv
>= 0 ? insv
>> len
: ~(~insv
>> len
));
2080 while (pos
> 0 && (insv
& 1) == sign
)
2082 insv
= (insv
>= 0 ? insv
>> 1 : ~(~insv
>> 1));
2087 emit_insn (gen_insv (operand0
, GEN_INT (len
),
2088 GEN_INT (pos
), GEN_INT (v5
)));
2090 len
= pos
> 0 && pos
< 5 ? pos
: 5;
2096 set_unique_reg_note (insn
, REG_EQUAL
, op1
);
2101 /* Now have insn-emit do whatever it normally does. */
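
/* Illustrative sketch, not part of the port: the NVAL expression above is
   the standard branch-free idiom for sign-extending the low 32 bits of a
   wider value.  The helper below (a hypothetical name, assuming a 64-bit
   long long) shows the same trick in isolation.  */

static long long
sign_extend_low32 (long long value)
{
  long long mask32 = (2LL << 31) - 1;   /* 0xffffffff */
  long long bit31 = 1LL << 31;

  /* Keep bits 0..31, then XOR and subtract the sign bit so that bit 31
     propagates into the upper half.  */
  return ((value & mask32) ^ bit31) - bit31;
}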
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= reloc_needed (TREE_OPERAND (exp, 1));
      break;

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
        tree value;
        unsigned HOST_WIDE_INT ix;

        FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
          if (value)
            reloc |= reloc_needed (value);
      }
      break;

    default:
      break;
    }
  return reloc;
}
/* Does operand (which is a symbolic_operand) live in text space?
   If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
   will be true.  */

int
read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (operand) == CONST)
    operand = XEXP (XEXP (operand, 0), 0);
  if (flag_pic)
    {
      if (GET_CODE (operand) == SYMBOL_REF)
        return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
    }
  else
    {
      if (GET_CODE (operand) == SYMBOL_REF)
        return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
    }
  return 1;
}
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */
const char *
singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
         bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
        return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
        return "ldil L'%1,%0";
      else if (zdepi_cint_p (intval))
        return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
        return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */
static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32 - lsb; len++)
        {
          if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
            break;
        }

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */
void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len, maxlen;

  maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < maxlen; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < maxlen - lsb; len++)
        {
          if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
            break;
        }

      /* Extend length if host is narrow and IMM is negative.  */
      if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
        len += 32;

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb;
  op[2] = len;
}
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

const char *
output_move_double (rtx *operands)
{
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
    {
      if (FP_REG_P (operands[0]))
        {
          output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
          output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
          return "{fldds|fldd} -16(%%sp),%0";
        }
      else
        {
          output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
          output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
          return "{ldws|ldw} -12(%%sp),%R0";
        }
    }

  /* Handle auto decrementing and incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do this better when the
     port learns about the general inc/dec addressing of PA.
     (This was written by tege.  Chide him if it doesn't work.)  */

  if (optype0 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
         doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

          operands[0] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[1]) == REG
                      && GET_CODE (operands[0]) == REG);

          gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

          /* No overlap between high target register and address
             register.  (We do this in a non-obvious way to
             save a register file writeback)  */
          if (GET_CODE (addr) == POST_INC)
            return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
          return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
        }
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

          operands[0] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[1]) == REG
                      && GET_CODE (operands[0]) == REG);

          gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
          /* No overlap between high target register and address
             register.  (We do this in a non-obvious way to save a
             register file writeback)  */
          if (GET_CODE (addr) == PRE_INC)
            return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
          return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
        }
    }
  if (optype1 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
         doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          operands[1] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[0]) == REG
                      && GET_CODE (operands[1]) == REG);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              /* No overlap between high target register and address
                 register.  (We do this in a non-obvious way to
                 save a register file writeback)  */
              if (GET_CODE (addr) == POST_INC)
                return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
              return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
            }
          else
            {
              /* This is an undefined situation.  We should load into the
                 address register *and* update that register.  Probably
                 we don't need to handle this at all.  */
              if (GET_CODE (addr) == POST_INC)
                return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
              return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
            }
        }
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          operands[1] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[0]) == REG
                      && GET_CODE (operands[1]) == REG);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              /* No overlap between high target register and address
                 register.  (We do this in a non-obvious way to
                 save a register file writeback)  */
              if (GET_CODE (addr) == PRE_INC)
                return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
              return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
            }
          else
            {
              /* This is an undefined situation.  We should load into the
                 address register *and* update that register.  Probably
                 we don't need to handle this at all.  */
              if (GET_CODE (addr) == PRE_INC)
                return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
              return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
            }
        }
      else if (GET_CODE (addr) == PLUS
               && GET_CODE (XEXP (addr, 0)) == MULT)
        {
          rtx xoperands[4];
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          if (!reg_overlap_mentioned_p (high_reg, addr))
            {
              xoperands[0] = high_reg;
              xoperands[1] = XEXP (addr, 1);
              xoperands[2] = XEXP (XEXP (addr, 0), 0);
              xoperands[3] = XEXP (XEXP (addr, 0), 1);
              output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
                               xoperands);
              return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
            }
          else
            {
              xoperands[0] = high_reg;
              xoperands[1] = XEXP (addr, 1);
              xoperands[2] = XEXP (XEXP (addr, 0), 0);
              xoperands[3] = XEXP (XEXP (addr, 0), 1);
              output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
                               xoperands);
              return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
            }
        }
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address (operands[0], SImode, 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

        mem -> register where the first half of the destination register
        is the same register used in the memory's address.  Reload
        can create such insns.

        mem in this case will be either register indirect or register
        indirect plus a valid offset.

        register -> register move where REGNO(dst) == REGNO(src + 1)
        someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
                            operands[1], 0))
    {
      /* Do the late half first.  */
      if (addreg1)
        output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (singlemove_string (latehalf), latehalf);

      /* Then clobber.  */
      if (addreg1)
        output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return singlemove_string (operands);
    }

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    {
      output_asm_insn (singlemove_string (latehalf), latehalf);
      return singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo -4(%0),%0", &addreg1);

  return "";
}
const char *
output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
          || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
        output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
        output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
         arbitrary address here including pre/post increment/decrement.

         so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}
/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

const char *
output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[5]);
  unsigned long n_bytes = INTVAL (operands[4]);

  /* We can't move more than a word at a time because the PA
     has no integer move insns wider than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 16);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("ldd,ma 8(%1),%3", operands);
        output_asm_insn ("ldd,ma 8(%1),%6", operands);
        output_asm_insn ("std,ma %3,8(%0)", operands);
        output_asm_insn ("addib,>= -16,%2,.-12", operands);
        output_asm_insn ("std,ma %6,8(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 16 != 0)
          {
            operands[4] = GEN_INT (n_bytes % 8);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("ldd,ma 8(%1),%3", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("ldd 0(%1),%6", operands);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("std,ma %3,8(%0)", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("stdby,e %6,%4(%0)", operands);
          }
        return "";

      case 4:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 8);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
        output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
        output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
        output_asm_insn ("addib,>= -8,%2,.-12", operands);
        output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 8 != 0)
          {
            operands[4] = GEN_INT (n_bytes % 4);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("ldw 0(%1),%6", operands);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
          }
        return "";

      case 2:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 4);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
        output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
        output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
        output_asm_insn ("addib,>= -4,%2,.-12", operands);
        output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 4 != 0)
          {
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("ldb 0(%1),%6", operands);
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("stb %6,0(%0)", operands);
          }
        return "";

      case 1:
        /* Pre-adjust the loop counter.  */
        operands[4] = GEN_INT (n_bytes - 2);
        output_asm_insn ("ldi %4,%2", operands);

        /* Copying loop.  */
        output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
        output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
        output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
        output_asm_insn ("addib,>= -2,%2,.-12", operands);
        output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 2 != 0)
          {
            output_asm_insn ("ldb 0(%1),%3", operands);
            output_asm_insn ("stb %3,0(%0)", operands);
          }
        return "";

      default:
        gcc_unreachable ();
    }
}
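
/* Illustrative sketch, not part of the port: a C rendering of the
   word-size copying loop emitted above for the align == 4 case -- unrolled
   by two, with the counter pre-adjusted, followed by the residual
   handling.  The name is hypothetical; it assumes n_bytes >= 8 and
   word-aligned pointers, matching the "executes at least twice" note.  */

static void
block_move_reference (unsigned int *dst, unsigned int *src,
                      unsigned long n_bytes)
{
  long count = (long) n_bytes - 8;      /* pre-adjusted loop counter */

  /* The unrolled loop: two words per iteration.  */
  do
    {
      *dst++ = *src++;
      *dst++ = *src++;
      count -= 8;
    }
  while (count >= 0);

  /* Residual: up to 7 bytes remain.  */
  if (n_bytes % 8 >= 4)
    *dst++ = *src++;
  if (n_bytes % 4 != 0)
    {
      unsigned char *d = (unsigned char *) dst;
      unsigned char *s = (unsigned char *) src;
      unsigned long r;

      for (r = 0; r < n_bytes % 4; r++)  /* stbys,e equivalent */
        d[r] = s[r];
    }
}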
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_movmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than a word at a time because the PA
     has no integer move insns wider than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns += 2;

      if ((n_bytes % align) != 0)
        n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
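
/* Illustrative sketch, not part of the port: the counting logic above in
   isolation.  The copy loop costs a fixed number of insns, and each
   residual transfer adds a load/store pair.  For example, n_bytes == 11
   with align == 4 leaves a full word plus three bytes of residual, so two
   extra pairs are charged.  LOOP_INSNS is a parameter here rather than a
   claim about the exact loop cost; the function name is hypothetical.  */

static unsigned int
movmem_length_estimate (unsigned long n_bytes, unsigned int align,
                        unsigned int loop_insns)
{
  unsigned int n_insns = loop_insns;

  if (n_bytes % (2 * align) != 0)
    {
      if (n_bytes % (2 * align) >= align)
        n_insns += 2;   /* one full word of residual */
      if (n_bytes % align != 0)
        n_insns += 2;   /* sub-word residual */
    }

  return n_insns * 4;   /* each PA insn is 4 bytes */
}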
/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

const char *
output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[3]);
  unsigned long n_bytes = INTVAL (operands[2]);

  /* We can't clear more than a word at a time because the PA
     has no integer move insns wider than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 16);
        output_asm_insn ("ldi %2,%1", operands);

        /* Loop.  */
        output_asm_insn ("std,ma %%r0,8(%0)", operands);
        output_asm_insn ("addib,>= -16,%1,.-4", operands);
        output_asm_insn ("std,ma %%r0,8(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 16 != 0)
          {
            operands[2] = GEN_INT (n_bytes % 8);
            if (n_bytes % 16 >= 8)
              output_asm_insn ("std,ma %%r0,8(%0)", operands);
            if (n_bytes % 8 != 0)
              output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
          }
        return "";

      case 4:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 8);
        output_asm_insn ("ldi %2,%1", operands);

        /* Loop.  */
        output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
        output_asm_insn ("addib,>= -8,%1,.-4", operands);
        output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

        /* Handle the residual.  There could be up to 7 bytes of
           residual to copy!  */
        if (n_bytes % 8 != 0)
          {
            operands[2] = GEN_INT (n_bytes % 4);
            if (n_bytes % 8 >= 4)
              output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
            if (n_bytes % 4 != 0)
              output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
          }
        return "";

      case 2:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 4);
        output_asm_insn ("ldi %2,%1", operands);

        /* Loop.  */
        output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
        output_asm_insn ("addib,>= -4,%1,.-4", operands);
        output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 4 != 0)
          {
            if (n_bytes % 4 >= 2)
              output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
            if (n_bytes % 2 != 0)
              output_asm_insn ("stb %%r0,0(%0)", operands);
          }
        return "";

      case 1:
        /* Pre-adjust the loop counter.  */
        operands[2] = GEN_INT (n_bytes - 2);
        output_asm_insn ("ldi %2,%1", operands);

        /* Loop.  */
        output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
        output_asm_insn ("addib,>= -2,%1,.-4", operands);
        output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

        /* Handle the residual.  */
        if (n_bytes % 2 != 0)
          output_asm_insn ("stb %%r0,0(%0)", operands);

        return "";

      default:
        gcc_unreachable ();
    }
}
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as emit_block_move, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no integer move insns wider than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns += 1;

      if ((n_bytes % align) != 0)
        n_insns += 1;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}
const char *
output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
        if ((mask & (1 << ls0)) == 0)
          break;

      for (ls1 = ls0; ls1 < 32; ls1++)
        if ((mask & (1 << ls1)) != 0)
          break;

      for (ms0 = ls1; ms0 < 32; ms0++)
        if ((mask & (1 << ms0)) == 0)
          break;

      gcc_assert (ms0 == 32);

      if (ls1 == 32)
        {
          len = ls0;

          gcc_assert (len);

          operands[2] = GEN_INT (len);
          return "{extru|extrw,u} %1,31,%2,%0";
        }
      else
        {
          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          p = 31 - ls0;
          len = ls1 - ls0;

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "{depi|depwi} 0,%2,%3,%0";
        }
    }
  else
    return "and %1,%2,%0";
}
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */
const char *
output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
          break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
          break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
          break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
        {
          len = ls0;

          gcc_assert (len);

          operands[2] = GEN_INT (len);
          return "extrd,u %1,63,%2,%0";
        }
      else
        {
          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          p = 63 - ls0;
          len = ls1 - ls0;

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "depdi 0,%2,%3,%0";
        }
    }
  else
    return "and %1,%2,%0";
}
const char *
output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */
const char *
output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
              || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}
/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Output an ascii string.  */
void
output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];     /* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
        {
          register unsigned int c = (unsigned char) p[i + io];

          if (c == '\"' || c == '\\')
            partial_output[co++] = '\\';
          if (c >= ' ' && c < 0177)
            partial_output[co++] = c;
          else
            {
              unsigned int hexd;
              partial_output[co++] = '\\';
              partial_output[co++] = 'x';
              hexd = c / 16 + '0';
              if (hexd > '9')
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
              hexd = c % 16 + '0';
              if (hexd > '9')
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
            }
        }
      if (chars_output + co > 243)
        {
          fputs ("\"\n\t.STRING \"", file);
          chars_output = 0;
        }
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}
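
/* Illustrative sketch, not part of the port: the hex-digit computation
   used above, in isolation.  '0' + d yields '0'..'9' directly; for d > 9
   the code slides the character past the gap between '9' and 'a' in
   ASCII, so 10..15 map to 'a'..'f'.  The helper name is hypothetical.  */

static char
hex_digit (unsigned int d)
{
  char c = d + '0';

  if (c > '9')
    c -= '9' - 'a' + 1; /* same adjustment as output_ascii */
  return c;
}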
/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD
   notes lying around.

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch
   instructions.  */
static void
remove_useless_addtr_insns (int check_notes)
{
  rtx insn;
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */
  if (optimize > 0)
    {
      int fcmp_count = 0;
      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
         instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
        {
          rtx tmp;

          /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
          if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
            continue;

          tmp = PATTERN (insn);

          /* It must be a set.  */
          if (GET_CODE (tmp) != SET)
            continue;

          /* If the destination is CCFP, then we've found an fcmp insn.  */
          tmp = SET_DEST (tmp);
          if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
            {
              fcmp_count++;
              continue;
            }

          tmp = PATTERN (insn);
          /* If this is an fbranch instruction, bump the fbranch counter.  */
          if (GET_CODE (tmp) == SET
              && SET_DEST (tmp) == pc_rtx
              && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
              && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
              && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
              && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
            {
              fbranch_count++;
              continue;
            }
        }

      /* Find all floating point compare + branch insns.  If possible,
         reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
        {
          rtx tmp, next;

          /* Ignore anything that isn't an INSN.  */
          if (GET_CODE (insn) != INSN)
            continue;

          tmp = PATTERN (insn);

          /* It must be a set.  */
          if (GET_CODE (tmp) != SET)
            continue;

          /* The destination must be CCFP, which is register zero.  */
          tmp = SET_DEST (tmp);
          if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
            continue;

          /* INSN should be a set of CCFP.

             See if the result of this insn is used in a reversed FP
             conditional branch.  If so, reverse our condition and
             the branch.  Doing so avoids useless add,tr insns.  */
          next = next_insn (insn);
          while (next)
            {
              /* Jumps, calls and labels stop our search.  */
              if (GET_CODE (next) == JUMP_INSN
                  || GET_CODE (next) == CALL_INSN
                  || GET_CODE (next) == CODE_LABEL)
                break;

              /* As does another fcmp insn.  */
              if (GET_CODE (next) == INSN
                  && GET_CODE (PATTERN (next)) == SET
                  && GET_CODE (SET_DEST (PATTERN (next))) == REG
                  && REGNO (SET_DEST (PATTERN (next))) == 0)
                break;

              next = next_insn (next);
            }

          /* Is NEXT_INSN a branch?  */
          if (next
              && GET_CODE (next) == JUMP_INSN)
            {
              rtx pattern = PATTERN (next);

              /* If it is a reversed fp conditional branch (e.g. uses add,tr)
                 and CCFP dies, then reverse our conditional and the branch
                 to avoid the add,tr.  */
              if (GET_CODE (pattern) == SET
                  && SET_DEST (pattern) == pc_rtx
                  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
                  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
                  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
                  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
                  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
                  && (fcmp_count == fbranch_count
                      || (check_notes
                          && find_regno_note (next, REG_DEAD, 0))))
                {
                  /* Reverse the branch.  */
                  tmp = XEXP (SET_SRC (pattern), 1);
                  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
                  XEXP (SET_SRC (pattern), 2) = tmp;
                  INSN_CODE (next) = -1;

                  /* Reverse our condition.  */
                  tmp = PATTERN (insn);
                  PUT_CODE (XEXP (tmp, 1),
                            (reverse_condition_maybe_unordered
                             (GET_CODE (XEXP (tmp, 1)))));
                }
            }
        }
    }

  pass++;
}
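
/* Illustrative sketch, not part of the port: the transformation above on a
   toy representation.  Reversing the branch swaps the two arms of the
   IF_THEN_ELSE; reversing the comparison must be NaN-aware, which is why
   the code uses reverse_condition_maybe_unordered -- e.g. LT must become
   UNGE, not GE, so unordered operands still take the same path.  All
   names below are hypothetical.  */

enum toy_cc { TOY_LT, TOY_UNGE };

struct toy_branch
{
  enum toy_cc cond;
  int if_true, if_false;        /* branch targets */
};

static void
reverse_fp_branch (struct toy_branch *b)
{
  int tmp = b->if_true;

  /* Swap the arms, then invert the condition with unordered handling.  */
  b->if_true = b->if_false;
  b->if_false = tmp;
  b->cond = b->cond == TOY_LT ? TOY_UNGE : TOY_LT;
}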
/* You may have trouble believing this, but this is the 32 bit HP-PA
   stack layout.  Wow.

   Offset               Contents

   Variable arguments   (optional; any number may be allocated)

   SP-(4*(N+9))         arg word N
        :                   :
      SP-52              arg word 4

   Fixed arguments      (must be allocated; may remain unused)

      SP-48              arg word 3
      SP-44              arg word 2
      SP-40              arg word 1
      SP-36              arg word 0

   Frame Marker

      SP-32              External Data Pointer (DP)
      SP-28              External sr4
      SP-24              External/stub RP (RP')
      SP-20              Current RP
      SP-16              Static Link
      SP-12              Clean up
      SP-8               Calling Stub RP (RP'')
      SP-4               Previous SP

   Top of Frame

      SP-0               Stack Pointer (points to next available address)

*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   things simple.

   Offset               Contents

      SP (FP')           Previous FP
      SP + 4             Alignment filler (sigh)
      SP + 8             Space for locals reserved here.
       .                 .
      SP + n             All call saved register used.
       .                 .
      SP + o             All call saved fp registers used.
       .                 .
      SP + p (SP')       points to next available address.

*/

/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;
/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.*/

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx insn, dest, src, basereg;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
        {
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_rtx_SET (VOIDmode, tmpreg,
                                     gen_rtx_PLUS (Pmode, basereg, delta)));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (VOIDmode,
                                   gen_rtx_MEM (word_mode,
                                                gen_rtx_PLUS (word_mode,
                                                              basereg,
                                                              delta)),
                                   src));
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx insn, basereg, srcreg, delta;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
         in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             plus_constant (gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (VOIDmode, tmpreg,
                                   gen_rtx_PLUS (Pmode, basereg, delta)));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
                      gen_rtx_PLUS (Pmode, basereg,
                                    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}
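
/* Illustrative sketch, not part of the port: the three displacement cases
   handled by store_reg, set_reg_plus_d and load_reg.  A 14-bit signed
   displacement fits directly in one load/store; a wider 32-bit value
   needs the HIGH/LO_SUM pair; on 64-bit targets, displacements outside
   32 bits need a full constant load first.  Names and return codes here
   are hypothetical.  */

static int
displacement_strategy (long long disp, int target_64bit)
{
  if (disp >= -8192 && disp < 8192)
    return 1;           /* single insn, 14-bit immediate */
  if (target_64bit && (disp < -0x80000000LL || disp >= 0x80000000LL))
    return 3;           /* load the constant, then index off it */
  return 2;             /* addil/ldo style HIGH + LO_SUM pair */
}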
HOST_WIDE_INT
compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in hppa_expand_prologue and hppa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
        continue;
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (df_regs_ever_live_p (i))
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (df_regs_ever_live_p (i)
        || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
      {
        freg_saved = 1;

        /* We always save both halves of the FP register, so always
           increment the frame size by 8 bytes.  */
        size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
        *fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += crtl->outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!current_function_is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
          & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
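
/* Illustrative sketch, not part of the port: the rounding idiom used
   throughout the frame-size code.  For a power-of-two ALIGN,
   (size + align - 1) & ~(align - 1) rounds SIZE up to the next multiple;
   e.g. 13 bytes with 8-byte alignment rounds to 16.  The helper name is
   hypothetical.  */

static unsigned long
round_up_pow2 (unsigned long size, unsigned long align)
{
  /* ALIGN must be a power of two for the mask trick to be valid.  */
  return (size + align - 1) & ~(align - 1);
}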
/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is an int: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

static void
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* hppa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (current_function_is_leaf)
    fputs (",NO_CALLS", file);
  else
    fputs (",CALLS", file);
  if (rp_saved)
    fputs (",SAVE_RP", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
}
void
hppa_expand_prologue (void)
{
  int merge_sp_adjust_with_store = 0;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;
  int i;
  rtx insn, tmpreg;

  gr_saved = 0;
  fr_saved = 0;
  save_fregs = 0;

  /* Compute total size for frame pointer, filler, locals and rounding to
     the next word boundary.  Similar code appears in compute_frame_size
     and must be changed in tandem with this code.  */
  local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
  if (local_fsize || frame_pointer_needed)
    local_fsize += STARTING_FRAME_OFFSET;

  actual_fsize = compute_frame_size (size, &save_fregs);

  /* Compute a few things we will use often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Save RP first.  The calling conventions manual states RP will
     always be stored into the caller's frame at sp - 20 or sp - 16
     depending on which ABI is in use.  */
  if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
    {
      store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
      rp_saved = true;
    }
  else
    rp_saved = false;

  /* Allocate the local frame and set up the frame pointer if needed.  */
  if (actual_fsize != 0)
    {
      if (frame_pointer_needed)
        {
          /* Copy the old frame pointer temporarily into %r1.  Set up the
             new stack pointer, then store away the saved old frame pointer
             into the stack at sp and at the same time update the stack
             pointer by actual_fsize bytes.  Two versions, first
             handles small (<8k) frames.  The second handles large (>=8k)
             frames.  */
          insn = emit_move_insn (tmpreg, frame_pointer_rtx);
          if (DO_FRAME_NOTES)
            RTX_FRAME_RELATED_P (insn) = 1;

          insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
          if (DO_FRAME_NOTES)
            RTX_FRAME_RELATED_P (insn) = 1;

          if (VAL_14_BITS_P (actual_fsize))
            store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
          else
            {
              /* It is incorrect to store the saved frame pointer at *sp,
                 then increment sp (writes beyond the current stack boundary).

                 So instead use stwm to store at *sp and post-increment the
                 stack pointer as an atomic operation.  Then increment sp to
                 finish allocating the new frame.  */
              HOST_WIDE_INT adjust1 = 8192 - 64;
              HOST_WIDE_INT adjust2 = actual_fsize - adjust1;

              store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
              set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                              adjust2, 1);
            }

          /* We set SAVE_SP in frames that need a frame pointer.  Thus,
             we need to store the previous stack pointer (frame pointer)
             into the frame marker on targets that use the HP unwind
             library.  This allows the HP unwind library to be used to
             unwind GCC frames.  However, we are not fully compatible
             with the HP library because our frame layout differs from
             that specified in the HP runtime specification.

             We don't want a frame note on this instruction as the frame
             marker moves during dynamic stack allocation.

             This instruction also serves as a blockage to prevent
             register spills from being scheduled before the stack
             pointer is raised.  This is necessary as we store
             registers using the frame pointer as a base register,
             and the frame pointer is set before sp is raised.  */
          if (TARGET_HPUX_UNWIND_LIBRARY)
            {
              rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
                                       GEN_INT (TARGET_64BIT ? -8 : -4));

              emit_move_insn (gen_rtx_MEM (word_mode, addr),
                              frame_pointer_rtx);
            }
          else
            emit_insn (gen_blockage ());
        }
      /* no frame pointer needed.  */
      else
        {
          /* In some cases we can perform the first callee register save
             and allocating the stack frame at the same time.   If so, just
             make a note of it and defer allocating the frame until saving
             the callee registers.  */
          if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
            merge_sp_adjust_with_store = 1;
          /* Can not optimize.  Adjust the stack frame by actual_fsize
             bytes.  */
          else
            set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                            actual_fsize, 1);
        }
    }

  /* Normal register save.

     Do not save the frame pointer in the frame_pointer_needed case.  It
     was done earlier.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* Saving the EH return data registers in the frame is the simplest
         way to get the frame unwind information emitted.  We put them
         just before the general registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
        {
          unsigned int i, regno;

          for (i = 0; ; ++i)
            {
              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;

              store_reg (regno, offset, FRAME_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }

      for (i = 18; i >= 4; i--)
        if (df_regs_ever_live_p (i) && ! call_used_regs[i])
          {
            store_reg (i, offset, FRAME_POINTER_REGNUM);
            offset += UNITS_PER_WORD;
            gr_saved++;
          }
      /* Account for %r3 which is saved in a special place.  */
      gr_saved++;
    }
  /* No frame pointer needed.  */
  else
    {
      offset = local_fsize - actual_fsize;

      /* Saving the EH return data registers in the frame is the simplest
         way to get the frame unwind information emitted.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
        {
          unsigned int i, regno;

          for (i = 0; ; ++i)
            {
              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;

              /* If merge_sp_adjust_with_store is nonzero, then we can
                 optimize the first save.  */
              if (merge_sp_adjust_with_store)
                {
                  store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
                  merge_sp_adjust_with_store = 0;
                }
              else
                store_reg (regno, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }

      for (i = 18; i >= 3; i--)
        if (df_regs_ever_live_p (i) && ! call_used_regs[i])
          {
            /* If merge_sp_adjust_with_store is nonzero, then we can
               optimize the first GR save.  */
            if (merge_sp_adjust_with_store)
              {
                store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
                merge_sp_adjust_with_store = 0;
              }
            else
              store_reg (i, offset, STACK_POINTER_REGNUM);
            offset += UNITS_PER_WORD;
            gr_saved++;
          }

      /* If we wanted to merge the SP adjustment with a GR save, but we never
         did any GR saves, then just emit the adjustment here.  */
      if (merge_sp_adjust_with_store)
        set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                        actual_fsize, 1);
    }

  /* The hppa calling conventions say that %r19, the pic offset
     register, is saved at sp - 32 (in this function's frame)
     when generating PIC code.  FIXME:  What is the correct thing
     to do for functions which make no calls and allocate no
     frame?  Do we need to allocate a frame, or can we just omit
     the save?   For now we'll just omit the save.

     We don't want a note on this insn as the frame marker can
     move if there is a dynamic stack allocation.  */
  if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
    {
      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

      emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* Floating point register store.  */
  if (save_fregs)
    {
      rtx base;

      /* First get the frame or stack pointer to the start of the FP register
         save area.  */
      if (frame_pointer_needed)
        {
          set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
          base = frame_pointer_rtx;
        }
      else
        {
          set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
          base = stack_pointer_rtx;
        }

      /* Now actually save the FP registers.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
        {
          if (df_regs_ever_live_p (i)
              || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
            {
              rtx addr, insn, reg;
              addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
              reg = gen_rtx_REG (DFmode, i);
              insn = emit_move_insn (addr, reg);
              if (DO_FRAME_NOTES)
                {
                  RTX_FRAME_RELATED_P (insn) = 1;
                  if (TARGET_64BIT)
                    {
                      rtx mem = gen_rtx_MEM (DFmode,
                                             plus_constant (base, offset));
                      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                                    gen_rtx_SET (VOIDmode, mem, reg));
                    }
                  else
                    {
                      rtx meml = gen_rtx_MEM (SFmode,
                                              plus_constant (base, offset));
                      rtx memr = gen_rtx_MEM (SFmode,
                                              plus_constant (base, offset + 4));
                      rtx regl = gen_rtx_REG (SFmode, i);
                      rtx regr = gen_rtx_REG (SFmode, i + 1);
                      rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
                      rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
                      rtvec vec;

                      RTX_FRAME_RELATED_P (setl) = 1;
                      RTX_FRAME_RELATED_P (setr) = 1;
                      vec = gen_rtvec (2, setl, setr);
                      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                                    gen_rtx_SEQUENCE (VOIDmode, vec));
                    }
                }
              offset += GET_MODE_SIZE (DFmode);
              fr_saved++;
            }
        }
    }
}
/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
        {
          emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
          src = gen_rtx_MEM (word_mode, tmpreg);
        }
      else
        src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}
/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (unsigned int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      unsigned int old_total = total_code_bytes;

      total_code_bytes += nbytes;

      /* Be prepared to handle overflows.  */
      if (old_total > total_code_bytes)
        total_code_bytes = UINT_MAX;
    }
}
/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

static void
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rtx insn = get_last_insn ();

  last_address = 0;

  /* hppa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end
     of a function.

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (GET_CODE (insn) == NOTE)
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && GET_CODE (insn) == CALL_INSN)
    {
      fputs ("\tnop\n", file);
      last_address += 4;
    }

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We're done with this subspace except possibly for some additional
         debug information.  Forget that we are in this subspace to ensure
         that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  if (INSN_ADDRESSES_SET_P ())
    {
      insn = get_last_nonnote_insn ();
      last_address += INSN_ADDRESSES (INSN_UID (insn));
      if (INSN_P (insn))
        last_address += insn_default_length (insn);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
                      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
    }
  else
    last_address = UINT_MAX;

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
}
void
hppa_expand_epilogue (void)
{
  rtx tmpreg;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;
  int i;
  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (rp_saved)
    {
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
        {
          load_reg (2, ret_off, FRAME_POINTER_REGNUM);
          ret_off = 0;
        }
      else
        {
          /* No frame pointer, and stack is smaller than 8k.  */
          if (VAL_14_BITS_P (ret_off - actual_fsize))
            {
              load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
              ret_off = 0;
            }
        }
    }

  /* General register restores.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
         to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
        {
          unsigned int i, regno;

          for (i = 0; ; ++i)
            {
              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;

              load_reg (regno, offset, FRAME_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }

      for (i = 18; i >= 4; i--)
        if (df_regs_ever_live_p (i) && ! call_used_regs[i])
          {
            load_reg (i, offset, FRAME_POINTER_REGNUM);
            offset += UNITS_PER_WORD;
          }
    }
  else
    {
      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
         to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && crtl->calls_eh_return)
        {
          unsigned int i, regno;

          for (i = 0; ; ++i)
            {
              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)
                break;

              /* Only for the first load.
                 merge_sp_adjust_with_load holds the register load
                 with which we will merge the sp adjustment.  */
              if (merge_sp_adjust_with_load == 0
                  && local_fsize == 0
                  && VAL_14_BITS_P (-actual_fsize))
                merge_sp_adjust_with_load = regno;
              else
                load_reg (regno, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }

      for (i = 18; i >= 3; i--)
        {
          if (df_regs_ever_live_p (i) && ! call_used_regs[i])
            {
              /* Only for the first load.
                 merge_sp_adjust_with_load holds the register load
                 with which we will merge the sp adjustment.  */
              if (merge_sp_adjust_with_load == 0
                  && local_fsize == 0
                  && VAL_14_BITS_P (-actual_fsize))
                merge_sp_adjust_with_load = i;
              else
                load_reg (i, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;
            }
        }
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */
  if (save_fregs)
    {
      /* Adjust the register to index off of.  */
      if (frame_pointer_needed)
        set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
      else
        set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
        if (df_regs_ever_live_p (i)
            || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
          {
            rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
            rtx dest = gen_rtx_REG (DFmode, i);
            emit_move_insn (dest, src);
          }
    }

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
  if (frame_pointer_needed)
    {
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
    }
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
    {
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
    }
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
                    - actual_fsize, 0);

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
  if (ret_off != 0)
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
                 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
                 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
    }
}
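/* Illustrative sketch (not from the original source): for a 32-bit
   function with a frame pointer, the sequence expanded above comes out
   roughly as

	ldw	-20(%r3),%r2		; early restore of the return pointer
	...				; general and FP register restores
	ldo	64(%r3),%r30		; sp = fp + 64 (avoids a window where
					; live frame data sits below sp)
	ldw,mb	-64(%r30),%r3		; restore old fp and cut the stack

   The exact registers and offsets depend on the frame layout.  */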
rtx
hppa_pic_save_rtx (void)
{
  return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
}
#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0
#endif


/* Vector of funcdef numbers.  */
static VEC(int,heap) *funcdef_nos;

/* Output deferred profile counters.  */
static void
output_deferred_profile_counters (void)
{
  unsigned int i;
  int align, n;

  if (VEC_empty (int, funcdef_nos))
    return;

  switch_to_section (data_section);
  align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
  ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));

  for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "LP", n);
      assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
    }

  VEC_free (int, heap, funcdef_nos);
}
void
hppa_profile_hook (int label_no)
{
  /* We use SImode for the address of the function in both 32 and
     64-bit code to avoid having to provide DImode versions of the
     lcla2 and load_offset_label_address insn patterns.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx label_rtx = gen_label_rtx ();
  rtx begin_label_rtx, call_insn;
  char begin_label_name[16];

  ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
                               label_no);
  begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));

  if (TARGET_64BIT)
    emit_move_insn (arg_pointer_rtx,
                    gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
                                  GEN_INT (64)));

  emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));

  /* The address of the function is loaded into %r25 with an instruction-
     relative sequence that avoids the use of relocations.  The sequence
     is split so that the load_offset_label_address instruction can
     occupy the delay slot of the call to _mcount.  */
  if (TARGET_PA_20)
    emit_insn (gen_lcla2 (reg, label_rtx));
  else
    emit_insn (gen_lcla1 (reg, label_rtx));

  emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
                                            reg, begin_label_rtx, label_rtx));

#if !NO_DEFERRED_PROFILE_COUNTERS
  {
    rtx count_label_rtx, addr, r24;
    char count_label_name[16];

    VEC_safe_push (int, heap, funcdef_nos, label_no);
    ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
    count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));

    addr = force_reg (Pmode, count_label_rtx);
    r24 = gen_rtx_REG (Pmode, 24);
    emit_move_insn (r24, addr);

    call_insn =
      emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
                                             gen_rtx_SYMBOL_REF (Pmode,
                                                                 "_mcount")),
                                GEN_INT (TARGET_64BIT ? 24 : 12)));

    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
  }
#else

  call_insn =
    emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
                                           gen_rtx_SYMBOL_REF (Pmode,
                                                               "_mcount")),
                              GEN_INT (TARGET_64BIT ? 16 : 8)));

#endif

  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));

  /* Indicate the _mcount call cannot throw, nor will it execute a
     non-local goto.  */
  make_reg_eh_region_note_nothrow_nononlocal (call_insn);
}
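/* Example (illustrative): with deferred counters enabled, the call
   generated above loads the address of the per-function LP<n> counter
   into %r24 before branching to _mcount, and
   output_deferred_profile_counters later emits the matching storage,
   e.g. for funcdef number 7 on a 32-bit target:

	.data
	.align 4
   LP7:
	.word 0

   The label names come from ASM_GENERATE_INTERNAL_LABEL with the "LP"
   prefix; size and alignment follow LONG_TYPE_SIZE.  */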
/* Fetch the return address for the frame COUNT steps up from
   the current frame, after the prologue.  FRAMEADDR is the
   frame pointer of the COUNT frame.

   We want to ignore any export stub remnants here.  To handle this,
   we examine the code at the return address, and if it is an export
   stub, we return a memory rtx for the stub return address stored
   at frame-24.

   The value returned is used in two different ways:

	1. To find a function's caller.

	2. To change the return address for a function.

   This function handles most instances of case 1; however, it will
   fail if there are two levels of stubs to execute on the return
   path.  The only way I believe that can happen is if the return value
   needs a parameter relocation, which never happens for C code.

   This function handles most instances of case 2; however, it will
   fail if we did not originally have stub code on the return path
   but will need stub code on the new return path.  This can happen if
   the caller & callee are both in the main program, but the new
   return location is in a shared library.  */

rtx
return_addr_rtx (int count, rtx frameaddr)
{
  rtx label;
  rtx rp;
  rtx saved_rp;
  rtx ins;

  /* Instruction stream at the normal return address for the export stub:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0x004010a1 | stub+12:  ldsid (sr0,rp),r1
	0x00011820 | stub+16:  mtsp r1,sr0
	0xe0400002 | stub+20:  be,n 0(sr0,rp)

     0xe0400002 must be specified as -532676606 so that it won't be
     rejected as an invalid immediate operand on 64-bit hosts.  */

  HOST_WIDE_INT insns[4] = {0x4bc23fd1, 0x004010a1, 0x00011820, -532676606};
  int i;

  if (count != 0)
    return NULL_RTX;

  rp = get_hard_reg_initial_val (Pmode, 2);

  if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
    return rp;

  /* If there is no export stub then just use the value saved from
     the return pointer register.  */

  saved_rp = gen_reg_rtx (Pmode);
  emit_move_insn (saved_rp, rp);

  /* Get pointer to the instruction stream.  We have to mask out the
     privilege level from the two low order bits of the return address
     pointer here so that ins will point to the start of the first
     instruction that would have been executed if we returned.  */
  ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
  label = gen_label_rtx ();

  /* Check the instruction stream at the normal return address for the
     export stub.  If it is an export stub, then our return address is
     really in -24[frameaddr].  */

  for (i = 0; i < 3; i++)
    {
      rtx op0 = gen_rtx_MEM (SImode, plus_constant (ins, i * 4));
      rtx op1 = GEN_INT (insns[i]);
      emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
    }

  /* Here we know that our return address points to an export
     stub.  We don't want to return the address of the export stub,
     but rather the return address of the export stub.  That return
     address is stored at -24[frameaddr].  */

  emit_move_insn (saved_rp,
                  gen_rtx_MEM (Pmode,
                               memory_address (Pmode,
                                               plus_constant (frameaddr,
                                                              -24))));

  emit_label (label);

  return saved_rp;
}
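/* Illustrative note (not from the original source): MASK_RETURN_ADDR
   strips the two low-order privilege-level bits, so a return address
   of 0x40001233 yields ins = 0x40001230, the address of the first stub
   instruction compared against the insns[] template above.  */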
void
emit_bcond_fp (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[0]);
  rtx operand0 = operands[1];
  rtx operand1 = operands[2];
  rtx label = operands[3];

  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
                          gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                               gen_rtx_IF_THEN_ELSE (VOIDmode,
                                                     gen_rtx_fmt_ee (NE,
                                                              VOIDmode,
                                                              gen_rtx_REG (CCFPmode, 0),
                                                              const0_rtx),
                                                     gen_rtx_LABEL_REF (VOIDmode, label),
                                                     pc_rtx)));
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  /* Don't adjust costs for a pa8000 chip, also do not adjust any
     true dependencies as they are described with bypasses now.  */
  if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
    return cost;

  if (! recog_memoized (insn))
    return 0;

  attr_type = get_attr_type (insn);

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
         cycles later.  */

      if (attr_type == TYPE_FPLOAD)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);
          if (GET_CODE (pat) == PARALLEL)
            {
              /* This happens for the fldXs,mb patterns.  */
              pat = XVECEXP (pat, 0, 0);
            }
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return 0 for now.  */
            return 0;

          if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
            {
              if (! recog_memoized (dep_insn))
                return 0;
              switch (get_attr_type (dep_insn))
                {
                case TYPE_FPALU:
                case TYPE_FPMULSGL:
                case TYPE_FPMULDBL:
                case TYPE_FPDIVSGL:
                case TYPE_FPDIVDBL:
                case TYPE_FPSQRTSGL:
                case TYPE_FPSQRTDBL:
                  /* A fpload can't be issued until one cycle before a
                     preceding arithmetic operation has finished if
                     the target of the fpload is any of the sources
                     (or destination) of the arithmetic operation.  */
                  return insn_default_latency (dep_insn) - 1;

                default:
                  return 0;
                }
            }
        }
      else if (attr_type == TYPE_FPALU)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);
          if (GET_CODE (pat) == PARALLEL)
            {
              /* This happens for the fldXs,mb patterns.  */
              pat = XVECEXP (pat, 0, 0);
            }
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return 0 for now.  */
            return 0;

          if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
            {
              if (! recog_memoized (dep_insn))
                return 0;
              switch (get_attr_type (dep_insn))
                {
                case TYPE_FPDIVSGL:
                case TYPE_FPDIVDBL:
                case TYPE_FPSQRTSGL:
                case TYPE_FPSQRTDBL:
                  /* An ALU flop can't be issued until two cycles before a
                     preceding divide or sqrt operation has finished if
                     the target of the ALU flop is any of the sources
                     (or destination) of the divide or sqrt operation.  */
                  return insn_default_latency (dep_insn) - 2;

                default:
                  return 0;
                }
            }
        }

      /* For other anti dependencies, the cost is 0.  */
      return 0;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
         cycles later.  */
      if (attr_type == TYPE_FPLOAD)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);
          if (GET_CODE (pat) == PARALLEL)
            {
              /* This happens for the fldXs,mb patterns.  */
              pat = XVECEXP (pat, 0, 0);
            }
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return 0 for now.  */
            return 0;

          if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
            {
              if (! recog_memoized (dep_insn))
                return 0;
              switch (get_attr_type (dep_insn))
                {
                case TYPE_FPALU:
                case TYPE_FPMULSGL:
                case TYPE_FPMULDBL:
                case TYPE_FPDIVSGL:
                case TYPE_FPDIVDBL:
                case TYPE_FPSQRTSGL:
                case TYPE_FPSQRTDBL:
                  /* A fpload can't be issued until one cycle before a
                     preceding arithmetic operation has finished if
                     the target of the fpload is the destination of the
                     arithmetic operation.

                     Exception: For PA7100LC, PA7200 and PA7300, the cost
                     is 3 cycles, unless they bundle together.  We also
                     pay the penalty if the second insn is a fpload.  */
                  return insn_default_latency (dep_insn) - 1;

                default:
                  return 0;
                }
            }
        }
      else if (attr_type == TYPE_FPALU)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);
          if (GET_CODE (pat) == PARALLEL)
            {
              /* This happens for the fldXs,mb patterns.  */
              pat = XVECEXP (pat, 0, 0);
            }
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return 0 for now.  */
            return 0;

          if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
            {
              if (! recog_memoized (dep_insn))
                return 0;
              switch (get_attr_type (dep_insn))
                {
                case TYPE_FPDIVSGL:
                case TYPE_FPDIVDBL:
                case TYPE_FPSQRTSGL:
                case TYPE_FPSQRTDBL:
                  /* An ALU flop can't be issued until two cycles before a
                     preceding divide or sqrt operation has finished if
                     the target of the ALU flop is also the target of
                     the divide or sqrt operation.  */
                  return insn_default_latency (dep_insn) - 2;

                default:
                  return 0;
                }
            }
        }

      /* For other output dependencies, the cost is 0.  */
      return 0;

    default:
      gcc_unreachable ();
    }
}
/* Adjust scheduling priorities.  We use this to try and keep addil
   and the next use of %r1 close together.  */
static int
pa_adjust_priority (rtx insn, int priority)
{
  rtx set = single_set (insn);
  rtx src, dest;
  if (set)
    {
      src = SET_SRC (set);
      dest = SET_DEST (set);
      if (GET_CODE (src) == LO_SUM
          && symbolic_operand (XEXP (src, 1), VOIDmode)
          && ! read_only_operand (XEXP (src, 1), VOIDmode))
        priority >>= 3;

      else if (GET_CODE (src) == MEM
               && GET_CODE (XEXP (src, 0)) == LO_SUM
               && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
               && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
        priority >>= 1;

      else if (GET_CODE (dest) == MEM
               && GET_CODE (XEXP (dest, 0)) == LO_SUM
               && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
               && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
        priority >>= 3;
    }
  return priority;
}
/* The 700 can only issue a single insn at a time.
   The 7XXX processors can issue two insns at a time.
   The 8000 can issue 4 insns at a time.  */
static int
pa_issue_rate (void)
{
  switch (pa_cpu)
    {
    case PROCESSOR_700:		return 1;
    case PROCESSOR_7100:	return 2;
    case PROCESSOR_7100LC:	return 2;
    case PROCESSOR_7200:	return 2;
    case PROCESSOR_7300:	return 2;
    case PROCESSOR_8000:	return 4;

    default:
      gcc_unreachable ();
    }
}
/* Return any length adjustment needed by INSN which already has its length
   computed as LENGTH.  Return zero if no adjustment is necessary.

   For the PA: function calls, millicode calls, and backwards short
   conditional branches with unfilled delay slots need an adjustment by +1
   (to account for the NOP which will be inserted into the instruction stream).

   Also compute the length of an inline block move here as it is too
   complicated to express as a length attribute in pa.md.  */
int
pa_adjust_insn_length (rtx insn, int length)
{
  rtx pat = PATTERN (insn);

  /* Jumps inside switch tables which have unfilled delay slots need
     adjustment.  */
  if (GET_CODE (insn) == JUMP_INSN
      && GET_CODE (pat) == PARALLEL
      && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
    return 4;
  /* Millicode insn with an unfilled delay slot.  */
  else if (GET_CODE (insn) == INSN
           && GET_CODE (pat) != SEQUENCE
           && GET_CODE (pat) != USE
           && GET_CODE (pat) != CLOBBER
           && get_attr_type (insn) == TYPE_MILLI)
    return 4;
  /* Block move pattern.  */
  else if (GET_CODE (insn) == INSN
           && GET_CODE (pat) == PARALLEL
           && GET_CODE (XVECEXP (pat, 0, 0)) == SET
           && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
           && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
           && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
           && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
    return compute_movmem_length (insn) - 4;
  /* Block clear pattern.  */
  else if (GET_CODE (insn) == INSN
           && GET_CODE (pat) == PARALLEL
           && GET_CODE (XVECEXP (pat, 0, 0)) == SET
           && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
           && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
           && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
    return compute_clrmem_length (insn) - 4;
  /* Conditional branch with an unfilled delay slot.  */
  else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
    {
      /* Adjust a short backwards conditional with an unfilled delay slot.  */
      if (GET_CODE (pat) == SET
          && length == 4
          && JUMP_LABEL (insn) != NULL_RTX
          && ! forward_branch_p (insn))
        return 4;
      else if (GET_CODE (pat) == PARALLEL
               && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
               && length == 4)
        return 4;
      /* Adjust dbra insn with short backwards conditional branch with
         unfilled delay slot -- only for case where counter is in a
         general register.  */
      else if (GET_CODE (pat) == PARALLEL
               && GET_CODE (XVECEXP (pat, 0, 1)) == SET
               && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
               && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
               && length == 4
               && ! forward_branch_p (insn))
        return 4;
      else
        return 0;
    }
  return 0;
}
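/* Worked example (illustrative): a 4-byte backwards "comb,=" with an
   unfilled delay slot gets a nop appended at output time, so the
   adjustment above reports an extra 4 bytes to keep later
   branch-distance calculations honest.  */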
/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output a 'nop' if there's nothing for the delay slot.  */
      if (dbr_sequence_length () == 0)
        fputs ("\n\tnop", file);
      return;
    case '*':
      /* Output a nullification completer if there's nothing for the
         delay slot or nullification is requested.  */
      if (dbr_sequence_length () == 0 ||
          (final_sequence &&
           INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
        fputs (",n", file);
      return;
    case 'R':
      /* Print out the second register name of a register pair.
         I.e., R (6) => 7.  */
      fputs (reg_names[REGNO (x) + 1], file);
      return;
    case 'r':
      /* A register or zero.  */
      if (x == const0_rtx
          || (x == CONST0_RTX (DFmode))
          || (x == CONST0_RTX (SFmode)))
        {
          fputs ("%r0", file);
          return;
        }
      else
        break;
    case 'f':
      /* A register or zero (floating point).  */
      if (x == const0_rtx
          || (x == CONST0_RTX (DFmode))
          || (x == CONST0_RTX (SFmode)))
        {
          fputs ("%fr0", file);
          return;
        }
      else
        break;
    case 'A':
      {
        rtx xoperands[2];

        xoperands[0] = XEXP (XEXP (x, 0), 0);
        xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
        output_global_address (file, xoperands[1], 0);
        fprintf (file, "(%s)", reg_names[REGNO (xoperands[0])]);
        return;
      }

    case 'C':			/* Plain (C)ondition */
    case 'X':
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("=", file);  break;
        case NE:
          fputs ("<>", file);  break;
        case GT:
          fputs (">", file);  break;
        case GE:
          fputs (">=", file);  break;
        case GEU:
          fputs (">>=", file);  break;
        case GTU:
          fputs (">>", file);  break;
        case LT:
          fputs ("<", file);  break;
        case LE:
          fputs ("<=", file);  break;
        case LEU:
          fputs ("<<=", file);  break;
        case LTU:
          fputs ("<<", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'N':			/* Condition, (N)egated */
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("<>", file);  break;
        case NE:
          fputs ("=", file);  break;
        case GT:
          fputs ("<=", file);  break;
        case GE:
          fputs ("<", file);  break;
        case GEU:
          fputs ("<<", file);  break;
        case GTU:
          fputs ("<<=", file);  break;
        case LT:
          fputs (">=", file);  break;
        case LE:
          fputs (">", file);  break;
        case LEU:
          fputs (">>", file);  break;
        case LTU:
          fputs (">>=", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    /* For floating point comparisons.  Note that the output
       predicates are the complement of the desired mode.  The
       conditions for GT, GE, LT, LE and LTGT cause an invalid
       operation exception if the result is unordered and this
       exception is enabled in the floating-point status register.  */
    case 'Y':
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("!=", file);  break;
        case NE:
          fputs ("=", file);  break;
        case GT:
          fputs ("!>", file);  break;
        case GE:
          fputs ("!>=", file);  break;
        case LT:
          fputs ("!<", file);  break;
        case LE:
          fputs ("!<=", file);  break;
        case LTGT:
          fputs ("!<>", file);  break;
        case UNLE:
          fputs ("!?<=", file);  break;
        case UNLT:
          fputs ("!?<", file);  break;
        case UNGE:
          fputs ("!?>=", file);  break;
        case UNGT:
          fputs ("!?>", file);  break;
        case UNEQ:
          fputs ("!?=", file);  break;
        case UNORDERED:
          fputs ("!?", file);  break;
        case ORDERED:
          fputs ("?", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'S':			/* Condition, operands are (S)wapped.  */
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("=", file);  break;
        case NE:
          fputs ("<>", file);  break;
        case GT:
          fputs ("<", file);  break;
        case GE:
          fputs ("<=", file);  break;
        case GEU:
          fputs ("<<=", file);  break;
        case GTU:
          fputs ("<<", file);  break;
        case LT:
          fputs (">", file);  break;
        case LE:
          fputs (">=", file);  break;
        case LEU:
          fputs (">>=", file);  break;
        case LTU:
          fputs (">>", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'B':			/* Condition, (B)oth swapped and negate.  */
      switch (GET_CODE (x))
        {
        case EQ:
          fputs ("<>", file);  break;
        case NE:
          fputs ("=", file);  break;
        case GT:
          fputs (">=", file);  break;
        case GE:
          fputs (">", file);  break;
        case GEU:
          fputs (">>", file);  break;
        case GTU:
          fputs (">>=", file);  break;
        case LT:
          fputs ("<=", file);  break;
        case LE:
          fputs ("<", file);  break;
        case LEU:
          fputs ("<<", file);  break;
        case LTU:
          fputs ("<<=", file);  break;
        default:
          gcc_unreachable ();
        }
      return;
    case 'k':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
      return;
    case 'Q':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
      return;
    case 'L':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
      return;
    case 'O':
      gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      return;
    case 'p':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
      return;
    case 'P':
      gcc_assert (GET_CODE (x) == CONST_INT);
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
      return;
    case 'I':
      if (GET_CODE (x) == CONST_INT)
        fputs ("i", file);
      return;
    case 'M':
    case 'F':
      switch (GET_CODE (XEXP (x, 0)))
        {
        case PRE_DEC:
        case PRE_INC:
          if (ASSEMBLER_DIALECT == 0)
            fputs ("s,mb", file);
          else
            fputs (",mb", file);
          break;
        case POST_DEC:
        case POST_INC:
          if (ASSEMBLER_DIALECT == 0)
            fputs ("s,ma", file);
          else
            fputs (",ma", file);
          break;
        case PLUS:
          if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
            {
              if (ASSEMBLER_DIALECT == 0)
                fputs ("x", file);
            }
          else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
                   || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
            {
              if (ASSEMBLER_DIALECT == 0)
                fputs ("x,s", file);
              else
                fputs (",s", file);
            }
          else if (code == 'F' && ASSEMBLER_DIALECT == 0)
            fputs ("s", file);
          break;
        default:
          if (code == 'F' && ASSEMBLER_DIALECT == 0)
            fputs ("s", file);
          break;
        }
      return;
    case 'G':
      output_global_address (file, x, 0);
      return;
    case 'H':
      output_global_address (file, x, 1);
      return;
    case 0:			/* Don't do anything special */
      break;
    case 'Z':
      {
        unsigned op[3];
        compute_zdepwi_operands (INTVAL (x), op);
        fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
        return;
      }
    case 'z':
      {
        unsigned op[3];
        compute_zdepdi_operands (INTVAL (x), op);
        fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
        return;
      }
    case 'c':
      /* We can get here from a .vtable_inherit due to our
         CONSTANT_ADDRESS_P rejecting perfectly good constant
         addresses.  */
      break;
    default:
      gcc_unreachable ();
    }
  if (GET_CODE (x) == REG)
    {
      fputs (reg_names[REGNO (x)], file);
      if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
        {
          fputs ("R", file);
          return;
        }
      if (FP_REG_P (x)
          && GET_MODE_SIZE (GET_MODE (x)) <= 4
          && (REGNO (x) & 1) == 0)
        fputs ("L", file);
    }
  else if (GET_CODE (x) == MEM)
    {
      int size = GET_MODE_SIZE (GET_MODE (x));
      rtx base = NULL_RTX;
      switch (GET_CODE (XEXP (x, 0)))
        {
        case PRE_DEC:
        case POST_DEC:
          base = XEXP (XEXP (x, 0), 0);
          fprintf (file, "-%d(%s)", size, reg_names[REGNO (base)]);
          break;
        case PRE_INC:
        case POST_INC:
          base = XEXP (XEXP (x, 0), 0);
          fprintf (file, "%d(%s)", size, reg_names[REGNO (base)]);
          break;
        case PLUS:
          if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
            fprintf (file, "%s(%s)",
                     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
                     reg_names[REGNO (XEXP (XEXP (x, 0), 1))]);
          else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
            fprintf (file, "%s(%s)",
                     reg_names[REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
                     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
          else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
            {
              /* Because the REG_POINTER flag can get lost during reload,
                 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
                 index and base registers in the combined move patterns.  */
              rtx base = XEXP (XEXP (x, 0), 1);
              rtx index = XEXP (XEXP (x, 0), 0);

              fprintf (file, "%s(%s)",
                       reg_names[REGNO (index)], reg_names[REGNO (base)]);
            }
          else
            output_address (XEXP (x, 0));
          break;
        default:
          output_address (XEXP (x, 0));
          break;
        }
    }
  else
    output_addr_const (file, x);
}
/* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */

void
output_global_address (FILE *file, rtx x, int round_constant)
{

  /* Imagine  (high (const (plus ...))).  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
    output_addr_const (file, x);
  else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
    {
      output_addr_const (file, x);
      fputs ("-$global$", file);
    }
  else if (GET_CODE (x) == CONST)
    {
      const char *sep = "";
      int offset = 0;		/* assembler wants -$global$ at end */
      rtx base = NULL_RTX;

      switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
        {
        case SYMBOL_REF:
          base = XEXP (XEXP (x, 0), 0);
          output_addr_const (file, base);
          break;
        case CONST_INT:
          offset = INTVAL (XEXP (XEXP (x, 0), 0));
          break;
        default:
          gcc_unreachable ();
        }

      switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
        {
        case SYMBOL_REF:
          base = XEXP (XEXP (x, 0), 1);
          output_addr_const (file, base);
          break;
        case CONST_INT:
          offset = INTVAL (XEXP (XEXP (x, 0), 1));
          break;
        default:
          gcc_unreachable ();
        }

      /* How bogus.  The compiler is apparently responsible for
         rounding the constant if it uses an LR field selector.

         The linker and/or assembler seem a better place since
         they have to do this kind of thing already.

         If we fail to do this, HP's optimizing linker may eliminate
         an addil, but not update the ldw/stw/ldo instruction that
         uses the result of the addil.  */
      if (round_constant)
        offset = ((offset + 0x1000) & ~0x1fff);

      switch (GET_CODE (XEXP (x, 0)))
        {
        case PLUS:
          if (offset < 0)
            {
              offset = -offset;
              sep = "-";
            }
          else
            sep = "+";
          break;

        case MINUS:
          gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
          sep = "-";
          break;

        default:
          gcc_unreachable ();
        }

      if (!read_only_operand (base, VOIDmode) && !flag_pic)
        fputs ("-$global$", file);
      if (offset)
        fprintf (file, "%s%d", sep, offset);
    }
  else
    output_addr_const (file, x);
}
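/* Worked example (illustrative): with round_constant set, an offset of
   0x1801 becomes (0x1801 + 0x1000) & ~0x1fff = 0x2000; i.e. offsets
   are rounded to the nearest 8k boundary, matching the LR field
   selector rounding that HP's linker applies to the addil.  */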
/* Output boilerplate text to appear at the beginning of the file.
   There are several possible versions.  */
#define aputs(x) fputs(x, asm_out_file)

static void
pa_file_start_level (void)
{
  if (TARGET_64BIT)
    aputs ("\t.LEVEL 2.0w\n");
  else if (TARGET_PA_20)
    aputs ("\t.LEVEL 2.0\n");
  else if (TARGET_PA_11)
    aputs ("\t.LEVEL 1.1\n");
  else
    aputs ("\t.LEVEL 1.0\n");
}

static void
pa_file_start_space (int sortspace)
{
  aputs ("\t.SPACE $PRIVATE$");
  if (sortspace)
    aputs (",SORT=16");
  aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
         "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
         "\n\t.SPACE $TEXT$");
  if (sortspace)
    aputs (",SORT=8");
  aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
         "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
}

static void
pa_file_start_file (int want_version)
{
  if (write_symbols != NO_DEBUG)
    {
      output_file_directive (asm_out_file, main_input_filename);
      if (want_version)
        aputs ("\t.version\t\"01.01\"\n");
    }
}

static void
pa_file_start_mcount (const char *aswhat)
{
  if (profile_flag)
    fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
}

static void
pa_elf_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_mcount ("ENTRY");
  pa_file_start_file (0);
}

static void
pa_som_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (0);
  aputs ("\t.IMPORT $global$,DATA\n"
         "\t.IMPORT $$dyncall,MILLICODE\n");
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}

static void
pa_linux_file_start (void)
{
  pa_file_start_file (1);
  pa_file_start_level ();
  pa_file_start_mcount ("CODE");
}

static void
pa_hpux64_gas_file_start (void)
{
  pa_file_start_level ();
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  if (profile_flag)
    ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
#endif
  pa_file_start_file (1);
}

static void
pa_hpux64_hpas_file_start (void)
{
  pa_file_start_level ();
  pa_file_start_space (1);
  pa_file_start_mcount ("CODE");
  pa_file_start_file (0);
}
#undef aputs
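/* Example preamble (illustrative): on a 32-bit PA 1.1 SOM target,
   pa_som_file_start produces roughly

	.LEVEL 1.1
	.SPACE $PRIVATE$
	.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
	.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
	.SPACE $TEXT$
	.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
	.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
	.IMPORT $global$,DATA
	.IMPORT $$dyncall,MILLICODE

   plus a .file directive when debug info is enabled.  */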
/* Search the deferred plabel list for SYMBOL and return its internal
   label.  If an entry for SYMBOL is not found, a new entry is created.  */

rtx
get_deferred_plabel (rtx symbol)
{
  const char *fname = XSTR (symbol, 0);
  size_t i;

  /* See if we have already put this function on the list of deferred
     plabels.  This list is generally small, so a linear search is not
     too ugly.  If it proves too slow replace it with something faster.  */
  for (i = 0; i < n_deferred_plabels; i++)
    if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
      break;

  /* If the deferred plabel list is empty, or this entry was not found
     on the list, create a new entry on the list.  */
  if (deferred_plabels == NULL || i == n_deferred_plabels)
    {
      tree id;

      if (deferred_plabels == 0)
        deferred_plabels = ggc_alloc_deferred_plabel ();
      else
        deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
                                          deferred_plabels,
                                          n_deferred_plabels + 1);

      i = n_deferred_plabels++;
      deferred_plabels[i].internal_label = gen_label_rtx ();
      deferred_plabels[i].symbol = symbol;

      /* Gross.  We have just implicitly taken the address of this
         function.  Mark it in the same manner as assemble_name.  */
      id = maybe_get_identifier (targetm.strip_name_encoding (fname));
      if (id)
        mark_referenced (id);
    }

  return deferred_plabels[i].internal_label;
}
static void
output_deferred_plabels (void)
{
  size_t i;

  /* If we have some deferred plabels, then we need to switch into the
     data or readonly data section, and align it to a 4 byte boundary
     before outputting the deferred plabels.  */
  if (n_deferred_plabels)
    {
      switch_to_section (flag_pic ? data_section : readonly_data_section);
      ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
    }

  /* Now output the deferred plabels.  */
  for (i = 0; i < n_deferred_plabels; i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "L",
                 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
      assemble_integer (deferred_plabels[i].symbol,
                        TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
    }
}
#ifdef HPUX_LONG_DOUBLE_LIBRARY
/* Initialize optabs to point to HPUX long double emulation routines.  */
static void
pa_hpux_init_libfuncs (void)
{
  set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
  set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
  set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
  set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
  set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
  set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
  set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
  set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
  set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

  set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
  set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
  set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
  set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
  set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
  set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
  set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");

  set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
  set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
  set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");

  set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
                    ? "__U_Qfcnvfxt_quad_to_sgl"
                    : "_U_Qfcnvfxt_quad_to_sgl");
  set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
  set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
  set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");

  set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
  set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
  set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
}
#endif
/* HP's millicode routines mean something special to the assembler.
   Keep track of which ones we have used.  */

enum millicodes { remI, remU, divI, divU, mulI, end1000 };
static void import_milli (enum millicodes);
static char imported[(int) end1000];
static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
static const char import_string[] = ".IMPORT $$....,MILLICODE";
#define MILLI_START 10

static void
import_milli (enum millicodes code)
{
  char str[sizeof (import_string)];

  if (!imported[(int) code])
    {
      imported[(int) code] = 1;
      strcpy (str, import_string);
      strncpy (str + MILLI_START, milli_names[(int) code], 4);
      output_asm_insn (str, 0);
    }
}
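/* Example (illustrative): import_milli (mulI) copies "mulI" over the
   "...." placeholder at offset MILLI_START, emitting

	.IMPORT $$mulI,MILLICODE

   The imported[] array guarantees that each directive is output at
   most once per translation unit.  */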
/* The register constraints have put the operands and return value in
   the proper registers.  */

const char *
output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
{
  import_milli (mulI);
  return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}
/* Emit the rtl for doing a division by a constant.  */

/* Do magic division millicodes exist for this value? */
const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};

/* We'll use an array to keep track of the magic millicodes and
   whether or not we've used them already. [n][0] is signed, [n][1] is
   unsigned.  */

static int div_milli[16][2];

int
emit_hpdiv_const (rtx *operands, int unsignedp)
{
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) > 0
      && INTVAL (operands[2]) < 16
      && magic_milli[INTVAL (operands[2])])
    {
      rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);

      emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
      emit
        (gen_rtx_PARALLEL
         (VOIDmode,
          gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
                                     gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                                     SImode,
                                                     gen_rtx_REG (SImode, 26),
                                                     operands[2])),
                     gen_rtx_CLOBBER (VOIDmode, operands[4]),
                     gen_rtx_CLOBBER (VOIDmode, operands[3]),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
                     gen_rtx_CLOBBER (VOIDmode, ret))));
      emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
      return 1;
    }
  return 0;
}
const char *
output_div_insn (rtx *operands, int unsignedp, rtx insn)
{
  int divisor;

  /* If the divisor is a constant, try to use one of the special
     millicode routines.  */
  if (GET_CODE (operands[0]) == CONST_INT)
    {
      static char buf[100];
      divisor = INTVAL (operands[0]);
      if (!div_milli[divisor][unsignedp])
        {
          div_milli[divisor][unsignedp] = 1;
          if (unsignedp)
            output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
          else
            output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
        }
      if (unsignedp)
        {
          sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
                   INTVAL (operands[0]));
          return output_millicode_call (insn,
                                        gen_rtx_SYMBOL_REF (SImode, buf));
        }
      else
        {
          sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
                   INTVAL (operands[0]));
          return output_millicode_call (insn,
                                        gen_rtx_SYMBOL_REF (SImode, buf));
        }
    }
  /* Divisor isn't a special constant.  */
  else
    {
      if (unsignedp)
        {
          import_milli (divU);
          return output_millicode_call (insn,
                                        gen_rtx_SYMBOL_REF (SImode, "$$divU"));
        }
      else
        {
          import_milli (divI);
          return output_millicode_call (insn,
                                        gen_rtx_SYMBOL_REF (SImode, "$$divI"));
        }
    }
}
/* Output a $$rem millicode to do mod.  */

const char *
output_mod_insn (int unsignedp, rtx insn)
{
  if (unsignedp)
    {
      import_milli (remU);
      return output_millicode_call (insn,
                                    gen_rtx_SYMBOL_REF (SImode, "$$remU"));
    }
  else
    {
      import_milli (remI);
      return output_millicode_call (insn,
                                    gen_rtx_SYMBOL_REF (SImode, "$$remI"));
    }
}
void
output_arg_descriptor (rtx call_insn)
{
  const char *arg_regs[4];
  enum machine_mode arg_mode;
  rtx link;
  int i, output_flag = 0;
  int regno;

  /* We neither need nor want argument location descriptors for the
     64bit runtime environment or the ELF32 environment.  */
  if (TARGET_64BIT || TARGET_ELF32)
    return;

  for (i = 0; i < 4; i++)
    arg_regs[i] = 0;

  /* Specify explicitly that no argument relocations should take place
     if using the portable runtime calling conventions.  */
  if (TARGET_PORTABLE_RUNTIME)
    {
      fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
             asm_out_file);
      return;
    }

  gcc_assert (GET_CODE (call_insn) == CALL_INSN);
  for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
       link; link = XEXP (link, 1))
    {
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
             && GET_CODE (XEXP (use, 0)) == REG
             && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));
      if (regno >= 23 && regno <= 26)
        {
          arg_regs[26 - regno] = "GR";
          if (arg_mode == DImode)
            arg_regs[25 - regno] = "GR";
        }
      else if (regno >= 32 && regno <= 39)
        {
          if (arg_mode == SFmode)
            arg_regs[(regno - 32) / 2] = "FR";
          else
            {
#ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
              arg_regs[(regno - 34) / 2] = "FR";
              arg_regs[(regno - 34) / 2 + 1] = "FU";
#else
              arg_regs[(regno - 34) / 2] = "FU";
              arg_regs[(regno - 34) / 2 + 1] = "FR";
#endif
            }
        }
    }
  fputs ("\t.CALL ", asm_out_file);
  for (i = 0; i < 4; i++)
    {
      if (arg_regs[i])
        {
          if (output_flag++)
            fputc (',', asm_out_file);
          fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
        }
    }
  fputc ('\n', asm_out_file);
}
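/* Example (illustrative): for a call passing an int in %r26 and a
   second int in %r25, the loop above produces

	.CALL ARGW0=GR,ARGW1=GR

   telling HP's linker which argument words may require parameter
   relocation stubs.  The descriptors differ for floating-point
   arguments ("FR"/"FU").  */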
static reg_class_t
pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                     enum machine_mode mode, secondary_reload_info *sri)
{
  int is_symbolic, regno;
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Handle the easy stuff first.  */
  if (rclass == R1_REGS)
    return NO_REGS;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
        return NO_REGS;
    }
  else
    regno = -1;

  /* If we have something like (mem (mem (...)), we can safely assume the
     inner MEM will end up in a general register after reloading, so there's
     no need for a secondary reload.  */
  if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
    return NO_REGS;

  /* Trying to load a constant into a FP register during PIC code
     generation requires %r1 as a scratch register.  */
  if (flag_pic
      && (mode == SImode || mode == DImode)
      && FP_REG_CLASS_P (rclass)
      && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
                    : CODE_FOR_reload_indi_r1);
      return NO_REGS;
    }

  /* Profiling showed the PA port spends about 1.3% of its compilation
     time in true_regnum from calls inside pa_secondary_reload_class.  */
  if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* In order to allow 14-bit displacements in integer loads and stores,
     we need to prevent reload from generating out of range integer mode
     loads and stores to the floating point registers.  Previously, we
     used to call for a secondary reload and have emit_move_sequence()
     fix the instruction sequence.  However, reload occasionally wouldn't
     generate the reload and we would end up with an invalid REG+D memory
     address.  So, now we use an intermediate general register for most
     memory loads and stores.  */
  if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
      && GET_MODE_CLASS (mode) == MODE_INT
      && FP_REG_CLASS_P (rclass))
    {
      /* Reload passes (mem:SI (reg/f:DI 30 %r30) when it wants to check
         the secondary reload needed for a pseudo.  It never passes a
         REG+D address.  */
      if (GET_CODE (x) == MEM)
        {
          x = XEXP (x, 0);

          /* We don't need an intermediate for indexed and LO_SUM DLT
             memory addresses.  When INT14_OK_STRICT is true, it might
             appear that we could directly allow register indirect
             memory addresses.  However, this doesn't work because we
             don't support SUBREGs in floating-point register copies
             and reload doesn't tell us when it's going to use a SUBREG.  */
          if (IS_INDEX_ADDR_P (x)
              || IS_LO_SUM_DLT_ADDR_P (x))
            return NO_REGS;

          /* Otherwise, we need an intermediate general register.  */
          return GENERAL_REGS;
        }

      /* Request a secondary reload with a general scratch register
         for everything else.  ??? Could symbolic operands be handled
         directly when generating non-pic PA 2.0 code?  */
      sri->icode = (in_p
                    ? direct_optab_handler (reload_in_optab, mode)
                    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* We need a secondary register (GPR) for copies between the SAR
     and anything other than a general register.  */
  if (rclass == SHIFT_REGS && (regno <= 0 || regno >= 32))
    {
      sri->icode = (in_p
                    ? direct_optab_handler (reload_in_optab, mode)
                    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* A SAR<->FP register copy requires a secondary register (GPR) as
     well as secondary memory.  */
  if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
      && (REGNO_REG_CLASS (regno) == SHIFT_REGS
      && FP_REG_CLASS_P (rclass)))
    {
      sri->icode = (in_p
                    ? direct_optab_handler (reload_in_optab, mode)
                    : direct_optab_handler (reload_out_optab, mode));
      return NO_REGS;
    }

  /* Secondary reloads of symbolic operands require %r1 as a scratch
     register when we're generating PIC code and when the operand isn't
     readonly.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  /* Profiling has shown GCC spends about 2.6% of its compilation
     time in symbolic_operand from calls inside pa_secondary_reload_class.
     So, we use an inline copy to avoid useless work.  */
  switch (GET_CODE (x))
    {
      rtx op;

    case SYMBOL_REF:
      is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
      break;
    case LABEL_REF:
      is_symbolic = 1;
      break;
    case CONST:
      op = XEXP (x, 0);
      is_symbolic = (GET_CODE (op) == PLUS
                     && ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
                          && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
                         || GET_CODE (XEXP (op, 0)) == LABEL_REF)
                     && GET_CODE (XEXP (op, 1)) == CONST_INT);
      break;
    default:
      is_symbolic = 0;
      break;
    }

  if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
    {
      gcc_assert (mode == SImode || mode == DImode);
      sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
                    : CODE_FOR_reload_indi_r1);
    }

  return NO_REGS;
}
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  The argument pointer
   is only marked as live on entry by df-scan when it is a fixed
   register.  It isn't a fixed register in the 64-bit runtime,
   so we need to mark it here.  */

static void
pa_extra_live_on_entry (bitmap regs)
{
  if (TARGET_64BIT)
    bitmap_set_bit (regs, ARG_POINTER_REGNUM);
}
/* Implement EH_RETURN_HANDLER_RTX.  The MEM needs to be volatile
   to prevent it from being deleted.  */

rtx
pa_eh_return_handler_rtx (void)
{
  rtx tmp;

  tmp = gen_rtx_PLUS (word_mode, frame_pointer_rtx,
                      TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
  tmp = gen_rtx_MEM (word_mode, tmp);
  tmp->volatil = 1;
  return tmp;
}
/* In the 32-bit runtime, arguments larger than eight bytes are passed
   by invisible reference.  As a GCC extension, we also pass anything
   with a zero or variable size by reference.

   The 64-bit runtime does not describe passing any types by invisible
   reference.  The internals of GCC can't currently handle passing
   empty structures, and zero or variable length arrays when they are
   not passed entirely on the stack or by reference.  Thus, as a GCC
   extension, we pass these types by reference.  The HP compiler doesn't
   support these types, so hopefully there shouldn't be any compatibility
   issues.  This may have to be revisited when HP releases a C99 compiler
   or updates the ABI.  */

static bool
pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
                      enum machine_mode mode, const_tree type,
                      bool named ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  if (TARGET_64BIT)
    return size <= 0;
  else
    return size <= 0 || size > 8;
}
enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
  if (mode == BLKmode
      || (TARGET_64BIT
          && type
          && (AGGREGATE_TYPE_P (type)
              || TREE_CODE (type) == COMPLEX_TYPE
              || TREE_CODE (type) == VECTOR_TYPE)))
    {
      /* Return none if justification is not required.  */
      if (type
          && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
          && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
        return none;

      /* The directions set here are ignored when a BLKmode argument larger
         than a word is placed in a register.  Different code is used for
         the stack and registers.  This makes it difficult to have a
         consistent data representation for both the stack and registers.
         For both runtimes, the justification and padding for arguments on
         the stack and in registers should be identical.  */
      if (TARGET_64BIT)
        /* The 64-bit runtime specifies left justification for aggregates.  */
        return upward;
      else
        /* The 32-bit runtime architecture specifies right justification.
           When the argument is passed on the stack, the argument is padded
           with garbage on the left.  The HP compiler pads with zeros.  */
        return downward;
    }

  if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
    return downward;
  else
    return none;
}
/* Do what is necessary for `va_start'.  We look at the current function
   to determine if stdargs or varargs is used and fill in an initial
   va_list.  A pointer to this constructor is returned.  */

static rtx
hppa_builtin_saveregs (void)
{
  rtx offset, dest;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
                ? UNITS_PER_WORD : 0);

  if (argadj)
    offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  if (TARGET_64BIT)
    {
      int i, off;

      /* Adjust for varargs/stdarg differences.  */
      if (argadj)
        offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
      else
        offset = crtl->args.arg_offset_rtx;

      /* We need to save %r26 .. %r19 inclusive starting at offset -64
         from the incoming arg pointer and growing to larger addresses.  */
      for (i = 26, off = -64; i >= 19; i--, off += 8)
        emit_move_insn (gen_rtx_MEM (word_mode,
                                     plus_constant (arg_pointer_rtx, off)),
                        gen_rtx_REG (word_mode, i));

      /* The incoming args pointer points just beyond the flushback area;
         normally this is not a serious concern.  However, when we are doing
         varargs/stdargs we want to make the arg pointer point to the start
         of the incoming argument area.  */
      emit_move_insn (virtual_incoming_args_rtx,
                      plus_constant (arg_pointer_rtx, -64));

      /* Now return a pointer to the first anonymous argument.  */
      return copy_to_reg (expand_binop (Pmode, add_optab,
                                        virtual_incoming_args_rtx,
                                        offset, 0, 0, OPTAB_LIB_WIDEN));
    }

  /* Store general registers on the stack.  */
  dest = gen_rtx_MEM (BLKmode,
                      plus_constant (crtl->args.internal_arg_pointer,
                                     -16));
  set_mem_alias_set (dest, get_varargs_alias_set ());
  set_mem_align (dest, BITS_PER_WORD);
  move_block_from_reg (23, dest, 4);

  /* move_block_from_reg will emit code to store the argument registers
     individually as scalar stores.

     However, other insns may later load from the same addresses for
     a structure load (passing a struct to a varargs routine).

     The alias code assumes that such aliasing can never happen, so we
     have to keep memory referencing insns from moving up beyond the
     last argument register store.  So we emit a blockage insn here.  */
  emit_insn (gen_blockage ());

  return copy_to_reg (expand_binop (Pmode, add_optab,
                                    crtl->args.internal_arg_pointer,
                                    offset, 0, 0, OPTAB_LIB_WIDEN));
}
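/* Layout sketch (illustrative, 32-bit case): move_block_from_reg stores
   %r23..%r26 into the block at internal_arg_pointer - 16, so arg word 0
   (%r26) lands nearest the arg pointer and later words at lower
   addresses, matching the downward-growing argument area:

	ap-4:	%r26	(arg word 0)
	ap-8:	%r25	(arg word 1)
	ap-12:	%r24	(arg word 2)
	ap-16:	%r23	(arg word 3)
*/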
static void
hppa_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
static tree
hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
                           gimple_seq *post_p)
{
  if (TARGET_64BIT)
    {
      /* Args grow upward.  We can use the generic routines.  */
      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }
  else /* !TARGET_64BIT */
    {
      tree ptr = build_pointer_type (type);
      tree valist_type;
      tree t, u;
      unsigned int size, ofs;
      bool indirect;

      indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
      if (indirect)
        {
          type = ptr;
          ptr = build_pointer_type (type);
        }
      size = int_size_in_bytes (type);
      valist_type = TREE_TYPE (valist);

      /* Args grow down.  Not handled by generic routines.  */

      u = fold_convert (sizetype, size_in_bytes (type));
      u = fold_build1 (NEGATE_EXPR, sizetype, u);
      t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);

      /* Align to 4 or 8 byte boundary depending on argument size.  */

      u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
      t = fold_convert (valist_type, t);

      t = build2 (MODIFY_EXPR, valist_type, valist, t);

      ofs = (8 - size) % 4;
      if (ofs != 0)
        {
          u = size_int (ofs);
          t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
        }

      t = fold_convert (ptr, t);
      t = build_va_arg_indirect_ref (t);

      if (indirect)
        t = build_va_arg_indirect_ref (t);

      return t;
    }
}
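/* Worked example (illustrative, 32-bit runtime): va_arg for a 2-byte
   short decrements valist by 2, rounds down to a 4-byte boundary
   (& -4), then adds ofs = (8 - 2) % 4 = 2 so the big-endian value is
   read right-justified in its word; an 8-byte double instead rounds
   down to an 8-byte boundary (& -8) with no offset adjustment.  */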
/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   Currently, TImode is not valid as the HP 64-bit runtime documentation
   doesn't document the alignment and calling conventions for this type.
   Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
   2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE.  */

static bool
pa_scalar_mode_supported_p (enum machine_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
        return true;
      if (precision == SHORT_TYPE_SIZE)
        return true;
      if (precision == INT_TYPE_SIZE)
        return true;
      if (precision == LONG_TYPE_SIZE)
        return true;
      if (precision == LONG_LONG_TYPE_SIZE)
        return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
        return true;
      if (precision == DOUBLE_TYPE_SIZE)
        return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
        return true;
      return false;

    case MODE_DECIMAL_FLOAT:
      return false;

    default:
      gcc_unreachable ();
    }
}
/* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
   it branches to the next real instruction.  Otherwise, return FALSE.  */

static bool
branch_to_delay_slot_p (rtx insn)
{
  if (dbr_sequence_length ())
    return false;

  return next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn);
}
/* Return TRUE if INSN, a jump insn, needs a nop in its delay slot.

   This occurs when INSN has an unfilled delay slot and is followed
   by an ASM_INPUT.  Disaster can occur if the ASM_INPUT is empty and
   the jump branches into the delay slot.  So, we add a nop in the delay
   slot just to be safe.  This messes up our instruction count, but we
   don't know how big the ASM_INPUT insn is anyway.  */

static bool
branch_needs_nop_p (rtx insn)
{
  rtx next_insn;

  if (dbr_sequence_length ())
    return false;

  next_insn = next_real_insn (insn);
  return GET_CODE (PATTERN (next_insn)) == ASM_INPUT;
}
/* This routine handles all the normal conditional branch sequences we
   might need to generate.  It handles compare immediate vs compare
   register, nullification of delay slots, varying length branches,
   negated branches, and all combinations of the above.  It returns the
   output appropriate to emit the branch corresponding to all given
   parameters.  */

const char *
output_cbranch (rtx *operands, int negated, rtx insn)
{
  static char buf[100];
  int useskip = 0;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;

  /* A conditional branch to the following instruction (e.g. the delay slot)
     is asking for a disaster.  This can happen when not optimizing and
     when jump optimization fails.

     While it is usually safe to emit nothing, this can fail if the
     preceding instruction is a nullified branch with an empty delay
     slot and the same branch target as this branch.  We could check
     for this but jump optimization should eliminate nop jumps.  It
     is always safe to emit a nop.  */
  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* The doubleword form of the cmpib instruction doesn't have the LEU
     and GTU conditions while the cmpb instruction does.  Since we accept
     zero for cmpb, we must ensure that we use cmpb for the comparison.  */
  if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
    operands[2] = gen_rtx_REG (DImode, 0);
  if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
    operands[1] = gen_rtx_REG (DImode, 0);

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with a
     comclr instruction.  This avoids a single cycle penalty due to
     mis-predicted branch if we fall through (branch not taken).  */
  if (length == 4
      && next_real_insn (insn) != 0
      && get_attr_length (next_real_insn (insn)) == 4
      && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
      && nullify)
    useskip = 1;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
    case 4:
      if (useskip)
        strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
      else
        strcpy (buf, "{com%I2b,|cmp%I2b,}");
      if (GET_MODE (operands[1]) == DImode)
        strcat (buf, "*");
      if (negated)
        strcat (buf, "%B3");
      else
        strcat (buf, "%S3");
      if (useskip)
        strcat (buf, " %2,%r1,%%r0");
      else if (nullify)
        {
          if (branch_needs_nop_p (insn))
            strcat (buf, ",n %2,%r1,%0%#");
          else
            strcat (buf, ",n %2,%r1,%0");
        }
      else
        strcat (buf, " %2,%r1,%0");
      break;

      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
    case 8:
      /* Handle weird backwards branch with a filled delay slot
         which is nullified.  */
      if (dbr_sequence_length () != 0
          && ! forward_branch_p (insn)
          && nullify)
        {
          strcpy (buf, "{com%I2b,|cmp%I2b,}");
          if (GET_MODE (operands[1]) == DImode)
            strcat (buf, "*");
          if (negated)
            strcat (buf, "%S3");
          else
            strcat (buf, "%B3");
          strcat (buf, ",n %2,%r1,.+12\n\tb %0");
        }
      /* Handle short backwards branch with an unfilled delay slot.
         Using a comb;nop rather than comiclr;bl saves 1 cycle for both
         taken and untaken branches.  */
      else if (dbr_sequence_length () == 0
               && ! forward_branch_p (insn)
               && INSN_ADDRESSES_SET_P ()
               && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
        {
          strcpy (buf, "{com%I2b,|cmp%I2b,}");
          if (GET_MODE (operands[1]) == DImode)
            strcat (buf, "*");
          if (negated)
            strcat (buf, "%B3 %2,%r1,%0%#");
          else
            strcat (buf, "%S3 %2,%r1,%0%#");
        }
      else
        {
          strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
          if (GET_MODE (operands[1]) == DImode)
            strcat (buf, "*");
          if (negated)
            strcat (buf, "%S3");
          else
            strcat (buf, "%B3");
          if (nullify)
            strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
          else
            strcat (buf, " %2,%r1,%%r0\n\tb %0");
        }
      break;

    default:
      /* The reversed conditional branch must branch over one additional
         instruction if the delay slot is filled and needs to be extracted
         by output_lbranch.  If the delay slot is empty or this is a
         nullified forward branch, the instruction after the reversed
         condition branch must be nullified.  */
      if (dbr_sequence_length () == 0
          || (nullify && forward_branch_p (insn)))
        {
          nullify = 1;
          xdelay = 0;
          operands[4] = GEN_INT (length);
        }
      else
        {
          xdelay = 1;
          operands[4] = GEN_INT (length + 4);
        }

      /* Create a reversed conditional branch which branches around
         the following insns.  */
      if (GET_MODE (operands[1]) != DImode)
        {
          if (nullify)
            {
              if (negated)
                strcpy (buf,
                  "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
              else
                strcpy (buf,
                  "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
            }
          else
            {
              if (negated)
                strcpy (buf,
                  "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
              else
                strcpy (buf,
                  "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
            }
        }
      else
        {
          if (nullify)
            {
              if (negated)
                strcpy (buf,
                  "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
              else
                strcpy (buf,
                  "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
            }
          else
            {
              if (negated)
                strcpy (buf,
                  "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
              else
                strcpy (buf,
                  "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
            }
        }

      output_asm_insn (buf, operands);
      return output_lbranch (operands[0], insn, xdelay);
    }
  return buf;
}
/* This routine handles output of long unconditional branches that
   exceed the maximum range of a simple branch instruction.  Since
   we don't have a register available for the branch, we save register
   %r1 in the frame marker, load the branch destination DEST into %r1,
   execute the branch, and restore %r1 in the delay slot of the branch.

   Since long branches may have an insn in the delay slot and the
   delay slot is used to restore %r1, we in general need to extract
   this insn and execute it before the branch.  However, to facilitate
   use of this function by conditional branches, we also provide an
   option to not extract the delay insn so that it will be emitted
   after the long branch.  So, if there is an insn in the delay slot,
   it is extracted if XDELAY is nonzero.

   The lengths of the various long-branch sequences are 20, 16 and 24
   bytes for the portable runtime, non-PIC and PIC cases, respectively.  */
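/* For reference, the three sequences look roughly like this (a sketch
   assembled from the output_asm_insn calls below, with the %r1
   save/restore around them elided):

     portable runtime:  ldil L'dest,%r1 ; ldo R'dest(%r1),%r1 ; bv %r0(%r1)
     PIC:               bl .+8,%r1 ; addil/ldo of a pc-relative offset
                        into %r1 ; bv %r0(%r1)
     non-PIC:           ldil L'dest,%r1 ; be R'dest(%sr4,%r1)  */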
const char *
output_lbranch (rtx dest, rtx insn, int xdelay)
{
  rtx xoperands[2];

  xoperands[0] = dest;
  /* First, free up the delay slot.  */
  if (xdelay && dbr_sequence_length () != 0)
    {
      /* We can't handle a jump in the delay slot.  */
      gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);

      final_scan_insn (NEXT_INSN (insn), asm_out_file,
                       optimize, 0, NULL);

      /* Now delete the delay insn.  */
      SET_INSN_DELETED (NEXT_INSN (insn));
    }
  /* Output an insn to save %r1.  The runtime documentation doesn't
     specify whether the "Clean Up" slot in the caller's frame can
     be clobbered by the callee.  It isn't copied by HP's builtin
     alloca, so this suggests that it can be clobbered if necessary.
     The "Static Link" location is copied by HP builtin alloca, so
     we avoid using it.  Using the cleanup slot might be a problem
     if we have to interoperate with languages that pass cleanup
     information.  However, it should be possible to handle these
     situations with GCC's asm feature.

     The "Current RP" slot is reserved for the called procedure, so
     we try to use it when we don't have a frame of our own.  It's
     rather unlikely that we won't have a frame when we need to emit
     a very long branch.

     Really the way to go long term is a register scavenger; goto
     the target of the jump and find a register which we can use
     as a scratch to hold the value in %r1.  Then, we wouldn't have
     to free up the delay slot or clobber a slot that may be needed
     for other purposes.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        /* Use the return pointer slot in the frame marker.  */
        output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
      else
        /* Use the slot at -40 in the frame marker since HP builtin
           alloca doesn't copy it.  */
        output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        /* Use the return pointer slot in the frame marker.  */
        output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
      else
        /* Use the "Clean Up" slot in the frame marker.  In GCC,
           the only other use of this location is for copying a
           floating point double argument from a floating-point
           register to two general registers.  The copy is done
           as an "atomic" operation when outputting a call, so it
           won't interfere with our using the location here.  */
        output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
    }
  if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
      if (TARGET_SOM || !TARGET_GAS)
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
          output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
        }
      else
        {
          output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
          output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
        }
      output_asm_insn ("bv %%r0(%%r1)", xoperands);
    }
  else
    /* Now output a very long branch to the original target.  */
    output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
  /* Now restore the value of %r1 in the delay slot.  */
  if (TARGET_64BIT)
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        return "ldd -16(%%r30),%%r1";
      else
        return "ldd -40(%%r30),%%r1";
    }
  else
    {
      if (actual_fsize == 0 && !df_regs_ever_live_p (2))
        return "ldw -20(%%r30),%%r1";
      else
        return "ldw -12(%%r30),%%r1";
    }
}
/* This routine handles all the branch-on-bit conditional branch sequences we
   might need to generate.  It handles nullification of delay slots,
   varying length branches, negated branches and all combinations of the
   above.  It returns the appropriate output template to emit the branch.  */

const char *
output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
{
  static char buf[100];
  int useskip = 0;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;
  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to a
     mis-predicted branch if we fall through (branch not taken).  */
  if (length == 4
      && next_real_insn (insn) != 0
      && get_attr_length (next_real_insn (insn)) == 4
      && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
      && nullify)
    useskip = 1;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
      case 8:
        if (useskip)
          strcpy (buf, "{extrs,|extrw,s,}");
        else
          strcpy (buf, "bb,");
        if (useskip && GET_MODE (operands[0]) == DImode)
          strcpy (buf, "extrd,s,*");
        else if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        if ((which == 0 && negated)
            || (which == 1 && ! negated))
          strcat (buf, ">=");
        else
          strcat (buf, "<");
        if (useskip)
          strcat (buf, " %0,%1,1,%%r0");
        else if (nullify && negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %0,%1,%3%#");
            else
              strcat (buf, ",n %0,%1,%3");
          }
        else if (nullify && ! negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, ",n %0,%1,%2%#");
            else
              strcat (buf, ",n %0,%1,%2");
          }
        else if (! nullify && negated)
          strcat (buf, " %0,%1,%3");
        else if (! nullify && ! negated)
          strcat (buf, " %0,%1,%2");
        break;
      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
      case 20:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "bb,");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (negated)
              strcat (buf, ",n %0,%1,.+12\n\tb %3");
            else
              strcat (buf, ",n %0,%1,.+12\n\tb %2");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a bb;nop rather than extrs;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "bb,");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, ">=");
            else
              strcat (buf, "<");
            if (negated)
              strcat (buf, " %0,%1,%3%#");
            else
              strcat (buf, " %0,%1,%2%#");
          }
        else
          {
            if (GET_MODE (operands[0]) == DImode)
              strcpy (buf, "extrd,s,*");
            else
              strcpy (buf, "{extrs,|extrw,s,}");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (nullify && negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
            else if (nullify && ! negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
            else if (negated)
              strcat (buf, " %0,%1,1,%%r0\n\tb %3");
            else
              strcat (buf, " %0,%1,1,%%r0\n\tb %2");
          }
        break;
      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        else
          strcpy (buf, "bb,");
        if ((which == 0 && negated)
            || (which == 1 && !negated))
          strcat (buf, "<");
        else
          strcat (buf, ">=");
        if (nullify)
          strcat (buf, ",n %0,%1,.+%4");
        else
          strcat (buf, " %0,%1,.+%4");
        output_asm_insn (buf, operands);
        return output_lbranch (negated ? operands[3] : operands[2],
                               insn, xdelay);
    }
  return buf;
}
/* This routine handles all the branch-on-variable-bit conditional branch
   sequences we might need to generate.  It handles nullification of delay
   slots, varying length branches, negated branches and all combinations
   of the above.  It returns the appropriate output template to emit the
   branch.  */

const char *
output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
{
  static char buf[100];
  int useskip = 0;
  int nullify = INSN_ANNULLED_BRANCH_P (insn);
  int length = get_attr_length (insn);
  int xdelay;
  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  I do not think this can happen as this pattern
     is only used when optimizing; jump optimization should eliminate the
     jump.  But be prepared just in case.  */

  if (branch_to_delay_slot_p (insn))
    return "nop";

  /* If this is a long branch with its delay slot unfilled, set `nullify'
     as it can nullify the delay slot and save a nop.  */
  if (length == 8 && dbr_sequence_length () == 0)
    nullify = 1;

  /* If this is a short forward conditional branch which did not get
     its delay slot filled, the delay slot can still be nullified.  */
  if (! nullify && length == 4 && dbr_sequence_length () == 0)
    nullify = forward_branch_p (insn);

  /* A forward branch over a single nullified insn can be done with an
     extrs instruction.  This avoids a single cycle penalty due to a
     mis-predicted branch if we fall through (branch not taken).  */
  if (length == 4
      && next_real_insn (insn) != 0
      && get_attr_length (next_real_insn (insn)) == 4
      && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
      && nullify)
    useskip = 1;

  switch (length)
    {
      /* All short conditional branches except backwards with an unfilled
         delay slot.  */
      case 4:
      case 8:
        if (useskip)
          strcpy (buf, "{vextrs,|extrw,s,}");
        else
          strcpy (buf, "{bvb,|bb,}");
        if (useskip && GET_MODE (operands[0]) == DImode)
          strcpy (buf, "extrd,s,*");
        else if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        if ((which == 0 && negated)
            || (which == 1 && ! negated))
          strcat (buf, ">=");
        else
          strcat (buf, "<");
        if (useskip)
          strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
        else if (nullify && negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
            else
              strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
          }
        else if (nullify && ! negated)
          {
            if (branch_needs_nop_p (insn))
              strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
            else
              strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
          }
        else if (! nullify && negated)
          strcat (buf, "{ %0,%3| %0,%%sar,%3}");
        else if (! nullify && ! negated)
          strcat (buf, "{ %0,%2| %0,%%sar,%2}");
        break;
      /* All long conditionals.  Note a short backward branch with an
         unfilled delay slot is treated just like a long backward branch
         with an unfilled delay slot.  */
      case 20:
        /* Handle weird backwards branch with a filled delay slot
           which is nullified.  */
        if (dbr_sequence_length () != 0
            && ! forward_branch_p (insn)
            && nullify)
          {
            strcpy (buf, "{bvb,|bb,}");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (negated)
              strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
            else
              strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
          }
        /* Handle short backwards branch with an unfilled delay slot.
           Using a bb;nop rather than extrs;bl saves 1 cycle for both
           taken and untaken branches.  */
        else if (dbr_sequence_length () == 0
                 && ! forward_branch_p (insn)
                 && INSN_ADDRESSES_SET_P ()
                 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                    - INSN_ADDRESSES (INSN_UID (insn)) - 8))
          {
            strcpy (buf, "{bvb,|bb,}");
            if (GET_MODE (operands[0]) == DImode)
              strcat (buf, "*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, ">=");
            else
              strcat (buf, "<");
            if (negated)
              strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
            else
              strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
          }
        else
          {
            strcpy (buf, "{vextrs,|extrw,s,}");
            if (GET_MODE (operands[0]) == DImode)
              strcpy (buf, "extrd,s,*");
            if ((which == 0 && negated)
                || (which == 1 && ! negated))
              strcat (buf, "<");
            else
              strcat (buf, ">=");
            if (nullify && negated)
              strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
            else if (nullify && ! negated)
              strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
            else if (negated)
              strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
            else
              strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
          }
        break;
      default:
        /* The reversed conditional branch must branch over one additional
           instruction if the delay slot is filled and needs to be extracted
           by output_lbranch.  If the delay slot is empty or this is a
           nullified forward branch, the instruction after the reversed
           condition branch must be nullified.  */
        if (dbr_sequence_length () == 0
            || (nullify && forward_branch_p (insn)))
          {
            nullify = 1;
            xdelay = 0;
            operands[4] = GEN_INT (length);
          }
        else
          {
            xdelay = 1;
            operands[4] = GEN_INT (length + 4);
          }

        if (GET_MODE (operands[0]) == DImode)
          strcpy (buf, "bb,*");
        else
          strcpy (buf, "{bvb,|bb,}");
        if ((which == 0 && negated)
            || (which == 1 && !negated))
          strcat (buf, "<");
        else
          strcat (buf, ">=");
        if (nullify)
          strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
        else
          strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
        output_asm_insn (buf, operands);
        return output_lbranch (negated ? operands[3] : operands[2],
                               insn, xdelay);
    }
  return buf;
}
/* Return the output template for emitting a dbra type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
output_dbra (rtx *operands, rtx insn, int which_alternative)
{
  int length = get_attr_length (insn);
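  /* A gloss inferred from the templates below, not from the original
     comments: operand 0 appears to be the loop counter, operand 1 the
     increment, operand 2 the comparison code, operand 3 the branch
     target, and operand 4 a general-register scratch used by the FP
     and memory reload alternatives.  */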
  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
        return "ldo %1(%0),%0";
      else if (which_alternative == 1)
        {
          output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
          output_asm_insn ("ldw -16(%%r30),%4", operands);
          output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
          return "{fldws|fldw} -16(%%r30),%0";
        }
      else
        {
          output_asm_insn ("ldw %0,%4", operands);
          return "ldo %1(%4),%4\n\tstw %4,%0";
        }
    }
  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
         as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
        nullify = 1;

      /* If this is a short forward conditional branch which did not get
         its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
        nullify = forward_branch_p (insn);

      switch (length)
        {
        case 4:
          if (nullify)
            {
              if (branch_needs_nop_p (insn))
                return "addib,%C2,n %1,%0,%3%#";
              else
                return "addib,%C2,n %1,%0,%3";
            }
          else
            return "addib,%C2 %1,%0,%3";

        case 8:
          /* Handle weird backwards branch with a filled delay slot
             which is nullified.  */
          if (dbr_sequence_length () != 0
              && ! forward_branch_p (insn)
              && nullify)
            return "addib,%N2,n %1,%0,.+12\n\tb %3";
          /* Handle short backwards branch with an unfilled delay slot.
             Using an addb;nop rather than addi;bl saves 1 cycle for both
             taken and untaken branches.  */
          else if (dbr_sequence_length () == 0
                   && ! forward_branch_p (insn)
                   && INSN_ADDRESSES_SET_P ()
                   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
            return "addib,%C2 %1,%0,%3%#";

          /* Handle normal cases.  */
          if (nullify)
            return "addi,%N2 %1,%0,%0\n\tb,n %3";
          else
            return "addi,%N2 %1,%0,%0\n\tb %3";

        default:
          /* The reversed conditional branch must branch over one additional
             instruction if the delay slot is filled and needs to be extracted
             by output_lbranch.  If the delay slot is empty or this is a
             nullified forward branch, the instruction after the reversed
             condition branch must be nullified.  */
          if (dbr_sequence_length () == 0
              || (nullify && forward_branch_p (insn)))
            {
              nullify = 1;
              xdelay = 0;
              operands[4] = GEN_INT (length);
            }
          else
            {
              xdelay = 1;
              operands[4] = GEN_INT (length + 4);
            }

          if (nullify)
            output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
          else
            output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);

          return output_lbranch (operands[3], insn, xdelay);
        }
    }
  /* Deal with gross reload from FP register case.  */
  else if (which_alternative == 1)
    {
      /* Move loop counter from FP register to MEM then into a GR,
         increment the GR, store the GR into MEM, and finally reload
         the FP register from MEM from within the branch's delay slot.  */
      output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
                       operands);
      output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
      if (length == 24)
        return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 28)
        return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
        {
          operands[5] = GEN_INT (length - 16);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
          output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
          return output_lbranch (operands[3], insn, 0);
        }
    }
  /* Deal with gross reload from memory case.  */
  else
    {
      /* Reload loop counter from memory, the store back to memory
         happens in the branch's delay slot.  */
      output_asm_insn ("ldw %0,%4", operands);
      if (length == 12)
        return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
      else if (length == 16)
        return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
      else
        {
          operands[5] = GEN_INT (length - 4);
          output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
          return output_lbranch (operands[3], insn, 0);
        }
    }
}
/* Return the output template for emitting a movb type insn.

   Note it may perform some output operations on its own before
   returning the final output string.  */

const char *
output_movb (rtx *operands, rtx insn, int which_alternative,
             int reverse_comparison)
{
  int length = get_attr_length (insn);
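  /* A gloss inferred from the templates below, not from the original
     comments: operand 1 appears to be the source, operand 0 the
     destination (a GR, an FP register, a memory word, or SAR depending
     on the alternative), operand 2 the comparison code, and operand 3
     the branch target.  */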
  /* A conditional branch to the following instruction (e.g. the delay slot) is
     asking for a disaster.  Be prepared!  */

  if (branch_to_delay_slot_p (insn))
    {
      if (which_alternative == 0)
        return "copy %1,%0";
      else if (which_alternative == 1)
        {
          output_asm_insn ("stw %1,-16(%%r30)", operands);
          return "{fldws|fldw} -16(%%r30),%0";
        }
      else if (which_alternative == 2)
        return "stw %1,%0";
      else
        return "mtsar %r1";
    }

  /* Support the second variant.  */
  if (reverse_comparison)
    PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));

  if (which_alternative == 0)
    {
      int nullify = INSN_ANNULLED_BRANCH_P (insn);
      int xdelay;

      /* If this is a long branch with its delay slot unfilled, set `nullify'
         as it can nullify the delay slot and save a nop.  */
      if (length == 8 && dbr_sequence_length () == 0)
        nullify = 1;

      /* If this is a short forward conditional branch which did not get
         its delay slot filled, the delay slot can still be nullified.  */
      if (! nullify && length == 4 && dbr_sequence_length () == 0)
        nullify = forward_branch_p (insn);

      switch (length)
        {
        case 4:
          if (nullify)
            {
              if (branch_needs_nop_p (insn))
                return "movb,%C2,n %1,%0,%3%#";
              else
                return "movb,%C2,n %1,%0,%3";
            }
          else
            return "movb,%C2 %1,%0,%3";

        case 8:
          /* Handle weird backwards branch with a filled delay slot
             which is nullified.  */
          if (dbr_sequence_length () != 0
              && ! forward_branch_p (insn)
              && nullify)
            return "movb,%N2,n %1,%0,.+12\n\tb %3";

          /* Handle short backwards branch with an unfilled delay slot.
             Using a movb;nop rather than or;bl saves 1 cycle for both
             taken and untaken branches.  */
          else if (dbr_sequence_length () == 0
                   && ! forward_branch_p (insn)
                   && INSN_ADDRESSES_SET_P ()
                   && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
                                      - INSN_ADDRESSES (INSN_UID (insn)) - 8))
            return "movb,%C2 %1,%0,%3%#";
          /* Handle normal cases.  */
          if (nullify)
            return "or,%N2 %1,%%r0,%0\n\tb,n %3";
          else
            return "or,%N2 %1,%%r0,%0\n\tb %3";

        default:
          /* The reversed conditional branch must branch over one additional
             instruction if the delay slot is filled and needs to be extracted
             by output_lbranch.  If the delay slot is empty or this is a
             nullified forward branch, the instruction after the reversed
             condition branch must be nullified.  */
          if (dbr_sequence_length () == 0
              || (nullify && forward_branch_p (insn)))
            {
              nullify = 1;
              xdelay = 0;
              operands[4] = GEN_INT (length);
            }
          else
            {
              xdelay = 1;
              operands[4] = GEN_INT (length + 4);
            }

          if (nullify)
            output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
          else
            output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);

          return output_lbranch (operands[3], insn, xdelay);
        }
    }
  /* Deal with gross reload for FP destination register case.  */
  else if (which_alternative == 1)
    {
      /* Move source register to MEM, perform the branch test, then
         finally load the FP register from MEM from within the branch's
         delay slot.  */
      output_asm_insn ("stw %1,-16(%%r30)", operands);
      if (length == 12)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
      else if (length == 16)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
      else
        {
          operands[4] = GEN_INT (length - 4);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
          output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
          return output_lbranch (operands[3], insn, 0);
        }
    }
  /* Deal with gross reload from memory case.  */
  else if (which_alternative == 2)
    {
      /* Reload loop counter from memory, the store back to memory
         happens in the branch's delay slot.  */
      if (length == 8)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
      else if (length == 12)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
      else
        {
          operands[4] = GEN_INT (length);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
                           operands);
          return output_lbranch (operands[3], insn, 0);
        }
    }
  /* Handle SAR as a destination.  */
  else
    {
      if (length == 8)
        return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
      else if (length == 12)
        return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
      else
        {
          operands[4] = GEN_INT (length);
          output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
                           operands);
          return output_lbranch (operands[3], insn, 0);
        }
    }
}
/* Copy any FP arguments in INSN into integer registers.  */

static void
copy_fp_args (rtx insn)
{
  rtx link;
  rtx xoperands[2];

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
             && GET_CODE (XEXP (use, 0)) == REG
             && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
        {
          /* Copy the FP register into an integer register via memory.  */
          if (arg_mode == SFmode)
            {
              xoperands[0] = XEXP (use, 0);
              xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
              output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
              output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
            }
          else
            {
              xoperands[0] = XEXP (use, 0);
              xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
              output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
              output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
              output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
            }
        }
    }
}
/* Compute length of the FP argument copy sequence for INSN.  */

static int
length_fp_args (rtx insn)
{
  int length = 0;
  rtx link;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      int arg_mode, regno;
      rtx use = XEXP (link, 0);

      if (! (GET_CODE (use) == USE
             && GET_CODE (XEXP (use, 0)) == REG
             && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
        continue;

      arg_mode = GET_MODE (XEXP (use, 0));
      regno = REGNO (XEXP (use, 0));

      /* Is it a floating point register?  */
      if (regno >= 32 && regno <= 39)
        {
          if (arg_mode == SFmode)
            length += 8;
          else
            length += 12;
        }
    }

  return length;
}
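/* Note (an editorial cross-reference): the 8- and 12-byte increments
   above match copy_fp_args, where an SFmode argument is copied with two
   4-byte insns (fstw + ldw) and a DFmode argument with three (fstd plus
   two ldw's).  */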
/* Return the attribute length for the millicode call instruction INSN.
   The length must match the code generated by output_millicode_call.
   We include the delay slot in the returned length as it is better to
   overestimate the length than to underestimate it.  */

int
attr_length_millicode_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
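  /* A note on what follows (an inference, since the original carries no
     comment here): the sum below is done in unsigned arithmetic, so if
     it wraps around, distance ends up smaller than total; that case is
     treated as "distance unknown" by resetting distance to -1, which
     forces the conservative, longest sequence length.  */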
  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  if (TARGET_64BIT)
    {
      if (!TARGET_LONG_CALLS && distance < 7600000)
        return 8;

      return 20;
    }
  else if (TARGET_PORTABLE_RUNTIME)
    return 24;
  else
    {
      if (!TARGET_LONG_CALLS && distance < 240000)
        return 8;

      if (TARGET_LONG_ABS_CALL && !flag_pic)
        return 12;

      return 24;
    }
}
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
output_millicode_call (rtx insn, rtx call_dest)
{
  int attr_length = get_attr_length (insn);
  int seq_length = dbr_sequence_length ();
  int distance;
  rtx seq_insn;
  rtx xoperands[3];

  xoperands[0] = call_dest;
  xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);

  /* Handle the common case where we are sure that the branch will
     reach the beginning of the $CODE$ subspace.  The within reach
     form of the $$sh_func_adrs call has a length of 28.  Because
     it has an attribute type of multi, it never has a nonzero
     sequence length.  The length of the $$sh_func_adrs is the same
     as certain out of reach PIC calls to other routines.  */
  if (!TARGET_LONG_CALLS
      && ((seq_length == 0
           && (attr_length == 12
               || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
          || (seq_length != 0 && attr_length == 8)))
    {
      output_asm_insn ("{bl|b,l} %0,%2", xoperands);
    }
  else
    {
      if (TARGET_64BIT)
        {
          /* It might seem that one insn could be saved by accessing
             the millicode function using the linkage table.  However,
             this doesn't work in shared libraries and other dynamically
             loaded objects.  Using a pc-relative sequence also avoids
             problems related to the implicit use of the gp register.  */
          output_asm_insn ("b,l .+8,%%r1", xoperands);

          if (TARGET_GAS)
            {
              output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
            }
          else
            {
              xoperands[1] = gen_label_rtx ();
              output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (xoperands[1]));
              output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
            }

          output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
        }
      else if (TARGET_PORTABLE_RUNTIME)
        {
          /* Pure portable runtime doesn't allow be/ble; we also don't
             have PIC support in the assembler/linker, so this sequence
             is needed.  */

          /* Get the address of our target into %r1.  */
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);

          /* Get our return address into %r31.  */
          output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
          output_asm_insn ("addi 8,%%r31,%%r31", xoperands);

          /* Jump to our target address in %r1.  */
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
        }
      else if (!flag_pic)
        {
          output_asm_insn ("ldil L'%0,%%r1", xoperands);
          if (TARGET_PA_20)
            output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
          else
            output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
        }
      else
        {
          output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
          output_asm_insn ("addi 16,%%r1,%%r31", xoperands);

          if (TARGET_SOM || !TARGET_GAS)
            {
              /* The HP assembler can generate relocations for the
                 difference of two symbols.  GAS can do this for a
                 millicode symbol but not an arbitrary external
                 symbol when generating SOM output.  */
              xoperands[1] = gen_label_rtx ();
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (xoperands[1]));
              output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
            }
          else
            {
              output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
              output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
                               xoperands);
            }

          /* Jump to our target address in %r1.  */
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
        }
    }

  if (seq_length == 0)
    output_asm_insn ("nop", xoperands);
  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
    return "";

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  /* See if the return address can be adjusted.  Use the containing
     sequence insn's address.  */
  if (INSN_ADDRESSES_SET_P ())
    {
      seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
                  - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance))
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
        }
      else
        /* ??? This branch may not reach its target.  */
        output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    /* ??? This branch may not reach its target.  */
    output_asm_insn ("nop\n\tb,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}
/* Return the attribute length of the call instruction INSN.  The SIBCALL
   flag indicates whether INSN is a regular call or a sibling call.  The
   length returned must be longer than the code actually generated by
   output_call.  Since branch shortening is done before delay branch
   sequencing, there is no way to determine whether or not the delay
   slot will be filled during branch shortening.  Even when the delay
   slot is filled, we may have to add a nop if the delay slot contains
   a branch that can't reach its target.  Thus, we always have to include
   the delay slot in the length estimate.  This used to be done in
   pa_adjust_insn_length but we do it here now as some sequences always
   fill the delay slot and we can save four bytes in the estimate for
   these sequences.  */

int
attr_length_call (rtx insn, int sibcall)
{
  int local_call;
  rtx call, call_dest;
  tree call_decl;
  int length = 0;
  rtx pat = PATTERN (insn);
  unsigned long distance = -1;

  gcc_assert (GET_CODE (insn) == CALL_INSN);
  if (INSN_ADDRESSES_SET_P ())
    {
      unsigned long total;

      total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  gcc_assert (GET_CODE (pat) == PARALLEL);

  /* Get the call rtx.  */
  call = XVECEXP (pat, 0, 0);
  if (GET_CODE (call) == SET)
    call = SET_SRC (call);

  gcc_assert (GET_CODE (call) == CALL);

  /* Determine if this is a local call.  */
  call_dest = XEXP (XEXP (call, 0), 0);
  call_decl = SYMBOL_REF_DECL (call_dest);
  local_call = call_decl && targetm.binds_local_p (call_decl);

  /* pc-relative branch.  */
  if (!TARGET_LONG_CALLS
      && ((TARGET_PA_20 && !sibcall && distance < 7600000)
          || distance < 240000))
    length += 8;

  /* 64-bit plabel sequence.  */
  else if (TARGET_64BIT && !local_call)
    length += sibcall ? 28 : 24;

  /* non-pic long absolute branch sequence.  */
  else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
    length += 12;

  /* long pc-relative branch sequence.  */
  else if (TARGET_LONG_PIC_SDIFF_CALL
           || (TARGET_GAS && !TARGET_SOM
               && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
    {
      length += 20;

      if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
        length += 8;
    }

  /* 32-bit plabel sequence.  */
  else
    {
      length += 32;

      if (TARGET_SOM)
        length += length_fp_args (insn);

      if (flag_pic)
        length += 4;

      if (!TARGET_PA_20)
        {
          if (!sibcall)
            length += 8;

          if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
            length += 8;
        }
    }

  return length;
}
/* INSN is a function call.  It may have an unconditional jump
   in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
output_call (rtx insn, rtx call_dest, int sibcall)
{
  int delay_insn_deleted = 0;
  int delay_slot_filled = 0;
  int seq_length = dbr_sequence_length ();
  tree call_decl = SYMBOL_REF_DECL (call_dest);
  int local_call = call_decl && targetm.binds_local_p (call_decl);
  rtx xoperands[2];

  xoperands[0] = call_dest;

  /* Handle the common case where we're sure that the branch will reach
     the beginning of the "$CODE$" subspace.  This is the beginning of
     the current function if we are in a named section.  */
  if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
    {
      xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
      output_asm_insn ("{bl|b,l} %0,%1", xoperands);
    }
  else
    {
      if (TARGET_64BIT && !local_call)
        {
          /* ??? As far as I can tell, the HP linker doesn't support the
             long pc-relative sequence described in the 64-bit runtime
             architecture.  So, we use a slightly longer indirect call.  */
          xoperands[0] = get_deferred_plabel (call_dest);
          xoperands[1] = gen_label_rtx ();

          /* If this isn't a sibcall, we put the load of %r27 into the
             delay slot.  We can't do this in a sibcall as we don't
             have a second call-clobbered scratch register available.  */
          if (seq_length != 0
              && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
              && !sibcall)
            {
              final_scan_insn (NEXT_INSN (insn), asm_out_file,
                               optimize, 0, NULL);

              /* Now delete the delay insn.  */
              SET_INSN_DELETED (NEXT_INSN (insn));
              delay_insn_deleted = 1;
            }
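          /* What follows loads the function's plabel (descriptor
             address) out of the DLT via %r27, then dereferences it;
             judging from the loads below, offset 16 in the descriptor
             holds the entry point and offset 24 the callee's gp value
             (this summary is inferred from the code, not from the
             original comments).  */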
          output_asm_insn ("addil LT'%0,%%r27", xoperands);
          output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
          output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);

          if (sibcall)
            {
              output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
              output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
              output_asm_insn ("bve (%%r1)", xoperands);
            }
          else
            {
              output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
              output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
              output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
              delay_slot_filled = 1;
            }
        }
      else
        {
          int indirect_call = 0;

          /* Emit a long call.  There are several different sequences
             of increasing length and complexity.  In most cases,
             they don't allow an instruction in the delay slot.  */
          if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
              && !TARGET_LONG_PIC_SDIFF_CALL
              && !(TARGET_GAS && !TARGET_SOM
                   && (TARGET_LONG_PIC_PCREL_CALL || local_call))
              && !TARGET_64BIT)
            indirect_call = 1;

          if (seq_length != 0
              && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
              && !sibcall
              && (!TARGET_PA_20
                  || indirect_call
                  || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
            {
              /* A non-jump insn in the delay slot.  By definition we can
                 emit this insn before the call (and in fact before argument
                 relocating).  */
              final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
                               NULL);

              /* Now delete the delay insn.  */
              SET_INSN_DELETED (NEXT_INSN (insn));
              delay_insn_deleted = 1;
            }

          if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
            {
              /* This is the best sequence for making long calls in
                 non-pic code.  Unfortunately, GNU ld doesn't provide
                 the stub needed for external calls, and GAS's support
                 for this with the SOM linker is buggy.  It is safe
                 to use this for local calls.  */
              output_asm_insn ("ldil L'%0,%%r1", xoperands);
              if (sibcall)
                output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
              else
                {
                  if (TARGET_PA_20)
                    output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
                                     xoperands);
                  else
                    output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);

                  output_asm_insn ("copy %%r31,%%r2", xoperands);
                  delay_slot_filled = 1;
                }
            }
          else
            {
              if (TARGET_LONG_PIC_SDIFF_CALL)
                {
                  /* The HP assembler and linker can handle relocations
                     for the difference of two symbols.  The HP assembler
                     recognizes the sequence as a pc-relative call and
                     the linker provides stubs when needed.  */
                  xoperands[1] = gen_label_rtx ();
                  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
                  output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
                  targetm.asm_out.internal_label (asm_out_file, "L",
                                                  CODE_LABEL_NUMBER (xoperands[1]));
                  output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
                }
              else if (TARGET_GAS && !TARGET_SOM
                       && (TARGET_LONG_PIC_PCREL_CALL || local_call))
                {
                  /* GAS currently can't generate the relocations that
                     are needed for the SOM linker under HP-UX using this
                     sequence.  The GNU linker doesn't generate the stubs
                     that are needed for external calls on TARGET_ELF32
                     with this sequence.  For now, we have to use a
                     longer plabel sequence when using GAS.  */
                  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
                  output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
                                   xoperands);
                  output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
                                   xoperands);
                }
              else
                {
                  /* Emit a long plabel-based call sequence.  This is
                     essentially an inline implementation of $$dyncall.
                     We don't actually try to call $$dyncall as this is
                     as difficult as calling the function itself.  */
                  xoperands[0] = get_deferred_plabel (call_dest);
                  xoperands[1] = gen_label_rtx ();

                  /* Since the call is indirect, FP arguments in registers
                     need to be copied to the general registers.  Then, the
                     argument relocation stub will copy them back.  */
                  if (TARGET_SOM)
                    copy_fp_args (insn);

                  if (flag_pic)
                    {
                      output_asm_insn ("addil LT'%0,%%r19", xoperands);
                      output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
                      output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
                    }
                  else
                    {
                      output_asm_insn ("addil LR'%0-$global$,%%r27",
                                       xoperands);
                      output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
                                       xoperands);
                    }

                  output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
                  output_asm_insn ("depi 0,31,2,%%r1", xoperands);
                  output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
                  output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);

                  if (!sibcall && !TARGET_PA_20)
                    {
                      output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
                      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                        output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
                      else
                        output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
                    }
                }

              if (TARGET_PA_20)
                {
                  if (sibcall)
                    output_asm_insn ("bve (%%r1)", xoperands);
                  else
                    {
                      if (indirect_call)
                        {
                          output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
                          output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
                          delay_slot_filled = 1;
                        }
                      else
                        output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
                    }
                }
              else
                {
                  if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
                    output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
                                     xoperands);

                  if (sibcall)
                    {
                      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                        output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
                      else
                        output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
                    }
                  else
                    {
                      if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
                        output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
                      else
                        output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);

                      if (indirect_call)
                        output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
                      else
                        output_asm_insn ("copy %%r31,%%r2", xoperands);
                      delay_slot_filled = 1;
                    }
                }
            }
        }

      if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
        output_asm_insn ("nop", xoperands);
    }
  /* We are done if there isn't a jump in the delay slot.  */
  if (seq_length == 0
      || delay_insn_deleted
      || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
    return "";

  /* A sibcall should never have a branch in the delay slot.  */
  gcc_assert (!sibcall);

  /* This call has an unconditional jump in its delay slot.  */
  xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);

  if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
    {
      /* See if the return address can be adjusted.  Use the containing
         sequence insn's address.  This would break the regular call/return
         relationship assumed by the table based eh unwinder, so only do that
         if the call is not possibly throwing.  */
      rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
      int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
                      - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);

      if (VAL_14_BITS_P (distance)
          && !(can_throw_internal (insn) || can_throw_external (insn)))
        {
          xoperands[1] = gen_label_rtx ();
          output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (xoperands[1]));
        }
      else
        output_asm_insn ("nop\n\tb,n %0", xoperands);
    }
  else
    output_asm_insn ("b,n %0", xoperands);

  /* Delete the jump.  */
  SET_INSN_DELETED (NEXT_INSN (insn));

  return "";
}
/* Return the attribute length of the indirect call instruction INSN.
   The length must match the code generated by output_indirect_call.
   The returned length includes the delay slot.  Currently, the delay
   slot of an indirect call sequence is not exposed and it is used by
   the sequence itself.  */

int
attr_length_indirect_call (rtx insn)
{
  unsigned long distance = -1;
  unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;

  if (INSN_ADDRESSES_SET_P ())
    {
      distance = (total + insn_current_reference_address (insn));
      if (distance < total)
        distance = -1;
    }

  if (TARGET_64BIT)
    return 12;

  if (TARGET_FAST_INDIRECT_CALLS
      || (!TARGET_PORTABLE_RUNTIME
          && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
              || distance < 240000)))
    return 8;

  if (flag_pic)
    return 24;

  if (TARGET_PORTABLE_RUNTIME)
    return 20;

  /* Out of reach, can use ble.  */
  return 12;
}
const char *
output_indirect_call (rtx insn, rtx call_dest)
{
  rtx xoperands[1];

  if (TARGET_64BIT)
    {
      xoperands[0] = call_dest;
      output_asm_insn ("ldd 16(%0),%%r2", xoperands);
      output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
      return "";
    }

  /* First the special case for kernels, level 0 systems, etc.  */
  if (TARGET_FAST_INDIRECT_CALLS)
    return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
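  /* Judging from the templates in this function, the $$dyncall
     convention expects the function pointer in %r22 and leaves the
     return address in %r31 (copied into %r2, the normal return
     pointer, as needed); this summary is inferred from the code,
     not from the original comments.  */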
  /* Now the normal case -- we can reach $$dyncall directly or
     we're sure that we can get there via a long-branch stub.

     No need to check target flags as the length uniquely identifies
     the remaining cases.  */
  if (attr_length_indirect_call (insn) == 8)
    {
      /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
         $$dyncall.  Since BLE uses %r31 as the link register, the 22-bit
         variant of the B,L instruction can't be used on the SOM target.  */
      if (TARGET_PA_20 && !TARGET_SOM)
        return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
      else
        return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
    }

  /* Long millicode call, but we are not generating PIC or portable runtime
     code.  */
  if (attr_length_indirect_call (insn) == 12)
    return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";

  /* Long millicode call for portable runtime.  */
  if (attr_length_indirect_call (insn) == 20)
    return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";

  /* We need a long PIC call to $$dyncall.  */
  xoperands[0] = NULL_RTX;
  output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
  if (TARGET_SOM || !TARGET_GAS)
    {
      xoperands[0] = gen_label_rtx ();
      output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
      targetm.asm_out.internal_label (asm_out_file, "L",
                                      CODE_LABEL_NUMBER (xoperands[0]));
      output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
    }
  else
    {
      output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
      output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
                       xoperands);
    }
  output_asm_insn ("blr %%r0,%%r2", xoperands);
  output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
  return "";
}
/* Return the total length of the save and restore instructions needed for
   the data linkage table pointer (i.e., the PIC register) across the call
   instruction INSN.  No-return calls do not require a save and restore.
   In addition, we may be able to avoid the save and restore for calls
   within the same translation unit.  */

int
attr_length_save_restore_dltp (rtx insn)
{
  if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
    return 0;

  return 8;
}
/* In HPUX 8.0's shared library scheme, special relocations are needed
   for function labels if they might be passed to a function
   in a shared library (because shared libraries don't live in code
   space), and special magic is needed to construct their address.  */

void
hppa_encode_label (rtx sym)
{
  const char *str = XSTR (sym, 0);
  int len = strlen (str) + 1;
  char *newstr, *p;

  p = newstr = XALLOCAVEC (char, len + 1);
  *p++ = '@';
  strcpy (p, str);

  XSTR (sym, 0) = ggc_alloc_string (newstr, len);
}
static void
pa_encode_section_info (tree decl, rtx rtl, int first)
{
  int old_referenced = 0;

  if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
    old_referenced
      = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;

  default_encode_section_info (decl, rtl, first);

  if (first && TEXT_SPACE_P (decl))
    {
      SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
      if (TREE_CODE (decl) == FUNCTION_DECL)
        hppa_encode_label (XEXP (rtl, 0));
    }
  else if (old_referenced)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
}

/* This is sort of inverse to pa_encode_section_info.  */

static const char *
pa_strip_name_encoding (const char *str)
{
  str += (*str == '@');
  str += (*str == '*');
  return str;
}
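/* For example (an illustration, not original commentary): after
   hppa_encode_label, a function symbol "foo" is stored as "@foo";
   pa_strip_name_encoding undoes that and also drops the generic '*'
   user-label prefix, so "@foo", "*foo" and "@*foo" all come back as
   "foo".  */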
int
function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
}

/* Returns 1 if OP is a function label involved in a simple addition
   with a constant.  Used to keep certain patterns from matching
   during instruction combination.  */

int
is_function_label_plus_const (rtx op)
{
  /* Strip off any CONST.  */
  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  return (GET_CODE (op) == PLUS
          && function_label_operand (XEXP (op, 0), Pmode)
          && GET_CODE (XEXP (op, 1)) == CONST_INT);
}
/* Output assembly code for a thunk to FUNCTION.  */

static void
pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
                        HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                        tree function)
{
  static unsigned int current_thunk_number;
  int val_14 = VAL_14_BITS_P (delta);
  unsigned int old_last_address = last_address, nbytes = 0;
  char label[16];
  rtx xoperands[4];

  xoperands[0] = XEXP (DECL_RTL (function), 0);
  xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
  xoperands[2] = GEN_INT (delta);

  ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
  fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
  /* Output the thunk.  We know that the function is in the same
     translation unit (i.e., the same space) as the thunk, and that
     thunks are output after their method.  Thus, we don't need an
     external branch to reach the function.  With SOM and GAS,
     functions and thunks are effectively in different sections.
     Thus, we can always use an IA-relative branch and the linker
     will add a long branch stub if necessary.

     However, we have to be careful when generating PIC code on the
     SOM port to ensure that the sequence does not transfer to an
     import stub for the target function as this could clobber the
     return value saved at SP-24.  This would also apply to the
     32-bit linux port if the multi-space model is implemented.  */
  if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
       && !(flag_pic && TREE_PUBLIC (function))
       && (TARGET_GAS || last_address < 262132))
      || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
          && ((targetm.have_named_sections
               && DECL_SECTION_NAME (thunk_fndecl) != NULL
               /* The GNU 64-bit linker has rather poor stub management.
                  So, we use a long branch from thunks that aren't in
                  the same section as the target function.  */
               && ((!TARGET_64BIT
                    && (DECL_SECTION_NAME (thunk_fndecl)
                        != DECL_SECTION_NAME (function)))
                   || ((DECL_SECTION_NAME (thunk_fndecl)
                        == DECL_SECTION_NAME (function))
                       && last_address < 262132)))
              || (targetm.have_named_sections
                  && DECL_SECTION_NAME (thunk_fndecl) == NULL
                  && DECL_SECTION_NAME (function) == NULL
                  && last_address < 262132)
              || (!targetm.have_named_sections
                  && last_address < 262132))))
    {
      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("b %0", xoperands);

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 8;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 12;
        }
    }
  else if (TARGET_64BIT)
    {
      /* We only have one call-clobbered scratch register, so we can't
         make use of the delay slot if delta doesn't fit in 14 bits.  */
      if (!val_14)
        {
          output_asm_insn ("addil L'%2,%%r26", xoperands);
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
        }

      output_asm_insn ("b,l .+8,%%r1", xoperands);

      if (TARGET_GAS)
        {
          output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
          output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
        }
      else
        {
          xoperands[3] = GEN_INT (val_14 ? 8 : 16);
          output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
          output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
        }

      if (val_14)
        {
          output_asm_insn ("bv %%r0(%%r1)", xoperands);
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 20;
        }
      else
        {
          output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
          nbytes += 24;
        }
    }
  else if (TARGET_PORTABLE_RUNTIME)
    {
      output_asm_insn ("ldil L'%0,%%r1", xoperands);
      output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);

      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 16;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 20;
        }
    }
  else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      /* The function is accessible from outside this module.  The only
         way to avoid an import stub between the thunk and function is to
         call the function directly with an indirect sequence similar to
         that used by $$dyncall.  This is possible because $$dyncall acts
         as the import stub in an indirect call.  */
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
      xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
      output_asm_insn ("addil LT'%3,%%r19", xoperands);
      output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
      output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
      output_asm_insn ("depi 0,31,2,%%r22", xoperands);
      output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
      output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);

      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      if (TARGET_PA_20)
        {
          output_asm_insn ("bve (%%r22)", xoperands);
          nbytes += 32;
        }
      else if (TARGET_NO_SPACE_REGS)
        {
          output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
          nbytes += 32;
        }
      else
        {
          output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
          output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
          output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
          nbytes += 44;
        }

      if (val_14)
        output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
      else
        output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
    }
  else if (flag_pic)
    {
      output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);

      if (TARGET_SOM || !TARGET_GAS)
        {
          output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
          output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
        }
      else
        {
          output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
          output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
        }

      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("bv %%r0(%%r22)", xoperands);

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 20;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 24;
        }
    }
  else
    {
      if (!val_14)
        output_asm_insn ("addil L'%2,%%r26", xoperands);

      output_asm_insn ("ldil L'%0,%%r22", xoperands);
      output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);

      if (val_14)
        {
          output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
          nbytes += 16;
        }
      else
        {
          output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
          nbytes += 20;
        }
    }
  fprintf (file, "\t.EXIT\n\t.PROCEND\n");

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We're done with this subspace except possibly for some additional
         debug information.  Forget that we are in this subspace to ensure
         that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
    {
      switch_to_section (data_section);
      output_asm_insn (".align 4", xoperands);
      ASM_OUTPUT_LABEL (file, label);
      output_asm_insn (".word P'%0", xoperands);
    }

  current_thunk_number++;
  nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
            & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
  last_address += nbytes;
  if (old_last_address > last_address)
    last_address = UINT_MAX;
  update_total_code_bytes (nbytes);
}
/* Only direct calls to static functions are allowed to be sibling (tail)
   call optimized.

   This restriction is necessary because some linker-generated stubs will
   store return pointers into rp' in some cases which might clobber a
   live value already in rp'.

   In a sibcall the current function and the target function share stack
   space.  Thus if the path to the current function and the path to the
   target function save a value in rp', they save the value into the
   same stack slot, which has undesirable consequences.

   Because of the deferred binding nature of shared libraries any function
   with external scope could be in a different load module and thus require
   rp' to be saved when calling that function.  So sibcall optimizations
   can only be safe for static functions.

   Note that GCC never needs return value relocations, so we don't have to
   worry about static calls with return value relocations (which require
   saving rp').

   It is safe to perform a sibcall optimization when the target function
   will never return.  */

static bool
pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  if (TARGET_PORTABLE_RUNTIME)
    return false;

  /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
     single subspace mode and the call is not indirect.  As far as I know,
     there is no operating system support for the multiple subspace mode.
     It might be possible to support indirect calls if we didn't use
     $$dyncall (see the indirect sequence generated in output_call).  */
  if (TARGET_ELF32)
    return (decl != NULL_TREE);

  /* Sibcalls are not ok because the arg pointer register is not a fixed
     register.  This prevents the sibcall optimization from occurring.  In
     addition, there are problems with stub placement using GNU ld.  This
     is because a normal sibcall branch uses a 17-bit relocation while
     a regular call branch uses a 22-bit relocation.  As a result, more
     care needs to be taken in the placement of long-branch stubs.  */
  if (TARGET_64BIT)
    return false;

  /* Sibcalls are only ok within a translation unit.  */
  return (decl && !TREE_PUBLIC (decl));
}
/* ??? Addition is not commutative on the PA due to the weird implicit
   space register selection rules for memory addresses.  Therefore, we
   don't consider a + b == b + a, as this might be inside a MEM.  */

static bool
pa_commutative_p (const_rtx x, int outer_code)
{
  return (COMMUTATIVE_P (x)
          && (TARGET_NO_SPACE_REGS
              || (outer_code != UNKNOWN && outer_code != MEM)
              || GET_CODE (x) != PLUS));
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpyadd instructions.  */
int
fmpyaddoperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
         && mode == GET_MODE (operands[2])
         && mode == GET_MODE (operands[3])
         && mode == GET_MODE (operands[4])
         && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
         && GET_CODE (operands[2]) == REG
         && GET_CODE (operands[3]) == REG
         && GET_CODE (operands[4]) == REG
         && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the addition.  One of the input operands must
     be the same as the output operand.  */
  if (! rtx_equal_p (operands[3], operands[4])
      && ! rtx_equal_p (operands[3], operands[5]))
    return 0;

  /* Inout operand of add cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* Multiply cannot feed into addition operands.  */
  if (rtx_equal_p (operands[4], operands[0])
      || rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpyadd.  */
  return 1;
}
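/* Illustrative sketch, not from the original sources: a candidate
   fmpyadd insn pairs the two independent operations

       operands[0] = operands[1] * operands[2]    (the fmpy half)
       operands[3] = operands[4] + operands[5]    (the fadd half)

   which is why the checks above require the add's in/out operand
   (operands[3]) to be disjoint from the multiply's operands, and the
   multiply result (operands[0]) not to feed the add inputs.  The real
   machine patterns live in pa.md.  */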
#if !defined(USE_COLLECT2)
static void
pa_asm_out_constructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    hppa_encode_label (symbol);

#ifdef CTORS_SECTION_ASM_OP
  default_ctor_section_asm_out_constructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_constructor (symbol, priority);
# else
  default_stabs_asm_out_constructor (symbol, priority);
# endif
#endif
}

static void
pa_asm_out_destructor (rtx symbol, int priority)
{
  if (!function_label_operand (symbol, VOIDmode))
    hppa_encode_label (symbol);

#ifdef DTORS_SECTION_ASM_OP
  default_dtor_section_asm_out_destructor (symbol, priority);
#else
# ifdef TARGET_ASM_NAMED_SECTION
  default_named_section_asm_out_destructor (symbol, priority);
# else
  default_stabs_asm_out_destructor (symbol, priority);
# endif
#endif
}
#endif
/* This function places uninitialized global data in the bss section.
   The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
   function on the SOM port to prevent uninitialized global data from
   being placed in the data section.  */

void
pa_asm_output_aligned_bss (FILE *stream,
                           const char *name,
                           unsigned HOST_WIDE_INT size,
                           unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
  ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
#endif

  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
}
/* Both the HP and GNU assemblers under HP-UX provide a .comm directive
   that doesn't allow the alignment of global common storage to be directly
   specified.  The SOM linker aligns common storage based on the rounded
   value of the NUM_BYTES parameter in the .comm directive.  It's not
   possible to use the .align directive as it doesn't affect the alignment
   of the label associated with a .comm directive.  */

void
pa_asm_output_aligned_common (FILE *stream,
                              const char *name,
                              unsigned HOST_WIDE_INT size,
                              unsigned int align)
{
  unsigned int max_common_align;

  max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
  if (align > max_common_align)
    {
      warning (0, "alignment (%u) for %s exceeds maximum alignment "
               "for global common data.  Using %u",
               align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
      align = max_common_align;
    }

  switch_to_section (bss_section);

  assemble_name (stream, name);
  fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
           MAX (size, align / BITS_PER_UNIT));
}
/* We can't use .comm for local common storage as the SOM linker effectively
   treats the symbol as universal and uses the same storage for local symbols
   with the same name in different object files.  The .block directive
   reserves an uninitialized block of storage.  However, it's not common
   storage.  Fortunately, GCC never requests common storage with the same
   name in any given translation unit.  */

void
pa_asm_output_aligned_local (FILE *stream,
                             const char *name,
                             unsigned HOST_WIDE_INT size,
                             unsigned int align)
{
  switch_to_section (bss_section);
  fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);

#ifdef LOCAL_ASM_OP
  fprintf (stream, "%s", LOCAL_ASM_OP);
  assemble_name (stream, name);
  fprintf (stream, "\n");
#endif

  ASM_OUTPUT_LABEL (stream, name);
  fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
}
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
   use in fmpysub instructions.  */
int
fmpysuboperands (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);

  /* Must be a floating point mode.  */
  if (mode != SFmode && mode != DFmode)
    return 0;

  /* All modes must be the same.  */
  if (! (mode == GET_MODE (operands[1])
         && mode == GET_MODE (operands[2])
         && mode == GET_MODE (operands[3])
         && mode == GET_MODE (operands[4])
         && mode == GET_MODE (operands[5])))
    return 0;

  /* All operands must be registers.  */
  if (! (GET_CODE (operands[1]) == REG
         && GET_CODE (operands[2]) == REG
         && GET_CODE (operands[3]) == REG
         && GET_CODE (operands[4]) == REG
         && GET_CODE (operands[5]) == REG))
    return 0;

  /* Only 2 real operands to the subtraction.  Subtraction is not a
     commutative operation, so operands[4] must be the same as
     operands[3].  */
  if (! rtx_equal_p (operands[3], operands[4]))
    return 0;

  /* Multiply cannot feed into subtraction.  */
  if (rtx_equal_p (operands[5], operands[0]))
    return 0;

  /* Inout operand of sub cannot conflict with any operands from multiply.  */
  if (rtx_equal_p (operands[3], operands[0])
      || rtx_equal_p (operands[3], operands[1])
      || rtx_equal_p (operands[3], operands[2]))
    return 0;

  /* SFmode limits the registers to the upper 32 of the 32bit FP regs.  */
  if (mode == SFmode
      && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
          || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
    return 0;

  /* Passed.  Operands are suitable for fmpysub.  */
  return 1;
}
/* Return 1 if the given constant is 2, 4, or 8.  These are the valid
   constants for shadd instructions.  */
int
shadd_constant_p (int val)
{
  if (val == 2 || val == 4 || val == 8)
    return 1;
  else
    return 0;
}
/* Return 1 if OP is valid as a base or index register in a
   REG+REG address.  */

int
borx_reg_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != REG)
    return 0;

  /* We must reject virtual registers as the only expressions that
     can be instantiated are REG and REG+CONST.  */
  if (op == virtual_incoming_args_rtx
      || op == virtual_stack_vars_rtx
      || op == virtual_stack_dynamic_rtx
      || op == virtual_outgoing_args_rtx
      || op == virtual_cfa_rtx)
    return 0;

  /* While it's always safe to index off the frame pointer, it's not
     profitable to do so when the frame pointer is being eliminated.  */
  if (!reload_completed
      && flag_omit_frame_pointer
      && !cfun->calls_alloca
      && op == frame_pointer_rtx)
    return 0;

  return register_operand (op, mode);
}
/* Return 1 if this operand is anything other than a hard register.  */

int
non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
}
/* Return TRUE if INSN branches forward.  */

static bool
forward_branch_p (rtx insn)
{
  rtx lab = JUMP_LABEL (insn);

  /* The INSN must have a jump label.  */
  gcc_assert (lab != NULL_RTX);

  if (INSN_ADDRESSES_SET_P ())
    return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));

  while (insn)
    {
      if (insn == lab)
        return true;
      else
        insn = NEXT_INSN (insn);
    }

  return false;
}
/* Return 1 if OP is an equality comparison, else return 0.  */
int
eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
}
/* Return 1 if INSN is in the delay slot of a call instruction.  */
int
jump_in_call_delay (rtx insn)
{
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;

  if (PREV_INSN (insn)
      && PREV_INSN (PREV_INSN (insn))
      && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
    {
      rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));

      return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
              && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
    }
  else
    return 0;
}
/* Output an unconditional move and branch insn.  */

const char *
output_parallel_movb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* These are the cases in which we win.  */
  if (length == 4)
    return "mov%I1b,tr %1,%0,%2";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
        {
          /* Nothing in the delay slot, fake it by putting the combined
             insn (the copy or add) in the delay slot of a bl.  */
          if (GET_CODE (operands[1]) == CONST_INT)
            return "b %2\n\tldi %1,%0";
          else
            return "b %2\n\tcopy %1,%0";
        }
      else
        {
          /* Something in the delay slot, but we've got a long branch.  */
          if (GET_CODE (operands[1]) == CONST_INT)
            return "ldi %1,%0\n\tb %2";
          else
            return "copy %1,%0\n\tb %2";
        }
    }

  if (GET_CODE (operands[1]) == CONST_INT)
    output_asm_insn ("ldi %1,%0", operands);
  else
    output_asm_insn ("copy %1,%0", operands);
  return output_lbranch (operands[2], insn, 1);
}
/* Output an unconditional add and branch insn.  */

const char *
output_parallel_addb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      /* Nothing in the delay slot, fake it by putting the combined
         insn (the copy or add) in the delay slot of a bl.  */
      if (dbr_sequence_length () == 0)
        return "b %3\n\tadd%I1 %1,%0,%0";
      /* Something in the delay slot, but we've got a long branch.  */
      else
        return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return output_lbranch (operands[3], insn, 1);
}
/* Return nonzero if INSN (a jump insn) immediately follows a call
   to a named function.  This is used to avoid filling the delay slot
   of the jump since it can usually be eliminated by modifying RP in
   the delay slot of the call.  */

int
following_call (rtx insn)
{
  if (! TARGET_JUMP_IN_DELAY)
    return 0;

  /* Find the previous real insn, skipping NOTEs.  */
  insn = PREV_INSN (insn);
  while (insn && GET_CODE (insn) == NOTE)
    insn = PREV_INSN (insn);

  /* Check for CALL_INSNs and millicode calls.  */
  if (insn
      && ((GET_CODE (insn) == CALL_INSN
           && get_attr_type (insn) != TYPE_DYNCALL)
          || (GET_CODE (insn) == INSN
              && GET_CODE (PATTERN (insn)) != SEQUENCE
              && GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER
              && get_attr_type (insn) == TYPE_MILLI)))
    return 1;

  return 0;
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.

   We want the delay slots of branches within jump tables to be filled.
   None of the compiler passes at the moment even has the notion that a
   PA jump table doesn't contain addresses, but instead contains actual
   instructions!

   Because we actually jump into the table, the addresses of each entry
   must stay constant in relation to the beginning of the table (which
   itself must stay constant relative to the instruction to jump into
   it).  I don't believe we can guarantee earlier passes of the compiler
   will adhere to those rules.

   So, late in the compilation process we find all the jump tables, and
   expand them into real code -- e.g. each entry in the jump table vector
   will get an appropriate label followed by a jump to the final target.

   Reorg and the final jump pass can then optimize these branches and
   fill their delay slots.  We end up with smaller, more efficient code.

   The jump instructions within the table are special; we must be able
   to identify them during assembly output (if the jumps don't get filled
   we need to emit a nop rather than nullifying the delay slot).  We
   identify jumps in switch tables by using insns with the attribute
   type TYPE_BTABLE_BRANCH.

   We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
   insns.  This serves two purposes, first it prevents jump.c from
   noticing that the last N entries in the table jump to the instruction
   immediately after the table and deleting the jumps.  Second, those
   insns mark where we should emit .begin_brtab and .end_brtab directives
   when using GAS (allows for better link time optimizations).  */
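/* Illustrative sketch of the expansion performed below, not from the
   original sources: a three-entry ADDR_VEC jumping to L4, L7 and L4
   becomes

       L100:	b L4		; table slot 0
		nop
       L101:	b L7		; table slot 1
		nop
       L102:	b L4		; table slot 2
		nop

   where each slot gets its own label so jump.c cannot delete it, and
   reorg may later fill the delay slot of each branch.  */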
static void
pa_reorg (void)
{
  rtx insn;

  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();

  /* This is fairly cheap, so always run it if optimizing.  */
  if (optimize > 0 && !TARGET_BIG_SWITCH)
    {
      /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          rtx pattern, tmp, location, label;
          unsigned int length, i;

          /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode.  */
          if (GET_CODE (insn) != JUMP_INSN
              || (GET_CODE (PATTERN (insn)) != ADDR_VEC
                  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
            continue;

          /* Emit marker for the beginning of the branch table.  */
          emit_insn_before (gen_begin_brtab (), insn);

          pattern = PATTERN (insn);
          location = PREV_INSN (insn);
          length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);

          for (i = 0; i < length; i++)
            {
              /* Emit a label before each jump to keep jump.c from
                 removing this code.  */
              tmp = gen_label_rtx ();
              LABEL_NUSES (tmp) = 1;
              emit_label_after (tmp, location);
              location = NEXT_INSN (location);

              if (GET_CODE (pattern) == ADDR_VEC)
                label = XEXP (XVECEXP (pattern, 0, i), 0);
              else
                label = XEXP (XVECEXP (pattern, 1, i), 0);

              tmp = gen_short_jump (label);

              /* Emit the jump itself.  */
              tmp = emit_jump_insn_after (tmp, location);
              JUMP_LABEL (tmp) = label;
              LABEL_NUSES (label)++;
              location = NEXT_INSN (location);

              /* Emit a BARRIER after the jump.  */
              emit_barrier_after (location);
              location = NEXT_INSN (location);
            }

          /* Emit marker for the end of the branch table.  */
          emit_insn_before (gen_end_brtab (), location);
          location = NEXT_INSN (location);
          emit_barrier_after (location);

          /* Delete the ADDR_VEC or ADDR_DIFF_VEC.  */
          delete_insn (insn);
        }
    }
  else
    {
      /* Still need brtab marker insns.  FIXME: the presence of these
         markers disables output of the branch table to readonly memory,
         and any alignment directives that might be needed.  Possibly,
         the begin_brtab insn should be output before the label for the
         table.  This doesn't matter at the moment since the tables are
         always output in the text section.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          /* Find an ADDR_VEC insn.  */
          if (GET_CODE (insn) != JUMP_INSN
              || (GET_CODE (PATTERN (insn)) != ADDR_VEC
                  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
            continue;

          /* Now generate markers for the beginning and end of the
             branch table.  */
          emit_insn_before (gen_begin_brtab (), insn);
          emit_insn_after (gen_end_brtab (), insn);
        }
    }
}
/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform a FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed point.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is an insns away).  These are difficult to use due to the
   branch length restrictions.  */
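/* Illustrative example of the addb case, not from the original
   sources: an add followed later by an unconditional backward branch

       add %r25,%r26,%r26
       ...
       b L10

   can combine into the single insn "addb,tr %r25,%r26,L10" when the
   two insns are independent, using the always-true completer to make
   the branch unconditional (see output_parallel_addb above for the
   actual output template).  */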
static void
pa_combine_instructions (void)
{
  rtx anchor, new_rtx;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  new_rtx = make_insn_raw (new_rtx);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
         Also ignore any special USE insns.  */
      if ((GET_CODE (anchor) != INSN
           && GET_CODE (anchor) != JUMP_INSN
           && GET_CODE (anchor) != CALL_INSN)
          || GET_CODE (PATTERN (anchor)) == USE
          || GET_CODE (PATTERN (anchor)) == CLOBBER
          || GET_CODE (PATTERN (anchor)) == ADDR_VEC
          || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
        continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
          || anchor_attr == PA_COMBINE_TYPE_FADDSUB
          || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
              && ! forward_branch_p (anchor)))
        {
          rtx floater;

          for (floater = PREV_INSN (anchor);
               floater;
               floater = PREV_INSN (floater))
            {
              if (GET_CODE (floater) == NOTE
                  || (GET_CODE (floater) == INSN
                      && (GET_CODE (PATTERN (floater)) == USE
                          || GET_CODE (PATTERN (floater)) == CLOBBER)))
                continue;

              /* Anything except a regular INSN will stop our search.  */
              if (GET_CODE (floater) != INSN
                  || GET_CODE (PATTERN (floater)) == ADDR_VEC
                  || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
                {
                  floater = NULL_RTX;
                  break;
                }

              /* See if FLOATER is suitable for combination with the
                 anchor.  */
              floater_attr = get_attr_pa_combine_type (floater);
              if ((anchor_attr == PA_COMBINE_TYPE_FMPY
                   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
                  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                      && floater_attr == PA_COMBINE_TYPE_FMPY))
                {
                  /* If ANCHOR and FLOATER can be combined, then we're
                     done with this pass.  */
                  if (pa_can_combine_p (new_rtx, anchor, floater, 0,
                                        SET_DEST (PATTERN (floater)),
                                        XEXP (SET_SRC (PATTERN (floater)), 0),
                                        XEXP (SET_SRC (PATTERN (floater)), 1)))
                    break;
                }

              else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
                       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
                {
                  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
                    {
                      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
                                            SET_DEST (PATTERN (floater)),
                                            XEXP (SET_SRC (PATTERN (floater)), 0),
                                            XEXP (SET_SRC (PATTERN (floater)), 1)))
                        break;
                    }
                  else
                    {
                      if (pa_can_combine_p (new_rtx, anchor, floater, 0,
                                            SET_DEST (PATTERN (floater)),
                                            SET_SRC (PATTERN (floater)),
                                            SET_SRC (PATTERN (floater))))
                        break;
                    }
                }
            }

          /* If we didn't find anything on the backwards scan try forwards.  */
          if (!floater
              && (anchor_attr == PA_COMBINE_TYPE_FMPY
                  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
            {
              for (floater = anchor; floater; floater = NEXT_INSN (floater))
                {
                  if (GET_CODE (floater) == NOTE
                      || (GET_CODE (floater) == INSN
                          && (GET_CODE (PATTERN (floater)) == USE
                              || GET_CODE (PATTERN (floater)) == CLOBBER)))
                    continue;

                  /* Anything except a regular INSN will stop our search.  */
                  if (GET_CODE (floater) != INSN
                      || GET_CODE (PATTERN (floater)) == ADDR_VEC
                      || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
                    {
                      floater = NULL_RTX;
                      break;
                    }

                  /* See if FLOATER is suitable for combination with the
                     anchor.  */
                  floater_attr = get_attr_pa_combine_type (floater);
                  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
                       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
                      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                          && floater_attr == PA_COMBINE_TYPE_FMPY))
                    {
                      /* If ANCHOR and FLOATER can be combined, then we're
                         done with this pass.  */
                      if (pa_can_combine_p (new_rtx, anchor, floater, 1,
                                            SET_DEST (PATTERN (floater)),
                                            XEXP (SET_SRC (PATTERN (floater)),
                                                  0),
                                            XEXP (SET_SRC (PATTERN (floater)),
                                                  1)))
                        break;
                    }
                }
            }

          /* FLOATER will be nonzero if we found a suitable floating
             insn for combination with ANCHOR.  */
          if (floater
              && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                  || anchor_attr == PA_COMBINE_TYPE_FMPY))
            {
              /* Emit the new instruction and delete the old anchor.  */
              emit_insn_before (gen_rtx_PARALLEL
                                (VOIDmode,
                                 gen_rtvec (2, PATTERN (anchor),
                                            PATTERN (floater))),
                                anchor);

              SET_INSN_DELETED (anchor);

              /* Emit a special USE insn for FLOATER, then delete
                 the floating insn.  */
              emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
              delete_insn (floater);
              continue;
            }
          else if (floater
                   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
            {
              rtx temp;

              /* Emit the new_jump instruction and delete the old anchor.  */
              temp = emit_jump_insn_before (gen_rtx_PARALLEL
                                            (VOIDmode,
                                             gen_rtvec (2, PATTERN (anchor),
                                                        PATTERN (floater))),
                                            anchor);

              JUMP_LABEL (temp) = JUMP_LABEL (anchor);
              SET_INSN_DELETED (anchor);

              /* Emit a special USE insn for FLOATER, then delete
                 the floating insn.  */
              emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
              delete_insn (floater);
              continue;
            }
        }
    }
}
static int
pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
                  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx start, end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
  INSN_CODE (new_rtx) = -1;
  insn_code_number = recog_memoized (new_rtx);
  if (insn_code_number < 0
      || (extract_insn (new_rtx), ! constrain_operands (1)))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There's up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in particular.  */
int
insn_refs_are_delayed (rtx insn)
{
  return ((GET_CODE (insn) == INSN
           && GET_CODE (PATTERN (insn)) != SEQUENCE
           && GET_CODE (PATTERN (insn)) != USE
           && GET_CODE (PATTERN (insn)) != CLOBBER
           && get_attr_type (insn) == TYPE_MILLI));
}
/* Promote the return value, but not the arguments.  */

static enum machine_mode
pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
                          enum machine_mode mode,
                          int *punsignedp ATTRIBUTE_UNUSED,
                          const_tree fntype ATTRIBUTE_UNUSED,
                          int for_return)
{
  if (for_return == 0)
    return mode;
  return promote_mode (type, mode, punsignedp);
}
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else promoting
   return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

static rtx
pa_function_value (const_tree valtype,
                   const_tree func ATTRIBUTE_UNUSED,
                   bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      if (TARGET_64BIT)
        {
          /* Aggregates with a size less than or equal to 128 bits are
             returned in GR 28(-29).  They are left justified.  The pad
             bits are undefined.  Larger aggregates are returned in
             memory.  */
          rtx loc[2];
          int i, offset = 0;
          int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;

          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, 28 + i),
                                          GEN_INT (offset));
              offset += 8;
            }

          return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
        }
      else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
        {
          /* Aggregates 5 to 8 bytes in size are returned in general
             registers r28-r29 in the same manner as other non
             floating-point objects.  The data is right-justified and
             zero-extended to 64 bits.  This is opposite to the normal
             justification used on big endian targets and requires
             special treatment.  */
          rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                       gen_rtx_REG (DImode, 28), const0_rtx);
          return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
        }
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
static rtx
function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
              int named ATTRIBUTE_UNUSED)
{
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
        return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
        alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
        return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

         Remember, gprs grow towards smaller register numbers while
         fprs grow to higher register numbers.  Also remember that
         although FP regs are 32-bit addressable, we pretend that
         the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
         treatment.  */
      if (arg_size > 1
          || mode == BLKmode
          || (type && (AGGREGATE_TYPE_P (type)
                       || TREE_CODE (type) == COMPLEX_TYPE
                       || TREE_CODE (type) == VECTOR_TYPE)))
        {
          /* Double-extended precision (80-bit), quad-precision (128-bit)
             and aggregates including complex numbers are aligned on
             128-bit boundaries.  The first eight 64-bit argument slots
             are associated one-to-one, with general registers r26
             through r19, and also with floating-point registers fr4
             through fr11.  Arguments larger than one word are always
             passed in general registers.

             Using a PARALLEL with a word mode register results in left
             justified data on a big-endian target.  */

          rtx loc[8];
          int i, offset = 0, ub = arg_size;

          /* Align the base register.  */
          gpr_reg_base -= alignment;

          ub = MIN (ub, max_arg_words - cum->words - alignment);
          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, gpr_reg_base),
                                          GEN_INT (offset));
              gpr_reg_base -= 1;
              offset += 8;
            }

          return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
        }
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
         which registers we must use.  */
      if (arg_size > 1)
        {
          if (cum->words)
            {
              gpr_reg_base = 23;
              fpr_reg_base = 38;
            }
          else
            {
              gpr_reg_base = 25;
              fpr_reg_base = 34;
            }

          /* Structures 5 to 8 bytes in size are passed in the general
             registers in the same manner as other non floating-point
             objects.  The data is right-justified and zero-extended
             to 64 bits.  This is opposite to the normal justification
             used on big endian targets and requires special treatment.
             We now define BLOCK_REG_PADDING to pad these objects.
             Aggregates, complex and vector types are passed in the same
             manner as structures.  */
          if (mode == BLKmode
              || (type && (AGGREGATE_TYPE_P (type)
                           || TREE_CODE (type) == COMPLEX_TYPE
                           || TREE_CODE (type) == VECTOR_TYPE)))
            {
              rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (DImode, gpr_reg_base),
                                           const0_rtx);
              return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
            }
        }
      else
        {
          /* We have a single word (32 bits).  A simple computation
             will get us the register #s we need.  */
          gpr_reg_base = 26 - cum->words;
          fpr_reg_base = 32 + 2 * cum->words;
        }
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
          is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
          pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
          registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
          sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
         calls with the 32 bit ABI and the HP assembler since there is no
         way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
          && !TARGET_ELF32
          && cum->incoming
          && cum->indirect
          && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
        = gen_rtx_PARALLEL
            (mode,
             gen_rtvec (2,
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, fpr_reg_base),
                                           const0_rtx),
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, gpr_reg_base),
                                           const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
          /* Indirect calls in the normal 32bit ABI require all arguments
             to be passed in general registers.  */
          || (!TARGET_PORTABLE_RUNTIME
              && !TARGET_64BIT
              && !TARGET_ELF32
              && cum->indirect)
          /* If the parameter is not a scalar floating-point parameter,
             then it belongs in GPRs.  */
          || GET_MODE_CLASS (mode) != MODE_FLOAT
          /* Structure with single SFmode field belongs in GPR.  */
          || (type && AGGREGATE_TYPE_P (type)))
        retval = gen_rtx_REG (mode, gpr_reg_base);
      else
        retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            {
              output_section_asm_op ("\t.SPACE $TEXT$\n"
                                     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
                                     "ACCESS=44,SORT=24,COMDAT");
              return;
            }
        }
      else
        {
          /* There isn't a current function or the body of the current
             function has been completed.  So, we are changing to the
             text section to output debugging information.  Thus, we
             need to forget that we are in the text section so that
             varasm.c will call us when text_section is selected again.  */
          gcc_assert (!cfun || !cfun->machine
                      || cfun->machine->in_nsubspa == 2);
          in_section = NULL;
        }
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol pointers.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol *p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif
/* Return true if a change from mode FROM to mode TO for a register
   in register class RCLASS is invalid.  */

static bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                             enum reg_class rclass)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

static bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
9837 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9840 /* Output assembler code for a block containing the constant parts
9841 of a trampoline, leaving space for the variable parts.\
9843 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9844 and then branches to the specified routine.
9846 This code template is copied from text segment to stack location
9847 and then patched with pa_trampoline_init to contain valid values,
9848 and then entered as a subroutine.
9850 It is best to keep this as small as possible to avoid having to
9851 flush multiple lines in the cache. */
9854 pa_asm_trampoline_template (FILE *f
)
9858 fputs ("\tldw 36(%r22),%r21\n", f
);
9859 fputs ("\tbb,>=,n %r21,30,.+16\n", f
);
9860 if (ASSEMBLER_DIALECT
== 0)
9861 fputs ("\tdepi 0,31,2,%r21\n", f
);
9863 fputs ("\tdepwi 0,31,2,%r21\n", f
);
9864 fputs ("\tldw 4(%r21),%r19\n", f
);
9865 fputs ("\tldw 0(%r21),%r21\n", f
);
9868 fputs ("\tbve (%r21)\n", f
);
9869 fputs ("\tldw 40(%r22),%r29\n", f
);
9870 fputs ("\t.word 0\n", f
);
9871 fputs ("\t.word 0\n", f
);
9875 fputs ("\tldsid (%r21),%r1\n", f
);
9876 fputs ("\tmtsp %r1,%sr0\n", f
);
9877 fputs ("\tbe 0(%sr0,%r21)\n", f
);
9878 fputs ("\tldw 40(%r22),%r29\n", f
);
9880 fputs ("\t.word 0\n", f
);
9881 fputs ("\t.word 0\n", f
);
9882 fputs ("\t.word 0\n", f
);
9883 fputs ("\t.word 0\n", f
);
9887 fputs ("\t.dword 0\n", f
);
9888 fputs ("\t.dword 0\n", f
);
9889 fputs ("\t.dword 0\n", f
);
9890 fputs ("\t.dword 0\n", f
);
9891 fputs ("\tmfia %r31\n", f
);
9892 fputs ("\tldd 24(%r31),%r1\n", f
);
9893 fputs ("\tldd 24(%r1),%r27\n", f
);
9894 fputs ("\tldd 16(%r1),%r1\n", f
);
9895 fputs ("\tbve (%r1)\n", f
);
9896 fputs ("\tldd 32(%r31),%r31\n", f
);
9897 fputs ("\t.dword 0 ; fptr\n", f
);
9898 fputs ("\t.dword 0 ; static link\n", f
);
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 36.
   Move the static chain value to trampoline template at offset 40.
   Move the trampoline address to trampoline template at offset 44.
   Move r19 to trampoline template at offset 48.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */

static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
                   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 36);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 40);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 44);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
         they do not accept integer displacements.  We align the
         start and end addresses to the beginning of their respective
         cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
                             GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (r_tramp, TRAMPOLINE_CODE_SIZE-1));
      emit_insn (gen_andsi3 (end_addr, tmp,
                             GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
                                    gen_reg_rtx (Pmode),
                                    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
         they do not accept integer displacements.  We align the
         start and end addresses to the beginning of their respective
         cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
                             GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (tmp, TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
                             GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
                                    gen_reg_rtx (Pmode),
                                    gen_reg_rtx (Pmode)));
    }
}
/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 44.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (addr, 46));
  return addr;
}
static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));