/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"
#include "builtins.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
                             rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        enum machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
                                                   enum machine_mode, int *,
                                                   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}* where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
          && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
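
/* A worked example of the test above (added for illustration):
   0x12345800 passes because its low 11 bits are zero and bit 31 is
   clear, so the value is unchanged by 32->64-bit sign extension and
   ldil can materialize the upper 21 bits in one instruction.
   0x12345801 fails because bit 0 is set.  */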
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
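
/* A worked example of the bit trick above (added for illustration):
   for x = 0x00f00000, a contiguous run of ones, lsb_mask is
   0x00100000 and t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1)
   = 0x00100000, a power of two, so the test succeeds.  For
   x = 0x00500005, whose set bits span more than a (shifted) 5-bit
   signed field, t = 0x00050001 and the test fails.  */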
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
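
/* A worked example of the test above (added for illustration):
   for MASK = 0x3f (0...01...1), ~MASK is ...ffc0; adding its lowest
   set bit (0x40) carries all the way out, leaving 0, which passes the
   power-of-two check.  For MASK = 0x35 (110101), ~MASK is ...ffca;
   adding 2 gives ...ffcc, and ...ffcc & ...ffcb = ...ffc8 != 0, so
   the mask is rejected.  */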
/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
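
/* For example (added for illustration), a single contiguous group
   such as 0xff00 passes: 0xff00 + 0x100 = 0x10000, a power of two.
   A value like 0xff00ff00 fails because the second group survives
   the carry, and a single depi cannot set two separate groups.  */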
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have label and not a note.  */
          if (LABEL_P (orig))
            LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}
static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
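
/* A worked example of the MODE_INT case above (added for
   illustration): for memory (X + 0x11170), the mask is 0x3fff and
   (0x11170 & 0x3fff) = 0x1170, which is below the 0x2000 halfway
   point, so we round down to Y = 0x10000.  Z = X + 0x10000 can be
   shared by CSE, and the reference becomes memory (Z + 0x1170),
   whose displacement fits in 14 bits.  */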
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by
             shadd_const, divide it and add the quotient to (reg);
             this allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}
1554 /* Emit insns to move operands[1] into operands[0].
1556 Return 1 if we have written out everything that needs to be done to
1557 do the move. Otherwise, return 0 and the caller will emit the move
1558 normally.
1560 Note SCRATCH_REG may not be in the proper mode depending on how it
1561 will be used. This routine is responsible for creating a new copy
1562 of SCRATCH_REG in the proper mode. */
1565 pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
1567 register rtx operand0 = operands[0];
1568 register rtx operand1 = operands[1];
1569 register rtx tem;
1571 /* We can only handle indexed addresses in the destination operand
1572 of floating point stores. Thus, we need to break out indexed
1573 addresses from the destination operand. */
1574 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1576 gcc_assert (can_create_pseudo_p ());
1578 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1579 operand0 = replace_equiv_address (operand0, tem);
1582 /* On targets with non-equivalent space registers, break out unscaled
1583 indexed addresses from the source operand before the final CSE.
1584 We have to do this because the REG_POINTER flag is not correctly
1585 carried through various optimization passes and CSE may substitute
1586 a pseudo without the pointer set for one with the pointer set. As
1587 a result, we loose various opportunities to create insns with
1588 unscaled indexed addresses. */
1589 if (!TARGET_NO_SPACE_REGS
1590 && !cse_not_expected
1591 && GET_CODE (operand1) == MEM
1592 && GET_CODE (XEXP (operand1, 0)) == PLUS
1593 && REG_P (XEXP (XEXP (operand1, 0), 0))
1594 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1595 operand1
1596 = replace_equiv_address (operand1,
1597 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1599 if (scratch_reg
1600 && reload_in_progress && GET_CODE (operand0) == REG
1601 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1602 operand0 = reg_equiv_mem (REGNO (operand0));
1603 else if (scratch_reg
1604 && reload_in_progress && GET_CODE (operand0) == SUBREG
1605 && GET_CODE (SUBREG_REG (operand0)) == REG
1606 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1608 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1609 the code which tracks sets/uses for delete_output_reload. */
1610 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1611 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1612 SUBREG_BYTE (operand0));
1613 operand0 = alter_subreg (&temp, true);
1616 if (scratch_reg
1617 && reload_in_progress && GET_CODE (operand1) == REG
1618 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1619 operand1 = reg_equiv_mem (REGNO (operand1));
1620 else if (scratch_reg
1621 && reload_in_progress && GET_CODE (operand1) == SUBREG
1622 && GET_CODE (SUBREG_REG (operand1)) == REG
1623 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1625 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1626 the code which tracks sets/uses for delete_output_reload. */
1627 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1628 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1629 SUBREG_BYTE (operand1));
1630 operand1 = alter_subreg (&temp, true);
1633 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1634 && ((tem = find_replacement (&XEXP (operand0, 0)))
1635 != XEXP (operand0, 0)))
1636 operand0 = replace_equiv_address (operand0, tem);
1638 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1639 && ((tem = find_replacement (&XEXP (operand1, 0)))
1640 != XEXP (operand1, 0)))
1641 operand1 = replace_equiv_address (operand1, tem);
1643 /* Handle secondary reloads for loads/stores of FP registers from
1644 REG+D addresses where D does not fit in 5 or 14 bits, including
1645 (subreg (mem (addr))) cases. */
1646 if (scratch_reg
1647 && fp_reg_operand (operand0, mode)
1648 && (MEM_P (operand1)
1649 || (GET_CODE (operand1) == SUBREG
1650 && MEM_P (XEXP (operand1, 0))))
1651 && !floating_point_store_memory_operand (operand1, mode))
1653 if (GET_CODE (operand1) == SUBREG)
1654 operand1 = XEXP (operand1, 0);
1656 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1657 it in WORD_MODE regardless of what mode it was originally given
1658 to us. */
1659 scratch_reg = force_mode (word_mode, scratch_reg);
1661 /* D might not fit in 14 bits either; for such cases load D into
1662 scratch reg. */
1663 if (reg_plus_base_memory_operand (operand1, mode)
1664 && !(TARGET_PA_20
1665 && !TARGET_ELF32
1666 && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
1668 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1669 emit_move_insn (scratch_reg,
1670 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1671 Pmode,
1672 XEXP (XEXP (operand1, 0), 0),
1673 scratch_reg));
1675 else
1676 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1677 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1678 replace_equiv_address (operand1, scratch_reg)));
1679 return 1;
1681 else if (scratch_reg
1682 && fp_reg_operand (operand1, mode)
1683 && (MEM_P (operand0)
1684 || (GET_CODE (operand0) == SUBREG
1685 && MEM_P (XEXP (operand0, 0))))
1686 && !floating_point_store_memory_operand (operand0, mode))
1688 if (GET_CODE (operand0) == SUBREG)
1689 operand0 = XEXP (operand0, 0);
1691 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1692 it in WORD_MODE regardless of what mode it was originally given
1693 to us. */
1694 scratch_reg = force_mode (word_mode, scratch_reg);
1696 /* D might not fit in 14 bits either; for such cases load D into
1697 scratch reg. */
1698 if (reg_plus_base_memory_operand (operand0, mode)
1699 && !(TARGET_PA_20
1700 && !TARGET_ELF32
1701 && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
1703 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1704 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1705 0)),
1706 Pmode,
1707 XEXP (XEXP (operand0, 0),
1709 scratch_reg));
1711 else
1712 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1713 emit_insn (gen_rtx_SET (VOIDmode,
1714 replace_equiv_address (operand0, scratch_reg),
1715 operand1));
1716 return 1;
1718 /* Handle secondary reloads for loads of FP registers from constant
1719 expressions by forcing the constant into memory. For the most part,
1720 this is only necessary for SImode and DImode.
1722 Use scratch_reg to hold the address of the memory location. */
1723 else if (scratch_reg
1724 && CONSTANT_P (operand1)
1725 && fp_reg_operand (operand0, mode))
1727 rtx const_mem, xoperands[2];
1729 if (operand1 == CONST0_RTX (mode))
1731 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1732 return 1;
1735 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1736 it in WORD_MODE regardless of what mode it was originally given
1737 to us. */
1738 scratch_reg = force_mode (word_mode, scratch_reg);
1740 /* Force the constant into memory and put the address of the
1741 memory location into scratch_reg. */
1742 const_mem = force_const_mem (mode, operand1);
1743 xoperands[0] = scratch_reg;
1744 xoperands[1] = XEXP (const_mem, 0);
1745 pa_emit_move_sequence (xoperands, Pmode, 0);
1747 /* Now load the destination register. */
1748 emit_insn (gen_rtx_SET (mode, operand0,
1749 replace_equiv_address (const_mem, scratch_reg)));
1750 return 1;
1752 /* Handle secondary reloads for SAR. These occur when trying to load
1753 the SAR from memory or a constant. */
1754 else if (scratch_reg
1755 && GET_CODE (operand0) == REG
1756 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1757 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1758 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1760 /* D might not fit in 14 bits either; for such cases load D into
1761 scratch reg. */
1762 if (GET_CODE (operand1) == MEM
1763 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1765 /* We are reloading the address into the scratch register, so we
1766 want to make sure the scratch register is a full register. */
1767 scratch_reg = force_mode (word_mode, scratch_reg);
1769 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1770 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1771 0)),
1772 Pmode,
1773 XEXP (XEXP (operand1, 0),
1775 scratch_reg));
1777 /* Now we are going to load the scratch register from memory,
1778 we want to load it in the same width as the original MEM,
1779 which must be the same as the width of the ultimate destination,
1780 OPERAND0. */
1781 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1783 emit_move_insn (scratch_reg,
1784 replace_equiv_address (operand1, scratch_reg));
1786 else
1788 /* We want to load the scratch register using the same mode as
1789 the ultimate destination. */
1790 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1792 emit_move_insn (scratch_reg, operand1);
1795 /* And emit the insn to set the ultimate destination. We know that
1796 the scratch register has the same mode as the destination at this
1797 point. */
1798 emit_move_insn (operand0, scratch_reg);
1799 return 1;
1801 /* Handle the most common case: storing into a register. */
1802 else if (register_operand (operand0, mode))
1804 /* Legitimize TLS symbol references. This happens for references
1805 that aren't a legitimate constant. */
1806 if (PA_SYMBOL_REF_TLS_P (operand1))
1807 operand1 = legitimize_tls_address (operand1);
1809 if (register_operand (operand1, mode)
1810 || (GET_CODE (operand1) == CONST_INT
1811 && pa_cint_ok_for_move (INTVAL (operand1)))
1812 || (operand1 == CONST0_RTX (mode))
1813 || (GET_CODE (operand1) == HIGH
1814 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1815 /* Only `general_operands' can come here, so MEM is ok. */
1816 || GET_CODE (operand1) == MEM)
1818 /* Various sets are created during RTL generation which don't
1819 have the REG_POINTER flag correctly set. After the CSE pass,
1820 instruction recognition can fail if we don't consistently
1821 set this flag when performing register copies. This should
1822 also improve the opportunities for creating insns that use
1823 unscaled indexing. */
1824 if (REG_P (operand0) && REG_P (operand1))
1826 if (REG_POINTER (operand1)
1827 && !REG_POINTER (operand0)
1828 && !HARD_REGISTER_P (operand0))
1829 copy_reg_pointer (operand0, operand1);
1832 /* When MEMs are broken out, the REG_POINTER flag doesn't
1833 get set. In some cases, we can set the REG_POINTER flag
1834 from the declaration for the MEM. */
1835 if (REG_P (operand0)
1836 && GET_CODE (operand1) == MEM
1837 && !REG_POINTER (operand0))
1839 tree decl = MEM_EXPR (operand1);
1841 /* Set the register pointer flag and register alignment
1842 if the declaration for this memory reference is a
1843 pointer type. */
1844 if (decl)
1846 tree type;
1848 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1849 tree operand 1. */
1850 if (TREE_CODE (decl) == COMPONENT_REF)
1851 decl = TREE_OPERAND (decl, 1);
1853 type = TREE_TYPE (decl);
1854 type = strip_array_types (type);
1856 if (POINTER_TYPE_P (type))
1858 int align;
1860 type = TREE_TYPE (type);
1861 /* Using TYPE_ALIGN_OK is rather conservative as
1862 only the Ada front end actually sets it. */
1863 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1864 : BITS_PER_UNIT);
1865 mark_reg_pointer (operand0, align);
1870 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1871 return 1;
1874 else if (GET_CODE (operand0) == MEM)
1876 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1877 && !(reload_in_progress || reload_completed))
1879 rtx temp = gen_reg_rtx (DFmode);
1881 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1882 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1883 return 1;
1885 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1887 /* Run this case quickly. */
1888 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1889 return 1;
1891 if (! (reload_in_progress || reload_completed))
1893 operands[0] = validize_mem (operand0);
1894 operands[1] = operand1 = force_reg (mode, operand1);
1898 /* Simplify the source if we need to.
1899 Note we do have to handle function labels here, even though we do
1900 not consider them legitimate constants. Loop optimizations can
1901 call the emit_move_xxx with one as a source. */
1902 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1903 || (GET_CODE (operand1) == HIGH
1904 && symbolic_operand (XEXP (operand1, 0), mode))
1905 || function_label_operand (operand1, VOIDmode)
1906 || tls_referenced_p (operand1))
1908 int ishighonly = 0;
1910 if (GET_CODE (operand1) == HIGH)
1912 ishighonly = 1;
1913 operand1 = XEXP (operand1, 0);
1915 if (symbolic_operand (operand1, mode))
1917 /* Argh. The assembler and linker can't handle arithmetic
1918 involving plabels.
1920 So we force the plabel into memory, load operand0 from
1921 the memory location, then add in the constant part. */
1922 if ((GET_CODE (operand1) == CONST
1923 && GET_CODE (XEXP (operand1, 0)) == PLUS
1924 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1925 VOIDmode))
1926 || function_label_operand (operand1, VOIDmode))
1928 rtx temp, const_part;
1930 /* Figure out what (if any) scratch register to use. */
1931 if (reload_in_progress || reload_completed)
1933 scratch_reg = scratch_reg ? scratch_reg : operand0;
1934 /* SCRATCH_REG will hold an address and maybe the actual
1935 data. We want it in WORD_MODE regardless of what mode it
1936 was originally given to us. */
1937 scratch_reg = force_mode (word_mode, scratch_reg);
1939 else if (flag_pic)
1940 scratch_reg = gen_reg_rtx (Pmode);
1942 if (GET_CODE (operand1) == CONST)
1944 /* Save away the constant part of the expression. */
1945 const_part = XEXP (XEXP (operand1, 0), 1);
1946 gcc_assert (GET_CODE (const_part) == CONST_INT);
1948 /* Force the function label into memory. */
1949 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1951 else
1953 /* No constant part. */
1954 const_part = NULL_RTX;
1956 /* Force the function label into memory. */
1957 temp = force_const_mem (mode, operand1);
1961 /* Get the address of the memory location. PIC-ify it if
1962 necessary. */
1963 temp = XEXP (temp, 0);
1964 if (flag_pic)
1965 temp = legitimize_pic_address (temp, mode, scratch_reg);
1967 /* Put the address of the memory location into our destination
1968 register. */
1969 operands[1] = temp;
1970 pa_emit_move_sequence (operands, mode, scratch_reg);
1972 /* Now load from the memory location into our destination
1973 register. */
1974 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1975 pa_emit_move_sequence (operands, mode, scratch_reg);
1977 /* And add back in the constant part. */
1978 if (const_part != NULL_RTX)
1979 expand_inc (operand0, const_part);
1981 return 1;
1984 if (flag_pic)
1986 rtx temp;
1988 if (reload_in_progress || reload_completed)
1990 temp = scratch_reg ? scratch_reg : operand0;
1991 /* TEMP will hold an address and maybe the actual
1992 data. We want it in WORD_MODE regardless of what mode it
1993 was originally given to us. */
1994 temp = force_mode (word_mode, temp);
1996 else
1997 temp = gen_reg_rtx (Pmode);
1999 /* (const (plus (symbol) (const_int))) must be forced to
2000 memory during/after reload if the const_int will not fit
2001 in 14 bits. */
2002 if (GET_CODE (operand1) == CONST
2003 && GET_CODE (XEXP (operand1, 0)) == PLUS
2004 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2005 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2006 && (reload_completed || reload_in_progress)
2007 && flag_pic)
2009 rtx const_mem = force_const_mem (mode, operand1);
2010 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2011 mode, temp);
2012 operands[1] = replace_equiv_address (const_mem, operands[1]);
2013 pa_emit_move_sequence (operands, mode, temp);
2015 else
2017 operands[1] = legitimize_pic_address (operand1, mode, temp);
2018 if (REG_P (operand0) && REG_P (operands[1]))
2019 copy_reg_pointer (operand0, operands[1]);
2020 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2023 /* On the HPPA, references to data space are supposed to use dp,
2024 register 27, but showing it in the RTL inhibits various cse
2025 and loop optimizations. */
2026 else
2028 rtx temp, set;
2030 if (reload_in_progress || reload_completed)
2032 temp = scratch_reg ? scratch_reg : operand0;
2033 /* TEMP will hold an address and maybe the actual
2034 data. We want it in WORD_MODE regardless of what mode it
2035 was originally given to us. */
2036 temp = force_mode (word_mode, temp);
2038 else
2039 temp = gen_reg_rtx (mode);
2041 /* Loading a SYMBOL_REF into a register makes that register
2042 safe to be used as the base in an indexed address.
2044 Don't mark hard registers though. That loses. */
2045 if (GET_CODE (operand0) == REG
2046 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2047 mark_reg_pointer (operand0, BITS_PER_UNIT);
2048 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2049 mark_reg_pointer (temp, BITS_PER_UNIT);
2051 if (ishighonly)
2052 set = gen_rtx_SET (mode, operand0, temp);
2053 else
2054 set = gen_rtx_SET (VOIDmode,
2055 operand0,
2056 gen_rtx_LO_SUM (mode, temp, operand1));
2058 emit_insn (gen_rtx_SET (VOIDmode,
2059 temp,
2060 gen_rtx_HIGH (mode, operand1)));
2061 emit_insn (set);
2064 return 1;
2066 else if (tls_referenced_p (operand1))
2068 rtx tmp = operand1;
2069 rtx addend = NULL;
2071 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2073 addend = XEXP (XEXP (tmp, 0), 1);
2074 tmp = XEXP (XEXP (tmp, 0), 0);
2077 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2078 tmp = legitimize_tls_address (tmp);
2079 if (addend)
2081 tmp = gen_rtx_PLUS (mode, tmp, addend);
2082 tmp = force_operand (tmp, operands[0]);
2084 operands[1] = tmp;
2086 else if (GET_CODE (operand1) != CONST_INT
2087 || !pa_cint_ok_for_move (INTVAL (operand1)))
2089 rtx insn, temp;
2090 rtx op1 = operand1;
2091 HOST_WIDE_INT value = 0;
2092 HOST_WIDE_INT insv = 0;
2093 int insert = 0;
2095 if (GET_CODE (operand1) == CONST_INT)
2096 value = INTVAL (operand1);
2098 if (TARGET_64BIT
2099 && GET_CODE (operand1) == CONST_INT
2100 && HOST_BITS_PER_WIDE_INT > 32
2101 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2103 HOST_WIDE_INT nval;
2105 /* Extract the low order 32 bits of the value and sign extend.
2106 If the new value is the same as the original value, we can
2107 use the original value as-is. If the new value is
2108 different, we use it and insert the most-significant 32-bits
2109 of the original value into the final result. */
2110 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2111 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2112 if (value != nval)
2114 #if HOST_BITS_PER_WIDE_INT > 32
2115 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2116 #endif
2117 insert = 1;
2118 value = nval;
2119 operand1 = GEN_INT (nval);
2123 if (reload_in_progress || reload_completed)
2124 temp = scratch_reg ? scratch_reg : operand0;
2125 else
2126 temp = gen_reg_rtx (mode);
2128 /* We don't directly split DImode constants on 32-bit targets
2129 because PLUS uses an 11-bit immediate and the insn sequence
2130 generated is not as efficient as the one using HIGH/LO_SUM. */
2131 if (GET_CODE (operand1) == CONST_INT
2132 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2133 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2134 && !insert)
2136 /* Directly break constant into high and low parts. This
2137 provides better optimization opportunities because various
2138 passes recognize constants split with PLUS but not LO_SUM.
2139 We use a 14-bit signed low part except when the addition
2140 of 0x4000 to the high part might change the sign of the
2141 high part. */
2142 HOST_WIDE_INT low = value & 0x3fff;
2143 HOST_WIDE_INT high = value & ~ 0x3fff;
2145 if (low >= 0x2000)
2147 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2148 high += 0x2000;
2149 else
2150 high += 0x4000;
2153 low = value - high;
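/* Worked example (illustrative): for value = 0x12345, low is 0x2345,
   which is >= 0x2000, so high becomes 0x10000 + 0x4000 = 0x14000 and
   low becomes 0x12345 - 0x14000 = -7355, a valid 14-bit signed value.  */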
2155 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2156 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2158 else
2160 emit_insn (gen_rtx_SET (VOIDmode, temp,
2161 gen_rtx_HIGH (mode, operand1)));
2162 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2165 insn = emit_move_insn (operands[0], operands[1]);
2167 /* Now insert the most significant 32 bits of the value
2168 into the register. When we don't have a second register
2169 available, it could take up to nine instructions to load
2170 a 64-bit integer constant. Prior to reload, we force
2171 constants that would take more than three instructions
2172 to load to the constant pool. During and after reload,
2173 we have to handle all possible values. */
2174 if (insert)
2176 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2177 register and the value to be inserted is outside the
2178 range that can be loaded with three depdi instructions. */
2179 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2181 operand1 = GEN_INT (insv);
2183 emit_insn (gen_rtx_SET (VOIDmode, temp,
2184 gen_rtx_HIGH (mode, operand1)));
2185 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2186 if (mode == DImode)
2187 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2188 const0_rtx, temp));
2189 else
2190 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2191 const0_rtx, temp));
2193 else
2195 int len = 5, pos = 27;
2197 /* Insert the bits using the depdi instruction. */
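/* Illustrative trace: if INSV is -1 (upper 32 bits all ones), the
   first 5-bit chunk sign-extends to v5 = -1, the left-extension loop
   absorbs the remaining 27 sign bits (len becomes 32, pos becomes 0),
   and a single depdi deposit covers the whole 32-bit field.  */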
2198 while (pos >= 0)
2200 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2201 HOST_WIDE_INT sign = v5 < 0;
2203 /* Left extend the insertion. */
2204 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2205 while (pos > 0 && (insv & 1) == sign)
2207 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2208 len += 1;
2209 pos -= 1;
2212 if (mode == DImode)
2213 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2214 GEN_INT (pos), GEN_INT (v5)));
2215 else
2216 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2217 GEN_INT (pos), GEN_INT (v5)));
2219 len = pos > 0 && pos < 5 ? pos : 5;
2220 pos -= len;
2225 set_unique_reg_note (insn, REG_EQUAL, op1);
2227 return 1;
2230 /* Now have insn-emit do whatever it normally does. */
2231 return 0;
2234 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2235 it will need a link/runtime reloc). */
2237 int
2238 pa_reloc_needed (tree exp)
2240 int reloc = 0;
2242 switch (TREE_CODE (exp))
2244 case ADDR_EXPR:
2245 return 1;
2247 case POINTER_PLUS_EXPR:
2248 case PLUS_EXPR:
2249 case MINUS_EXPR:
2250 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2251 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2252 break;
2254 CASE_CONVERT:
2255 case NON_LVALUE_EXPR:
2256 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2257 break;
2259 case CONSTRUCTOR:
2261 tree value;
2262 unsigned HOST_WIDE_INT ix;
2264 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2265 if (value)
2266 reloc |= pa_reloc_needed (value);
2268 break;
2270 case ERROR_MARK:
2271 break;
2273 default:
2274 break;
2276 return reloc;
2280 /* Return the best assembler insn template
2281 for moving operands[1] into operands[0] as a fullword. */
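/* Illustrative constant cases: 42 fits in 14 bits and yields
   "ldi 42,%0"; 0x40000000 has its low 11 bits clear and yields
   "ldil L'0x40000000,%0"; a constant that is neither of these and is
   not a zdepi candidate needs the two-insn ldil/ldo sequence.  */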
2282 const char *
2283 pa_singlemove_string (rtx *operands)
2285 HOST_WIDE_INT intval;
2287 if (GET_CODE (operands[0]) == MEM)
2288 return "stw %r1,%0";
2289 if (GET_CODE (operands[1]) == MEM)
2290 return "ldw %1,%0";
2291 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2293 long i;
2294 REAL_VALUE_TYPE d;
2296 gcc_assert (GET_MODE (operands[1]) == SFmode);
2298 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2299 bit pattern. */
2300 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2301 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2303 operands[1] = GEN_INT (i);
2304 /* Fall through to CONST_INT case. */
2306 if (GET_CODE (operands[1]) == CONST_INT)
2308 intval = INTVAL (operands[1]);
2310 if (VAL_14_BITS_P (intval))
2311 return "ldi %1,%0";
2312 else if ((intval & 0x7ff) == 0)
2313 return "ldil L'%1,%0";
2314 else if (pa_zdepi_cint_p (intval))
2315 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2316 else
2317 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2319 return "copy %1,%0";
2323 /* Compute position (in OP[1]) and width (in OP[2])
2324 useful for copying IMM to a register using the zdepi
2325 instructions. Store the immediate value to insert in OP[0]. */
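/* A minimal usage sketch (hypothetical values):

     unsigned op[3];
     compute_zdepwi_operands (0x3e0, op);

   IMM = 0x3e0 has bits 5..9 set, so the least significant set bit is
   5 and the shifted field is 0x1f, giving op[0] = -1, op[1] = 26 and
   op[2] = 5; the resulting "zdepi -1,26,5" deposits five one-bits at
   that position.  */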
2326 static void
2327 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2329 int lsb, len;
2331 /* Find the least significant set bit in IMM. */
2332 for (lsb = 0; lsb < 32; lsb++)
2334 if ((imm & 1) != 0)
2335 break;
2336 imm >>= 1;
2339 /* Choose variants based on *sign* of the 5-bit field. */
2340 if ((imm & 0x10) == 0)
2341 len = (lsb <= 28) ? 4 : 32 - lsb;
2342 else
2344 /* Find the width of the bitstring in IMM. */
2345 for (len = 5; len < 32 - lsb; len++)
2347 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2348 break;
2351 /* Sign extend IMM as a 5-bit value. */
2352 imm = (imm & 0xf) - 0x10;
2355 op[0] = imm;
2356 op[1] = 31 - lsb;
2357 op[2] = len;
2360 /* Compute position (in OP[1]) and width (in OP[2])
2361 useful for copying IMM to a register using the depdi,z
2362 instructions. Store the immediate value to insert in OP[0]. */
2364 static void
2365 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2367 int lsb, len, maxlen;
2369 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2371 /* Find the least significant set bit in IMM. */
2372 for (lsb = 0; lsb < maxlen; lsb++)
2374 if ((imm & 1) != 0)
2375 break;
2376 imm >>= 1;
2379 /* Choose variants based on *sign* of the 5-bit field. */
2380 if ((imm & 0x10) == 0)
2381 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2382 else
2384 /* Find the width of the bitstring in IMM. */
2385 for (len = 5; len < maxlen - lsb; len++)
2387 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2388 break;
2391 /* Extend length if host is narrow and IMM is negative. */
2392 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2393 len += 32;
2395 /* Sign extend IMM as a 5-bit value. */
2396 imm = (imm & 0xf) - 0x10;
2399 op[0] = imm;
2400 op[1] = 63 - lsb;
2401 op[2] = len;
2404 /* Output assembler code to perform a doubleword move insn
2405 with operands OPERANDS. */
2407 const char *
2408 pa_output_move_double (rtx *operands)
2410 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2411 rtx latehalf[2];
2412 rtx addreg0 = 0, addreg1 = 0;
2414 /* First classify both operands. */
2416 if (REG_P (operands[0]))
2417 optype0 = REGOP;
2418 else if (offsettable_memref_p (operands[0]))
2419 optype0 = OFFSOP;
2420 else if (GET_CODE (operands[0]) == MEM)
2421 optype0 = MEMOP;
2422 else
2423 optype0 = RNDOP;
2425 if (REG_P (operands[1]))
2426 optype1 = REGOP;
2427 else if (CONSTANT_P (operands[1]))
2428 optype1 = CNSTOP;
2429 else if (offsettable_memref_p (operands[1]))
2430 optype1 = OFFSOP;
2431 else if (GET_CODE (operands[1]) == MEM)
2432 optype1 = MEMOP;
2433 else
2434 optype1 = RNDOP;
2436 /* Check for the cases that the operand constraints are not
2437 supposed to allow to happen. */
2438 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2440 /* Handle copies between general and floating registers. */
2442 if (optype0 == REGOP && optype1 == REGOP
2443 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2445 if (FP_REG_P (operands[0]))
2447 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2448 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2449 return "{fldds|fldd} -16(%%sp),%0";
2451 else
2453 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2454 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2455 return "{ldws|ldw} -12(%%sp),%R0";
2459 /* Handle auto decrementing and incrementing loads and stores
2460 specifically, since the structure of the function doesn't work
2461 for them without major modification. Do it better once we teach
2462 this port about the general inc/dec addressing of PA.
2463 (This was written by tege. Chide him if it doesn't work.) */
2465 if (optype0 == MEMOP)
2467 /* We have to output the address syntax ourselves, since print_operand
2468 doesn't deal with the addresses we want to use. Fix this later. */
2470 rtx addr = XEXP (operands[0], 0);
2471 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2473 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2475 operands[0] = XEXP (addr, 0);
2476 gcc_assert (GET_CODE (operands[1]) == REG
2477 && GET_CODE (operands[0]) == REG);
2479 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2481 /* No overlap between high target register and address
2482 register. (We do this in a non-obvious way to
2483 save a register file writeback) */
2484 if (GET_CODE (addr) == POST_INC)
2485 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2486 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2488 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2490 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2492 operands[0] = XEXP (addr, 0);
2493 gcc_assert (GET_CODE (operands[1]) == REG
2494 && GET_CODE (operands[0]) == REG);
2496 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2497 /* No overlap between high target register and address
2498 register. (We do this in a non-obvious way to save a
2499 register file writeback) */
2500 if (GET_CODE (addr) == PRE_INC)
2501 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2502 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2505 if (optype1 == MEMOP)
2507 /* We have to output the address syntax ourselves, since print_operand
2508 doesn't deal with the addresses we want to use. Fix this later. */
2510 rtx addr = XEXP (operands[1], 0);
2511 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2513 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2515 operands[1] = XEXP (addr, 0);
2516 gcc_assert (GET_CODE (operands[0]) == REG
2517 && GET_CODE (operands[1]) == REG);
2519 if (!reg_overlap_mentioned_p (high_reg, addr))
2521 /* No overlap between high target register and address
2522 register. (We do this in a non-obvious way to
2523 save a register file writeback) */
2524 if (GET_CODE (addr) == POST_INC)
2525 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2526 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2528 else
2530 /* This is an undefined situation. We should load into the
2531 address register *and* update that register. Probably
2532 we don't need to handle this at all. */
2533 if (GET_CODE (addr) == POST_INC)
2534 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2535 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2538 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2540 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2542 operands[1] = XEXP (addr, 0);
2543 gcc_assert (GET_CODE (operands[0]) == REG
2544 && GET_CODE (operands[1]) == REG);
2546 if (!reg_overlap_mentioned_p (high_reg, addr))
2548 /* No overlap between high target register and address
2549 register. (We do this in a non-obvious way to
2550 save a register file writeback) */
2551 if (GET_CODE (addr) == PRE_INC)
2552 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2553 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2555 else
2557 /* This is an undefined situation. We should load into the
2558 address register *and* update that register. Probably
2559 we don't need to handle this at all. */
2560 if (GET_CODE (addr) == PRE_INC)
2561 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2562 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2565 else if (GET_CODE (addr) == PLUS
2566 && GET_CODE (XEXP (addr, 0)) == MULT)
2568 rtx xoperands[4];
2569 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2571 if (!reg_overlap_mentioned_p (high_reg, addr))
2573 xoperands[0] = high_reg;
2574 xoperands[1] = XEXP (addr, 1);
2575 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2576 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2577 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2578 xoperands);
2579 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2581 else
2583 xoperands[0] = high_reg;
2584 xoperands[1] = XEXP (addr, 1);
2585 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2586 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2587 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2588 xoperands);
2589 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2594 /* If an operand is an unoffsettable memory ref, find a register
2595 we can increment temporarily to make it refer to the second word. */
2597 if (optype0 == MEMOP)
2598 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2600 if (optype1 == MEMOP)
2601 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2603 /* Ok, we can do one word at a time.
2604 Normally we do the low-numbered word first.
2606 In either case, set up in LATEHALF the operands to use
2607 for the high-numbered word and in some cases alter the
2608 operands in OPERANDS to be suitable for the low-numbered word. */
2610 if (optype0 == REGOP)
2611 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2612 else if (optype0 == OFFSOP)
2613 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2614 else
2615 latehalf[0] = operands[0];
2617 if (optype1 == REGOP)
2618 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2619 else if (optype1 == OFFSOP)
2620 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2621 else if (optype1 == CNSTOP)
2622 split_double (operands[1], &operands[1], &latehalf[1]);
2623 else
2624 latehalf[1] = operands[1];
2626 /* If the first move would clobber the source of the second one,
2627 do them in the other order.
2629 This can happen in two cases:
2631 mem -> register where the first half of the destination register
2632 is the same register used in the memory's address. Reload
2633 can create such insns.
2635 mem in this case will be either register indirect or register
2636 indirect plus a valid offset.
2638 register -> register move where REGNO(dst) == REGNO(src + 1)
2639 someone (Tim/Tege?) claimed this can happen for parameter loads.
2641 Handle mem -> register case first. */
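/* Illustrative case: loading the pair %r4/%r5 from 0(%r4) must not
   clobber %r4 first, so the late half (ldw 4(%r4),%r5) is emitted
   before the low word (ldw 0(%r4),%r4).  */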
2642 if (optype0 == REGOP
2643 && (optype1 == MEMOP || optype1 == OFFSOP)
2644 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2645 operands[1], 0))
2647 /* Do the late half first. */
2648 if (addreg1)
2649 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2650 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2652 /* Then clobber. */
2653 if (addreg1)
2654 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2655 return pa_singlemove_string (operands);
2658 /* Now handle register -> register case. */
2659 if (optype0 == REGOP && optype1 == REGOP
2660 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2662 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2663 return pa_singlemove_string (operands);
2666 /* Normal case: do the two words, low-numbered first. */
2668 output_asm_insn (pa_singlemove_string (operands), operands);
2670 /* Make any unoffsettable addresses point at high-numbered word. */
2671 if (addreg0)
2672 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2673 if (addreg1)
2674 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2676 /* Do that word. */
2677 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2679 /* Undo the adds we just did. */
2680 if (addreg0)
2681 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2682 if (addreg1)
2683 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2685 return "";
2688 const char *
2689 pa_output_fp_move_double (rtx *operands)
2691 if (FP_REG_P (operands[0]))
2693 if (FP_REG_P (operands[1])
2694 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2695 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2696 else
2697 output_asm_insn ("fldd%F1 %1,%0", operands);
2699 else if (FP_REG_P (operands[1]))
2701 output_asm_insn ("fstd%F0 %1,%0", operands);
2703 else
2705 rtx xoperands[2];
2707 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2709 /* This is a pain. You have to be prepared to deal with an
2710 arbitrary address here including pre/post increment/decrement.
2712 So avoid this in the MD. */
2713 gcc_assert (GET_CODE (operands[0]) == REG);
2715 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2716 xoperands[0] = operands[0];
2717 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2719 return "";
2722 /* Return a REG that occurs in ADDR with coefficient 1.
2723 ADDR can be effectively incremented by incrementing REG. */
2725 static rtx
2726 find_addr_reg (rtx addr)
2728 while (GET_CODE (addr) == PLUS)
2730 if (GET_CODE (XEXP (addr, 0)) == REG)
2731 addr = XEXP (addr, 0);
2732 else if (GET_CODE (XEXP (addr, 1)) == REG)
2733 addr = XEXP (addr, 1);
2734 else if (CONSTANT_P (XEXP (addr, 0)))
2735 addr = XEXP (addr, 1);
2736 else if (CONSTANT_P (XEXP (addr, 1)))
2737 addr = XEXP (addr, 0);
2738 else
2739 gcc_unreachable ();
2741 gcc_assert (GET_CODE (addr) == REG);
2742 return addr;
2745 /* Emit code to perform a block move.
2747 OPERANDS[0] is the destination pointer as a REG, clobbered.
2748 OPERANDS[1] is the source pointer as a REG, clobbered.
2749 OPERANDS[2] is a register for temporary storage.
2750 OPERANDS[3] is a register for temporary storage.
2751 OPERANDS[4] is the size as a CONST_INT.
2752 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2753 OPERANDS[6] is another temporary register. */
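/* Illustrative sketch for align = 4 and n_bytes = 23: the counter is
   preset to 15 and the unrolled loop copies 16 bytes in two passes
   (the trailing store sits in the addib delay slot), then the
   residual code moves one more word and finishes with a 3-byte
   "stby,e %6,3(%0)".  */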
2755 const char *
2756 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2758 int align = INTVAL (operands[5]);
2759 unsigned long n_bytes = INTVAL (operands[4]);
2761 /* We can't move more than a word at a time because the PA
2762 has no integer move insns longer than a word. (Could use fp mem ops?) */
2763 if (align > (TARGET_64BIT ? 8 : 4))
2764 align = (TARGET_64BIT ? 8 : 4);
2766 /* Note that we know each loop below will execute at least twice
2767 (else we would have open-coded the copy). */
2768 switch (align)
2770 case 8:
2771 /* Pre-adjust the loop counter. */
2772 operands[4] = GEN_INT (n_bytes - 16);
2773 output_asm_insn ("ldi %4,%2", operands);
2775 /* Copying loop. */
2776 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2777 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2778 output_asm_insn ("std,ma %3,8(%0)", operands);
2779 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2780 output_asm_insn ("std,ma %6,8(%0)", operands);
2782 /* Handle the residual. There could be up to 7 bytes of
2783 residual to copy! */
2784 if (n_bytes % 16 != 0)
2786 operands[4] = GEN_INT (n_bytes % 8);
2787 if (n_bytes % 16 >= 8)
2788 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2789 if (n_bytes % 8 != 0)
2790 output_asm_insn ("ldd 0(%1),%6", operands);
2791 if (n_bytes % 16 >= 8)
2792 output_asm_insn ("std,ma %3,8(%0)", operands);
2793 if (n_bytes % 8 != 0)
2794 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2796 return "";
2798 case 4:
2799 /* Pre-adjust the loop counter. */
2800 operands[4] = GEN_INT (n_bytes - 8);
2801 output_asm_insn ("ldi %4,%2", operands);
2803 /* Copying loop. */
2804 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2805 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2806 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2807 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2808 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2810 /* Handle the residual. There could be up to 7 bytes of
2811 residual to copy! */
2812 if (n_bytes % 8 != 0)
2814 operands[4] = GEN_INT (n_bytes % 4);
2815 if (n_bytes % 8 >= 4)
2816 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2817 if (n_bytes % 4 != 0)
2818 output_asm_insn ("ldw 0(%1),%6", operands);
2819 if (n_bytes % 8 >= 4)
2820 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2821 if (n_bytes % 4 != 0)
2822 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2824 return "";
2826 case 2:
2827 /* Pre-adjust the loop counter. */
2828 operands[4] = GEN_INT (n_bytes - 4);
2829 output_asm_insn ("ldi %4,%2", operands);
2831 /* Copying loop. */
2832 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2833 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2834 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2835 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2836 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2838 /* Handle the residual. */
2839 if (n_bytes % 4 != 0)
2841 if (n_bytes % 4 >= 2)
2842 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2843 if (n_bytes % 2 != 0)
2844 output_asm_insn ("ldb 0(%1),%6", operands);
2845 if (n_bytes % 4 >= 2)
2846 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2847 if (n_bytes % 2 != 0)
2848 output_asm_insn ("stb %6,0(%0)", operands);
2850 return "";
2852 case 1:
2853 /* Pre-adjust the loop counter. */
2854 operands[4] = GEN_INT (n_bytes - 2);
2855 output_asm_insn ("ldi %4,%2", operands);
2857 /* Copying loop. */
2858 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2859 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2860 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2861 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2862 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2864 /* Handle the residual. */
2865 if (n_bytes % 2 != 0)
2867 output_asm_insn ("ldb 0(%1),%3", operands);
2868 output_asm_insn ("stb %3,0(%0)", operands);
2870 return "";
2872 default:
2873 gcc_unreachable ();
2877 /* Count the number of insns necessary to handle this block move.
2879 Basic structure is the same as emit_block_move, except that we
2880 count insns rather than emit them. */
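/* For instance (illustrative), align = 4 and n_bytes = 23 gives the
   6-insn loop plus 2 insns for the word residual (23 % 8 >= 4) and
   2 more for the sub-word residual (23 % 4 != 0): 10 insns, i.e. a
   length of 40 bytes.  */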
2882 static int
2883 compute_movmem_length (rtx insn)
2885 rtx pat = PATTERN (insn);
2886 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2887 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2888 unsigned int n_insns = 0;
2890 /* We can't move more than a word at a time because the PA
2891 has no integer move insns longer than a word. (Could use fp mem ops?) */
2892 if (align > (TARGET_64BIT ? 8 : 4))
2893 align = (TARGET_64BIT ? 8 : 4);
2895 /* The basic copying loop. */
2896 n_insns = 6;
2898 /* Residuals. */
2899 if (n_bytes % (2 * align) != 0)
2901 if ((n_bytes % (2 * align)) >= align)
2902 n_insns += 2;
2904 if ((n_bytes % align) != 0)
2905 n_insns += 2;
2908 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2909 return n_insns * 4;
2912 /* Emit code to perform a block clear.
2914 OPERANDS[0] is the destination pointer as a REG, clobbered.
2915 OPERANDS[1] is a register for temporary storage.
2916 OPERANDS[2] is the size as a CONST_INT.
2917 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2919 const char *
2920 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2922 int align = INTVAL (operands[3]);
2923 unsigned long n_bytes = INTVAL (operands[2]);
2925 /* We can't clear more than a word at a time because the PA
2926 has no integer move insns longer than a word. */
2927 if (align > (TARGET_64BIT ? 8 : 4))
2928 align = (TARGET_64BIT ? 8 : 4);
2930 /* Note that we know each loop below will execute at least twice
2931 (else we would have open-coded the clear). */
2932 switch (align)
2934 case 8:
2935 /* Pre-adjust the loop counter. */
2936 operands[2] = GEN_INT (n_bytes - 16);
2937 output_asm_insn ("ldi %2,%1", operands);
2939 /* Loop. */
2940 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2941 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2942 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2944 /* Handle the residual. There could be up to 7 bytes of
2945 residual to copy! */
2946 if (n_bytes % 16 != 0)
2948 operands[2] = GEN_INT (n_bytes % 8);
2949 if (n_bytes % 16 >= 8)
2950 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2951 if (n_bytes % 8 != 0)
2952 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2954 return "";
2956 case 4:
2957 /* Pre-adjust the loop counter. */
2958 operands[2] = GEN_INT (n_bytes - 8);
2959 output_asm_insn ("ldi %2,%1", operands);
2961 /* Loop. */
2962 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2963 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2964 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2966 /* Handle the residual. There could be up to 7 bytes of
2967 residual to copy! */
2968 if (n_bytes % 8 != 0)
2970 operands[2] = GEN_INT (n_bytes % 4);
2971 if (n_bytes % 8 >= 4)
2972 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2973 if (n_bytes % 4 != 0)
2974 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2976 return "";
2978 case 2:
2979 /* Pre-adjust the loop counter. */
2980 operands[2] = GEN_INT (n_bytes - 4);
2981 output_asm_insn ("ldi %2,%1", operands);
2983 /* Loop. */
2984 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2985 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2986 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2988 /* Handle the residual. */
2989 if (n_bytes % 4 != 0)
2991 if (n_bytes % 4 >= 2)
2992 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2993 if (n_bytes % 2 != 0)
2994 output_asm_insn ("stb %%r0,0(%0)", operands);
2996 return "";
2998 case 1:
2999 /* Pre-adjust the loop counter. */
3000 operands[2] = GEN_INT (n_bytes - 2);
3001 output_asm_insn ("ldi %2,%1", operands);
3003 /* Loop. */
3004 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3005 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3006 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3008 /* Handle the residual. */
3009 if (n_bytes % 2 != 0)
3010 output_asm_insn ("stb %%r0,0(%0)", operands);
3012 return "";
3014 default:
3015 gcc_unreachable ();
3019 /* Count the number of insns necessary to handle this block clear.
3021 Basic structure is the same as emit_block_move, except that we
3022 count insns rather than emit them. */
3024 static int
3025 compute_clrmem_length (rtx insn)
3027 rtx pat = PATTERN (insn);
3028 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3029 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3030 unsigned int n_insns = 0;
3032 /* We can't clear more than a word at a time because the PA
3033 has no integer move insns longer than a word. */
3034 if (align > (TARGET_64BIT ? 8 : 4))
3035 align = (TARGET_64BIT ? 8 : 4);
3037 /* The basic loop. */
3038 n_insns = 4;
3040 /* Residuals. */
3041 if (n_bytes % (2 * align) != 0)
3043 if ((n_bytes % (2 * align)) >= align)
3044 n_insns++;
3046 if ((n_bytes % align) != 0)
3047 n_insns++;
3050 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3051 return n_insns * 4;
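/* Return a string to perform a bitwise-and of operands[1] with
   operands[2], storing the result in operands[0].  For the constant
   masks handled specially here, the zero bits form one contiguous
   band; e.g. (illustrative) 0x000000ff yields "extru %1,31,8,%0"
   while 0xffff00ff yields "depwi 0,23,8,%0".  */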
3055 const char *
3056 pa_output_and (rtx *operands)
3058 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3060 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3061 int ls0, ls1, ms0, p, len;
3063 for (ls0 = 0; ls0 < 32; ls0++)
3064 if ((mask & (1 << ls0)) == 0)
3065 break;
3067 for (ls1 = ls0; ls1 < 32; ls1++)
3068 if ((mask & (1 << ls1)) != 0)
3069 break;
3071 for (ms0 = ls1; ms0 < 32; ms0++)
3072 if ((mask & (1 << ms0)) == 0)
3073 break;
3075 gcc_assert (ms0 == 32);
3077 if (ls1 == 32)
3079 len = ls0;
3081 gcc_assert (len);
3083 operands[2] = GEN_INT (len);
3084 return "{extru|extrw,u} %1,31,%2,%0";
3086 else
3088 /* We could use this `depi' for the case above as well, but `depi'
3089 requires one more register file access than an `extru'. */
3091 p = 31 - ls0;
3092 len = ls1 - ls0;
3094 operands[2] = GEN_INT (p);
3095 operands[3] = GEN_INT (len);
3096 return "{depi|depwi} 0,%2,%3,%0";
3099 else
3100 return "and %1,%2,%0";
3103 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3104 storing the result in operands[0]. */
3105 const char *
3106 pa_output_64bit_and (rtx *operands)
3108 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3110 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3111 int ls0, ls1, ms0, p, len;
3113 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3114 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3115 break;
3117 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3118 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3119 break;
3121 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3122 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3123 break;
3125 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3127 if (ls1 == HOST_BITS_PER_WIDE_INT)
3129 len = ls0;
3131 gcc_assert (len);
3133 operands[2] = GEN_INT (len);
3134 return "extrd,u %1,63,%2,%0";
3136 else
3138 /* We could use `depdi' for the case above as well, but `depdi'
3139 requires one more register file access than an `extrd,u'. */
3141 p = 63 - ls0;
3142 len = ls1 - ls0;
3144 operands[2] = GEN_INT (p);
3145 operands[3] = GEN_INT (len);
3146 return "depdi 0,%2,%3,%0";
3149 else
3150 return "and %1,%2,%0";
3153 const char *
3154 pa_output_ior (rtx *operands)
3156 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3157 int bs0, bs1, p, len;
3159 if (INTVAL (operands[2]) == 0)
3160 return "copy %1,%0";
3162 for (bs0 = 0; bs0 < 32; bs0++)
3163 if ((mask & (1 << bs0)) != 0)
3164 break;
3166 for (bs1 = bs0; bs1 < 32; bs1++)
3167 if ((mask & (1 << bs1)) == 0)
3168 break;
3170 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3172 p = 31 - bs0;
3173 len = bs1 - bs0;
3175 operands[2] = GEN_INT (p);
3176 operands[3] = GEN_INT (len);
3177 return "{depi|depwi} -1,%2,%3,%0";
3180 /* Return a string to perform a bitwise inclusive-or of operands[1] with operands[2]
3181 storing the result in operands[0]. */
3182 const char *
3183 pa_output_64bit_ior (rtx *operands)
3185 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3186 int bs0, bs1, p, len;
3188 if (INTVAL (operands[2]) == 0)
3189 return "copy %1,%0";
3191 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3192 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3193 break;
3195 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3196 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3197 break;
3199 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3200 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3202 p = 63 - bs0;
3203 len = bs1 - bs0;
3205 operands[2] = GEN_INT (p);
3206 operands[3] = GEN_INT (len);
3207 return "depdi -1,%2,%3,%0";
3210 /* Target hook for assembling integer objects. This code handles
3211 aligned SI and DI integers specially since function references
3212 must be preceded by P%. */
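/* For example (illustrative symbol): a word-aligned reference to
   function foo is emitted as "\t.word\tP%foo" (".dword" when the word
   size is 8), with the "P%" plabel prefix suppressed when fast
   indirect calls are in use.  */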
3214 static bool
3215 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3217 if (size == UNITS_PER_WORD
3218 && aligned_p
3219 && function_label_operand (x, VOIDmode))
3221 fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);
3223 /* We don't want an OPD when generating fast indirect calls. */
3224 if (!TARGET_FAST_INDIRECT_CALLS)
3225 fputs ("P%", asm_out_file);
3227 output_addr_const (asm_out_file, x);
3228 fputc ('\n', asm_out_file);
3229 return true;
3231 return default_assemble_integer (x, size, aligned_p);
3234 /* Output an ascii string. */
3235 void
3236 pa_output_ascii (FILE *file, const char *p, int size)
3238 int i;
3239 int chars_output;
3240 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3242 /* The HP assembler can only take strings of 256 characters at one
3243 time. This is a limitation on input line length, *not* the
3244 length of the string. Sigh. Even worse, it seems that the
3245 restriction is in number of input characters (see \xnn &
3246 \whatever). So we have to do this very carefully. */
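/* For example (illustrative input): the bytes 'H', '"', 0x9b are
   emitted as H\"\x9b, and a fresh .STRING directive is started
   whenever the pending chunk would push the current line past 243
   escaped characters.  */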
3248 fputs ("\t.STRING \"", file);
3250 chars_output = 0;
3251 for (i = 0; i < size; i += 4)
3253 int co = 0;
3254 int io = 0;
3255 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3257 register unsigned int c = (unsigned char) p[i + io];
3259 if (c == '\"' || c == '\\')
3260 partial_output[co++] = '\\';
3261 if (c >= ' ' && c < 0177)
3262 partial_output[co++] = c;
3263 else
3265 unsigned int hexd;
3266 partial_output[co++] = '\\';
3267 partial_output[co++] = 'x';
3268 hexd = c / 16 - 0 + '0';
3269 if (hexd > '9')
3270 hexd -= '9' - 'a' + 1;
3271 partial_output[co++] = hexd;
3272 hexd = c % 16 - 0 + '0';
3273 if (hexd > '9')
3274 hexd -= '9' - 'a' + 1;
3275 partial_output[co++] = hexd;
3278 if (chars_output + co > 243)
3280 fputs ("\"\n\t.STRING \"", file);
3281 chars_output = 0;
3283 fwrite (partial_output, 1, (size_t) co, file);
3284 chars_output += co;
3285 co = 0;
3287 fputs ("\"\n", file);
3290 /* Try to rewrite floating point comparisons & branches to avoid
3291 useless add,tr insns.
3293 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3294 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3295 first attempt to remove useless add,tr insns. It is zero
3296 for the second pass as reorg sometimes leaves bogus REG_DEAD
3297 notes lying around.
3299 When CHECK_NOTES is zero we can only eliminate add,tr insns
3300 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3301 instructions. */
3302 static void
3303 remove_useless_addtr_insns (int check_notes)
3305 rtx_insn *insn;
3306 static int pass = 0;
3308 /* This is fairly cheap, so always run it when optimizing. */
3309 if (optimize > 0)
3311 int fcmp_count = 0;
3312 int fbranch_count = 0;
3314 /* Walk all the insns in this function looking for fcmp & fbranch
3315 instructions. Keep track of how many of each we find. */
3316 for (insn = get_insns (); insn; insn = next_insn (insn))
3318 rtx tmp;
3320 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3321 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3322 continue;
3324 tmp = PATTERN (insn);
3326 /* It must be a set. */
3327 if (GET_CODE (tmp) != SET)
3328 continue;
3330 /* If the destination is CCFP, then we've found an fcmp insn. */
3331 tmp = SET_DEST (tmp);
3332 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3334 fcmp_count++;
3335 continue;
3338 tmp = PATTERN (insn);
3339 /* If this is an fbranch instruction, bump the fbranch counter. */
3340 if (GET_CODE (tmp) == SET
3341 && SET_DEST (tmp) == pc_rtx
3342 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3343 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3344 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3345 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3347 fbranch_count++;
3348 continue;
3353 /* Find all floating point compare + branch insns. If possible,
3354 reverse the comparison & the branch to avoid add,tr insns. */
3355 for (insn = get_insns (); insn; insn = next_insn (insn))
3357 rtx tmp;
3358 rtx_insn *next;
3360 /* Ignore anything that isn't an INSN. */
3361 if (! NONJUMP_INSN_P (insn))
3362 continue;
3364 tmp = PATTERN (insn);
3366 /* It must be a set. */
3367 if (GET_CODE (tmp) != SET)
3368 continue;
3370 /* The destination must be CCFP, which is register zero. */
3371 tmp = SET_DEST (tmp);
3372 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3373 continue;
3375 /* INSN should be a set of CCFP.
3377 See if the result of this insn is used in a reversed FP
3378 conditional branch. If so, reverse our condition and
3379 the branch. Doing so avoids useless add,tr insns. */
3380 next = next_insn (insn);
3381 while (next)
3383 /* Jumps, calls and labels stop our search. */
3384 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3385 break;
3387 /* As does another fcmp insn. */
3388 if (NONJUMP_INSN_P (next)
3389 && GET_CODE (PATTERN (next)) == SET
3390 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3391 && REGNO (SET_DEST (PATTERN (next))) == 0)
3392 break;
3394 next = next_insn (next);
3397 /* Is NEXT_INSN a branch? */
3398 if (next && JUMP_P (next))
3400 rtx pattern = PATTERN (next);
3402 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3403 and CCFP dies, then reverse our conditional and the branch
3404 to avoid the add,tr. */
3405 if (GET_CODE (pattern) == SET
3406 && SET_DEST (pattern) == pc_rtx
3407 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3408 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3409 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3410 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3411 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3412 && (fcmp_count == fbranch_count
3413 || (check_notes
3414 && find_regno_note (next, REG_DEAD, 0))))
3416 /* Reverse the branch. */
3417 tmp = XEXP (SET_SRC (pattern), 1);
3418 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3419 XEXP (SET_SRC (pattern), 2) = tmp;
3420 INSN_CODE (next) = -1;
3422 /* Reverse our condition. */
3423 tmp = PATTERN (insn);
3424 PUT_CODE (XEXP (tmp, 1),
3425 (reverse_condition_maybe_unordered
3426 (GET_CODE (XEXP (tmp, 1)))));
3432 pass = !pass;
3436 /* You may have trouble believing this, but this is the 32 bit HP-PA
3437 stack layout. Wow.
3439 Offset Contents
3441 Variable arguments (optional; any number may be allocated)
3443 SP-(4*(N+9)) arg word N
3445 SP-56 arg word 5
3446 SP-52 arg word 4
3448 Fixed arguments (must be allocated; may remain unused)
3450 SP-48 arg word 3
3451 SP-44 arg word 2
3452 SP-40 arg word 1
3453 SP-36 arg word 0
3455 Frame Marker
3457 SP-32 External Data Pointer (DP)
3458 SP-28 External sr4
3459 SP-24 External/stub RP (RP')
3460 SP-20 Current RP
3461 SP-16 Static Link
3462 SP-12 Clean up
3463 SP-8 Calling Stub RP (RP'')
3464 SP-4 Previous SP
3466 Top of Frame
3468 SP-0 Stack Pointer (points to next available address)
3472 /* This function saves registers as follows. Registers marked with ' are
3473 this function's registers (as opposed to the previous function's).
3474 If a frame_pointer isn't needed, r4 is saved as a general register;
3475 the space for the frame pointer is still allocated, though, to keep
3476 things simple.
3479 Top of Frame
3481 SP (FP') Previous FP
3482 SP + 4 Alignment filler (sigh)
3483 SP + 8 Space for locals reserved here.
3487 SP + n All call-saved registers used.
3491 SP + o All call-saved fp registers used.
3495 SP + p (SP') points to next available address.
3499 /* Global variables set by output_function_prologue(). */
3500 /* Size of frame. Need to know this to emit return insns from
3501 leaf procedures. */
3502 static HOST_WIDE_INT actual_fsize, local_fsize;
3503 static int save_fregs;
3505 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3506 Handle case where DISP > 8k by using the add_high_const patterns.
3508 Note in DISP > 8k case, we will leave the high part of the address
3509 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this.*/
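/* For instance (illustrative), DISP = 24000 fails VAL_14_BITS_P, so
   on a 32-bit target the code below emits %r1 <- BASE + high part of
   24000 (roughly an addil) and stores through a LO_SUM of %r1 and
   24000, leaving the high part of the address in %r1 as noted.  */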
3511 static void
3512 store_reg (int reg, HOST_WIDE_INT disp, int base)
3514 rtx insn, dest, src, basereg;
3516 src = gen_rtx_REG (word_mode, reg);
3517 basereg = gen_rtx_REG (Pmode, base);
3518 if (VAL_14_BITS_P (disp))
3520 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3521 insn = emit_move_insn (dest, src);
3523 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3525 rtx delta = GEN_INT (disp);
3526 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3528 emit_move_insn (tmpreg, delta);
3529 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3530 if (DO_FRAME_NOTES)
3532 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3533 gen_rtx_SET (VOIDmode, tmpreg,
3534 gen_rtx_PLUS (Pmode, basereg, delta)));
3535 RTX_FRAME_RELATED_P (insn) = 1;
3537 dest = gen_rtx_MEM (word_mode, tmpreg);
3538 insn = emit_move_insn (dest, src);
3540 else
3542 rtx delta = GEN_INT (disp);
3543 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3544 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3546 emit_move_insn (tmpreg, high);
3547 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3548 insn = emit_move_insn (dest, src);
3549 if (DO_FRAME_NOTES)
3550 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3551 gen_rtx_SET (VOIDmode,
3552 gen_rtx_MEM (word_mode,
3553 gen_rtx_PLUS (word_mode,
3554 basereg,
3555 delta)),
3556 src));
3559 if (DO_FRAME_NOTES)
3560 RTX_FRAME_RELATED_P (insn) = 1;
3563 /* Emit RTL to store REG at the memory location specified by BASE and then
3564 add MOD to BASE. MOD must be <= 8k. */
3566 static void
3567 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3569 rtx insn, basereg, srcreg, delta;
3571 gcc_assert (VAL_14_BITS_P (mod));
3573 basereg = gen_rtx_REG (Pmode, base);
3574 srcreg = gen_rtx_REG (word_mode, reg);
3575 delta = GEN_INT (mod);
3577 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3578 if (DO_FRAME_NOTES)
3580 RTX_FRAME_RELATED_P (insn) = 1;
3582 /* RTX_FRAME_RELATED_P must be set on each frame related set
3583 in a parallel with more than one element. */
3584 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3585 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3589 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3590 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3591 whether to add a frame note or not.
3593 In the DISP > 8k case, we leave the high part of the address in %r1.
3594 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3596 static void
3597 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3599 rtx insn;
3601 if (VAL_14_BITS_P (disp))
3603 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3604 plus_constant (Pmode,
3605 gen_rtx_REG (Pmode, base), disp));
3607 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3609 rtx basereg = gen_rtx_REG (Pmode, base);
3610 rtx delta = GEN_INT (disp);
3611 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3613 emit_move_insn (tmpreg, delta);
3614 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3615 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3616 if (DO_FRAME_NOTES)
3617 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3618 gen_rtx_SET (VOIDmode, tmpreg,
3619 gen_rtx_PLUS (Pmode, basereg, delta)));
3621 else
3623 rtx basereg = gen_rtx_REG (Pmode, base);
3624 rtx delta = GEN_INT (disp);
3625 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3627 emit_move_insn (tmpreg,
3628 gen_rtx_PLUS (Pmode, basereg,
3629 gen_rtx_HIGH (Pmode, delta)));
3630 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3631 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3634 if (DO_FRAME_NOTES && note)
3635 RTX_FRAME_RELATED_P (insn) = 1;
3638 HOST_WIDE_INT
3639 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3641 int freg_saved = 0;
3642 int i, j;
3644 /* The code in pa_expand_prologue and pa_expand_epilogue must
3645 be consistent with the rounding and size calculation done here.
3646 Change them at the same time. */
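/* Rough worked example of the computation below (assuming 32-bit ABI
   constants: STARTING_FRAME_OFFSET of 8, a 32-byte frame marker and
   64-byte final rounding): 40 bytes of locals round to 40, the frame
   pointer slot and filler make 48, two callee general register saves
   make 56, and a non-leaf function adds the frame marker for 88,
   which rounds up to 128.  */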
3648 /* We do our own stack alignment. First, round the size of the
3649 stack locals up to a word boundary. */
3650 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3652 /* Space for previous frame pointer + filler. If any frame is
3653 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3654 waste some space here for the sake of HP compatibility. The
3655 first slot is only used when the frame pointer is needed. */
3656 if (size || frame_pointer_needed)
3657 size += STARTING_FRAME_OFFSET;
3659 /* If the current function calls __builtin_eh_return, then we need
3660 to allocate stack space for registers that will hold data for
3661 the exception handler. */
3662 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3664 unsigned int i;
3666 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3667 continue;
3668 size += i * UNITS_PER_WORD;
3671 /* Account for space used by the callee general register saves. */
3672 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3673 if (df_regs_ever_live_p (i))
3674 size += UNITS_PER_WORD;
3676 /* Account for space used by the callee floating point register saves. */
3677 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3678 if (df_regs_ever_live_p (i)
3679 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3681 freg_saved = 1;
3683 /* We always save both halves of the FP register, so always
3684 increment the frame size by 8 bytes. */
3685 size += 8;
3688 /* If any of the floating registers are saved, account for the
3689 alignment needed for the floating point register save block. */
3690 if (freg_saved)
3692 size = (size + 7) & ~7;
3693 if (fregs_live)
3694 *fregs_live = 1;
3697 /* The various ABIs include space for the outgoing parameters in the
3698 size of the current function's stack frame. We don't need to align
3699 for the outgoing arguments as their alignment is set by the final
3700 rounding for the frame as a whole. */
3701 size += crtl->outgoing_args_size;
3703 /* Allocate space for the fixed frame marker. This space must be
3704 allocated for any function that makes calls or allocates
3705 stack space. */
3706 if (!crtl->is_leaf || size)
3707 size += TARGET_64BIT ? 48 : 32;
3709 /* Finally, round to the preferred stack boundary. */
3710 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3711 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3714 /* Generate the assembly code for function entry. FILE is a stdio
3715 stream to output the code to. SIZE is an int: how many units of
3716 temporary storage to allocate.
3718 Refer to the array `regs_ever_live' to determine which registers to
3719 save; `regs_ever_live[I]' is nonzero if register number I is ever
3720 used in the function. This function is responsible for knowing
3721 which registers should not be saved even if used. */
3723 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3724 of memory. If any fpu reg is used in the function, we allocate
3725 such a block here, at the bottom of the frame, just in case it's needed.
3727 If this function is a leaf procedure, then we may choose not
3728 to do a "save" insn. The decision about whether or not
3729 to do this is made in regclass.c. */
3731 static void
3732 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3734 /* The function's label and associated .PROC must never be
3735 separated and must be output *after* any profiling declarations
3736 to avoid changing spaces/subspaces within a procedure. */
3737 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3738 fputs ("\t.PROC\n", file);
3740 /* pa_expand_prologue does the dirty work now. We just need
3741 to output the assembler directives which denote the start
3742 of a function. */
3743 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3744 if (crtl->is_leaf)
3745 fputs (",NO_CALLS", file);
3746 else
3747 fputs (",CALLS", file);
3748 if (rp_saved)
3749 fputs (",SAVE_RP", file);
3751 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3752 at the beginning of the frame and that it is used as the frame
3753 pointer for the frame. We do this because our current frame
3754 layout doesn't conform to that specified in the HP runtime
3755 documentation and we need a way to indicate to programs such as
3756 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3757 isn't used by HP compilers but is supported by the assembler.
3758 However, SAVE_SP is supposed to indicate that the previous stack
3759 pointer has been saved in the frame marker. */
3760 if (frame_pointer_needed)
3761 fputs (",SAVE_SP", file);
3763 /* Pass on information about the number of callee register saves
3764 performed in the prologue.
3766 The compiler is supposed to pass the highest register number
3767 saved, the assembler then has to adjust that number before
3768 entering it into the unwind descriptor (to account for any
3769 caller saved registers with lower register numbers than the
3770 first callee saved register). */
3771 if (gr_saved)
3772 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3774 if (fr_saved)
3775 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3777 fputs ("\n\t.ENTRY\n", file);
3779 remove_useless_addtr_insns (0);
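/* For illustration only (the exact values depend on the function): a
   non-leaf function with a 128-byte frame, a saved return pointer, a
   frame pointer, and gr_saved == 3 would produce directives such as

	.PROC
	.CALLINFO FRAME=128,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=5
	.ENTRY

   where ENTRY_GR is gr_saved plus the bias of 2 applied above.  */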
3782 void
3783 pa_expand_prologue (void)
3785 int merge_sp_adjust_with_store = 0;
3786 HOST_WIDE_INT size = get_frame_size ();
3787 HOST_WIDE_INT offset;
3788 int i;
3789 rtx insn, tmpreg;
3791 gr_saved = 0;
3792 fr_saved = 0;
3793 save_fregs = 0;
3795 /* Compute total size for frame pointer, filler, locals and rounding to
3796 the next word boundary. Similar code appears in pa_compute_frame_size
3797 and must be changed in tandem with this code. */
3798 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3799 if (local_fsize || frame_pointer_needed)
3800 local_fsize += STARTING_FRAME_OFFSET;
3802 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3803 if (flag_stack_usage_info)
3804 current_function_static_stack_size = actual_fsize;
3806 /* Compute a few things we will use often. */
3807 tmpreg = gen_rtx_REG (word_mode, 1);
3809 /* Save RP first. The calling conventions manual states RP will
3810 always be stored into the caller's frame at sp - 20 or sp - 16
3811 depending on which ABI is in use. */
3812 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3814 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3815 rp_saved = true;
3817 else
3818 rp_saved = false;
3820 /* Allocate the local frame and set up the frame pointer if needed. */
3821 if (actual_fsize != 0)
3823 if (frame_pointer_needed)
3825 /* Copy the old frame pointer temporarily into %r1. Set up the
3826 new stack pointer, then store away the saved old frame pointer
3827 into the stack at sp and at the same time update the stack
3828 pointer by actual_fsize bytes. Two versions: the first
3829 handles small (<8k) frames, the second large (>=8k)
3830 frames. */
3831 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3832 if (DO_FRAME_NOTES)
3833 RTX_FRAME_RELATED_P (insn) = 1;
3835 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3836 if (DO_FRAME_NOTES)
3837 RTX_FRAME_RELATED_P (insn) = 1;
3839 if (VAL_14_BITS_P (actual_fsize))
3840 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3841 else
3843 /* It is incorrect to store the saved frame pointer at *sp,
3844 then increment sp (writes beyond the current stack boundary).
3846 So instead use stwm to store at *sp and post-increment the
3847 stack pointer as an atomic operation. Then increment sp to
3848 finish allocating the new frame. */
3849 HOST_WIDE_INT adjust1 = 8192 - 64;
3850 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3852 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3853 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3854 adjust2, 1);
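/* Worked example: for actual_fsize == 10000, adjust1 is
   8192 - 64 == 8128 (allocated atomically by the stwm above) and
   adjust2 is 10000 - 8128 == 1872 (added to the stack pointer
   afterwards).  */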
3857 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3858 we need to store the previous stack pointer (frame pointer)
3859 into the frame marker on targets that use the HP unwind
3860 library. This allows the HP unwind library to be used to
3861 unwind GCC frames. However, we are not fully compatible
3862 with the HP library because our frame layout differs from
3863 that specified in the HP runtime specification.
3865 We don't want a frame note on this instruction as the frame
3866 marker moves during dynamic stack allocation.
3868 This instruction also serves as a blockage to prevent
3869 register spills from being scheduled before the stack
3870 pointer is raised. This is necessary as we store
3871 registers using the frame pointer as a base register,
3872 and the frame pointer is set before sp is raised. */
3873 if (TARGET_HPUX_UNWIND_LIBRARY)
3875 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3876 GEN_INT (TARGET_64BIT ? -8 : -4));
3878 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3879 hard_frame_pointer_rtx);
3881 else
3882 emit_insn (gen_blockage ());
3884 /* No frame pointer needed. */
3885 else
3887 /* In some cases we can perform the first callee register save
3888 and allocating the stack frame at the same time. If so, just
3889 make a note of it and defer allocating the frame until saving
3890 the callee registers. */
3891 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3892 merge_sp_adjust_with_store = 1;
3893 /* Cannot optimize. Adjust the stack frame by actual_fsize
3894 bytes. */
3895 else
3896 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3897 actual_fsize, 1);
3901 /* Normal register save.
3903 Do not save the frame pointer in the frame_pointer_needed case. It
3904 was done earlier. */
3905 if (frame_pointer_needed)
3907 offset = local_fsize;
3909 /* Saving the EH return data registers in the frame is the simplest
3910 way to get the frame unwind information emitted. We put them
3911 just before the general registers. */
3912 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3914 unsigned int i, regno;
3916 for (i = 0; ; ++i)
3918 regno = EH_RETURN_DATA_REGNO (i);
3919 if (regno == INVALID_REGNUM)
3920 break;
3922 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3923 offset += UNITS_PER_WORD;
3927 for (i = 18; i >= 4; i--)
3928 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3930 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3931 offset += UNITS_PER_WORD;
3932 gr_saved++;
3934 /* Account for %r3 which is saved in a special place. */
3935 gr_saved++;
3937 /* No frame pointer needed. */
3938 else
3940 offset = local_fsize - actual_fsize;
3942 /* Saving the EH return data registers in the frame is the simplest
3943 way to get the frame unwind information emitted. */
3944 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3946 unsigned int i, regno;
3948 for (i = 0; ; ++i)
3950 regno = EH_RETURN_DATA_REGNO (i);
3951 if (regno == INVALID_REGNUM)
3952 break;
3954 /* If merge_sp_adjust_with_store is nonzero, then we can
3955 optimize the first save. */
3956 if (merge_sp_adjust_with_store)
3958 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3959 merge_sp_adjust_with_store = 0;
3961 else
3962 store_reg (regno, offset, STACK_POINTER_REGNUM);
3963 offset += UNITS_PER_WORD;
3967 for (i = 18; i >= 3; i--)
3968 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3970 /* If merge_sp_adjust_with_store is nonzero, then we can
3971 optimize the first GR save. */
3972 if (merge_sp_adjust_with_store)
3974 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3975 merge_sp_adjust_with_store = 0;
3977 else
3978 store_reg (i, offset, STACK_POINTER_REGNUM);
3979 offset += UNITS_PER_WORD;
3980 gr_saved++;
3983 /* If we wanted to merge the SP adjustment with a GR save, but we never
3984 did any GR saves, then just emit the adjustment here. */
3985 if (merge_sp_adjust_with_store)
3986 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3987 actual_fsize, 1);
3990 /* The hppa calling conventions say that %r19, the pic offset
3991 register, is saved at sp - 32 (in this function's frame)
3992 when generating PIC code. FIXME: What is the correct thing
3993 to do for functions which make no calls and allocate no
3994 frame? Do we need to allocate a frame, or can we just omit
3995 the save? For now we'll just omit the save.
3997 We don't want a note on this insn as the frame marker can
3998 move if there is a dynamic stack allocation. */
3999 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4001 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4003 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4007 /* Align pointer properly (doubleword boundary). */
4008 offset = (offset + 7) & ~7;
4010 /* Floating point register store. */
4011 if (save_fregs)
4013 rtx base;
4015 /* First get the frame or stack pointer to the start of the FP register
4016 save area. */
4017 if (frame_pointer_needed)
4019 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4020 base = hard_frame_pointer_rtx;
4022 else
4024 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4025 base = stack_pointer_rtx;
4028 /* Now actually save the FP registers. */
4029 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4031 if (df_regs_ever_live_p (i)
4032 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4034 rtx addr, insn, reg;
4035 addr = gen_rtx_MEM (DFmode,
4036 gen_rtx_POST_INC (word_mode, tmpreg));
4037 reg = gen_rtx_REG (DFmode, i);
4038 insn = emit_move_insn (addr, reg);
4039 if (DO_FRAME_NOTES)
4041 RTX_FRAME_RELATED_P (insn) = 1;
4042 if (TARGET_64BIT)
4044 rtx mem = gen_rtx_MEM (DFmode,
4045 plus_constant (Pmode, base,
4046 offset));
4047 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4048 gen_rtx_SET (VOIDmode, mem, reg));
4050 else
4052 rtx meml = gen_rtx_MEM (SFmode,
4053 plus_constant (Pmode, base,
4054 offset));
4055 rtx memr = gen_rtx_MEM (SFmode,
4056 plus_constant (Pmode, base,
4057 offset + 4));
4058 rtx regl = gen_rtx_REG (SFmode, i);
4059 rtx regr = gen_rtx_REG (SFmode, i + 1);
4060 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4061 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4062 rtvec vec;
4064 RTX_FRAME_RELATED_P (setl) = 1;
4065 RTX_FRAME_RELATED_P (setr) = 1;
4066 vec = gen_rtvec (2, setl, setr);
4067 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4068 gen_rtx_SEQUENCE (VOIDmode, vec));
4071 offset += GET_MODE_SIZE (DFmode);
4072 fr_saved++;
4078 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4079 Handle the case where DISP > 8k by using the add_high_const patterns. */
4081 static void
4082 load_reg (int reg, HOST_WIDE_INT disp, int base)
4084 rtx dest = gen_rtx_REG (word_mode, reg);
4085 rtx basereg = gen_rtx_REG (Pmode, base);
4086 rtx src;
4088 if (VAL_14_BITS_P (disp))
4089 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4090 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4092 rtx delta = GEN_INT (disp);
4093 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4095 emit_move_insn (tmpreg, delta);
4096 if (TARGET_DISABLE_INDEXING)
4098 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4099 src = gen_rtx_MEM (word_mode, tmpreg);
4101 else
4102 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4104 else
4106 rtx delta = GEN_INT (disp);
4107 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4108 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4110 emit_move_insn (tmpreg, high);
4111 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4114 emit_move_insn (dest, src);
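/* Illustrative sketch of the three cases above; the assembler output
   is approximate.  A 14-bit displacement yields a single load such as
   "ldw d(base)".  A displacement that only fits in 32 bits yields the
   HIGH/LO_SUM pair, roughly "addil L'd,base" followed by
   "ldw R'd(%r1)".  A full 64-bit displacement is first loaded into
   %r1 and then combined with the base register to form the address.  */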
4117 /* Update the total code bytes output to the text section. */
4119 static void
4120 update_total_code_bytes (unsigned int nbytes)
4122 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4123 && !IN_NAMED_SECTION_P (cfun->decl))
4125 unsigned int old_total = total_code_bytes;
4127 total_code_bytes += nbytes;
4129 /* Be prepared to handle overflows. */
4130 if (old_total > total_code_bytes)
4131 total_code_bytes = UINT_MAX;
4135 /* This function generates the assembly code for function exit.
4136 Args are as for output_function_prologue ().
4138 The function epilogue should not depend on the current stack
4139 pointer! It should use the frame pointer only. This is mandatory
4140 because of alloca; we also take advantage of it to omit stack
4141 adjustments before returning. */
4143 static void
4144 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4146 rtx_insn *insn = get_last_insn ();
4147 bool extra_nop;
4149 /* pa_expand_epilogue does the dirty work now. We just need
4150 to output the assembler directives which denote the end
4151 of a function.
4153 To make debuggers happy, emit a nop if the epilogue was completely
4154 eliminated due to a volatile call as the last insn in the
4155 current function. That way the return address (in %r2) will
4156 always point to a valid instruction in the current function. */
4158 /* Get the last real insn. */
4159 if (NOTE_P (insn))
4160 insn = prev_real_insn (insn);
4162 /* If it is a sequence, then look inside. */
4163 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4164 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4166 /* If insn is a CALL_INSN, then it must be a call to a volatile
4167 function (otherwise there would be epilogue insns). */
4168 if (insn && CALL_P (insn))
4170 fputs ("\tnop\n", file);
4171 extra_nop = true;
4173 else
4174 extra_nop = false;
4176 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4178 if (TARGET_SOM && TARGET_GAS)
4180 /* We are done with this subspace except possibly for some additional
4181 debug information. Forget that we are in this subspace to ensure
4182 that the next function is output in its own subspace. */
4183 in_section = NULL;
4184 cfun->machine->in_nsubspa = 2;
4187 /* Thunks do their own insn accounting. */
4188 if (cfun->is_thunk)
4189 return;
4191 if (INSN_ADDRESSES_SET_P ())
4193 last_address = extra_nop ? 4 : 0;
4194 insn = get_last_nonnote_insn ();
4195 if (insn)
4197 last_address += INSN_ADDRESSES (INSN_UID (insn));
4198 if (INSN_P (insn))
4199 last_address += insn_default_length (insn);
4201 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4202 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4204 else
4205 last_address = UINT_MAX;
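/* Worked example: if the last nonnote insn is at address 120 with a
   default length of 4, and an extra nop was output, then
   last_address = 4 + 120 + 4 = 128 before the FUNCTION_BOUNDARY
   rounding above.  */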
4207 /* Finally, update the total number of code bytes output so far. */
4208 update_total_code_bytes (last_address);
4211 void
4212 pa_expand_epilogue (void)
4214 rtx tmpreg;
4215 HOST_WIDE_INT offset;
4216 HOST_WIDE_INT ret_off = 0;
4217 int i;
4218 int merge_sp_adjust_with_load = 0;
4220 /* We will use this often. */
4221 tmpreg = gen_rtx_REG (word_mode, 1);
4223 /* Try to restore RP early to avoid load/use interlocks when
4224 RP gets used in the return (bv) instruction. This appears to still
4225 be necessary even when we schedule the prologue and epilogue. */
4226 if (rp_saved)
4228 ret_off = TARGET_64BIT ? -16 : -20;
4229 if (frame_pointer_needed)
4231 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4232 ret_off = 0;
4234 else
4236 /* No frame pointer, and stack is smaller than 8k. */
4237 if (VAL_14_BITS_P (ret_off - actual_fsize))
4239 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4240 ret_off = 0;
4245 /* General register restores. */
4246 if (frame_pointer_needed)
4248 offset = local_fsize;
4250 /* If the current function calls __builtin_eh_return, then we need
4251 to restore the saved EH data registers. */
4252 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4254 unsigned int i, regno;
4256 for (i = 0; ; ++i)
4258 regno = EH_RETURN_DATA_REGNO (i);
4259 if (regno == INVALID_REGNUM)
4260 break;
4262 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4263 offset += UNITS_PER_WORD;
4267 for (i = 18; i >= 4; i--)
4268 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4270 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4271 offset += UNITS_PER_WORD;
4274 else
4276 offset = local_fsize - actual_fsize;
4278 /* If the current function calls __builtin_eh_return, then we need
4279 to restore the saved EH data registers. */
4280 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4282 unsigned int i, regno;
4284 for (i = 0; ; ++i)
4286 regno = EH_RETURN_DATA_REGNO (i);
4287 if (regno == INVALID_REGNUM)
4288 break;
4290 /* Only for the first load.
4291 merge_sp_adjust_with_load holds the register load
4292 with which we will merge the sp adjustment. */
4293 if (merge_sp_adjust_with_load == 0
4294 && local_fsize == 0
4295 && VAL_14_BITS_P (-actual_fsize))
4296 merge_sp_adjust_with_load = regno;
4297 else
4298 load_reg (regno, offset, STACK_POINTER_REGNUM);
4299 offset += UNITS_PER_WORD;
4303 for (i = 18; i >= 3; i--)
4305 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4307 /* Only for the first load.
4308 merge_sp_adjust_with_load holds the register load
4309 with which we will merge the sp adjustment. */
4310 if (merge_sp_adjust_with_load == 0
4311 && local_fsize == 0
4312 && VAL_14_BITS_P (-actual_fsize))
4313 merge_sp_adjust_with_load = i;
4314 else
4315 load_reg (i, offset, STACK_POINTER_REGNUM);
4316 offset += UNITS_PER_WORD;
4321 /* Align pointer properly (doubleword boundary). */
4322 offset = (offset + 7) & ~7;
4324 /* FP register restores. */
4325 if (save_fregs)
4327 /* Adjust the register to index off of. */
4328 if (frame_pointer_needed)
4329 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4330 else
4331 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4333 /* Actually do the restores now. */
4334 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4335 if (df_regs_ever_live_p (i)
4336 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4338 rtx src = gen_rtx_MEM (DFmode,
4339 gen_rtx_POST_INC (word_mode, tmpreg));
4340 rtx dest = gen_rtx_REG (DFmode, i);
4341 emit_move_insn (dest, src);
4345 /* Emit a blockage insn here to keep these insns from being moved to
4346 an earlier spot in the epilogue, or into the main instruction stream.
4348 This is necessary as we must not cut the stack back before all the
4349 restores are finished. */
4350 emit_insn (gen_blockage ());
4352 /* Reset stack pointer (and possibly frame pointer). The stack
4353 pointer is initially set to fp + 64 to avoid a race condition. */
4354 if (frame_pointer_needed)
4356 rtx delta = GEN_INT (-64);
4358 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4359 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4360 stack_pointer_rtx, delta));
4362 /* If we were deferring a callee register restore, do it now. */
4363 else if (merge_sp_adjust_with_load)
4365 rtx delta = GEN_INT (-actual_fsize);
4366 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4368 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4370 else if (actual_fsize != 0)
4371 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4372 - actual_fsize, 0);
4374 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4375 frame greater than 8k), do so now. */
4376 if (ret_off != 0)
4377 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4379 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4381 rtx sa = EH_RETURN_STACKADJ_RTX;
4383 emit_insn (gen_blockage ());
4384 emit_insn (TARGET_64BIT
4385 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4386 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4390 bool
4391 pa_can_use_return_insn (void)
4393 if (!reload_completed)
4394 return false;
4396 if (frame_pointer_needed)
4397 return false;
4399 if (df_regs_ever_live_p (2))
4400 return false;
4402 if (crtl->profile)
4403 return false;
4405 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4409 hppa_pic_save_rtx (void)
4411 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4414 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4415 #define NO_DEFERRED_PROFILE_COUNTERS 0
4416 #endif
4419 /* Vector of funcdef numbers. */
4420 static vec<int> funcdef_nos;
4422 /* Output deferred profile counters. */
4423 static void
4424 output_deferred_profile_counters (void)
4426 unsigned int i;
4427 int align, n;
4429 if (funcdef_nos.is_empty ())
4430 return;
4432 switch_to_section (data_section);
4433 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4434 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4436 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4438 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4439 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4442 funcdef_nos.release ();
4445 void
4446 hppa_profile_hook (int label_no)
4448 /* We use SImode for the address of the function in both 32 and
4449 64-bit code to avoid having to provide DImode versions of the
4450 lcla2 and load_offset_label_address insn patterns. */
4451 rtx reg = gen_reg_rtx (SImode);
4452 rtx_code_label *label_rtx = gen_label_rtx ();
4453 rtx begin_label_rtx, call_insn;
4454 char begin_label_name[16];
4456 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4457 label_no);
4458 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4460 if (TARGET_64BIT)
4461 emit_move_insn (arg_pointer_rtx,
4462 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4463 GEN_INT (64)));
4465 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4467 /* The address of the function is loaded into %r25 with an instruction-
4468 relative sequence that avoids the use of relocations. The sequence
4469 is split so that the load_offset_label_address instruction can
4470 occupy the delay slot of the call to _mcount. */
4471 if (TARGET_PA_20)
4472 emit_insn (gen_lcla2 (reg, label_rtx));
4473 else
4474 emit_insn (gen_lcla1 (reg, label_rtx));
4476 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4477 reg, begin_label_rtx, label_rtx));
4479 #if !NO_DEFERRED_PROFILE_COUNTERS
4481 rtx count_label_rtx, addr, r24;
4482 char count_label_name[16];
4484 funcdef_nos.safe_push (label_no);
4485 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4486 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4488 addr = force_reg (Pmode, count_label_rtx);
4489 r24 = gen_rtx_REG (Pmode, 24);
4490 emit_move_insn (r24, addr);
4492 call_insn =
4493 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4494 gen_rtx_SYMBOL_REF (Pmode,
4495 "_mcount")),
4496 GEN_INT (TARGET_64BIT ? 24 : 12)));
4498 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4500 #else
4502 call_insn =
4503 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4504 gen_rtx_SYMBOL_REF (Pmode,
4505 "_mcount")),
4506 GEN_INT (TARGET_64BIT ? 16 : 8)));
4508 #endif
4510 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4511 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4513 /* Indicate the _mcount call cannot throw, nor will it execute a
4514 non-local goto. */
4515 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4518 /* Fetch the return address for the frame COUNT steps up from
4519 the current frame, after the prologue. FRAMEADDR is the
4520 frame pointer of the COUNT frame.
4522 We want to ignore any export stub remnants here. To handle this,
4523 we examine the code at the return address, and if it is an export
4524 stub, we return a memory rtx for the stub return address stored
4525 at frame-24.
4527 The value returned is used in two different ways:
4529 1. To find a function's caller.
4531 2. To change the return address for a function.
4533 This function handles most instances of case 1; however, it will
4534 fail if there are two levels of stubs to execute on the return
4535 path. The only way I believe that can happen is if the return value
4536 needs a parameter relocation, which never happens for C code.
4538 This function handles most instances of case 2; however, it will
4539 fail if we did not originally have stub code on the return path
4540 but will need stub code on the new return path. This can happen if
4541 the caller & callee are both in the main program, but the new
4542 return location is in a shared library. */
4545 pa_return_addr_rtx (int count, rtx frameaddr)
4547 rtx label;
4548 rtx rp;
4549 rtx saved_rp;
4550 rtx ins;
4552 /* The instruction stream at the return address of a PA1.X export stub is:
4554 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4555 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4556 0x00011820 | stub+16: mtsp r1,sr0
4557 0xe0400002 | stub+20: be,n 0(sr0,rp)
4559 0xe0400002 must be specified as -532676606 so that it won't be
4560 rejected as an invalid immediate operand on 64-bit hosts.
4562 The instruction stream at the return address of a PA2.0 export stub is:
4564 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4565 0xe840d002 | stub+12: bve,n (rp)
4568 HOST_WIDE_INT insns[4];
4569 int i, len;
4571 if (count != 0)
4572 return NULL_RTX;
4574 rp = get_hard_reg_initial_val (Pmode, 2);
4576 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4577 return rp;
4579 /* If there is no export stub then just use the value saved from
4580 the return pointer register. */
4582 saved_rp = gen_reg_rtx (Pmode);
4583 emit_move_insn (saved_rp, rp);
4585 /* Get pointer to the instruction stream. We have to mask out the
4586 privilege level from the two low order bits of the return address
4587 pointer here so that ins will point to the start of the first
4588 instruction that would have been executed if we returned. */
4589 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4590 label = gen_label_rtx ();
4592 if (TARGET_PA_20)
4594 insns[0] = 0x4bc23fd1;
4595 insns[1] = -398405630;
4596 len = 2;
4598 else
4600 insns[0] = 0x4bc23fd1;
4601 insns[1] = 0x004010a1;
4602 insns[2] = 0x00011820;
4603 insns[3] = -532676606;
4604 len = 4;
4607 /* Check the instruction stream at the normal return address for the
4608 export stub. If it is an export stub, then our return address is
4609 really in -24[frameaddr]. */
4611 for (i = 0; i < len; i++)
4613 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4614 rtx op1 = GEN_INT (insns[i]);
4615 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4618 /* Here we know that our return address points to an export
4619 stub. We don't want to return the address of the export stub,
4620 but rather the return address of the export stub. That return
4621 address is stored at -24[frameaddr]. */
4623 emit_move_insn (saved_rp,
4624 gen_rtx_MEM (Pmode,
4625 memory_address (Pmode,
4626 plus_constant (Pmode, frameaddr,
4627 -24))));
4629 emit_label (label);
4631 return saved_rp;
4634 void
4635 pa_emit_bcond_fp (rtx operands[])
4637 enum rtx_code code = GET_CODE (operands[0]);
4638 rtx operand0 = operands[1];
4639 rtx operand1 = operands[2];
4640 rtx label = operands[3];
4642 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4643 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4645 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4646 gen_rtx_IF_THEN_ELSE (VOIDmode,
4647 gen_rtx_fmt_ee (NE,
4648 VOIDmode,
4649 gen_rtx_REG (CCFPmode, 0),
4650 const0_rtx),
4651 gen_rtx_LABEL_REF (VOIDmode, label),
4652 pc_rtx)));
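/* For illustration, when operands[0] is (gt ...) this emits roughly:

     (set (reg:CCFP 0) (gt:CCFP operand0 operand1))
     (set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
			     (label_ref label)
			     (pc)))  */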
4656 /* Adjust the cost of a scheduling dependency. Return the new cost of
4657 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4659 static int
4660 pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4662 enum attr_type attr_type;
4664 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4665 true dependencies as they are described with bypasses now. */
4666 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4667 return cost;
4669 if (! recog_memoized (insn))
4670 return 0;
4672 attr_type = get_attr_type (insn);
4674 switch (REG_NOTE_KIND (link))
4676 case REG_DEP_ANTI:
4677 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4678 cycles later. */
4680 if (attr_type == TYPE_FPLOAD)
4682 rtx pat = PATTERN (insn);
4683 rtx dep_pat = PATTERN (dep_insn);
4684 if (GET_CODE (pat) == PARALLEL)
4686 /* This happens for the fldXs,mb patterns. */
4687 pat = XVECEXP (pat, 0, 0);
4689 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4690 /* If this happens, we have to extend this to schedule
4691 optimally. Return 0 for now. */
4692 return 0;
4694 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4696 if (! recog_memoized (dep_insn))
4697 return 0;
4698 switch (get_attr_type (dep_insn))
4700 case TYPE_FPALU:
4701 case TYPE_FPMULSGL:
4702 case TYPE_FPMULDBL:
4703 case TYPE_FPDIVSGL:
4704 case TYPE_FPDIVDBL:
4705 case TYPE_FPSQRTSGL:
4706 case TYPE_FPSQRTDBL:
4707 /* A fpload can't be issued until one cycle before a
4708 preceding arithmetic operation has finished if
4709 the target of the fpload is any of the sources
4710 (or destination) of the arithmetic operation. */
4711 return insn_default_latency (dep_insn) - 1;
4713 default:
4714 return 0;
4718 else if (attr_type == TYPE_FPALU)
4720 rtx pat = PATTERN (insn);
4721 rtx dep_pat = PATTERN (dep_insn);
4722 if (GET_CODE (pat) == PARALLEL)
4724 /* This happens for the fldXs,mb patterns. */
4725 pat = XVECEXP (pat, 0, 0);
4727 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4728 /* If this happens, we have to extend this to schedule
4729 optimally. Return 0 for now. */
4730 return 0;
4732 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4734 if (! recog_memoized (dep_insn))
4735 return 0;
4736 switch (get_attr_type (dep_insn))
4738 case TYPE_FPDIVSGL:
4739 case TYPE_FPDIVDBL:
4740 case TYPE_FPSQRTSGL:
4741 case TYPE_FPSQRTDBL:
4742 /* An ALU flop can't be issued until two cycles before a
4743 preceding divide or sqrt operation has finished if
4744 the target of the ALU flop is any of the sources
4745 (or destination) of the divide or sqrt operation. */
4746 return insn_default_latency (dep_insn) - 2;
4748 default:
4749 return 0;
4754 /* For other anti dependencies, the cost is 0. */
4755 return 0;
4757 case REG_DEP_OUTPUT:
4758 /* Output dependency; DEP_INSN writes a register that INSN writes some
4759 cycles later. */
4760 if (attr_type == TYPE_FPLOAD)
4762 rtx pat = PATTERN (insn);
4763 rtx dep_pat = PATTERN (dep_insn);
4764 if (GET_CODE (pat) == PARALLEL)
4766 /* This happens for the fldXs,mb patterns. */
4767 pat = XVECEXP (pat, 0, 0);
4769 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4770 /* If this happens, we have to extend this to schedule
4771 optimally. Return 0 for now. */
4772 return 0;
4774 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4776 if (! recog_memoized (dep_insn))
4777 return 0;
4778 switch (get_attr_type (dep_insn))
4780 case TYPE_FPALU:
4781 case TYPE_FPMULSGL:
4782 case TYPE_FPMULDBL:
4783 case TYPE_FPDIVSGL:
4784 case TYPE_FPDIVDBL:
4785 case TYPE_FPSQRTSGL:
4786 case TYPE_FPSQRTDBL:
4787 /* A fpload can't be issued until one cycle before a
4788 preceding arithmetic operation has finished if
4789 the target of the fpload is the destination of the
4790 arithmetic operation.
4792 Exception: For PA7100LC, PA7200 and PA7300, the cost
4793 is 3 cycles, unless they bundle together. We also
4794 pay the penalty if the second insn is a fpload. */
4795 return insn_default_latency (dep_insn) - 1;
4797 default:
4798 return 0;
4802 else if (attr_type == TYPE_FPALU)
4804 rtx pat = PATTERN (insn);
4805 rtx dep_pat = PATTERN (dep_insn);
4806 if (GET_CODE (pat) == PARALLEL)
4808 /* This happens for the fldXs,mb patterns. */
4809 pat = XVECEXP (pat, 0, 0);
4811 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4812 /* If this happens, we have to extend this to schedule
4813 optimally. Return 0 for now. */
4814 return 0;
4816 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4818 if (! recog_memoized (dep_insn))
4819 return 0;
4820 switch (get_attr_type (dep_insn))
4822 case TYPE_FPDIVSGL:
4823 case TYPE_FPDIVDBL:
4824 case TYPE_FPSQRTSGL:
4825 case TYPE_FPSQRTDBL:
4826 /* An ALU flop can't be issued until two cycles before a
4827 preceding divide or sqrt operation has finished if
4828 the target of the ALU flop is also the target of
4829 the divide or sqrt operation. */
4830 return insn_default_latency (dep_insn) - 2;
4832 default:
4833 return 0;
4838 /* For other output dependencies, the cost is 0. */
4839 return 0;
4841 default:
4842 gcc_unreachable ();
4846 /* Adjust scheduling priorities. We use this to try and keep addil
4847 and the next use of %r1 close together. */
4848 static int
4849 pa_adjust_priority (rtx_insn *insn, int priority)
4851 rtx set = single_set (insn);
4852 rtx src, dest;
4853 if (set)
4855 src = SET_SRC (set);
4856 dest = SET_DEST (set);
4857 if (GET_CODE (src) == LO_SUM
4858 && symbolic_operand (XEXP (src, 1), VOIDmode)
4859 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4860 priority >>= 3;
4862 else if (GET_CODE (src) == MEM
4863 && GET_CODE (XEXP (src, 0)) == LO_SUM
4864 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4865 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4866 priority >>= 1;
4868 else if (GET_CODE (dest) == MEM
4869 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4870 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4871 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4872 priority >>= 3;
4874 return priority;
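/* For example, an insn whose source is a LO_SUM of a non-read-only
   symbol (the typical consumer of a preceding addil) has its priority
   cut by the >> 3 above, e.g. from 64 down to 8.  */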
4877 /* The 700 can only issue a single insn at a time.
4878 The 7XXX processors can issue two insns at a time.
4879 The 8000 can issue 4 insns at a time. */
4880 static int
4881 pa_issue_rate (void)
4883 switch (pa_cpu)
4885 case PROCESSOR_700: return 1;
4886 case PROCESSOR_7100: return 2;
4887 case PROCESSOR_7100LC: return 2;
4888 case PROCESSOR_7200: return 2;
4889 case PROCESSOR_7300: return 2;
4890 case PROCESSOR_8000: return 4;
4892 default:
4893 gcc_unreachable ();
4899 /* Return any length plus adjustment needed by INSN which already has
4900 its length computed as LENGTH. Return LENGTH if no adjustment is
4901 necessary.
4903 Also compute the length of an inline block move here as it is too
4904 complicated to express as a length attribute in pa.md. */
4906 pa_adjust_insn_length (rtx_insn *insn, int length)
4908 rtx pat = PATTERN (insn);
4910 /* If length is negative or undefined, provide initial length. */
4911 if ((unsigned int) length >= INT_MAX)
4913 if (GET_CODE (pat) == SEQUENCE)
4914 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
4916 switch (get_attr_type (insn))
4918 case TYPE_MILLI:
4919 length = pa_attr_length_millicode_call (insn);
4920 break;
4921 case TYPE_CALL:
4922 length = pa_attr_length_call (insn, 0);
4923 break;
4924 case TYPE_SIBCALL:
4925 length = pa_attr_length_call (insn, 1);
4926 break;
4927 case TYPE_DYNCALL:
4928 length = pa_attr_length_indirect_call (insn);
4929 break;
4930 case TYPE_SH_FUNC_ADRS:
4931 length = pa_attr_length_millicode_call (insn) + 20;
4932 break;
4933 default:
4934 gcc_unreachable ();
4938 /* Block move pattern. */
4939 if (NONJUMP_INSN_P (insn)
4940 && GET_CODE (pat) == PARALLEL
4941 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4942 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4943 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4944 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4945 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4946 length += compute_movmem_length (insn) - 4;
4947 /* Block clear pattern. */
4948 else if (NONJUMP_INSN_P (insn)
4949 && GET_CODE (pat) == PARALLEL
4950 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4951 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4952 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4953 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4954 length += compute_clrmem_length (insn) - 4;
4955 /* Conditional branch with an unfilled delay slot. */
4956 else if (JUMP_P (insn) && ! simplejump_p (insn))
4958 /* Adjust a short backwards conditional with an unfilled delay slot. */
4959 if (GET_CODE (pat) == SET
4960 && length == 4
4961 && JUMP_LABEL (insn) != NULL_RTX
4962 && ! forward_branch_p (insn))
4963 length += 4;
4964 else if (GET_CODE (pat) == PARALLEL
4965 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4966 && length == 4)
4967 length += 4;
4968 /* Adjust dbra insn with short backwards conditional branch with
4969 unfilled delay slot -- only for the case where the counter is in a
4970 general register. */
4971 else if (GET_CODE (pat) == PARALLEL
4972 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4973 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4974 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4975 && length == 4
4976 && ! forward_branch_p (insn))
4977 length += 4;
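/* E.g., each branch case above turns a nominal 4-byte branch with an
   unfilled delay slot into 8 bytes.  */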
4979 return length;
4982 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4984 static bool
4985 pa_print_operand_punct_valid_p (unsigned char code)
4987 if (code == '@'
4988 || code == '#'
4989 || code == '*'
4990 || code == '^')
4991 return true;
4993 return false;
4996 /* Print operand X (an rtx) in assembler syntax to file FILE.
4997 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4998 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5000 void
5001 pa_print_operand (FILE *file, rtx x, int code)
5003 switch (code)
5005 case '#':
5006 /* Output a 'nop' if there's nothing for the delay slot. */
5007 if (dbr_sequence_length () == 0)
5008 fputs ("\n\tnop", file);
5009 return;
5010 case '*':
5011 /* Output a nullification completer if there's nothing for the
5012 delay slot or nullification is requested. */
5013 if (dbr_sequence_length () == 0 ||
5014 (final_sequence &&
5015 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5016 fputs (",n", file);
5017 return;
5018 case 'R':
5019 /* Print out the second register name of a register pair.
5020 I.e., R (6) => 7. */
5021 fputs (reg_names[REGNO (x) + 1], file);
5022 return;
5023 case 'r':
5024 /* A register or zero. */
5025 if (x == const0_rtx
5026 || (x == CONST0_RTX (DFmode))
5027 || (x == CONST0_RTX (SFmode)))
5029 fputs ("%r0", file);
5030 return;
5032 else
5033 break;
5034 case 'f':
5035 /* A register or zero (floating point). */
5036 if (x == const0_rtx
5037 || (x == CONST0_RTX (DFmode))
5038 || (x == CONST0_RTX (SFmode)))
5040 fputs ("%fr0", file);
5041 return;
5043 else
5044 break;
5045 case 'A':
5047 rtx xoperands[2];
5049 xoperands[0] = XEXP (XEXP (x, 0), 0);
5050 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5051 pa_output_global_address (file, xoperands[1], 0);
5052 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5053 return;
5056 case 'C': /* Plain (C)ondition */
5057 case 'X':
5058 switch (GET_CODE (x))
5060 case EQ:
5061 fputs ("=", file); break;
5062 case NE:
5063 fputs ("<>", file); break;
5064 case GT:
5065 fputs (">", file); break;
5066 case GE:
5067 fputs (">=", file); break;
5068 case GEU:
5069 fputs (">>=", file); break;
5070 case GTU:
5071 fputs (">>", file); break;
5072 case LT:
5073 fputs ("<", file); break;
5074 case LE:
5075 fputs ("<=", file); break;
5076 case LEU:
5077 fputs ("<<=", file); break;
5078 case LTU:
5079 fputs ("<<", file); break;
5080 default:
5081 gcc_unreachable ();
5083 return;
5084 case 'N': /* Condition, (N)egated */
5085 switch (GET_CODE (x))
5087 case EQ:
5088 fputs ("<>", file); break;
5089 case NE:
5090 fputs ("=", file); break;
5091 case GT:
5092 fputs ("<=", file); break;
5093 case GE:
5094 fputs ("<", file); break;
5095 case GEU:
5096 fputs ("<<", file); break;
5097 case GTU:
5098 fputs ("<<=", file); break;
5099 case LT:
5100 fputs (">=", file); break;
5101 case LE:
5102 fputs (">", file); break;
5103 case LEU:
5104 fputs (">>", file); break;
5105 case LTU:
5106 fputs (">>=", file); break;
5107 default:
5108 gcc_unreachable ();
5110 return;
5111 /* For floating point comparisons. Note that the output
5112 predicates are the complement of the desired mode. The
5113 conditions for GT, GE, LT, LE and LTGT cause an invalid
5114 operation exception if the result is unordered and this
5115 exception is enabled in the floating-point status register. */
5116 case 'Y':
5117 switch (GET_CODE (x))
5119 case EQ:
5120 fputs ("!=", file); break;
5121 case NE:
5122 fputs ("=", file); break;
5123 case GT:
5124 fputs ("!>", file); break;
5125 case GE:
5126 fputs ("!>=", file); break;
5127 case LT:
5128 fputs ("!<", file); break;
5129 case LE:
5130 fputs ("!<=", file); break;
5131 case LTGT:
5132 fputs ("!<>", file); break;
5133 case UNLE:
5134 fputs ("!?<=", file); break;
5135 case UNLT:
5136 fputs ("!?<", file); break;
5137 case UNGE:
5138 fputs ("!?>=", file); break;
5139 case UNGT:
5140 fputs ("!?>", file); break;
5141 case UNEQ:
5142 fputs ("!?=", file); break;
5143 case UNORDERED:
5144 fputs ("!?", file); break;
5145 case ORDERED:
5146 fputs ("?", file); break;
5147 default:
5148 gcc_unreachable ();
5150 return;
5151 case 'S': /* Condition, operands are (S)wapped. */
5152 switch (GET_CODE (x))
5154 case EQ:
5155 fputs ("=", file); break;
5156 case NE:
5157 fputs ("<>", file); break;
5158 case GT:
5159 fputs ("<", file); break;
5160 case GE:
5161 fputs ("<=", file); break;
5162 case GEU:
5163 fputs ("<<=", file); break;
5164 case GTU:
5165 fputs ("<<", file); break;
5166 case LT:
5167 fputs (">", file); break;
5168 case LE:
5169 fputs (">=", file); break;
5170 case LEU:
5171 fputs (">>=", file); break;
5172 case LTU:
5173 fputs (">>", file); break;
5174 default:
5175 gcc_unreachable ();
5177 return;
5178 case 'B': /* Condition, (B)oth swapped and negate. */
5179 switch (GET_CODE (x))
5181 case EQ:
5182 fputs ("<>", file); break;
5183 case NE:
5184 fputs ("=", file); break;
5185 case GT:
5186 fputs (">=", file); break;
5187 case GE:
5188 fputs (">", file); break;
5189 case GEU:
5190 fputs (">>", file); break;
5191 case GTU:
5192 fputs (">>=", file); break;
5193 case LT:
5194 fputs ("<=", file); break;
5195 case LE:
5196 fputs ("<", file); break;
5197 case LEU:
5198 fputs ("<<", file); break;
5199 case LTU:
5200 fputs ("<<=", file); break;
5201 default:
5202 gcc_unreachable ();
5204 return;
5205 case 'k':
5206 gcc_assert (GET_CODE (x) == CONST_INT);
5207 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5208 return;
5209 case 'Q':
5210 gcc_assert (GET_CODE (x) == CONST_INT);
5211 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5212 return;
5213 case 'L':
5214 gcc_assert (GET_CODE (x) == CONST_INT);
5215 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5216 return;
5217 case 'O':
5218 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5219 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5220 return;
5221 case 'p':
5222 gcc_assert (GET_CODE (x) == CONST_INT);
5223 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5224 return;
5225 case 'P':
5226 gcc_assert (GET_CODE (x) == CONST_INT);
5227 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5228 return;
5229 case 'I':
5230 if (GET_CODE (x) == CONST_INT)
5231 fputs ("i", file);
5232 return;
5233 case 'M':
5234 case 'F':
5235 switch (GET_CODE (XEXP (x, 0)))
5237 case PRE_DEC:
5238 case PRE_INC:
5239 if (ASSEMBLER_DIALECT == 0)
5240 fputs ("s,mb", file);
5241 else
5242 fputs (",mb", file);
5243 break;
5244 case POST_DEC:
5245 case POST_INC:
5246 if (ASSEMBLER_DIALECT == 0)
5247 fputs ("s,ma", file);
5248 else
5249 fputs (",ma", file);
5250 break;
5251 case PLUS:
5252 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5253 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5255 if (ASSEMBLER_DIALECT == 0)
5256 fputs ("x", file);
5258 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5259 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5261 if (ASSEMBLER_DIALECT == 0)
5262 fputs ("x,s", file);
5263 else
5264 fputs (",s", file);
5266 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5267 fputs ("s", file);
5268 break;
5269 default:
5270 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5271 fputs ("s", file);
5272 break;
5274 return;
5275 case 'G':
5276 pa_output_global_address (file, x, 0);
5277 return;
5278 case 'H':
5279 pa_output_global_address (file, x, 1);
5280 return;
5281 case 0: /* Don't do anything special */
5282 break;
5283 case 'Z':
5285 unsigned op[3];
5286 compute_zdepwi_operands (INTVAL (x), op);
5287 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5288 return;
5290 case 'z':
5292 unsigned op[3];
5293 compute_zdepdi_operands (INTVAL (x), op);
5294 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5295 return;
5297 case 'c':
5298 /* We can get here from a .vtable_inherit due to our
5299 CONSTANT_ADDRESS_P rejecting perfectly good constant
5300 addresses. */
5301 break;
5302 default:
5303 gcc_unreachable ();
5305 if (GET_CODE (x) == REG)
5307 fputs (reg_names [REGNO (x)], file);
5308 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5310 fputs ("R", file);
5311 return;
5313 if (FP_REG_P (x)
5314 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5315 && (REGNO (x) & 1) == 0)
5316 fputs ("L", file);
5318 else if (GET_CODE (x) == MEM)
5320 int size = GET_MODE_SIZE (GET_MODE (x));
5321 rtx base = NULL_RTX;
5322 switch (GET_CODE (XEXP (x, 0)))
5324 case PRE_DEC:
5325 case POST_DEC:
5326 base = XEXP (XEXP (x, 0), 0);
5327 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5328 break;
5329 case PRE_INC:
5330 case POST_INC:
5331 base = XEXP (XEXP (x, 0), 0);
5332 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5333 break;
5334 case PLUS:
5335 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5336 fprintf (file, "%s(%s)",
5337 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5338 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5339 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5340 fprintf (file, "%s(%s)",
5341 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5342 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5343 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5344 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5346 /* Because the REG_POINTER flag can get lost during reload,
5347 pa_legitimate_address_p canonicalizes the order of the
5348 index and base registers in the combined move patterns. */
5349 rtx base = XEXP (XEXP (x, 0), 1);
5350 rtx index = XEXP (XEXP (x, 0), 0);
5352 fprintf (file, "%s(%s)",
5353 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5355 else
5356 output_address (XEXP (x, 0));
5357 break;
5358 default:
5359 output_address (XEXP (x, 0));
5360 break;
5363 else
5364 output_addr_const (file, x);
5367 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5369 void
5370 pa_output_global_address (FILE *file, rtx x, int round_constant)
5373 /* Imagine (high (const (plus ...))). */
5374 if (GET_CODE (x) == HIGH)
5375 x = XEXP (x, 0);
5377 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5378 output_addr_const (file, x);
5379 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5381 output_addr_const (file, x);
5382 fputs ("-$global$", file);
5384 else if (GET_CODE (x) == CONST)
5386 const char *sep = "";
5387 int offset = 0; /* assembler wants -$global$ at end */
5388 rtx base = NULL_RTX;
5390 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5392 case SYMBOL_REF:
5393 base = XEXP (XEXP (x, 0), 0);
5394 output_addr_const (file, base);
5395 break;
5396 case CONST_INT:
5397 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5398 break;
5399 default:
5400 gcc_unreachable ();
5403 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5405 case SYMBOL_REF:
5406 base = XEXP (XEXP (x, 0), 1);
5407 output_addr_const (file, base);
5408 break;
5409 case CONST_INT:
5410 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5411 break;
5412 default:
5413 gcc_unreachable ();
5416 /* How bogus. The compiler is apparently responsible for
5417 rounding the constant if it uses an LR field selector.
5419 The linker and/or assembler seem a better place since
5420 they have to do this kind of thing already.
5422 If we fail to do this, HP's optimizing linker may eliminate
5423 an addil, but not update the ldw/stw/ldo instruction that
5424 uses the result of the addil. */
5425 if (round_constant)
5426 offset = ((offset + 0x1000) & ~0x1fff);
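/* Worked example: an offset of 0x1800 rounds to
   (0x1800 + 0x1000) & ~0x1fff = 0x2000.  */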
5428 switch (GET_CODE (XEXP (x, 0)))
5430 case PLUS:
5431 if (offset < 0)
5433 offset = -offset;
5434 sep = "-";
5436 else
5437 sep = "+";
5438 break;
5440 case MINUS:
5441 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5442 sep = "-";
5443 break;
5445 default:
5446 gcc_unreachable ();
5449 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5450 fputs ("-$global$", file);
5451 if (offset)
5452 fprintf (file, "%s%d", sep, offset);
5454 else
5455 output_addr_const (file, x);
5458 /* Output boilerplate text to appear at the beginning of the file.
5459 There are several possible versions. */
5460 #define aputs(x) fputs(x, asm_out_file)
5461 static inline void
5462 pa_file_start_level (void)
5464 if (TARGET_64BIT)
5465 aputs ("\t.LEVEL 2.0w\n");
5466 else if (TARGET_PA_20)
5467 aputs ("\t.LEVEL 2.0\n");
5468 else if (TARGET_PA_11)
5469 aputs ("\t.LEVEL 1.1\n");
5470 else
5471 aputs ("\t.LEVEL 1.0\n");
5474 static inline void
5475 pa_file_start_space (int sortspace)
5477 aputs ("\t.SPACE $PRIVATE$");
5478 if (sortspace)
5479 aputs (",SORT=16");
5480 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5481 if (flag_tm)
5482 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5483 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5484 "\n\t.SPACE $TEXT$");
5485 if (sortspace)
5486 aputs (",SORT=8");
5487 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5488 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5491 static inline void
5492 pa_file_start_file (int want_version)
5494 if (write_symbols != NO_DEBUG)
5496 output_file_directive (asm_out_file, main_input_filename);
5497 if (want_version)
5498 aputs ("\t.version\t\"01.01\"\n");
5502 static inline void
5503 pa_file_start_mcount (const char *aswhat)
5505 if (profile_flag)
5506 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5509 static void
5510 pa_elf_file_start (void)
5512 pa_file_start_level ();
5513 pa_file_start_mcount ("ENTRY");
5514 pa_file_start_file (0);
5517 static void
5518 pa_som_file_start (void)
5520 pa_file_start_level ();
5521 pa_file_start_space (0);
5522 aputs ("\t.IMPORT $global$,DATA\n"
5523 "\t.IMPORT $$dyncall,MILLICODE\n");
5524 pa_file_start_mcount ("CODE");
5525 pa_file_start_file (0);
5528 static void
5529 pa_linux_file_start (void)
5531 pa_file_start_file (1);
5532 pa_file_start_level ();
5533 pa_file_start_mcount ("CODE");
5536 static void
5537 pa_hpux64_gas_file_start (void)
5539 pa_file_start_level ();
5540 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5541 if (profile_flag)
5542 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5543 #endif
5544 pa_file_start_file (1);
5547 static void
5548 pa_hpux64_hpas_file_start (void)
5550 pa_file_start_level ();
5551 pa_file_start_space (1);
5552 pa_file_start_mcount ("CODE");
5553 pa_file_start_file (0);
5555 #undef aputs
5557 /* Search the deferred plabel list for SYMBOL and return its internal
5558 label. If an entry for SYMBOL is not found, a new entry is created. */
5561 pa_get_deferred_plabel (rtx symbol)
5563 const char *fname = XSTR (symbol, 0);
5564 size_t i;
5566 /* See if we have already put this function on the list of deferred
5567 plabels. This list is generally small, so a linear search is not
5568 too ugly. If it proves too slow, replace it with something faster. */
5569 for (i = 0; i < n_deferred_plabels; i++)
5570 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5571 break;
5573 /* If the deferred plabel list is empty, or this entry was not found
5574 on the list, create a new entry on the list. */
5575 if (deferred_plabels == NULL || i == n_deferred_plabels)
5577 tree id;
5579 if (deferred_plabels == 0)
5580 deferred_plabels = ggc_alloc<deferred_plabel> ();
5581 else
5582 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5583 deferred_plabels,
5584 n_deferred_plabels + 1);
5586 i = n_deferred_plabels++;
5587 deferred_plabels[i].internal_label = gen_label_rtx ();
5588 deferred_plabels[i].symbol = symbol;
5590 /* Gross. We have just implicitly taken the address of this
5591 function. Mark it in the same manner as assemble_name. */
5592 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5593 if (id)
5594 mark_referenced (id);
5597 return deferred_plabels[i].internal_label;
5600 static void
5601 output_deferred_plabels (void)
5603 size_t i;
5605 /* If we have some deferred plabels, then we need to switch into the
5606 data or readonly data section, and align it to a 4 byte boundary
5607 before outputting the deferred plabels. */
5608 if (n_deferred_plabels)
5610 switch_to_section (flag_pic ? data_section : readonly_data_section);
5611 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5614 /* Now output the deferred plabels. */
5615 for (i = 0; i < n_deferred_plabels; i++)
5617 targetm.asm_out.internal_label (asm_out_file, "L",
5618 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5619 assemble_integer (deferred_plabels[i].symbol,
5620 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5624 /* Initialize optabs to point to emulation routines. */
5626 static void
5627 pa_init_libfuncs (void)
5629 if (HPUX_LONG_DOUBLE_LIBRARY)
5631 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5632 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5633 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5634 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5635 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5636 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5637 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5638 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5639 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5641 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5642 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5643 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5644 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5645 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5646 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5647 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5649 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5650 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5651 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5652 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5654 set_conv_libfunc (sfix_optab, SImode, TFmode,
5655 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5656 : "_U_Qfcnvfxt_quad_to_sgl");
5657 set_conv_libfunc (sfix_optab, DImode, TFmode,
5658 "_U_Qfcnvfxt_quad_to_dbl");
5659 set_conv_libfunc (ufix_optab, SImode, TFmode,
5660 "_U_Qfcnvfxt_quad_to_usgl");
5661 set_conv_libfunc (ufix_optab, DImode, TFmode,
5662 "_U_Qfcnvfxt_quad_to_udbl");
5664 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5665 "_U_Qfcnvxf_sgl_to_quad");
5666 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5667 "_U_Qfcnvxf_dbl_to_quad");
5668 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5669 "_U_Qfcnvxf_usgl_to_quad");
5670 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5671 "_U_Qfcnvxf_udbl_to_quad");
5674 if (TARGET_SYNC_LIBCALL)
5675 init_sync_libfuncs (UNITS_PER_WORD);
5678 /* HP's millicode routines mean something special to the assembler.
5679 Keep track of which ones we have used. */
5681 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5682 static void import_milli (enum millicodes);
5683 static char imported[(int) end1000];
5684 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5685 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5686 #define MILLI_START 10
5688 static void
5689 import_milli (enum millicodes code)
5691 char str[sizeof (import_string)];
5693 if (!imported[(int) code])
5695 imported[(int) code] = 1;
5696 strcpy (str, import_string);
5697 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5698 output_asm_insn (str, 0);
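/* Worked example: import_milli (mulI) copies "mulI" over the four
   dots in the template (which begin at offset MILLI_START == 10) and
   outputs ".IMPORT $$mulI,MILLICODE".  */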
5702 /* The register constraints have put the operands and return value in
5703 the proper registers. */
5705 const char *
5706 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5708 import_milli (mulI);
5709 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5712 /* Emit the rtl for doing a division by a constant. */
5714 /* Do magic division millicodes exist for this value? */
5715 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
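/* I.e., the nonzero entries above mark divisors 3, 5, 6, 7, 9, 10,
   12, 14 and 15 as having dedicated magic millicode routines.  */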
5717 /* We'll use an array to keep track of the magic millicodes and
5718 whether or not we've used them already. [n][0] is signed, [n][1] is
5719 unsigned. */
5721 static int div_milli[16][2];
5724 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5726 if (GET_CODE (operands[2]) == CONST_INT
5727 && INTVAL (operands[2]) > 0
5728 && INTVAL (operands[2]) < 16
5729 && pa_magic_milli[INTVAL (operands[2])])
5731 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5733 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5734 emit
5735 (gen_rtx_PARALLEL
5736 (VOIDmode,
5737 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5738 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5739 SImode,
5740 gen_rtx_REG (SImode, 26),
5741 operands[2])),
5742 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5743 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5744 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5745 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5746 gen_rtx_CLOBBER (VOIDmode, ret))));
5747 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5748 return 1;
5750 return 0;
5753 const char *
5754 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5756 int divisor;
5758 /* If the divisor is a constant, try to use one of the special
5759 opcodes.  */
5760 if (GET_CODE (operands[0]) == CONST_INT)
5762 static char buf[100];
5763 divisor = INTVAL (operands[0]);
5764 if (!div_milli[divisor][unsignedp])
5766 div_milli[divisor][unsignedp] = 1;
5767 if (unsignedp)
5768 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5769 else
5770 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5772 if (unsignedp)
5774 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5775 INTVAL (operands[0]));
5776 return pa_output_millicode_call (insn,
5777 gen_rtx_SYMBOL_REF (SImode, buf));
5779 else
5781 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5782 INTVAL (operands[0]));
5783 return pa_output_millicode_call (insn,
5784 gen_rtx_SYMBOL_REF (SImode, buf));
5787 /* Divisor isn't a special constant. */
5788 else
5790 if (unsignedp)
5792 import_milli (divU);
5793 return pa_output_millicode_call (insn,
5794 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5796 else
5798 import_milli (divI);
5799 return pa_output_millicode_call (insn,
5800 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
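/* Illustrative note, not from the original source: a signed divide by 7
   therefore emits ".IMPORT $$divI_7,MILLICODE" on first use, followed
   by a millicode call to $$divI_7, e.g. "bl $$divI_7,%r31" in the
   short-reach 32-bit case; the exact branch form is chosen by
   pa_output_millicode_call.  */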
5805 /* Output a $$rem millicode to do mod. */
5807 const char *
5808 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5810 if (unsignedp)
5812 import_milli (remU);
5813 return pa_output_millicode_call (insn,
5814 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5816 else
5818 import_milli (remI);
5819 return pa_output_millicode_call (insn,
5820 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5824 void
5825 pa_output_arg_descriptor (rtx call_insn)
5827 const char *arg_regs[4];
5828 enum machine_mode arg_mode;
5829 rtx link;
5830 int i, output_flag = 0;
5831 int regno;
5833 /* We neither need nor want argument location descriptors for the
5834 64-bit runtime environment or the ELF32 environment. */
5835 if (TARGET_64BIT || TARGET_ELF32)
5836 return;
5838 for (i = 0; i < 4; i++)
5839 arg_regs[i] = 0;
5841 /* Specify explicitly that no argument relocations should take place
5842 if using the portable runtime calling conventions. */
5843 if (TARGET_PORTABLE_RUNTIME)
5845 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5846 asm_out_file);
5847 return;
5850 gcc_assert (CALL_P (call_insn));
5851 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5852 link; link = XEXP (link, 1))
5854 rtx use = XEXP (link, 0);
5856 if (! (GET_CODE (use) == USE
5857 && GET_CODE (XEXP (use, 0)) == REG
5858 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5859 continue;
5861 arg_mode = GET_MODE (XEXP (use, 0));
5862 regno = REGNO (XEXP (use, 0));
5863 if (regno >= 23 && regno <= 26)
5865 arg_regs[26 - regno] = "GR";
5866 if (arg_mode == DImode)
5867 arg_regs[25 - regno] = "GR";
5869 else if (regno >= 32 && regno <= 39)
5871 if (arg_mode == SFmode)
5872 arg_regs[(regno - 32) / 2] = "FR";
5873 else
5875 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5876 arg_regs[(regno - 34) / 2] = "FR";
5877 arg_regs[(regno - 34) / 2 + 1] = "FU";
5878 #else
5879 arg_regs[(regno - 34) / 2] = "FU";
5880 arg_regs[(regno - 34) / 2 + 1] = "FR";
5881 #endif
5885 fputs ("\t.CALL ", asm_out_file);
5886 for (i = 0; i < 4; i++)
5888 if (arg_regs[i])
5890 if (output_flag++)
5891 fputc (',', asm_out_file);
5892 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5895 fputc ('\n', asm_out_file);
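/* Illustrative sketch, not from the original source: for a 32-bit SOM
   call passing (int, double), the int occupies ARGW0 and the double the
   second 64-bit argument slot, so the emitted directive would look like
	.CALL ARGW0=GR,ARGW2=FR,ARGW3=FU
   with the FR/FU order depending on HP_FP_ARG_DESCRIPTOR_REVERSED.  */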
5898 /* Inform reload about cases where moving X with a mode MODE to or from
5899 a register in RCLASS requires an extra scratch or immediate register.
5900 Return the class needed for the immediate register. */
5902 static reg_class_t
5903 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5904 enum machine_mode mode, secondary_reload_info *sri)
5906 int regno;
5907 enum reg_class rclass = (enum reg_class) rclass_i;
5909 /* Handle the easy stuff first. */
5910 if (rclass == R1_REGS)
5911 return NO_REGS;
5913 if (REG_P (x))
5915 regno = REGNO (x);
5916 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5917 return NO_REGS;
5919 else
5920 regno = -1;
5922 /* If we have something like (mem (mem (...))), we can safely assume the
5923 inner MEM will end up in a general register after reloading, so there's
5924 no need for a secondary reload. */
5925 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5926 return NO_REGS;
5928 /* Trying to load a constant into a FP register during PIC code
5929 generation requires %r1 as a scratch register. For float modes,
5930 the only legitimate constant is CONST0_RTX. However, there are
5931 a few patterns that accept constant double operands. */
5932 if (flag_pic
5933 && FP_REG_CLASS_P (rclass)
5934 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5936 switch (mode)
5938 case SImode:
5939 sri->icode = CODE_FOR_reload_insi_r1;
5940 break;
5942 case DImode:
5943 sri->icode = CODE_FOR_reload_indi_r1;
5944 break;
5946 case SFmode:
5947 sri->icode = CODE_FOR_reload_insf_r1;
5948 break;
5950 case DFmode:
5951 sri->icode = CODE_FOR_reload_indf_r1;
5952 break;
5954 default:
5955 gcc_unreachable ();
5957 return NO_REGS;
5960 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5961 register when we're generating PIC code or when the operand isn't
5962 readonly. */
5963 if (pa_symbolic_expression_p (x))
5965 if (GET_CODE (x) == HIGH)
5966 x = XEXP (x, 0);
5968 if (flag_pic || !read_only_operand (x, VOIDmode))
5970 switch (mode)
5972 case SImode:
5973 sri->icode = CODE_FOR_reload_insi_r1;
5974 break;
5976 case DImode:
5977 sri->icode = CODE_FOR_reload_indi_r1;
5978 break;
5980 default:
5981 gcc_unreachable ();
5983 return NO_REGS;
5987 /* Profiling showed the PA port spends about 1.3% of its compilation
5988 time in true_regnum from calls inside pa_secondary_reload_class. */
5989 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5990 regno = true_regnum (x);
5992 /* Handle reloads for floating point loads and stores. */
5993 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5994 && FP_REG_CLASS_P (rclass))
5996 if (MEM_P (x))
5998 x = XEXP (x, 0);
6000 /* We don't need an intermediate for indexed and LO_SUM DLT
6001 memory addresses. When INT14_OK_STRICT is true, it might
6002 appear that we could directly allow register indirect
6003 memory addresses. However, this doesn't work because we
6004 don't support SUBREGs in floating-point register copies
6005 and reload doesn't tell us when it's going to use a SUBREG. */
6006 if (IS_INDEX_ADDR_P (x)
6007 || IS_LO_SUM_DLT_ADDR_P (x))
6008 return NO_REGS;
6010 /* Request intermediate general register. */
6011 return GENERAL_REGS;
6014 /* Request a secondary reload with a general scratch register
6015 for everything else. ??? Could symbolic operands be handled
6016 directly when generating non-pic PA 2.0 code? */
6017 sri->icode = (in_p
6018 ? direct_optab_handler (reload_in_optab, mode)
6019 : direct_optab_handler (reload_out_optab, mode));
6020 return NO_REGS;
6023 /* A SAR<->FP register copy requires an intermediate general register
6024 and secondary memory. We need a secondary reload with a general
6025 scratch register for spills. */
6026 if (rclass == SHIFT_REGS)
6028 /* Handle spill. */
6029 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6031 sri->icode = (in_p
6032 ? direct_optab_handler (reload_in_optab, mode)
6033 : direct_optab_handler (reload_out_optab, mode));
6034 return NO_REGS;
6037 /* Handle FP copy. */
6038 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6039 return GENERAL_REGS;
6042 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6043 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6044 && FP_REG_CLASS_P (rclass))
6045 return GENERAL_REGS;
6047 return NO_REGS;
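/* Illustrative note, not from the original source: one concrete
   consequence of the code above is that a SAR<->FP copy is never done
   directly; GENERAL_REGS is returned so the value is staged through a
   general register, with secondary memory used for the FP<->GR leg of
   the copy.  */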
6050 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6051 is only marked as live on entry by df-scan when it is a fixed
6052 register. It isn't a fixed register in the 64-bit runtime,
6053 so we need to mark it here. */
6055 static void
6056 pa_extra_live_on_entry (bitmap regs)
6058 if (TARGET_64BIT)
6059 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6062 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6063 to prevent it from being deleted. */
6065 rtx
6066 pa_eh_return_handler_rtx (void)
6068 rtx tmp;
6070 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6071 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6072 tmp = gen_rtx_MEM (word_mode, tmp);
6073 tmp->volatil = 1;
6074 return tmp;
6077 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6078 by invisible reference. As a GCC extension, we also pass anything
6079 with a zero or variable size by reference.
6081 The 64-bit runtime does not describe passing any types by invisible
6082 reference.  The internals of GCC can't currently handle passing
6083 empty structures, or zero- or variable-length arrays, when they are
6084 not passed entirely on the stack or by reference.  Thus, as a GCC
6085 extension, we pass these types by reference. The HP compiler doesn't
6086 support these types, so hopefully there shouldn't be any compatibility
6087 issues. This may have to be revisited when HP releases a C99 compiler
6088 or updates the ABI. */
6090 static bool
6091 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6092 enum machine_mode mode, const_tree type,
6093 bool named ATTRIBUTE_UNUSED)
6095 HOST_WIDE_INT size;
6097 if (type)
6098 size = int_size_in_bytes (type);
6099 else
6100 size = GET_MODE_SIZE (mode);
6102 if (TARGET_64BIT)
6103 return size <= 0;
6104 else
6105 return size <= 0 || size > 8;
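/* Examples of the rule above, not from the original source: a 12-byte
   struct is passed by invisible reference in the 32-bit runtime
   (size > 8) but by value in the 64-bit runtime; a zero-sized struct is
   passed by reference in both runtimes, as the GCC extension described
   above.  */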
6108 enum direction
6109 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6111 if (mode == BLKmode
6112 || (TARGET_64BIT
6113 && type
6114 && (AGGREGATE_TYPE_P (type)
6115 || TREE_CODE (type) == COMPLEX_TYPE
6116 || TREE_CODE (type) == VECTOR_TYPE)))
6118 /* Return none if justification is not required. */
6119 if (type
6120 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6121 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6122 return none;
6124 /* The directions set here are ignored when a BLKmode argument larger
6125 than a word is placed in a register. Different code is used for
6126 the stack and registers. This makes it difficult to have a
6127 consistent data representation for both the stack and registers.
6128 For both runtimes, the justification and padding for arguments on
6129 the stack and in registers should be identical. */
6130 if (TARGET_64BIT)
6131 /* The 64-bit runtime specifies left justification for aggregates. */
6132 return upward;
6133 else
6134 /* The 32-bit runtime architecture specifies right justification.
6135 When the argument is passed on the stack, the argument is padded
6136 with garbage on the left. The HP compiler pads with zeros. */
6137 return downward;
6140 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6141 return downward;
6142 else
6143 return none;
6147 /* Do what is necessary for `va_start'. We look at the current function
6148 to determine if stdargs or varargs is used and fill in an initial
6149 va_list. A pointer to this constructor is returned. */
6151 static rtx
6152 hppa_builtin_saveregs (void)
6154 rtx offset, dest;
6155 tree fntype = TREE_TYPE (current_function_decl);
6156 int argadj = ((!stdarg_p (fntype))
6157 ? UNITS_PER_WORD : 0);
6159 if (argadj)
6160 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6161 else
6162 offset = crtl->args.arg_offset_rtx;
6164 if (TARGET_64BIT)
6166 int i, off;
6168 /* Adjust for varargs/stdarg differences. */
6169 if (argadj)
6170 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6171 else
6172 offset = crtl->args.arg_offset_rtx;
6174 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6175 from the incoming arg pointer and growing to larger addresses. */
6176 for (i = 26, off = -64; i >= 19; i--, off += 8)
6177 emit_move_insn (gen_rtx_MEM (word_mode,
6178 plus_constant (Pmode,
6179 arg_pointer_rtx, off)),
6180 gen_rtx_REG (word_mode, i));
6182 /* The incoming args pointer points just beyond the flushback area;
6183 normally this is not a serious concern. However, when we are doing
6184 varargs/stdargs we want to make the arg pointer point to the start
6185 of the incoming argument area. */
6186 emit_move_insn (virtual_incoming_args_rtx,
6187 plus_constant (Pmode, arg_pointer_rtx, -64));
6189 /* Now return a pointer to the first anonymous argument. */
6190 return copy_to_reg (expand_binop (Pmode, add_optab,
6191 virtual_incoming_args_rtx,
6192 offset, 0, 0, OPTAB_LIB_WIDEN));
6195 /* Store general registers on the stack. */
6196 dest = gen_rtx_MEM (BLKmode,
6197 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6198 -16));
6199 set_mem_alias_set (dest, get_varargs_alias_set ());
6200 set_mem_align (dest, BITS_PER_WORD);
6201 move_block_from_reg (23, dest, 4);
6203 /* move_block_from_reg will emit code to store the argument registers
6204 individually as scalar stores.
6206 However, other insns may later load from the same addresses for
6207 a structure load (passing a struct to a varargs routine).
6209 The alias code assumes that such aliasing can never happen, so we
6210 have to keep memory referencing insns from moving up beyond the
6211 last argument register store. So we emit a blockage insn here. */
6212 emit_insn (gen_blockage ());
6214 return copy_to_reg (expand_binop (Pmode, add_optab,
6215 crtl->args.internal_arg_pointer,
6216 offset, 0, 0, OPTAB_LIB_WIDEN));
6219 static void
6220 hppa_va_start (tree valist, rtx nextarg)
6222 nextarg = expand_builtin_saveregs ();
6223 std_expand_builtin_va_start (valist, nextarg);
6226 static tree
6227 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6228 gimple_seq *post_p)
6230 if (TARGET_64BIT)
6232 /* Args grow upward. We can use the generic routines. */
6233 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6235 else /* !TARGET_64BIT */
6237 tree ptr = build_pointer_type (type);
6238 tree valist_type;
6239 tree t, u;
6240 unsigned int size, ofs;
6241 bool indirect;
6243 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6244 if (indirect)
6246 type = ptr;
6247 ptr = build_pointer_type (type);
6249 size = int_size_in_bytes (type);
6250 valist_type = TREE_TYPE (valist);
6252 /* Args grow down. Not handled by generic routines. */
6254 u = fold_convert (sizetype, size_in_bytes (type));
6255 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6256 t = fold_build_pointer_plus (valist, u);
6258 /* Align to 4 or 8 byte boundary depending on argument size. */
6260 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6261 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6262 t = fold_convert (valist_type, t);
6264 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6266 ofs = (8 - size) % 4;
6267 if (ofs != 0)
6268 t = fold_build_pointer_plus_hwi (t, ofs);
6270 t = fold_convert (ptr, t);
6271 t = build_va_arg_indirect_ref (t);
6273 if (indirect)
6274 t = build_va_arg_indirect_ref (t);
6276 return t;
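/* A standalone sketch, not part of GCC, of the 32-bit va_arg address
   arithmetic built above: the pointer is decremented by the argument
   size, rounded down to a 4- or 8-byte boundary, then bumped by
   (8 - size) % 4 so that sub-word arguments are right-justified in
   their word.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static uintptr_t
pa32_va_arg_addr (uintptr_t valist, unsigned int size)
{
  uintptr_t t = valist - size;

  t &= (uintptr_t) (size > 4 ? -8 : -4);   /* Align to 8 or 4 bytes.  */
  t += (8 - size) % 4;                     /* Right-justify.  */
  return t;
}

int
main (void)
{
  /* A 1-byte argument fetched with valist at 0x100 is read from 0xff,
     the last byte of the 4-byte slot at 0xfc.  */
  printf ("%#lx\n", (unsigned long) pa32_va_arg_addr (0x100, 1));
  return 0;
}
#endif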
6280 /* True if MODE is valid for the target. By "valid", we mean able to
6281 be manipulated in non-trivial ways. In particular, this means all
6282 the arithmetic is supported.
6284 Currently, TImode is not valid as the HP 64-bit runtime documentation
6285 doesn't document the alignment and calling conventions for this type.
6286 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6287 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6289 static bool
6290 pa_scalar_mode_supported_p (enum machine_mode mode)
6292 int precision = GET_MODE_PRECISION (mode);
6294 switch (GET_MODE_CLASS (mode))
6296 case MODE_PARTIAL_INT:
6297 case MODE_INT:
6298 if (precision == CHAR_TYPE_SIZE)
6299 return true;
6300 if (precision == SHORT_TYPE_SIZE)
6301 return true;
6302 if (precision == INT_TYPE_SIZE)
6303 return true;
6304 if (precision == LONG_TYPE_SIZE)
6305 return true;
6306 if (precision == LONG_LONG_TYPE_SIZE)
6307 return true;
6308 return false;
6310 case MODE_FLOAT:
6311 if (precision == FLOAT_TYPE_SIZE)
6312 return true;
6313 if (precision == DOUBLE_TYPE_SIZE)
6314 return true;
6315 if (precision == LONG_DOUBLE_TYPE_SIZE)
6316 return true;
6317 return false;
6319 case MODE_DECIMAL_FLOAT:
6320 return false;
6322 default:
6323 gcc_unreachable ();
6327 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6328 it branches into the delay slot. Otherwise, return FALSE. */
6330 static bool
6331 branch_to_delay_slot_p (rtx_insn *insn)
6333 rtx jump_insn;
6335 if (dbr_sequence_length ())
6336 return FALSE;
6338 jump_insn = next_active_insn (JUMP_LABEL (insn));
6339 while (insn)
6341 insn = next_active_insn (insn);
6342 if (jump_insn == insn)
6343 return TRUE;
6345 /* We can't rely on the length of asms. So, we return FALSE when
6346 the branch is followed by an asm. */
6347 if (!insn
6348 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6349 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6350 || get_attr_length (insn) > 0)
6351 break;
6354 return FALSE;
6357 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6359 This occurs when INSN has an unfilled delay slot and is followed
6360 by an asm. Disaster can occur if the asm is empty and the jump
6361 branches into the delay slot. So, we add a nop in the delay slot
6362 when this occurs. */
6364 static bool
6365 branch_needs_nop_p (rtx_insn *insn)
6367 rtx jump_insn;
6369 if (dbr_sequence_length ())
6370 return FALSE;
6372 jump_insn = next_active_insn (JUMP_LABEL (insn));
6373 while (insn)
6375 insn = next_active_insn (insn);
6376 if (!insn || jump_insn == insn)
6377 return TRUE;
6379 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6380 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6381 && get_attr_length (insn) > 0)
6382 break;
6385 return FALSE;
6388 /* Return TRUE if INSN, a forward jump insn, can use nullification
6389 to skip the following instruction. This avoids an extra cycle due
6390 to a mis-predicted branch when we fall through. */
6392 static bool
6393 use_skip_p (rtx_insn *insn)
6395 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6397 while (insn)
6399 insn = next_active_insn (insn);
6401 /* We can't rely on the length of asms, so we can't skip asms. */
6402 if (!insn
6403 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6404 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6405 break;
6406 if (get_attr_length (insn) == 4
6407 && jump_insn == next_active_insn (insn))
6408 return TRUE;
6409 if (get_attr_length (insn) > 0)
6410 break;
6413 return FALSE;
6416 /* This routine handles all the normal conditional branch sequences we
6417 might need to generate. It handles compare immediate vs compare
6418 register, nullification of delay slots, varying length branches,
6419 negated branches, and all combinations of the above. It returns the
6420 output appropriate to emit the branch corresponding to all given
6421 parameters. */
6423 const char *
6424 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6426 static char buf[100];
6427 bool useskip;
6428 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6429 int length = get_attr_length (insn);
6430 int xdelay;
6432 /* A conditional branch to the following instruction (e.g. the delay slot)
6433 is asking for a disaster. This can happen when not optimizing and
6434 when jump optimization fails.
6436 While it is usually safe to emit nothing, this can fail if the
6437 preceding instruction is a nullified branch with an empty delay
6438 slot and the same branch target as this branch. We could check
6439 for this but jump optimization should eliminate nop jumps. It
6440 is always safe to emit a nop. */
6441 if (branch_to_delay_slot_p (insn))
6442 return "nop";
6444 /* The doubleword form of the cmpib instruction doesn't have the LEU
6445 and GTU conditions while the cmpb instruction does. Since we accept
6446 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6447 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6448 operands[2] = gen_rtx_REG (DImode, 0);
6449 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6450 operands[1] = gen_rtx_REG (DImode, 0);
6452 /* If this is a long branch with its delay slot unfilled, set `nullify'
6453 as it can nullify the delay slot and save a nop. */
6454 if (length == 8 && dbr_sequence_length () == 0)
6455 nullify = 1;
6457 /* If this is a short forward conditional branch which did not get
6458 its delay slot filled, the delay slot can still be nullified. */
6459 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6460 nullify = forward_branch_p (insn);
6462 /* A forward branch over a single nullified insn can be done with a
6463 comclr instruction. This avoids a single cycle penalty due to
6464 mis-predicted branch if we fall through (branch not taken). */
6465 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6467 switch (length)
6469 /* All short conditional branches except backwards with an unfilled
6470 delay slot. */
6471 case 4:
6472 if (useskip)
6473 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6474 else
6475 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6476 if (GET_MODE (operands[1]) == DImode)
6477 strcat (buf, "*");
6478 if (negated)
6479 strcat (buf, "%B3");
6480 else
6481 strcat (buf, "%S3");
6482 if (useskip)
6483 strcat (buf, " %2,%r1,%%r0");
6484 else if (nullify)
6486 if (branch_needs_nop_p (insn))
6487 strcat (buf, ",n %2,%r1,%0%#");
6488 else
6489 strcat (buf, ",n %2,%r1,%0");
6491 else
6492 strcat (buf, " %2,%r1,%0");
6493 break;
6495 /* All long conditionals. Note a short backward branch with an
6496 unfilled delay slot is treated just like a long backward branch
6497 with an unfilled delay slot. */
6498 case 8:
6499 /* Handle weird backwards branch with a filled delay slot
6500 which is nullified. */
6501 if (dbr_sequence_length () != 0
6502 && ! forward_branch_p (insn)
6503 && nullify)
6505 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6506 if (GET_MODE (operands[1]) == DImode)
6507 strcat (buf, "*");
6508 if (negated)
6509 strcat (buf, "%S3");
6510 else
6511 strcat (buf, "%B3");
6512 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6514 /* Handle short backwards branch with an unfilled delay slot.
6515 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6516 taken and untaken branches. */
6517 else if (dbr_sequence_length () == 0
6518 && ! forward_branch_p (insn)
6519 && INSN_ADDRESSES_SET_P ()
6520 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6521 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6523 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6524 if (GET_MODE (operands[1]) == DImode)
6525 strcat (buf, "*");
6526 if (negated)
6527 strcat (buf, "%B3 %2,%r1,%0%#");
6528 else
6529 strcat (buf, "%S3 %2,%r1,%0%#");
6531 else
6533 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6534 if (GET_MODE (operands[1]) == DImode)
6535 strcat (buf, "*");
6536 if (negated)
6537 strcat (buf, "%S3");
6538 else
6539 strcat (buf, "%B3");
6540 if (nullify)
6541 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6542 else
6543 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6545 break;
6547 default:
6548 /* The reversed conditional branch must branch over one additional
6549 instruction if the delay slot is filled and needs to be extracted
6550 by pa_output_lbranch. If the delay slot is empty or this is a
6551 nullified forward branch, the instruction after the reversed
6552 condition branch must be nullified. */
6553 if (dbr_sequence_length () == 0
6554 || (nullify && forward_branch_p (insn)))
6556 nullify = 1;
6557 xdelay = 0;
6558 operands[4] = GEN_INT (length);
6560 else
6562 xdelay = 1;
6563 operands[4] = GEN_INT (length + 4);
6566 /* Create a reversed conditional branch which branches around
6567 the following insns. */
6568 if (GET_MODE (operands[1]) != DImode)
6570 if (nullify)
6572 if (negated)
6573 strcpy (buf,
6574 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6575 else
6576 strcpy (buf,
6577 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6579 else
6581 if (negated)
6582 strcpy (buf,
6583 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6584 else
6585 strcpy (buf,
6586 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6589 else
6591 if (nullify)
6593 if (negated)
6594 strcpy (buf,
6595 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6596 else
6597 strcpy (buf,
6598 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6600 else
6602 if (negated)
6603 strcpy (buf,
6604 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6605 else
6606 strcpy (buf,
6607 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6611 output_asm_insn (buf, operands);
6612 return pa_output_lbranch (operands[0], insn, xdelay);
6614 return buf;
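/* Illustrative note, not from the original source: in the short case
   the template assembled above might be "cmp%I2b,<cond>,n %2,%r1,%0",
   which final output expands to something like
	cmpb,<,n %r25,%r4,L$0012
   on PA 2.0, while the default (very long) case prints a reversed
   compare-and-branch around the pa_output_lbranch sequence, e.g.
	cmpb,>=,n %r25,%r4,.+20
   for a 20-byte lbranch.  */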
6617 /* This routine handles output of long unconditional branches that
6618 exceed the maximum range of a simple branch instruction. Since
6619 we don't have a register available for the branch, we save register
6620 %r1 in the frame marker, load the branch destination DEST into %r1,
6621 execute the branch, and restore %r1 in the delay slot of the branch.
6623 Since long branches may have an insn in the delay slot and the
6624 delay slot is used to restore %r1, we in general need to extract
6625 this insn and execute it before the branch. However, to facilitate
6626 use of this function by conditional branches, we also provide an
6627 option to not extract the delay insn so that it will be emitted
6628 after the long branch. So, if there is an insn in the delay slot,
6629 it is extracted if XDELAY is nonzero.
6631 The lengths of the various long-branch sequences are 20, 16 and 24
6632 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6634 const char *
6635 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6637 rtx xoperands[2];
6639 xoperands[0] = dest;
6641 /* First, free up the delay slot. */
6642 if (xdelay && dbr_sequence_length () != 0)
6644 /* We can't handle a jump in the delay slot. */
6645 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6647 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6648 optimize, 0, NULL);
6650 /* Now delete the delay insn. */
6651 SET_INSN_DELETED (NEXT_INSN (insn));
6654 /* Output an insn to save %r1. The runtime documentation doesn't
6655 specify whether the "Clean Up" slot in the caller's frame can
6656 be clobbered by the callee. It isn't copied by HP's builtin
6657 alloca, so this suggests that it can be clobbered if necessary.
6658 The "Static Link" location is copied by HP builtin alloca, so
6659 we avoid using it. Using the cleanup slot might be a problem
6660 if we have to interoperate with languages that pass cleanup
6661 information. However, it should be possible to handle these
6662 situations with GCC's asm feature.
6664 The "Current RP" slot is reserved for the called procedure, so
6665 we try to use it when we don't have a frame of our own. It's
6666 rather unlikely that we won't have a frame when we need to emit
6667 a very long branch.
6669 Really the way to go long term is a register scavenger; go to
6670 the target of the jump and find a register which we can use
6671 as a scratch to hold the value in %r1. Then, we wouldn't have
6672 to free up the delay slot or clobber a slot that may be needed
6673 for other purposes. */
6674 if (TARGET_64BIT)
6676 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6677 /* Use the return pointer slot in the frame marker. */
6678 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6679 else
6680 /* Use the slot at -40 in the frame marker since HP builtin
6681 alloca doesn't copy it. */
6682 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6684 else
6686 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6687 /* Use the return pointer slot in the frame marker. */
6688 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6689 else
6690 /* Use the "Clean Up" slot in the frame marker. In GCC,
6691 the only other use of this location is for copying a
6692 floating point double argument from a floating-point
6693 register to two general registers. The copy is done
6694 as an "atomic" operation when outputting a call, so it
6695 won't interfere with our using the location here. */
6696 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6699 if (TARGET_PORTABLE_RUNTIME)
6701 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6702 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6703 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6705 else if (flag_pic)
6707 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6708 if (TARGET_SOM || !TARGET_GAS)
6710 xoperands[1] = gen_label_rtx ();
6711 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6712 targetm.asm_out.internal_label (asm_out_file, "L",
6713 CODE_LABEL_NUMBER (xoperands[1]));
6714 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6716 else
6718 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6719 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6721 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6723 else
6724 /* Now output a very long branch to the original target. */
6725 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6727 /* Now restore the value of %r1 in the delay slot. */
6728 if (TARGET_64BIT)
6730 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6731 return "ldd -16(%%r30),%%r1";
6732 else
6733 return "ldd -40(%%r30),%%r1";
6735 else
6737 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6738 return "ldw -20(%%r30),%%r1";
6739 else
6740 return "ldw -12(%%r30),%%r1";
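/* Illustrative note, not from the original source: assembling the
   pieces above, the 32-bit non-PIC long branch with no frame of our own
   comes out as
	stw %r1,-20(%r30)	; save %r1 in the frame marker
	ldil L'target,%r1
	be R'target(%sr4,%r1)	; branch via %r1
	ldw -20(%r30),%r1	; restore %r1 in the delay slot
   which accounts for the 16-byte length quoted in the comment above.  */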
6744 /* This routine handles all the branch-on-bit conditional branch sequences we
6745 might need to generate. It handles nullification of delay slots,
6746 varying length branches, negated branches and all combinations of the
6747 above.  It returns the appropriate output template to emit the branch. */
6749 const char *
6750 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6752 static char buf[100];
6753 bool useskip;
6754 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6755 int length = get_attr_length (insn);
6756 int xdelay;
6758 /* A conditional branch to the following instruction (e.g. the delay slot) is
6759 asking for a disaster. I do not think this can happen as this pattern
6760 is only used when optimizing; jump optimization should eliminate the
6761 jump. But be prepared just in case. */
6763 if (branch_to_delay_slot_p (insn))
6764 return "nop";
6766 /* If this is a long branch with its delay slot unfilled, set `nullify'
6767 as it can nullify the delay slot and save a nop. */
6768 if (length == 8 && dbr_sequence_length () == 0)
6769 nullify = 1;
6771 /* If this is a short forward conditional branch which did not get
6772 its delay slot filled, the delay slot can still be nullified. */
6773 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6774 nullify = forward_branch_p (insn);
6776 /* A forward branch over a single nullified insn can be done with an
6777 extrs instruction. This avoids a single cycle penalty due to
6778 mis-predicted branch if we fall through (branch not taken). */
6779 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6781 switch (length)
6784 /* All short conditional branches except backwards with an unfilled
6785 delay slot. */
6786 case 4:
6787 if (useskip)
6788 strcpy (buf, "{extrs,|extrw,s,}");
6789 else
6790 strcpy (buf, "bb,");
6791 if (useskip && GET_MODE (operands[0]) == DImode)
6792 strcpy (buf, "extrd,s,*");
6793 else if (GET_MODE (operands[0]) == DImode)
6794 strcpy (buf, "bb,*");
6795 if ((which == 0 && negated)
6796 || (which == 1 && ! negated))
6797 strcat (buf, ">=");
6798 else
6799 strcat (buf, "<");
6800 if (useskip)
6801 strcat (buf, " %0,%1,1,%%r0");
6802 else if (nullify && negated)
6804 if (branch_needs_nop_p (insn))
6805 strcat (buf, ",n %0,%1,%3%#");
6806 else
6807 strcat (buf, ",n %0,%1,%3");
6809 else if (nullify && ! negated)
6811 if (branch_needs_nop_p (insn))
6812 strcat (buf, ",n %0,%1,%2%#");
6813 else
6814 strcat (buf, ",n %0,%1,%2");
6816 else if (! nullify && negated)
6817 strcat (buf, " %0,%1,%3");
6818 else if (! nullify && ! negated)
6819 strcat (buf, " %0,%1,%2");
6820 break;
6822 /* All long conditionals. Note a short backward branch with an
6823 unfilled delay slot is treated just like a long backward branch
6824 with an unfilled delay slot. */
6825 case 8:
6826 /* Handle weird backwards branch with a filled delay slot
6827 which is nullified. */
6828 if (dbr_sequence_length () != 0
6829 && ! forward_branch_p (insn)
6830 && nullify)
6832 strcpy (buf, "bb,");
6833 if (GET_MODE (operands[0]) == DImode)
6834 strcat (buf, "*");
6835 if ((which == 0 && negated)
6836 || (which == 1 && ! negated))
6837 strcat (buf, "<");
6838 else
6839 strcat (buf, ">=");
6840 if (negated)
6841 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6842 else
6843 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6845 /* Handle short backwards branch with an unfilled delay slot.
6846 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6847 taken and untaken branches. */
6848 else if (dbr_sequence_length () == 0
6849 && ! forward_branch_p (insn)
6850 && INSN_ADDRESSES_SET_P ()
6851 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6852 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6854 strcpy (buf, "bb,");
6855 if (GET_MODE (operands[0]) == DImode)
6856 strcat (buf, "*");
6857 if ((which == 0 && negated)
6858 || (which == 1 && ! negated))
6859 strcat (buf, ">=");
6860 else
6861 strcat (buf, "<");
6862 if (negated)
6863 strcat (buf, " %0,%1,%3%#");
6864 else
6865 strcat (buf, " %0,%1,%2%#");
6867 else
6869 if (GET_MODE (operands[0]) == DImode)
6870 strcpy (buf, "extrd,s,*");
6871 else
6872 strcpy (buf, "{extrs,|extrw,s,}");
6873 if ((which == 0 && negated)
6874 || (which == 1 && ! negated))
6875 strcat (buf, "<");
6876 else
6877 strcat (buf, ">=");
6878 if (nullify && negated)
6879 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6880 else if (nullify && ! negated)
6881 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6882 else if (negated)
6883 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6884 else
6885 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6887 break;
6889 default:
6890 /* The reversed conditional branch must branch over one additional
6891 instruction if the delay slot is filled and needs to be extracted
6892 by pa_output_lbranch. If the delay slot is empty or this is a
6893 nullified forward branch, the instruction after the reversed
6894 condition branch must be nullified. */
6895 if (dbr_sequence_length () == 0
6896 || (nullify && forward_branch_p (insn)))
6898 nullify = 1;
6899 xdelay = 0;
6900 operands[4] = GEN_INT (length);
6902 else
6904 xdelay = 1;
6905 operands[4] = GEN_INT (length + 4);
6908 if (GET_MODE (operands[0]) == DImode)
6909 strcpy (buf, "bb,*");
6910 else
6911 strcpy (buf, "bb,");
6912 if ((which == 0 && negated)
6913 || (which == 1 && !negated))
6914 strcat (buf, "<");
6915 else
6916 strcat (buf, ">=");
6917 if (nullify)
6918 strcat (buf, ",n %0,%1,.+%4");
6919 else
6920 strcat (buf, " %0,%1,.+%4");
6921 output_asm_insn (buf, operands);
6922 return pa_output_lbranch (negated ? operands[3] : operands[2],
6923 insn, xdelay);
6925 return buf;
6928 /* This routine handles all the branch-on-variable-bit conditional branch
6929 sequences we might need to generate. It handles nullification of delay
6930 slots, varying length branches, negated branches and all combinations
6931 of the above.  It returns the appropriate output template to emit the
6932 branch. */
6934 const char *
6935 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
6936 int which)
6938 static char buf[100];
6939 bool useskip;
6940 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6941 int length = get_attr_length (insn);
6942 int xdelay;
6944 /* A conditional branch to the following instruction (e.g. the delay slot) is
6945 asking for a disaster. I do not think this can happen as this pattern
6946 is only used when optimizing; jump optimization should eliminate the
6947 jump. But be prepared just in case. */
6949 if (branch_to_delay_slot_p (insn))
6950 return "nop";
6952 /* If this is a long branch with its delay slot unfilled, set `nullify'
6953 as it can nullify the delay slot and save a nop. */
6954 if (length == 8 && dbr_sequence_length () == 0)
6955 nullify = 1;
6957 /* If this is a short forward conditional branch which did not get
6958 its delay slot filled, the delay slot can still be nullified. */
6959 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6960 nullify = forward_branch_p (insn);
6962 /* A forward branch over a single nullified insn can be done with an
6963 extrs instruction. This avoids a single cycle penalty due to
6964 mis-predicted branch if we fall through (branch not taken). */
6965 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6967 switch (length)
6970 /* All short conditional branches except backwards with an unfilled
6971 delay slot. */
6972 case 4:
6973 if (useskip)
6974 strcpy (buf, "{vextrs,|extrw,s,}");
6975 else
6976 strcpy (buf, "{bvb,|bb,}");
6977 if (useskip && GET_MODE (operands[0]) == DImode)
6978 strcpy (buf, "extrd,s,*");
6979 else if (GET_MODE (operands[0]) == DImode)
6980 strcpy (buf, "bb,*");
6981 if ((which == 0 && negated)
6982 || (which == 1 && ! negated))
6983 strcat (buf, ">=");
6984 else
6985 strcat (buf, "<");
6986 if (useskip)
6987 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6988 else if (nullify && negated)
6990 if (branch_needs_nop_p (insn))
6991 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6992 else
6993 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6995 else if (nullify && ! negated)
6997 if (branch_needs_nop_p (insn))
6998 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
6999 else
7000 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7002 else if (! nullify && negated)
7003 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7004 else if (! nullify && ! negated)
7005 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7006 break;
7008 /* All long conditionals. Note a short backward branch with an
7009 unfilled delay slot is treated just like a long backward branch
7010 with an unfilled delay slot. */
7011 case 8:
7012 /* Handle weird backwards branch with a filled delay slot
7013 which is nullified. */
7014 if (dbr_sequence_length () != 0
7015 && ! forward_branch_p (insn)
7016 && nullify)
7018 strcpy (buf, "{bvb,|bb,}");
7019 if (GET_MODE (operands[0]) == DImode)
7020 strcat (buf, "*");
7021 if ((which == 0 && negated)
7022 || (which == 1 && ! negated))
7023 strcat (buf, "<");
7024 else
7025 strcat (buf, ">=");
7026 if (negated)
7027 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7028 else
7029 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7031 /* Handle short backwards branch with an unfilled delay slot.
7032 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7033 taken and untaken branches. */
7034 else if (dbr_sequence_length () == 0
7035 && ! forward_branch_p (insn)
7036 && INSN_ADDRESSES_SET_P ()
7037 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7038 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7040 strcpy (buf, "{bvb,|bb,}");
7041 if (GET_MODE (operands[0]) == DImode)
7042 strcat (buf, "*");
7043 if ((which == 0 && negated)
7044 || (which == 1 && ! negated))
7045 strcat (buf, ">=");
7046 else
7047 strcat (buf, "<");
7048 if (negated)
7049 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7050 else
7051 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7053 else
7055 strcpy (buf, "{vextrs,|extrw,s,}");
7056 if (GET_MODE (operands[0]) == DImode)
7057 strcpy (buf, "extrd,s,*");
7058 if ((which == 0 && negated)
7059 || (which == 1 && ! negated))
7060 strcat (buf, "<");
7061 else
7062 strcat (buf, ">=");
7063 if (nullify && negated)
7064 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7065 else if (nullify && ! negated)
7066 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7067 else if (negated)
7068 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7069 else
7070 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7072 break;
7074 default:
7075 /* The reversed conditional branch must branch over one additional
7076 instruction if the delay slot is filled and needs to be extracted
7077 by pa_output_lbranch. If the delay slot is empty or this is a
7078 nullified forward branch, the instruction after the reversed
7079 condition branch must be nullified. */
7080 if (dbr_sequence_length () == 0
7081 || (nullify && forward_branch_p (insn)))
7083 nullify = 1;
7084 xdelay = 0;
7085 operands[4] = GEN_INT (length);
7087 else
7089 xdelay = 1;
7090 operands[4] = GEN_INT (length + 4);
7093 if (GET_MODE (operands[0]) == DImode)
7094 strcpy (buf, "bb,*");
7095 else
7096 strcpy (buf, "{bvb,|bb,}");
7097 if ((which == 0 && negated)
7098 || (which == 1 && !negated))
7099 strcat (buf, "<");
7100 else
7101 strcat (buf, ">=");
7102 if (nullify)
7103 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7104 else
7105 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7106 output_asm_insn (buf, operands);
7107 return pa_output_lbranch (negated ? operands[3] : operands[2],
7108 insn, xdelay);
7110 return buf;
7113 /* Return the output template for emitting a dbra type insn.
7115 Note it may perform some output operations on its own before
7116 returning the final output string. */
7117 const char *
7118 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7120 int length = get_attr_length (insn);
7122 /* A conditional branch to the following instruction (e.g. the delay slot) is
7123 asking for a disaster. Be prepared! */
7125 if (branch_to_delay_slot_p (insn))
7127 if (which_alternative == 0)
7128 return "ldo %1(%0),%0";
7129 else if (which_alternative == 1)
7131 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7132 output_asm_insn ("ldw -16(%%r30),%4", operands);
7133 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7134 return "{fldws|fldw} -16(%%r30),%0";
7136 else
7138 output_asm_insn ("ldw %0,%4", operands);
7139 return "ldo %1(%4),%4\n\tstw %4,%0";
7143 if (which_alternative == 0)
7145 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7146 int xdelay;
7148 /* If this is a long branch with its delay slot unfilled, set `nullify'
7149 as it can nullify the delay slot and save a nop. */
7150 if (length == 8 && dbr_sequence_length () == 0)
7151 nullify = 1;
7153 /* If this is a short forward conditional branch which did not get
7154 its delay slot filled, the delay slot can still be nullified. */
7155 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7156 nullify = forward_branch_p (insn);
7158 switch (length)
7160 case 4:
7161 if (nullify)
7163 if (branch_needs_nop_p (insn))
7164 return "addib,%C2,n %1,%0,%3%#";
7165 else
7166 return "addib,%C2,n %1,%0,%3";
7168 else
7169 return "addib,%C2 %1,%0,%3";
7171 case 8:
7172 /* Handle weird backwards branch with a filled delay slot
7173 which is nullified. */
7174 if (dbr_sequence_length () != 0
7175 && ! forward_branch_p (insn)
7176 && nullify)
7177 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7178 /* Handle short backwards branch with an unfilled delay slot.
7179 Using an addb;nop rather than addi;bl saves 1 cycle for both
7180 taken and untaken branches. */
7181 else if (dbr_sequence_length () == 0
7182 && ! forward_branch_p (insn)
7183 && INSN_ADDRESSES_SET_P ()
7184 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7185 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7186 return "addib,%C2 %1,%0,%3%#";
7188 /* Handle normal cases. */
7189 if (nullify)
7190 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7191 else
7192 return "addi,%N2 %1,%0,%0\n\tb %3";
7194 default:
7195 /* The reversed conditional branch must branch over one additional
7196 instruction if the delay slot is filled and needs to be extracted
7197 by pa_output_lbranch. If the delay slot is empty or this is a
7198 nullified forward branch, the instruction after the reversed
7199 condition branch must be nullified. */
7200 if (dbr_sequence_length () == 0
7201 || (nullify && forward_branch_p (insn)))
7203 nullify = 1;
7204 xdelay = 0;
7205 operands[4] = GEN_INT (length);
7207 else
7209 xdelay = 1;
7210 operands[4] = GEN_INT (length + 4);
7213 if (nullify)
7214 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7215 else
7216 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7218 return pa_output_lbranch (operands[3], insn, xdelay);
7222 /* Deal with gross reload from FP register case. */
7223 else if (which_alternative == 1)
7225 /* Move loop counter from FP register to MEM then into a GR,
7226 increment the GR, store the GR into MEM, and finally reload
7227 the FP register from MEM from within the branch's delay slot. */
7228 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7229 operands);
7230 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7231 if (length == 24)
7232 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7233 else if (length == 28)
7234 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7235 else
7237 operands[5] = GEN_INT (length - 16);
7238 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7239 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7240 return pa_output_lbranch (operands[3], insn, 0);
7243 /* Deal with gross reload from memory case. */
7244 else
7246 /* Reload loop counter from memory, the store back to memory
7247 happens in the branch's delay slot. */
7248 output_asm_insn ("ldw %0,%4", operands);
7249 if (length == 12)
7250 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7251 else if (length == 16)
7252 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7253 else
7255 operands[5] = GEN_INT (length - 4);
7256 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7257 return pa_output_lbranch (operands[3], insn, 0);
7262 /* Return the output template for emitting a movb type insn.
7264 Note it may perform some output operations on its own before
7265 returning the final output string. */
7266 const char *
7267 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7268 int reverse_comparison)
7270 int length = get_attr_length (insn);
7272 /* A conditional branch to the following instruction (e.g. the delay slot) is
7273 asking for a disaster. Be prepared! */
7275 if (branch_to_delay_slot_p (insn))
7277 if (which_alternative == 0)
7278 return "copy %1,%0";
7279 else if (which_alternative == 1)
7281 output_asm_insn ("stw %1,-16(%%r30)", operands);
7282 return "{fldws|fldw} -16(%%r30),%0";
7284 else if (which_alternative == 2)
7285 return "stw %1,%0";
7286 else
7287 return "mtsar %r1";
7290 /* Support the second variant. */
7291 if (reverse_comparison)
7292 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7294 if (which_alternative == 0)
7296 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7297 int xdelay;
7299 /* If this is a long branch with its delay slot unfilled, set `nullify'
7300 as it can nullify the delay slot and save a nop. */
7301 if (length == 8 && dbr_sequence_length () == 0)
7302 nullify = 1;
7304 /* If this is a short forward conditional branch which did not get
7305 its delay slot filled, the delay slot can still be nullified. */
7306 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7307 nullify = forward_branch_p (insn);
7309 switch (length)
7311 case 4:
7312 if (nullify)
7314 if (branch_needs_nop_p (insn))
7315 return "movb,%C2,n %1,%0,%3%#";
7316 else
7317 return "movb,%C2,n %1,%0,%3";
7319 else
7320 return "movb,%C2 %1,%0,%3";
7322 case 8:
7323 /* Handle weird backwards branch with a filled delay slot
7324 which is nullified. */
7325 if (dbr_sequence_length () != 0
7326 && ! forward_branch_p (insn)
7327 && nullify)
7328 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7330 /* Handle short backwards branch with an unfilled delay slot.
7331 Using a movb;nop rather than or;bl saves 1 cycle for both
7332 taken and untaken branches. */
7333 else if (dbr_sequence_length () == 0
7334 && ! forward_branch_p (insn)
7335 && INSN_ADDRESSES_SET_P ()
7336 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7337 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7338 return "movb,%C2 %1,%0,%3%#";
7339 /* Handle normal cases. */
7340 if (nullify)
7341 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7342 else
7343 return "or,%N2 %1,%%r0,%0\n\tb %3";
7345 default:
7346 /* The reversed conditional branch must branch over one additional
7347 instruction if the delay slot is filled and needs to be extracted
7348 by pa_output_lbranch. If the delay slot is empty or this is a
7349 nullified forward branch, the instruction after the reversed
7350 condition branch must be nullified. */
7351 if (dbr_sequence_length () == 0
7352 || (nullify && forward_branch_p (insn)))
7354 nullify = 1;
7355 xdelay = 0;
7356 operands[4] = GEN_INT (length);
7358 else
7360 xdelay = 1;
7361 operands[4] = GEN_INT (length + 4);
7364 if (nullify)
7365 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7366 else
7367 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7369 return pa_output_lbranch (operands[3], insn, xdelay);
7372 /* Deal with gross reload for FP destination register case. */
7373 else if (which_alternative == 1)
7375 /* Move source register to MEM, perform the branch test, then
7376 finally load the FP register from MEM from within the branch's
7377 delay slot. */
7378 output_asm_insn ("stw %1,-16(%%r30)", operands);
7379 if (length == 12)
7380 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7381 else if (length == 16)
7382 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7383 else
7385 operands[4] = GEN_INT (length - 4);
7386 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7387 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7388 return pa_output_lbranch (operands[3], insn, 0);
7391 /* Deal with gross reload from memory case. */
7392 else if (which_alternative == 2)
7394 /* Reload loop counter from memory, the store back to memory
7395 happens in the branch's delay slot. */
7396 if (length == 8)
7397 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7398 else if (length == 12)
7399 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7400 else
7402 operands[4] = GEN_INT (length);
7403 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7404 operands);
7405 return pa_output_lbranch (operands[3], insn, 0);
7408 /* Handle SAR as a destination. */
7409 else
7411 if (length == 8)
7412 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7413 else if (length == 12)
7414 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7415 else
7417 operands[4] = GEN_INT (length);
7418 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7419 operands);
7420 return pa_output_lbranch (operands[3], insn, 0);
7425 /* Copy any FP arguments in INSN into integer registers. */
7426 static void
7427 copy_fp_args (rtx insn)
7429 rtx link;
7430 rtx xoperands[2];
7432 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7434 int arg_mode, regno;
7435 rtx use = XEXP (link, 0);
7437 if (! (GET_CODE (use) == USE
7438 && GET_CODE (XEXP (use, 0)) == REG
7439 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7440 continue;
7442 arg_mode = GET_MODE (XEXP (use, 0));
7443 regno = REGNO (XEXP (use, 0));
7445 /* Is it a floating point register? */
7446 if (regno >= 32 && regno <= 39)
7448 /* Copy the FP register into an integer register via memory. */
7449 if (arg_mode == SFmode)
7451 xoperands[0] = XEXP (use, 0);
7452 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7453 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7454 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7456 else
7458 xoperands[0] = XEXP (use, 0);
7459 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7460 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7461 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7462 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7468 /* Compute length of the FP argument copy sequence for INSN. */
7469 static int
7470 length_fp_args (rtx insn)
7472 int length = 0;
7473 rtx link;
7475 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7477 int arg_mode, regno;
7478 rtx use = XEXP (link, 0);
7480 if (! (GET_CODE (use) == USE
7481 && GET_CODE (XEXP (use, 0)) == REG
7482 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7483 continue;
7485 arg_mode = GET_MODE (XEXP (use, 0));
7486 regno = REGNO (XEXP (use, 0));
7488 /* Is it a floating point register? */
7489 if (regno >= 32 && regno <= 39)
7491 if (arg_mode == SFmode)
7492 length += 8;
7493 else
7494 length += 12;
7498 return length;
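/* Worked example, not from the original source: per copy_fp_args above,
   each SFmode argument costs two insns (fstw + ldw, 8 bytes) and each
   DFmode argument three (fstd + ldw + ldw, 12 bytes), so a call passing
   one of each adds 8 + 12 = 20 bytes to the sequence length.  */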
7501 /* Return the attribute length for the millicode call instruction INSN.
7502 The length must match the code generated by pa_output_millicode_call.
7503 We include the delay slot in the returned length as it is better to
7504 over estimate the length than to under estimate it. */
7507 pa_attr_length_millicode_call (rtx_insn *insn)
7509 unsigned long distance = -1;
7510 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7512 if (INSN_ADDRESSES_SET_P ())
7514 distance = (total + insn_current_reference_address (insn));
7515 if (distance < total)
7516 distance = -1;
7519 if (TARGET_64BIT)
7521 if (!TARGET_LONG_CALLS && distance < 7600000)
7522 return 8;
7524 return 20;
7526 else if (TARGET_PORTABLE_RUNTIME)
7527 return 24;
7528 else
7530 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7531 return 8;
7533 if (!flag_pic)
7534 return 12;
7536 return 24;
7540 /* INSN is a function call. It may have an unconditional jump
7541 in its delay slot.
7543 CALL_DEST is the routine we are calling. */
7545 const char *
7546 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7548 int attr_length = get_attr_length (insn);
7549 int seq_length = dbr_sequence_length ();
7550 int distance;
7551 rtx seq_insn;
7552 rtx xoperands[3];
7554 xoperands[0] = call_dest;
7555 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7557 /* Handle the common case where we are sure that the branch will
7558 reach the beginning of the $CODE$ subspace. The within reach
7559 form of the $$sh_func_adrs call has a length of 28. Because it
7560 has an attribute type of sh_func_adrs, it never has a nonzero
7561 sequence length (i.e., the delay slot is never filled). */
7562 if (!TARGET_LONG_CALLS
7563 && (attr_length == 8
7564 || (attr_length == 28
7565 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7567 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7569 else
7571 if (TARGET_64BIT)
7573 /* It might seem that one insn could be saved by accessing
7574 the millicode function using the linkage table. However,
7575 this doesn't work in shared libraries and other dynamically
7576 loaded objects. Using a pc-relative sequence also avoids
7577 problems related to the implicit use of the gp register. */
7578 output_asm_insn ("b,l .+8,%%r1", xoperands);
7580 if (TARGET_GAS)
7582 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7583 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7585 else
7587 xoperands[1] = gen_label_rtx ();
7588 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7589 targetm.asm_out.internal_label (asm_out_file, "L",
7590 CODE_LABEL_NUMBER (xoperands[1]));
7591 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7594 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7596 else if (TARGET_PORTABLE_RUNTIME)
7598 /* Pure portable runtime doesn't allow be/ble; we also don't
7599 have PIC support in the assembler/linker, so this sequence
7600 is needed. */
7602 /* Get the address of our target into %r1. */
7603 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7604 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7606 /* Get our return address into %r31. */
7607 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7608 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7610 /* Jump to our target address in %r1. */
7611 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7613 else if (!flag_pic)
7615 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7616 if (TARGET_PA_20)
7617 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7618 else
7619 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7621 else
7623 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7624 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7626 if (TARGET_SOM || !TARGET_GAS)
7628 /* The HP assembler can generate relocations for the
7629 difference of two symbols. GAS can do this for a
7630 millicode symbol but not an arbitrary external
7631 symbol when generating SOM output. */
7632 xoperands[1] = gen_label_rtx ();
7633 targetm.asm_out.internal_label (asm_out_file, "L",
7634 CODE_LABEL_NUMBER (xoperands[1]));
7635 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7636 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7638 else
7640 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7641 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7642 xoperands);
7645 /* Jump to our target address in %r1. */
7646 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7650 if (seq_length == 0)
7651 output_asm_insn ("nop", xoperands);
7653 /* We are done if there isn't a jump in the delay slot. */
7654 if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
7655 return "";
7657 /* This call has an unconditional jump in its delay slot. */
7658 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7660 /* See if the return address can be adjusted. Use the containing
7661 sequence insn's address. */
7662 if (INSN_ADDRESSES_SET_P ())
7664 seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
7665 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7666 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7668 if (VAL_14_BITS_P (distance))
7670 xoperands[1] = gen_label_rtx ();
7671 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7672 targetm.asm_out.internal_label (asm_out_file, "L",
7673 CODE_LABEL_NUMBER (xoperands[1]));
7675 else
7676 /* ??? This branch may not reach its target. */
7677 output_asm_insn ("nop\n\tb,n %0", xoperands);
7679 else
7680 /* ??? This branch may not reach its target. */
7681 output_asm_insn ("nop\n\tb,n %0", xoperands);
7683 /* Delete the jump. */
7684 SET_INSN_DELETED (NEXT_INSN (insn));
7686 return "";
7689 /* Return the attribute length of the call instruction INSN. The SIBCALL
7690 flag indicates whether INSN is a regular call or a sibling call. The
7691 length returned must be longer than the code actually generated by
7692 pa_output_call. Since branch shortening is done before delay branch
7693 sequencing, there is no way to determine whether or not the delay
7694 slot will be filled during branch shortening. Even when the delay
7695 slot is filled, we may have to add a nop if the delay slot contains
7696 a branch that can't reach its target. Thus, we always have to include
7697 the delay slot in the length estimate. This used to be done in
7698 pa_adjust_insn_length but we do it here now as some sequences always
7699 fill the delay slot and we can save four bytes in the estimate for
7700 these sequences. */
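/* For example, a 32-bit call that is within pc-relative range is
   estimated below at 8 bytes: the 4-byte {bl|b,l} plus its 4-byte
   delay slot. */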
7702 int
7703 pa_attr_length_call (rtx_insn *insn, int sibcall)
7705 int local_call;
7706 rtx call, call_dest;
7707 tree call_decl;
7708 int length = 0;
7709 rtx pat = PATTERN (insn);
7710 unsigned long distance = -1;
7712 gcc_assert (CALL_P (insn));
7714 if (INSN_ADDRESSES_SET_P ())
7716 unsigned long total;
7718 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7719 distance = (total + insn_current_reference_address (insn));
7720 if (distance < total)
7721 distance = -1;
7724 gcc_assert (GET_CODE (pat) == PARALLEL);
7726 /* Get the call rtx. */
7727 call = XVECEXP (pat, 0, 0);
7728 if (GET_CODE (call) == SET)
7729 call = SET_SRC (call);
7731 gcc_assert (GET_CODE (call) == CALL);
7733 /* Determine if this is a local call. */
7734 call_dest = XEXP (XEXP (call, 0), 0);
7735 call_decl = SYMBOL_REF_DECL (call_dest);
7736 local_call = call_decl && targetm.binds_local_p (call_decl);
7738 /* pc-relative branch. */
7739 if (!TARGET_LONG_CALLS
7740 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7741 || distance < MAX_PCREL17F_OFFSET))
7742 length += 8;
7744 /* 64-bit plabel sequence. */
7745 else if (TARGET_64BIT && !local_call)
7746 length += sibcall ? 28 : 24;
7748 /* non-pic long absolute branch sequence. */
7749 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7750 length += 12;
7752 /* long pc-relative branch sequence. */
7753 else if (TARGET_LONG_PIC_SDIFF_CALL
7754 || (TARGET_GAS && !TARGET_SOM
7755 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7757 length += 20;
7759 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7760 length += 8;
7763 /* 32-bit plabel sequence. */
7764 else
7766 length += 32;
7768 if (TARGET_SOM)
7769 length += length_fp_args (insn);
7771 if (flag_pic)
7772 length += 4;
7774 if (!TARGET_PA_20)
7776 if (!sibcall)
7777 length += 8;
7779 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7780 length += 8;
7784 return length;
7787 /* INSN is a function call. It may have an unconditional jump
7788 in its delay slot.
7790 CALL_DEST is the routine we are calling. */
7792 const char *
7793 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7795 int delay_insn_deleted = 0;
7796 int delay_slot_filled = 0;
7797 int seq_length = dbr_sequence_length ();
7798 tree call_decl = SYMBOL_REF_DECL (call_dest);
7799 int local_call = call_decl && targetm.binds_local_p (call_decl);
7800 rtx xoperands[2];
7802 xoperands[0] = call_dest;
7804 /* Handle the common case where we're sure that the branch will reach
7805 the beginning of the "$CODE$" subspace. This is the beginning of
7806 the current function if we are in a named section. */
7807 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7809 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7810 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7812 else
7814 if (TARGET_64BIT && !local_call)
7816 /* ??? As far as I can tell, the HP linker doesn't support the
7817 long pc-relative sequence described in the 64-bit runtime
7818 architecture. So, we use a slightly longer indirect call. */
7819 xoperands[0] = pa_get_deferred_plabel (call_dest);
7820 xoperands[1] = gen_label_rtx ();
7822 /* If this isn't a sibcall, we put the load of %r27 into the
7823 delay slot. We can't do this in a sibcall as we don't
7824 have a second call-clobbered scratch register available. */
7825 if (seq_length != 0
7826 && ! JUMP_P (NEXT_INSN (insn))
7827 && !sibcall)
7829 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7830 optimize, 0, NULL);
7832 /* Now delete the delay insn. */
7833 SET_INSN_DELETED (NEXT_INSN (insn));
7834 delay_insn_deleted = 1;
7837 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7838 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7839 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7841 if (sibcall)
7843 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7844 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7845 output_asm_insn ("bve (%%r1)", xoperands);
7847 else
7849 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7850 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7851 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7852 delay_slot_filled = 1;
7855 else
7857 int indirect_call = 0;
7859 /* Emit a long call. There are several different sequences
7860 of increasing length and complexity. In most cases,
7861 they don't allow an instruction in the delay slot. */
7862 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7863 && !TARGET_LONG_PIC_SDIFF_CALL
7864 && !(TARGET_GAS && !TARGET_SOM
7865 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7866 && !TARGET_64BIT)
7867 indirect_call = 1;
7869 if (seq_length != 0
7870 && ! JUMP_P (NEXT_INSN (insn))
7871 && !sibcall
7872 && (!TARGET_PA_20
7873 || indirect_call
7874 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7876 /* A non-jump insn in the delay slot. By definition we can
7877 emit this insn before the call (and in fact before argument
7878 relocating). */
7879 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7880 NULL);
7882 /* Now delete the delay insn. */
7883 SET_INSN_DELETED (NEXT_INSN (insn));
7884 delay_insn_deleted = 1;
7887 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7889 /* This is the best sequence for making long calls in
7890 non-pic code. Unfortunately, GNU ld doesn't provide
7891 the stub needed for external calls, and GAS's support
7892 for this with the SOM linker is buggy. It is safe
7893 to use this for local calls. */
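/* A sketch of the non-sibcall PA 2.0 form of this sequence, with foo
   standing in for the call target:

	ldil L'foo,%r1
	be,l R'foo(%sr4,%r1),%sr0,%r31
	copy %r31,%r2

   The copy executes in the delay slot of the be,l. */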
7894 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7895 if (sibcall)
7896 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7897 else
7899 if (TARGET_PA_20)
7900 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7901 xoperands);
7902 else
7903 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7905 output_asm_insn ("copy %%r31,%%r2", xoperands);
7906 delay_slot_filled = 1;
7909 else
7911 if (TARGET_LONG_PIC_SDIFF_CALL)
7913 /* The HP assembler and linker can handle relocations
7914 for the difference of two symbols. The HP assembler
7915 recognizes the sequence as a pc-relative call and
7916 the linker provides stubs when needed. */
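/* A sketch of the emitted sequence, using L$0001 as a sample label and
   foo as the call target:

	bl .+8,%r1
	addil L'foo-L$0001,%r1
L$0001:	ldo R'foo-L$0001(%r1),%r1

   The bl loads the address of L$0001 into %r1; the addil (executed in
   the bl's delay slot) and the ldo then add the displacement of foo,
   leaving the target address in %r1. */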
7917 xoperands[1] = gen_label_rtx ();
7918 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7919 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7920 targetm.asm_out.internal_label (asm_out_file, "L",
7921 CODE_LABEL_NUMBER (xoperands[1]));
7922 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7924 else if (TARGET_GAS && !TARGET_SOM
7925 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7927 /* GAS currently can't generate the relocations that
7928 are needed for the SOM linker under HP-UX using this
7929 sequence. The GNU linker doesn't generate the stubs
7930 that are needed for external calls on TARGET_ELF32
7931 with this sequence. For now, we have to use a
7932 longer plabel sequence when using GAS. */
7933 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7934 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7935 xoperands);
7936 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7937 xoperands);
7939 else
7941 /* Emit a long plabel-based call sequence. This is
7942 essentially an inline implementation of $$dyncall.
7943 We don't actually try to call $$dyncall as this is
7944 as difficult as calling the function itself. */
7945 xoperands[0] = pa_get_deferred_plabel (call_dest);
7946 xoperands[1] = gen_label_rtx ();
7948 /* Since the call is indirect, FP arguments in registers
7949 need to be copied to the general registers. Then, the
7950 argument relocation stub will copy them back. */
7951 if (TARGET_SOM)
7952 copy_fp_args (insn);
7954 if (flag_pic)
7956 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7957 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7958 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7960 else
7962 output_asm_insn ("addil LR'%0-$global$,%%r27",
7963 xoperands);
7964 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7965 xoperands);
7968 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7969 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7970 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7971 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7973 if (!sibcall && !TARGET_PA_20)
7975 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7976 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7977 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7978 else
7979 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7983 if (TARGET_PA_20)
7985 if (sibcall)
7986 output_asm_insn ("bve (%%r1)", xoperands);
7987 else
7989 if (indirect_call)
7991 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7992 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7993 delay_slot_filled = 1;
7995 else
7996 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7999 else
8001 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8002 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8003 xoperands);
8005 if (sibcall)
8007 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8008 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8009 else
8010 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8012 else
8014 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8015 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8016 else
8017 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8019 if (indirect_call)
8020 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8021 else
8022 output_asm_insn ("copy %%r31,%%r2", xoperands);
8023 delay_slot_filled = 1;
8030 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8031 output_asm_insn ("nop", xoperands);
8033 /* We are done if there isn't a jump in the delay slot. */
8034 if (seq_length == 0
8035 || delay_insn_deleted
8036 || ! JUMP_P (NEXT_INSN (insn)))
8037 return "";
8039 /* A sibcall should never have a branch in the delay slot. */
8040 gcc_assert (!sibcall);
8042 /* This call has an unconditional jump in its delay slot. */
8043 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8045 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8047 /* See if the return address can be adjusted. Use the containing
8048 sequence insn's address. This would break the regular call/return
8049 relationship assumed by the table-based eh unwinder, so only do this
8050 if the call cannot possibly throw. */
8051 rtx seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
8052 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8053 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8055 if (VAL_14_BITS_P (distance)
8056 && !(can_throw_internal (insn) || can_throw_external (insn)))
8058 xoperands[1] = gen_label_rtx ();
8059 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8060 targetm.asm_out.internal_label (asm_out_file, "L",
8061 CODE_LABEL_NUMBER (xoperands[1]));
8063 else
8064 output_asm_insn ("nop\n\tb,n %0", xoperands);
8066 else
8067 output_asm_insn ("b,n %0", xoperands);
8069 /* Delete the jump. */
8070 SET_INSN_DELETED (NEXT_INSN (insn));
8072 return "";
8075 /* Return the attribute length of the indirect call instruction INSN.
8076 The length must match the code generated by pa_output_indirect_call.
8077 The returned length includes the delay slot. Currently, the delay
8078 slot of an indirect call sequence is not exposed and it is used by
8079 the sequence itself. */
8081 int
8082 pa_attr_length_indirect_call (rtx_insn *insn)
8084 unsigned long distance = -1;
8085 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8087 if (INSN_ADDRESSES_SET_P ())
8089 distance = (total + insn_current_reference_address (insn));
8090 if (distance < total)
8091 distance = -1;
8094 if (TARGET_64BIT)
8095 return 12;
8097 if (TARGET_FAST_INDIRECT_CALLS
8098 || (!TARGET_LONG_CALLS
8099 && !TARGET_PORTABLE_RUNTIME
8100 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8101 || distance < MAX_PCREL17F_OFFSET)))
8102 return 8;
8104 if (flag_pic)
8105 return 20;
8107 if (TARGET_PORTABLE_RUNTIME)
8108 return 16;
8110 /* Out of reach, can use ble. */
8111 return 12;
8114 const char *
8115 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8117 rtx xoperands[1];
8119 if (TARGET_64BIT)
8121 xoperands[0] = call_dest;
8122 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8123 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8124 return "";
8127 /* First the special case for kernels, level 0 systems, etc. */
8128 if (TARGET_FAST_INDIRECT_CALLS)
8129 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8131 /* Now the normal case -- we can reach $$dyncall directly or
8132 we're sure that we can get there via a long-branch stub.
8134 No need to check target flags as the length uniquely identifies
8135 the remaining cases. */
8136 if (pa_attr_length_indirect_call (insn) == 8)
8138 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8139 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8140 variant of the B,L instruction can't be used on the SOM target. */
8141 if (TARGET_PA_20 && !TARGET_SOM)
8142 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8143 else
8144 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8147 /* Long millicode call, but we are not generating PIC or portable runtime
8148 code. */
8149 if (pa_attr_length_indirect_call (insn) == 12)
8150 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8152 /* Long millicode call for portable runtime. */
8153 if (pa_attr_length_indirect_call (insn) == 16)
8154 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8156 /* We need a long PIC call to $$dyncall. */
8157 xoperands[0] = NULL_RTX;
8158 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8159 if (TARGET_SOM || !TARGET_GAS)
8161 xoperands[0] = gen_label_rtx ();
8162 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8163 targetm.asm_out.internal_label (asm_out_file, "L",
8164 CODE_LABEL_NUMBER (xoperands[0]));
8165 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8167 else
8169 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8170 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8171 xoperands);
8173 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8174 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8175 return "";
8178 /* In HPUX 8.0's shared library scheme, special relocations are needed
8179 for function labels if they might be passed to a function
8180 in a shared library (because shared libraries don't live in code
8181 space), and special magic is needed to construct their address. */
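/* For example, the SYMBOL_REF string "foo" becomes "@foo"; the '@' is
   stripped again by pa_strip_name_encoding below. */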
8183 void
8184 pa_encode_label (rtx sym)
8186 const char *str = XSTR (sym, 0);
8187 int len = strlen (str) + 1;
8188 char *newstr, *p;
8190 p = newstr = XALLOCAVEC (char, len + 1);
8191 *p++ = '@';
8192 strcpy (p, str);
8194 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8197 static void
8198 pa_encode_section_info (tree decl, rtx rtl, int first)
8200 int old_referenced = 0;
8202 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8203 old_referenced
8204 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8206 default_encode_section_info (decl, rtl, first);
8208 if (first && TEXT_SPACE_P (decl))
8210 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8211 if (TREE_CODE (decl) == FUNCTION_DECL)
8212 pa_encode_label (XEXP (rtl, 0));
8214 else if (old_referenced)
8215 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8218 /* This is roughly the inverse of pa_encode_section_info. */
8220 static const char *
8221 pa_strip_name_encoding (const char *str)
8223 str += (*str == '@');
8224 str += (*str == '*');
8225 return str;
8228 /* Returns 1 if OP is a function label involved in a simple addition
8229 with a constant. Used to keep certain patterns from matching
8230 during instruction combination. */
8231 int
8232 pa_is_function_label_plus_const (rtx op)
8234 /* Strip off any CONST. */
8235 if (GET_CODE (op) == CONST)
8236 op = XEXP (op, 0);
8238 return (GET_CODE (op) == PLUS
8239 && function_label_operand (XEXP (op, 0), VOIDmode)
8240 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8243 /* Output assembly code for a thunk to FUNCTION. */
8245 static void
8246 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8247 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8248 tree function)
8250 static unsigned int current_thunk_number;
8251 int val_14 = VAL_14_BITS_P (delta);
8252 unsigned int old_last_address = last_address, nbytes = 0;
8253 char label[16];
8254 rtx xoperands[4];
8256 xoperands[0] = XEXP (DECL_RTL (function), 0);
8257 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8258 xoperands[2] = GEN_INT (delta);
8260 final_start_function (emit_barrier (), file, 1);
8262 /* Output the thunk. We know that the function is in the same
8263 translation unit (i.e., the same space) as the thunk, and that
8264 thunks are output after their method. Thus, we don't need an
8265 external branch to reach the function. With SOM and GAS,
8266 functions and thunks are effectively in different sections.
8267 Thus, we can always use an IA-relative branch and the linker
8268 will add a long branch stub if necessary.
8270 However, we have to be careful when generating PIC code on the
8271 SOM port to ensure that the sequence does not transfer to an
8272 import stub for the target function as this could clobber the
8273 return value saved at SP-24. This would also apply to the
8274 32-bit linux port if the multi-space model is implemented. */
8275 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8276 && !(flag_pic && TREE_PUBLIC (function))
8277 && (TARGET_GAS || last_address < 262132))
8278 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8279 && ((targetm_common.have_named_sections
8280 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8281 /* The GNU 64-bit linker has rather poor stub management.
8282 So, we use a long branch from thunks that aren't in
8283 the same section as the target function. */
8284 && ((!TARGET_64BIT
8285 && (DECL_SECTION_NAME (thunk_fndecl)
8286 != DECL_SECTION_NAME (function)))
8287 || ((DECL_SECTION_NAME (thunk_fndecl)
8288 == DECL_SECTION_NAME (function))
8289 && last_address < 262132)))
8290 /* In this case, we need to be able to reach the start of
8291 the stub table even though the function is likely closer
8292 and can be jumped to directly. */
8293 || (targetm_common.have_named_sections
8294 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8295 && DECL_SECTION_NAME (function) == NULL
8296 && total_code_bytes < MAX_PCREL17F_OFFSET)
8297 /* Likewise. */
8298 || (!targetm_common.have_named_sections
8299 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8301 if (!val_14)
8302 output_asm_insn ("addil L'%2,%%r26", xoperands);
8304 output_asm_insn ("b %0", xoperands);
8306 if (val_14)
8308 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8309 nbytes += 8;
8311 else
8313 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8314 nbytes += 12;
8317 else if (TARGET_64BIT)
8319 /* We only have one call-clobbered scratch register, so we can't
8320 make use of the delay slot if delta doesn't fit in 14 bits. */
8321 if (!val_14)
8323 output_asm_insn ("addil L'%2,%%r26", xoperands);
8324 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8327 output_asm_insn ("b,l .+8,%%r1", xoperands);
8329 if (TARGET_GAS)
8331 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8332 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8334 else
8336 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8337 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8340 if (val_14)
8342 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8343 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8344 nbytes += 20;
8346 else
8348 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8349 nbytes += 24;
8352 else if (TARGET_PORTABLE_RUNTIME)
8354 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8355 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8357 if (!val_14)
8358 output_asm_insn ("addil L'%2,%%r26", xoperands);
8360 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8362 if (val_14)
8364 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8365 nbytes += 16;
8367 else
8369 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8370 nbytes += 20;
8373 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8375 /* The function is accessible from outside this module. The only
8376 way to avoid an import stub between the thunk and function is to
8377 call the function directly with an indirect sequence similar to
8378 that used by $$dyncall. This is possible because $$dyncall acts
8379 as the import stub in an indirect call. */
8380 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8381 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8382 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8383 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8384 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8385 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8386 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8387 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8388 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8390 if (!val_14)
8392 output_asm_insn ("addil L'%2,%%r26", xoperands);
8393 nbytes += 4;
8396 if (TARGET_PA_20)
8398 output_asm_insn ("bve (%%r22)", xoperands);
8399 nbytes += 36;
8401 else if (TARGET_NO_SPACE_REGS)
8403 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8404 nbytes += 36;
8406 else
8408 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8409 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8410 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8411 nbytes += 44;
8414 if (val_14)
8415 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8416 else
8417 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8419 else if (flag_pic)
8421 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8423 if (TARGET_SOM || !TARGET_GAS)
8425 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8426 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8428 else
8430 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8431 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8434 if (!val_14)
8435 output_asm_insn ("addil L'%2,%%r26", xoperands);
8437 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8439 if (val_14)
8441 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8442 nbytes += 20;
8444 else
8446 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8447 nbytes += 24;
8450 else
8452 if (!val_14)
8453 output_asm_insn ("addil L'%2,%%r26", xoperands);
8455 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8456 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8458 if (val_14)
8460 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8461 nbytes += 12;
8463 else
8465 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8466 nbytes += 16;
8470 final_end_function ();
8472 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8474 switch_to_section (data_section);
8475 output_asm_insn (".align 4", xoperands);
8476 ASM_OUTPUT_LABEL (file, label);
8477 output_asm_insn (".word P'%0", xoperands);
8480 current_thunk_number++;
8481 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8482 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8483 last_address += nbytes;
8484 if (old_last_address > last_address)
8485 last_address = UINT_MAX;
8486 update_total_code_bytes (nbytes);
8489 /* Only direct calls to static functions are allowed to be sibling (tail)
8490 call optimized.
8492 This restriction is necessary because some linker-generated stubs will,
8493 in some cases, store return pointers into rp', which might clobber a
8494 live value already in rp'.
8496 In a sibcall the current function and the target function share stack
8497 space. Thus if the path to the current function and the path to the
8498 target function save a value in rp', they save the value into the
8499 same stack slot, which has undesirable consequences.
8501 Because of the deferred binding nature of shared libraries any function
8502 with external scope could be in a different load module and thus require
8503 rp' to be saved when calling that function. So sibcall optimizations
8504 can only be safe for static functions.
8506 Note that GCC never needs return value relocations, so we don't have to
8507 worry about static calls with return value relocations (which require
8508 saving rp').
8510 It is safe to perform a sibcall optimization when the target function
8511 will never return. */
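/* A hypothetical illustration:

     static int f (int x) { return x + 1; }
     int g (int x) { return f (x); }

   Since f is static, it satisfies the !TREE_PUBLIC test below and the
   tail call in g may be sibcall optimized on the 32-bit port; a call
   to an extern function may not. */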
8512 static bool
8513 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8515 if (TARGET_PORTABLE_RUNTIME)
8516 return false;
8518 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8519 single subspace mode and the call is not indirect. As far as I know,
8520 there is no operating system support for the multiple subspace mode.
8521 It might be possible to support indirect calls if we didn't use
8522 $$dyncall (see the indirect sequence generated in pa_output_call). */
8523 if (TARGET_ELF32)
8524 return (decl != NULL_TREE);
8526 /* Sibcalls are not ok because the arg pointer register is not a fixed
8527 register. This prevents the sibcall optimization from occurring. In
8528 addition, there are problems with stub placement using GNU ld. This
8529 is because a normal sibcall branch uses a 17-bit relocation while
8530 a regular call branch uses a 22-bit relocation. As a result, more
8531 care needs to be taken in the placement of long-branch stubs. */
8532 if (TARGET_64BIT)
8533 return false;
8535 /* Sibcalls are only ok within a translation unit. */
8536 return (decl && !TREE_PUBLIC (decl));
8539 /* ??? Addition is not commutative on the PA due to the weird implicit
8540 space register selection rules for memory addresses. Therefore, we
8541 don't consider a + b == b + a, as this might be inside a MEM. */
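/* For instance, in (mem (plus (reg A) (reg B))) the implicit space
   register is selected from the operand acting as the base, so
   swapping the operands could change which space is used for the
   access. */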
8542 static bool
8543 pa_commutative_p (const_rtx x, int outer_code)
8545 return (COMMUTATIVE_P (x)
8546 && (TARGET_NO_SPACE_REGS
8547 || (outer_code != UNKNOWN && outer_code != MEM)
8548 || GET_CODE (x) != PLUS));
8551 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8552 use in fmpyadd instructions. */
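/* As implied by the checks below, operands[0-2] are the destination
   and sources of the multiply while operands[3-5] are the destination
   and sources of the add. */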
8553 int
8554 pa_fmpyaddoperands (rtx *operands)
8556 enum machine_mode mode = GET_MODE (operands[0]);
8558 /* Must be a floating point mode. */
8559 if (mode != SFmode && mode != DFmode)
8560 return 0;
8562 /* All modes must be the same. */
8563 if (! (mode == GET_MODE (operands[1])
8564 && mode == GET_MODE (operands[2])
8565 && mode == GET_MODE (operands[3])
8566 && mode == GET_MODE (operands[4])
8567 && mode == GET_MODE (operands[5])))
8568 return 0;
8570 /* All operands must be registers. */
8571 if (! (GET_CODE (operands[1]) == REG
8572 && GET_CODE (operands[2]) == REG
8573 && GET_CODE (operands[3]) == REG
8574 && GET_CODE (operands[4]) == REG
8575 && GET_CODE (operands[5]) == REG))
8576 return 0;
8578 /* Only 2 real operands to the addition. One of the input operands must
8579 be the same as the output operand. */
8580 if (! rtx_equal_p (operands[3], operands[4])
8581 && ! rtx_equal_p (operands[3], operands[5]))
8582 return 0;
8584 /* Inout operand of add cannot conflict with any operands from multiply. */
8585 if (rtx_equal_p (operands[3], operands[0])
8586 || rtx_equal_p (operands[3], operands[1])
8587 || rtx_equal_p (operands[3], operands[2]))
8588 return 0;
8590 /* multiply cannot feed into addition operands. */
8591 if (rtx_equal_p (operands[4], operands[0])
8592 || rtx_equal_p (operands[5], operands[0]))
8593 return 0;
8595 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8596 if (mode == SFmode
8597 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8598 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8599 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8600 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8601 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8602 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8603 return 0;
8605 /* Passed. Operands are suitable for fmpyadd. */
8606 return 1;
8609 #if !defined(USE_COLLECT2)
8610 static void
8611 pa_asm_out_constructor (rtx symbol, int priority)
8613 if (!function_label_operand (symbol, VOIDmode))
8614 pa_encode_label (symbol);
8616 #ifdef CTORS_SECTION_ASM_OP
8617 default_ctor_section_asm_out_constructor (symbol, priority);
8618 #else
8619 # ifdef TARGET_ASM_NAMED_SECTION
8620 default_named_section_asm_out_constructor (symbol, priority);
8621 # else
8622 default_stabs_asm_out_constructor (symbol, priority);
8623 # endif
8624 #endif
8627 static void
8628 pa_asm_out_destructor (rtx symbol, int priority)
8630 if (!function_label_operand (symbol, VOIDmode))
8631 pa_encode_label (symbol);
8633 #ifdef DTORS_SECTION_ASM_OP
8634 default_dtor_section_asm_out_destructor (symbol, priority);
8635 #else
8636 # ifdef TARGET_ASM_NAMED_SECTION
8637 default_named_section_asm_out_destructor (symbol, priority);
8638 # else
8639 default_stabs_asm_out_destructor (symbol, priority);
8640 # endif
8641 #endif
8643 #endif
8645 /* This function places uninitialized global data in the bss section.
8646 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8647 function on the SOM port to prevent uninitialized global data from
8648 being placed in the data section. */
8650 void
8651 pa_asm_output_aligned_bss (FILE *stream,
8652 const char *name,
8653 unsigned HOST_WIDE_INT size,
8654 unsigned int align)
8656 switch_to_section (bss_section);
8657 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8659 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8660 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8661 #endif
8663 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8664 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8665 #endif
8667 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8668 ASM_OUTPUT_LABEL (stream, name);
8669 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8672 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8673 that doesn't allow the alignment of global common storage to be directly
8674 specified. The SOM linker aligns common storage based on the rounded
8675 value of the NUM_BYTES parameter in the .comm directive. It's not
8676 possible to use the .align directive as it doesn't affect the alignment
8677 of the label associated with a .comm directive. */
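/* For example, an 8-byte object requesting 16-byte alignment would be
   emitted as

	foo	.comm 16

   since raising NUM_BYTES to the alignment makes the SOM linker round
   the storage placement up accordingly. */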
8679 void
8680 pa_asm_output_aligned_common (FILE *stream,
8681 const char *name,
8682 unsigned HOST_WIDE_INT size,
8683 unsigned int align)
8685 unsigned int max_common_align;
8687 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8688 if (align > max_common_align)
8690 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8691 "for global common data. Using %u",
8692 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8693 align = max_common_align;
8696 switch_to_section (bss_section);
8698 assemble_name (stream, name);
8699 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8700 MAX (size, align / BITS_PER_UNIT));
8703 /* We can't use .comm for local common storage as the SOM linker effectively
8704 treats the symbol as universal and uses the same storage for local symbols
8705 with the same name in different object files. The .block directive
8706 reserves an uninitialized block of storage. However, it's not common
8707 storage. Fortunately, GCC never requests common storage with the same
8708 name in any given translation unit. */
8710 void
8711 pa_asm_output_aligned_local (FILE *stream,
8712 const char *name,
8713 unsigned HOST_WIDE_INT size,
8714 unsigned int align)
8716 switch_to_section (bss_section);
8717 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8719 #ifdef LOCAL_ASM_OP
8720 fprintf (stream, "%s", LOCAL_ASM_OP);
8721 assemble_name (stream, name);
8722 fprintf (stream, "\n");
8723 #endif
8725 ASM_OUTPUT_LABEL (stream, name);
8726 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8729 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8730 use in fmpysub instructions. */
8731 int
8732 pa_fmpysuboperands (rtx *operands)
8734 enum machine_mode mode = GET_MODE (operands[0]);
8736 /* Must be a floating point mode. */
8737 if (mode != SFmode && mode != DFmode)
8738 return 0;
8740 /* All modes must be the same. */
8741 if (! (mode == GET_MODE (operands[1])
8742 && mode == GET_MODE (operands[2])
8743 && mode == GET_MODE (operands[3])
8744 && mode == GET_MODE (operands[4])
8745 && mode == GET_MODE (operands[5])))
8746 return 0;
8748 /* All operands must be registers. */
8749 if (! (GET_CODE (operands[1]) == REG
8750 && GET_CODE (operands[2]) == REG
8751 && GET_CODE (operands[3]) == REG
8752 && GET_CODE (operands[4]) == REG
8753 && GET_CODE (operands[5]) == REG))
8754 return 0;
8756 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8757 operation, so operands[4] must be the same as operands[3]. */
8758 if (! rtx_equal_p (operands[3], operands[4]))
8759 return 0;
8761 /* multiply cannot feed into subtraction. */
8762 if (rtx_equal_p (operands[5], operands[0]))
8763 return 0;
8765 /* Inout operand of sub cannot conflict with any operands from multiply. */
8766 if (rtx_equal_p (operands[3], operands[0])
8767 || rtx_equal_p (operands[3], operands[1])
8768 || rtx_equal_p (operands[3], operands[2]))
8769 return 0;
8771 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8772 if (mode == SFmode
8773 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8774 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8775 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8776 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8777 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8778 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8779 return 0;
8781 /* Passed. Operands are suitable for fmpysub. */
8782 return 1;
8785 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8786 constants for shadd instructions. */
8787 int
8788 pa_shadd_constant_p (int val)
8790 if (val == 2 || val == 4 || val == 8)
8791 return 1;
8792 else
8793 return 0;
8796 /* Return TRUE if INSN branches forward. */
8798 static bool
8799 forward_branch_p (rtx_insn *insn)
8801 rtx lab = JUMP_LABEL (insn);
8803 /* The INSN must have a jump label. */
8804 gcc_assert (lab != NULL_RTX);
8806 if (INSN_ADDRESSES_SET_P ())
8807 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8809 while (insn)
8811 if (insn == lab)
8812 return true;
8813 else
8814 insn = NEXT_INSN (insn);
8817 return false;
8820 /* Return 1 if INSN is in the delay slot of a call instruction. */
8821 int
8822 pa_jump_in_call_delay (rtx_insn *insn)
8825 if (! JUMP_P (insn))
8826 return 0;
8828 if (PREV_INSN (insn)
8829 && PREV_INSN (PREV_INSN (insn))
8830 && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
8832 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8834 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8835 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8838 else
8839 return 0;
8842 /* Output an unconditional move and branch insn. */
8844 const char *
8845 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8847 int length = get_attr_length (insn);
8849 /* These are the cases in which we win. */
8850 if (length == 4)
8851 return "mov%I1b,tr %1,%0,%2";
8853 /* None of the following cases win, but they don't lose either. */
8854 if (length == 8)
8856 if (dbr_sequence_length () == 0)
8858 /* Nothing in the delay slot, fake it by putting the combined
8859 insn (the copy or add) in the delay slot of a bl. */
8860 if (GET_CODE (operands[1]) == CONST_INT)
8861 return "b %2\n\tldi %1,%0";
8862 else
8863 return "b %2\n\tcopy %1,%0";
8865 else
8867 /* Something in the delay slot, but we've got a long branch. */
8868 if (GET_CODE (operands[1]) == CONST_INT)
8869 return "ldi %1,%0\n\tb %2";
8870 else
8871 return "copy %1,%0\n\tb %2";
8875 if (GET_CODE (operands[1]) == CONST_INT)
8876 output_asm_insn ("ldi %1,%0", operands);
8877 else
8878 output_asm_insn ("copy %1,%0", operands);
8879 return pa_output_lbranch (operands[2], insn, 1);
8882 /* Output an unconditional add and branch insn. */
8884 const char *
8885 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8887 int length = get_attr_length (insn);
8889 /* To make life easy we want operand0 to be the shared input/output
8890 operand and operand1 to be the readonly operand. */
8891 if (operands[0] == operands[1])
8892 operands[1] = operands[2];
8894 /* These are the cases in which we win. */
8895 if (length == 4)
8896 return "add%I1b,tr %1,%0,%3";
8898 /* None of the following cases win, but they don't lose either. */
8899 if (length == 8)
8901 if (dbr_sequence_length () == 0)
8902 /* Nothing in the delay slot, fake it by putting the combined
8903 insn (the copy or add) in the delay slot of a bl. */
8904 return "b %3\n\tadd%I1 %1,%0,%0";
8905 else
8906 /* Something in the delay slot, but we've got a long branch. */
8907 return "add%I1 %1,%0,%0\n\tb %3";
8910 output_asm_insn ("add%I1 %1,%0,%0", operands);
8911 return pa_output_lbranch (operands[3], insn, 1);
8914 /* Return nonzero if INSN (a jump insn) immediately follows a call
8915 to a named function. This is used to avoid filling the delay slot
8916 of the jump since it can usually be eliminated by modifying RP in
8917 the delay slot of the call. */
8919 int
8920 pa_following_call (rtx_insn *insn)
8922 if (! TARGET_JUMP_IN_DELAY)
8923 return 0;
8925 /* Find the previous real insn, skipping NOTEs. */
8926 insn = PREV_INSN (insn);
8927 while (insn && NOTE_P (insn))
8928 insn = PREV_INSN (insn);
8930 /* Check for CALL_INSNs and millicode calls. */
8931 if (insn
8932 && ((CALL_P (insn)
8933 && get_attr_type (insn) != TYPE_DYNCALL)
8934 || (NONJUMP_INSN_P (insn)
8935 && GET_CODE (PATTERN (insn)) != SEQUENCE
8936 && GET_CODE (PATTERN (insn)) != USE
8937 && GET_CODE (PATTERN (insn)) != CLOBBER
8938 && get_attr_type (insn) == TYPE_MILLI)))
8939 return 1;
8941 return 0;
8944 /* We use this hook to perform a PA-specific optimization which is difficult
8945 to do in earlier passes. */
8947 static void
8948 pa_reorg (void)
8950 remove_useless_addtr_insns (1);
8952 if (pa_cpu < PROCESSOR_8000)
8953 pa_combine_instructions ();
8956 /* The PA has a number of odd instructions which can perform multiple
8957 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8958 it may be profitable to combine two instructions into one instruction
8959 with two outputs. It's not profitable on PA2.0 machines because the
8960 two outputs would take two slots in the reorder buffers.
8962 This routine finds instructions which can be combined and combines
8963 them. We only support some of the potential combinations, and we
8964 only try common ways to find suitable instructions.
8966 * addb can add two registers or a register and a small integer
8967 and jump to a nearby (+-8k) location. Normally the jump to the
8968 nearby location is conditional on the result of the add, but by
8969 using the "true" condition we can make the jump unconditional.
8970 Thus addb can perform two independent operations in one insn.
8972 * movb is similar to addb in that it can perform a reg->reg
8973 or small immediate->reg copy and jump to a nearby (+-8k) location.
8975 * fmpyadd and fmpysub can perform a FP multiply and either an
8976 FP add or FP sub if the operands of the multiply and add/sub are
8977 independent (there are other minor restrictions). Note both
8978 the fmpy and fadd/fsub can in theory move to better spots according
8979 to data dependencies, but for now we require the fmpy stay at a
8980 fixed location.
8982 * Many of the memory operations can perform pre & post updates
8983 of index registers. GCC's pre/post increment/decrement addressing
8984 is far too simple to take advantage of all the possibilities. This
8985 pass may not be suitable since those insns may not be independent.
8987 * comclr can compare two ints or an int and a register, nullify
8988 the following instruction and zero some other register. This
8989 is more difficult to use as it's harder to find an insn which
8990 will generate a comclr than to find something like an unconditional
8991 branch. (Conditional moves & long branches create comclr insns.)
8993 * Most arithmetic operations can conditionally skip the next
8994 instruction. They can be viewed as "perform this operation
8995 and conditionally jump to this nearby location" (where nearby
8996 is an insn away). These are difficult to use due to the
8997 branch length restrictions. */
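/* As an example of the addb case, on a PA1.1 machine the pair

	add %r5,%r4,%r4
	b L$somewhere

   can be combined into the single insn

	addb,tr %r5,%r4,L$somewhere

   provided L$somewhere is within the +-8k displacement range. */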
8999 static void
9000 pa_combine_instructions (void)
9002 rtx_insn *anchor;
9004 /* This can get expensive since the basic algorithm is on the
9005 order of O(n^2) (or worse). Only do it for -O2 or higher
9006 levels of optimization. */
9007 if (optimize < 2)
9008 return;
9010 /* Walk down the list of insns looking for "anchor" insns which
9011 may be combined with "floating" insns. As the name implies,
9012 "anchor" instructions don't move, while "floating" insns may
9013 move around. */
9014 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9015 rtx_insn *new_rtx = make_insn_raw (par);
9017 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9019 enum attr_pa_combine_type anchor_attr;
9020 enum attr_pa_combine_type floater_attr;
9022 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9023 Also ignore any special USE insns. */
9024 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9025 || GET_CODE (PATTERN (anchor)) == USE
9026 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9027 continue;
9029 anchor_attr = get_attr_pa_combine_type (anchor);
9030 /* See if anchor is an insn suitable for combination. */
9031 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9032 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9033 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9034 && ! forward_branch_p (anchor)))
9036 rtx_insn *floater;
9038 for (floater = PREV_INSN (anchor);
9039 floater;
9040 floater = PREV_INSN (floater))
9042 if (NOTE_P (floater)
9043 || (NONJUMP_INSN_P (floater)
9044 && (GET_CODE (PATTERN (floater)) == USE
9045 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9046 continue;
9048 /* Anything except a regular INSN will stop our search. */
9049 if (! NONJUMP_INSN_P (floater))
9051 floater = NULL;
9052 break;
9055 /* See if FLOATER is suitable for combination with the
9056 anchor. */
9057 floater_attr = get_attr_pa_combine_type (floater);
9058 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9059 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9060 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9061 && floater_attr == PA_COMBINE_TYPE_FMPY))
9063 /* If ANCHOR and FLOATER can be combined, then we're
9064 done with this pass. */
9065 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9066 SET_DEST (PATTERN (floater)),
9067 XEXP (SET_SRC (PATTERN (floater)), 0),
9068 XEXP (SET_SRC (PATTERN (floater)), 1)))
9069 break;
9072 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9073 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9075 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9077 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9078 SET_DEST (PATTERN (floater)),
9079 XEXP (SET_SRC (PATTERN (floater)), 0),
9080 XEXP (SET_SRC (PATTERN (floater)), 1)))
9081 break;
9083 else
9085 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9086 SET_DEST (PATTERN (floater)),
9087 SET_SRC (PATTERN (floater)),
9088 SET_SRC (PATTERN (floater))))
9089 break;
9094 /* If we didn't find anything on the backwards scan try forwards. */
9095 if (!floater
9096 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9097 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9099 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9101 if (NOTE_P (floater)
9102 || (NONJUMP_INSN_P (floater)
9103 && (GET_CODE (PATTERN (floater)) == USE
9104 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9106 continue;
9108 /* Anything except a regular INSN will stop our search. */
9109 if (! NONJUMP_INSN_P (floater))
9111 floater = NULL;
9112 break;
9115 /* See if FLOATER is suitable for combination with the
9116 anchor. */
9117 floater_attr = get_attr_pa_combine_type (floater);
9118 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9119 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9120 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9121 && floater_attr == PA_COMBINE_TYPE_FMPY))
9123 /* If ANCHOR and FLOATER can be combined, then we're
9124 done with this pass. */
9125 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9126 SET_DEST (PATTERN (floater)),
9127 XEXP (SET_SRC (PATTERN (floater)),
9129 XEXP (SET_SRC (PATTERN (floater)),
9130 1)))
9131 break;
9136 /* FLOATER will be nonzero if we found a suitable floating
9137 insn for combination with ANCHOR. */
9138 if (floater
9139 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9140 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9142 /* Emit the new instruction and delete the old anchor. */
9143 emit_insn_before (gen_rtx_PARALLEL
9144 (VOIDmode,
9145 gen_rtvec (2, PATTERN (anchor),
9146 PATTERN (floater))),
9147 anchor);
9149 SET_INSN_DELETED (anchor);
9151 /* Emit a special USE insn for FLOATER, then delete
9152 the floating insn. */
9153 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9154 delete_insn (floater);
9156 continue;
9158 else if (floater
9159 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9161 rtx temp;
9162 /* Emit the new_jump instruction and delete the old anchor. */
9163 temp
9164 = emit_jump_insn_before (gen_rtx_PARALLEL
9165 (VOIDmode,
9166 gen_rtvec (2, PATTERN (anchor),
9167 PATTERN (floater))),
9168 anchor);
9170 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9171 SET_INSN_DELETED (anchor);
9173 /* Emit a special USE insn for FLOATER, then delete
9174 the floating insn. */
9175 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9176 delete_insn (floater);
9177 continue;
9183 static int
9184 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9185 int reversed, rtx dest,
9186 rtx src1, rtx src2)
9188 int insn_code_number;
9189 rtx_insn *start, *end;
9191 /* Create a PARALLEL with the patterns of ANCHOR and
9192 FLOATER, try to recognize it, then test constraints
9193 for the resulting pattern.
9195 If the pattern doesn't match or the constraints
9196 aren't met, keep searching for a suitable floater
9197 insn. */
9198 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9199 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9200 INSN_CODE (new_rtx) = -1;
9201 insn_code_number = recog_memoized (new_rtx);
9202 if (insn_code_number < 0
9203 || (extract_insn (new_rtx), ! constrain_operands (1)))
9204 return 0;
9206 if (reversed)
9208 start = anchor;
9209 end = floater;
9211 else
9213 start = floater;
9214 end = anchor;
9217 /* There are up to three operands to consider: one
9218 output and two inputs.
9220 The output must not be used between FLOATER & ANCHOR
9221 exclusive. The inputs must not be set between
9222 FLOATER and ANCHOR exclusive. */
9224 if (reg_used_between_p (dest, start, end))
9225 return 0;
9227 if (reg_set_between_p (src1, start, end))
9228 return 0;
9230 if (reg_set_between_p (src2, start, end))
9231 return 0;
9233 /* If we get here, then everything is good. */
9234 return 1;
9237 /* Return nonzero if references for INSN are delayed.
9239 Millicode insns are actually function calls with some special
9240 constraints on arguments and register usage.
9242 Millicode calls always expect their arguments in the integer argument
9243 registers, and always return their result in %r29 (ret1). They
9244 are expected to clobber their arguments, %r1, %r29, and the return
9245 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9247 This function tells reorg that the references to arguments and
9248 millicode calls do not appear to happen until after the millicode call.
9249 This allows reorg to put insns which set the argument registers into the
9250 delay slot of the millicode call -- thus they act more like traditional
9251 CALL_INSNs.
9253 Note we cannot consider side effects of the insn to be delayed because
9254 the branch and link insn will clobber the return pointer. If we happened
9255 to use the return pointer in the delay slot of the call, then we lose.
9257 get_attr_type will try to recognize the given insn, so make sure to
9258 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9259 in particular. */
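/* For example, this lets reorg move an insn such as "ldi 7,%r26" into
   the delay slot of "bl $$mulI,%r31", since the argument registers do
   not appear to be read until after the millicode branch. */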
9260 int
9261 pa_insn_refs_are_delayed (rtx_insn *insn)
9263 return ((NONJUMP_INSN_P (insn)
9264 && GET_CODE (PATTERN (insn)) != SEQUENCE
9265 && GET_CODE (PATTERN (insn)) != USE
9266 && GET_CODE (PATTERN (insn)) != CLOBBER
9267 && get_attr_type (insn) == TYPE_MILLI));
9270 /* Promote the return value, but not the arguments. */
9272 static enum machine_mode
9273 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9274 enum machine_mode mode,
9275 int *punsignedp ATTRIBUTE_UNUSED,
9276 const_tree fntype ATTRIBUTE_UNUSED,
9277 int for_return)
9279 if (for_return == 0)
9280 return mode;
9281 return promote_mode (type, mode, punsignedp);
9284 /* On the HP-PA the value is found in register(s) 28(-29), unless
9285 the mode is SF or DF. Then the value is returned in fr4 (32).
9287 This must perform the same promotions as PROMOTE_MODE, else promoting
9288 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9290 Small structures must be returned in a PARALLEL on PA64 in order
9291 to match the HP Compiler ABI. */
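/* For example, on TARGET_64BIT a 16-byte aggregate is returned as a
   PARALLEL of two DImode pieces in GR 28 and GR 29 (offsets 0 and 8),
   per the loop below. */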
9293 static rtx
9294 pa_function_value (const_tree valtype,
9295 const_tree func ATTRIBUTE_UNUSED,
9296 bool outgoing ATTRIBUTE_UNUSED)
9298 enum machine_mode valmode;
9300 if (AGGREGATE_TYPE_P (valtype)
9301 || TREE_CODE (valtype) == COMPLEX_TYPE
9302 || TREE_CODE (valtype) == VECTOR_TYPE)
9304 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9306 /* Handle aggregates that fit exactly in a word or double word. */
9307 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9308 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9310 if (TARGET_64BIT)
9312 /* Aggregates with a size less than or equal to 128 bits are
9313 returned in GR 28(-29). They are left justified. The pad
9314 bits are undefined. Larger aggregates are returned in
9315 memory. */
9316 rtx loc[2];
9317 int i, offset = 0;
9318 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9320 for (i = 0; i < ub; i++)
9322 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9323 gen_rtx_REG (DImode, 28 + i),
9324 GEN_INT (offset));
9325 offset += 8;
9328 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9330 else if (valsize > UNITS_PER_WORD)
9332 /* Aggregates 5 to 8 bytes in size are returned in general
9333 registers r28-r29 in the same manner as other non
9334 floating-point objects. The data is right-justified and
9335 zero-extended to 64 bits. This is opposite to the normal
9336 justification used on big endian targets and requires
9337 special treatment. */
9338 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9339 gen_rtx_REG (DImode, 28), const0_rtx);
9340 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9344 if ((INTEGRAL_TYPE_P (valtype)
9345 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9346 || POINTER_TYPE_P (valtype))
9347 valmode = word_mode;
9348 else
9349 valmode = TYPE_MODE (valtype);
9351 if (TREE_CODE (valtype) == REAL_TYPE
9352 && !AGGREGATE_TYPE_P (valtype)
9353 && TYPE_MODE (valtype) != TFmode
9354 && !TARGET_SOFT_FLOAT)
9355 return gen_rtx_REG (valmode, 32);
9357 return gen_rtx_REG (valmode, 28);
9360 /* Implement the TARGET_LIBCALL_VALUE hook. */
9362 static rtx
9363 pa_libcall_value (enum machine_mode mode,
9364 const_rtx fun ATTRIBUTE_UNUSED)
9366 if (! TARGET_SOFT_FLOAT
9367 && (mode == SFmode || mode == DFmode))
9368 return gen_rtx_REG (mode, 32);
9369 else
9370 return gen_rtx_REG (mode, 28);
9373 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9375 static bool
9376 pa_function_value_regno_p (const unsigned int regno)
9378 if (regno == 28
9379 || (! TARGET_SOFT_FLOAT && regno == 32))
9380 return true;
9382 return false;
9385 /* Update the data in CUM to advance over an argument
9386 of mode MODE and data type TYPE.
9387 (TYPE is null for libcalls where that information may not be available.) */
9389 static void
9390 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9391 const_tree type, bool named ATTRIBUTE_UNUSED)
9393 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9394 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9396 cum->nargs_prototype--;
9397 cum->words += (arg_size
9398 + ((cum->words & 01)
9399 && type != NULL_TREE
9400 && arg_size > 1));
9403 /* Return the location of a parameter that is passed in a register or NULL
9404 if the parameter has any component that is passed in memory.
9406 This is new code and will be pushed into the net sources after
9407 further testing.
9409 ??? We might want to restructure this so that it looks more like other
9410 ports. */
9411 static rtx
9412 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9413 const_tree type, bool named ATTRIBUTE_UNUSED)
9415 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9416 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9417 int alignment = 0;
9418 int arg_size;
9419 int fpr_reg_base;
9420 int gpr_reg_base;
9421 rtx retval;
9423 if (mode == VOIDmode)
9424 return NULL_RTX;
9426 arg_size = FUNCTION_ARG_SIZE (mode, type);
9428 /* If this arg would be passed partially or totally on the stack, then
9429 this routine should return zero. pa_arg_partial_bytes will
9430 handle arguments which are split between regs and stack slots if
9431 the ABI mandates split arguments. */
9432 if (!TARGET_64BIT)
9434 /* The 32-bit ABI does not split arguments. */
9435 if (cum->words + arg_size > max_arg_words)
9436 return NULL_RTX;
9438 else
9440 if (arg_size > 1)
9441 alignment = cum->words & 1;
9442 if (cum->words + alignment >= max_arg_words)
9443 return NULL_RTX;
9446 /* The 32bit ABIs and the 64bit ABIs are rather different,
9447 particularly in their handling of FP registers. We might
9448 be able to cleverly share code between them, but I'm not
9449 going to bother in the hope that splitting them up results
9450 in code that is more easily understood. */
9452 if (TARGET_64BIT)
9454 /* Advance the base registers to their current locations.
9456 Remember, gprs grow towards smaller register numbers while
9457 fprs grow towards higher register numbers. Also remember that
9458 although FP regs are 32-bit addressable, we pretend that
9459 the registers are 64 bits wide. */
9460 gpr_reg_base = 26 - cum->words;
9461 fpr_reg_base = 32 + cum->words;
9463 /* Arguments wider than one word and small aggregates need special
9464 treatment. */
9465 if (arg_size > 1
9466 || mode == BLKmode
9467 || (type && (AGGREGATE_TYPE_P (type)
9468 || TREE_CODE (type) == COMPLEX_TYPE
9469 || TREE_CODE (type) == VECTOR_TYPE)))
9471 /* Double-extended precision (80-bit), quad-precision (128-bit)
9472 and aggregates including complex numbers are aligned on
9473 128-bit boundaries. The first eight 64-bit argument slots
9474 are associated one-to-one with general registers r26
9475 through r19, and also with floating-point registers fr4
9476 through fr11. Arguments larger than one word are always
9477 passed in general registers.
9479 Using a PARALLEL with a word mode register results in
9480 left-justified data on a big-endian target. */
9482 rtx loc[8];
9483 int i, offset = 0, ub = arg_size;
9485 /* Align the base register. */
9486 gpr_reg_base -= alignment;
9488 ub = MIN (ub, max_arg_words - cum->words - alignment);
9489 for (i = 0; i < ub; i++)
9491 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9492 gen_rtx_REG (DImode, gpr_reg_base),
9493 GEN_INT (offset));
9494 gpr_reg_base -= 1;
9495 offset += 8;
9498 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9501 else
9503 /* If the argument is larger than a word, then we know precisely
9504 which registers we must use. */
9505 if (arg_size > 1)
9507 if (cum->words)
9509 gpr_reg_base = 23;
9510 fpr_reg_base = 38;
9512 else
9514 gpr_reg_base = 25;
9515 fpr_reg_base = 34;
9518 /* Structures 5 to 8 bytes in size are passed in the general
9519 registers in the same manner as other non-floating-point
9520 objects. The data is right-justified and zero-extended
9521 to 64 bits. This is opposite to the normal justification
9522 used on big-endian targets and requires special treatment.
9523 We now define BLOCK_REG_PADDING to pad these objects.
9524 Aggregates, complex and vector types are passed in the same
9525 manner as structures. */
9526 if (mode == BLKmode
9527 || (type && (AGGREGATE_TYPE_P (type)
9528 || TREE_CODE (type) == COMPLEX_TYPE
9529 || TREE_CODE (type) == VECTOR_TYPE)))
9531 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9532 gen_rtx_REG (DImode, gpr_reg_base),
9533 const0_rtx);
9534 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9537 else
9539 /* We have a single word (32 bits). A simple computation
9540 will get us the register #s we need. */
9541 gpr_reg_base = 26 - cum->words;
9542 fpr_reg_base = 32 + 2 * cum->words;
9546 /* Determine if the argument needs to be passed in both general and
9547 floating point registers. */
9548 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9549 /* If we are doing soft-float with portable runtime, then there
9550 is no need to worry about FP regs. */
9551 && !TARGET_SOFT_FLOAT
9552 /* The parameter must be some kind of scalar float, else we just
9553 pass it in integer registers. */
9554 && GET_MODE_CLASS (mode) == MODE_FLOAT
9555 /* The target function must not have a prototype. */
9556 && cum->nargs_prototype <= 0
9557 /* libcalls do not need to pass items in both FP and general
9558 registers. */
9559 && type != NULL_TREE
9560 /* All this hair applies to "outgoing" args only. This includes
9561 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9562 && !cum->incoming)
9563 /* Also pass outgoing floating arguments in both registers in indirect
9564 calls with the 32-bit ABI and the HP assembler since there is no
9565 way to specify the argument locations in static functions. */
9566 || (!TARGET_64BIT
9567 && !TARGET_GAS
9568 && !cum->incoming
9569 && cum->indirect
9570 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9572 retval
9573 = gen_rtx_PARALLEL
9574 (mode,
9575 gen_rtvec (2,
9576 gen_rtx_EXPR_LIST (VOIDmode,
9577 gen_rtx_REG (mode, fpr_reg_base),
9578 const0_rtx),
9579 gen_rtx_EXPR_LIST (VOIDmode,
9580 gen_rtx_REG (mode, gpr_reg_base),
9581 const0_rtx)));
9583 else
9585 /* See if we should pass this parameter in a general register. */
9586 if (TARGET_SOFT_FLOAT
9587 /* Indirect calls in the normal 32-bit ABI require all arguments
9588 to be passed in general registers. */
9589 || (!TARGET_PORTABLE_RUNTIME
9590 && !TARGET_64BIT
9591 && !TARGET_ELF32
9592 && cum->indirect)
9593 /* If the parameter is not a scalar floating-point parameter,
9594 then it belongs in GPRs. */
9595 || GET_MODE_CLASS (mode) != MODE_FLOAT
9596 /* A structure with a single SFmode field belongs in a GPR. */
9597 || (type && AGGREGATE_TYPE_P (type)))
9598 retval = gen_rtx_REG (mode, gpr_reg_base);
9599 else
9600 retval = gen_rtx_REG (mode, fpr_reg_base);
9602 return retval;
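/* Illustrative example for pa_function_arg above (a reading of the
   code, not in the original source): on a 32-bit target where the
   dual-pass test succeeds (say, TARGET_ELF32 calling an unprototyped
   function), the first DFmode argument has arg_size == 2 and
   cum->words == 0, so gpr_reg_base == 25, fpr_reg_base == 34, and
   the result is roughly

     (parallel [(expr_list (reg:DF 34) (const_int 0))
                (expr_list (reg:DF 25) (const_int 0))])

   i.e. the caller loads the value into both the FP and the general
   argument registers. */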
9605 /* Arguments larger than one word are double word aligned. */
9607 static unsigned int
9608 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9610 bool singleword = (type
9611 ? (integer_zerop (TYPE_SIZE (type))
9612 || !TREE_CONSTANT (TYPE_SIZE (type))
9613 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9614 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9616 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
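/* Examples for pa_function_arg_boundary above (illustrative): on the
   32-bit target a DImode argument is wider than UNITS_PER_WORD and is
   aligned to MAX_PARM_BOUNDARY, while an SImode argument gets only
   PARM_BOUNDARY. Zero-sized and variable-sized types count as
   single-word so the boundary never depends on an unknown size. */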
9619 /* If this arg would be passed totally in registers or totally on the stack,
9620 then this routine should return zero. */
9622 static int
9623 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9624 tree type, bool named ATTRIBUTE_UNUSED)
9626 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9627 unsigned int max_arg_words = 8;
9628 unsigned int offset = 0;
9630 if (!TARGET_64BIT)
9631 return 0;
9633 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9634 offset = 1;
9636 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9637 /* Arg fits fully into registers. */
9638 return 0;
9639 else if (cum->words + offset >= max_arg_words)
9640 /* Arg fully on the stack. */
9641 return 0;
9642 else
9643 /* Arg is split. */
9644 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
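/* Worked example for pa_arg_partial_bytes above (illustrative): on
   the 64-bit target with cum->words == 6 and a 32-byte aggregate
   (FUNCTION_ARG_SIZE == 4), no pad word is needed, 6 + 4 > 8 and
   6 < 8, so the argument is split: (8 - 6) * UNITS_PER_WORD == 16
   bytes travel in registers and the remaining 16 bytes go on the
   stack. */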
9648 /* A get_unnamed_section callback for switching to the text section.
9650 This function is only used with SOM. Because we don't support
9651 named subspaces, we can only create a new subspace or switch back
9652 to the default text subspace. */
9654 static void
9655 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9657 gcc_assert (TARGET_SOM);
9658 if (TARGET_GAS)
9660 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9662 /* We only want to emit a .nsubspa directive once at the
9663 start of the function. */
9664 cfun->machine->in_nsubspa = 1;
9666 /* Create a new subspace for the text. This provides
9667 better stub placement and one-only functions. */
9668 if (cfun->decl
9669 && DECL_ONE_ONLY (cfun->decl)
9670 && !DECL_WEAK (cfun->decl))
9672 output_section_asm_op ("\t.SPACE $TEXT$\n"
9673 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9674 "ACCESS=44,SORT=24,COMDAT");
9675 return;
9678 else
9680 /* There isn't a current function or the body of the current
9681 function has been completed. So, we are changing to the
9682 text section to output debugging information. Thus, we
9683 need to forget that we are in the text section so that
9684 varasm.c will call us when text_section is selected again. */
9685 gcc_assert (!cfun || !cfun->machine
9686 || cfun->machine->in_nsubspa == 2);
9687 in_section = NULL;
9689 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9690 return;
9692 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9695 /* A get_unnamed_section callback for switching to comdat data
9696 sections. This function is only used with SOM. */
9698 static void
9699 som_output_comdat_data_section_asm_op (const void *data)
9701 in_section = NULL;
9702 output_section_asm_op (data);
9705 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
9707 static void
9708 pa_som_asm_init_sections (void)
9710 text_section
9711 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9713 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9714 is not being generated. */
9715 som_readonly_data_section
9716 = get_unnamed_section (0, output_section_asm_op,
9717 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9719 /* When secondary definitions are not supported, SOM makes readonly
9720 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9721 the comdat flag. */
9722 som_one_only_readonly_data_section
9723 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9724 "\t.SPACE $TEXT$\n"
9725 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9726 "ACCESS=0x2c,SORT=16,COMDAT");
9729 /* When secondary definitions are not supported, SOM makes data one-only
9730 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9731 som_one_only_data_section
9732 = get_unnamed_section (SECTION_WRITE,
9733 som_output_comdat_data_section_asm_op,
9734 "\t.SPACE $PRIVATE$\n"
9735 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9736 "ACCESS=31,SORT=24,COMDAT");
9738 if (flag_tm)
9739 som_tm_clone_table_section
9740 = get_unnamed_section (0, output_section_asm_op,
9741 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9743 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9744 which reference data within the $TEXT$ space (for example constant
9745 strings in the $LIT$ subspace).
9747 The assemblers (GAS and HP as) both have problems with handling
9748 the difference of two symbols, which is the other correct way to
9749 reference constant data during PIC code generation.
9751 So, there's no way to reference constant data which is in the
9752 $TEXT$ space during PIC generation. Instead, place all constant
9753 data into the $PRIVATE$ subspace (this reduces sharing, but it
9754 works correctly). */
9755 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9757 /* We must not have a reference to an external symbol defined in a
9758 shared library in a readonly section, else the SOM linker will
9759 complain.
9761 So, we force exception information into the data section. */
9762 exception_section = data_section;
9765 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9767 static section *
9768 pa_som_tm_clone_table_section (void)
9770 return som_tm_clone_table_section;
9773 /* On hpux10, the linker will give an error if we have a reference
9774 in the read-only data section to a symbol defined in a shared
9775 library. Therefore, expressions that might require a reloc
9776 cannot be placed in the read-only data section. */
9778 static section *
9779 pa_select_section (tree exp, int reloc,
9780 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9782 if (TREE_CODE (exp) == VAR_DECL
9783 && TREE_READONLY (exp)
9784 && !TREE_THIS_VOLATILE (exp)
9785 && DECL_INITIAL (exp)
9786 && (DECL_INITIAL (exp) == error_mark_node
9787 || TREE_CONSTANT (DECL_INITIAL (exp)))
9788 && !reloc)
9790 if (TARGET_SOM
9791 && DECL_ONE_ONLY (exp)
9792 && !DECL_WEAK (exp))
9793 return som_one_only_readonly_data_section;
9794 else
9795 return readonly_data_section;
9797 else if (CONSTANT_CLASS_P (exp) && !reloc)
9798 return readonly_data_section;
9799 else if (TARGET_SOM
9800 && TREE_CODE (exp) == VAR_DECL
9801 && DECL_ONE_ONLY (exp)
9802 && !DECL_WEAK (exp))
9803 return som_one_only_data_section;
9804 else
9805 return data_section;
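/* Examples for pa_select_section above (illustrative): a static
   const variable with a constant, reloc-free initializer lands in
   the read-only data section (or the SOM one-only variant for
   non-weak COMDAT decls), whereas a const-qualified variable
   initialized with the address of another object arrives with RELOC
   set and falls through to the writable data section. */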
9808 static void
9809 pa_globalize_label (FILE *stream, const char *name)
9811 /* We only handle DATA objects here; functions are globalized in
9812 ASM_DECLARE_FUNCTION_NAME. */
9813 if (! FUNCTION_NAME_P (name))
9815 fputs ("\t.EXPORT ", stream);
9816 assemble_name (stream, name);
9817 fputs (",DATA\n", stream);
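/* Example output from the fputs calls above for a global data symbol
   named `counter':

	.EXPORT counter,DATA  */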
9821 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9823 static rtx
9824 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9825 int incoming ATTRIBUTE_UNUSED)
9827 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9830 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9832 bool
9833 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9835 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9836 PA64 ABI says that objects larger than 128 bits are returned in memory.
9837 Note, int_size_in_bytes can return -1 if the size of the object is
9838 variable or larger than the maximum value that can be expressed as
9839 a HOST_WIDE_INT. It can also return zero for an empty type. The
9840 simplest way to handle variable and empty types is to pass them in
9841 memory. This avoids problems in defining the boundaries of argument
9842 slots, allocating registers, etc. */
9843 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9844 || int_size_in_bytes (type) <= 0);
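/* Examples for pa_return_in_memory above (illustrative): a 12-byte
   structure is returned in memory on the 32-bit target (12 > 8) but
   in registers on the 64-bit target (12 <= 16); variable-sized and
   empty types make int_size_in_bytes return -1 or 0 and are
   therefore always returned in memory. */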
9847 /* Structure to hold declaration and name of external symbols that are
9848 emitted by GCC. We generate a vector of these symbols and output them
9849 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9850 This avoids putting out names that are never really used. */
9852 typedef struct GTY(()) extern_symbol
9854 tree decl;
9855 const char *name;
9856 } extern_symbol;
9858 /* Define gc'd vector type for extern_symbol. */
9860 /* Vector of extern_symbol entries. */
9861 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9863 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9864 /* Mark DECL (name NAME) as an external reference (assembler output
9865 file FILE). This saves the names to output at the end of the file
9866 if actually referenced. */
9868 void
9869 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9871 gcc_assert (file == asm_out_file);
9872 extern_symbol p = {decl, name};
9873 vec_safe_push (extern_symbols, p);
9876 /* Output text required at the end of an assembler file.
9877 This includes deferred plabels and .import directives for
9878 all external symbols that were actually referenced. */
9880 static void
9881 pa_hpux_file_end (void)
9883 unsigned int i;
9884 extern_symbol *p;
9886 if (!NO_DEFERRED_PROFILE_COUNTERS)
9887 output_deferred_profile_counters ();
9889 output_deferred_plabels ();
9891 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9893 tree decl = p->decl;
9895 if (!TREE_ASM_WRITTEN (decl)
9896 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9897 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9900 vec_free (extern_symbols);
9902 #endif
9904 /* Return true if a change from mode FROM to mode TO for a register
9905 in register class RCLASS is invalid. */
9907 bool
9908 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9909 enum reg_class rclass)
9911 if (from == to)
9912 return false;
9914 /* Reject changes to/from complex and vector modes. */
9915 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9916 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9917 return true;
9919 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9920 return false;
9922 /* There is no way to load QImode or HImode values directly from
9923 memory. SImode loads to the FP registers are not zero extended.
9924 On the 64-bit target, this conflicts with the definition of
9925 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9926 with different sizes in the floating-point registers. */
9927 if (MAYBE_FP_REG_CLASS_P (rclass))
9928 return true;
9930 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9931 in specific sets of registers. Thus, we cannot allow changing
9932 to a larger mode when it's larger than a word. */
9933 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9934 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9935 return true;
9937 return false;
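/* Examples for pa_cannot_change_mode_class above (illustrative): an
   SFmode/DFmode change is invalid in the FP register classes because
   the sizes differ, an SImode/SFmode change (equal size) is valid in
   any class, and any change involving a complex or vector mode is
   rejected outright. */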
9940 /* Returns TRUE if it is a good idea to tie two pseudo registers
9941 when one has mode MODE1 and one has mode MODE2.
9942 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9943 for any hard reg, then this must be FALSE for correct output.
9945 We should return FALSE for QImode and HImode because these modes
9946 are not OK in the floating-point registers. However, this prevents
9947 tying these modes to SImode and DImode in the general registers.
9948 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9949 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9950 in the floating-point registers. */
9952 bool
9953 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9955 /* Don't tie modes in different classes. */
9956 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9957 return false;
9959 return true;
9963 /* Length in units of the trampoline instruction code. */
9965 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9968 /* Output assembler code for a block containing the constant parts
9969 of a trampoline, leaving space for the variable parts.
9971 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9972 and then branches to the specified routine.
9974 This code template is copied from the text segment to a stack
9975 location, patched by pa_trampoline_init to contain valid values,
9976 and then entered as a subroutine.
9978 It is best to keep this as small as possible to avoid having to
9979 flush multiple lines in the cache. */
9981 static void
9982 pa_asm_trampoline_template (FILE *f)
9984 if (!TARGET_64BIT)
9986 fputs ("\tldw 36(%r22),%r21\n", f);
9987 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
9988 if (ASSEMBLER_DIALECT == 0)
9989 fputs ("\tdepi 0,31,2,%r21\n", f);
9990 else
9991 fputs ("\tdepwi 0,31,2,%r21\n", f);
9992 fputs ("\tldw 4(%r21),%r19\n", f);
9993 fputs ("\tldw 0(%r21),%r21\n", f);
9994 if (TARGET_PA_20)
9996 fputs ("\tbve (%r21)\n", f);
9997 fputs ("\tldw 40(%r22),%r29\n", f);
9998 fputs ("\t.word 0\n", f);
9999 fputs ("\t.word 0\n", f);
10001 else
10003 fputs ("\tldsid (%r21),%r1\n", f);
10004 fputs ("\tmtsp %r1,%sr0\n", f);
10005 fputs ("\tbe 0(%sr0,%r21)\n", f);
10006 fputs ("\tldw 40(%r22),%r29\n", f);
10008 fputs ("\t.word 0\n", f);
10009 fputs ("\t.word 0\n", f);
10010 fputs ("\t.word 0\n", f);
10011 fputs ("\t.word 0\n", f);
10013 else
10015 fputs ("\t.dword 0\n", f);
10016 fputs ("\t.dword 0\n", f);
10017 fputs ("\t.dword 0\n", f);
10018 fputs ("\t.dword 0\n", f);
10019 fputs ("\tmfia %r31\n", f);
10020 fputs ("\tldd 24(%r31),%r1\n", f);
10021 fputs ("\tldd 24(%r1),%r27\n", f);
10022 fputs ("\tldd 16(%r1),%r1\n", f);
10023 fputs ("\tbve (%r1)\n", f);
10024 fputs ("\tldd 32(%r31),%r31\n", f);
10025 fputs ("\t.dword 0 ; fptr\n", f);
10026 fputs ("\t.dword 0 ; static link\n", f);
10030 /* Emit RTL insns to initialize the variable parts of a trampoline.
10031 FNADDR is an RTX for the address of the function's pure code.
10032 CXT is an RTX for the static chain value for the function.
10034 Move the function address to the trampoline template at offset 36.
10035 Move the static chain value to the trampoline template at offset 40.
10036 Move the trampoline address to the trampoline template at offset 44.
10037 Move r19 to trampoline template at offset 48. The latter two
10038 words create a plabel for the indirect call to the trampoline.
10040 A similar sequence is used for the 64-bit port but the plabel is
10041 at the beginning of the trampoline.
10043 Finally, the cache entries for the trampoline code are flushed.
10044 This is necessary to ensure that the trampoline instruction sequence
10045 is written to memory prior to any attempts at prefetching the code
10046 sequence. */
10048 static void
10049 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10051 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10052 rtx start_addr = gen_reg_rtx (Pmode);
10053 rtx end_addr = gen_reg_rtx (Pmode);
10054 rtx line_length = gen_reg_rtx (Pmode);
10055 rtx r_tramp, tmp;
10057 emit_block_move (m_tramp, assemble_trampoline_template (),
10058 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10059 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10061 if (!TARGET_64BIT)
10063 tmp = adjust_address (m_tramp, Pmode, 36);
10064 emit_move_insn (tmp, fnaddr);
10065 tmp = adjust_address (m_tramp, Pmode, 40);
10066 emit_move_insn (tmp, chain_value);
10068 /* Create a fat pointer for the trampoline. */
10069 tmp = adjust_address (m_tramp, Pmode, 44);
10070 emit_move_insn (tmp, r_tramp);
10071 tmp = adjust_address (m_tramp, Pmode, 48);
10072 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10074 /* fdc and fic only use registers for the address to flush;
10075 they do not accept integer displacements. We align the
10076 start and end addresses to the beginning of their respective
10077 cache lines to minimize the number of lines flushed. */
10078 emit_insn (gen_andsi3 (start_addr, r_tramp,
10079 GEN_INT (-MIN_CACHELINE_SIZE)));
10080 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10081 TRAMPOLINE_CODE_SIZE-1));
10082 emit_insn (gen_andsi3 (end_addr, tmp,
10083 GEN_INT (-MIN_CACHELINE_SIZE)));
10084 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10085 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10086 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10087 gen_reg_rtx (Pmode),
10088 gen_reg_rtx (Pmode)));
10090 else
10092 tmp = adjust_address (m_tramp, Pmode, 56);
10093 emit_move_insn (tmp, fnaddr);
10094 tmp = adjust_address (m_tramp, Pmode, 64);
10095 emit_move_insn (tmp, chain_value);
10097 /* Create a fat pointer for the trampoline. */
10098 tmp = adjust_address (m_tramp, Pmode, 16);
10099 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10100 r_tramp, 32)));
10101 tmp = adjust_address (m_tramp, Pmode, 24);
10102 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10104 /* fdc and fic only use registers for the address to flush;
10105 they do not accept integer displacements. We align the
10106 start and end addresses to the beginning of their respective
10107 cache lines to minimize the number of lines flushed. */
10108 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10109 emit_insn (gen_anddi3 (start_addr, tmp,
10110 GEN_INT (-MIN_CACHELINE_SIZE)));
10111 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10112 TRAMPOLINE_CODE_SIZE - 1));
10113 emit_insn (gen_anddi3 (end_addr, tmp,
10114 GEN_INT (-MIN_CACHELINE_SIZE)));
10115 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10116 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10117 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10118 gen_reg_rtx (Pmode),
10119 gen_reg_rtx (Pmode)));
10122 #ifdef HAVE_ENABLE_EXECUTE_STACK
10123 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10124 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10125 #endif
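/* Worked example for the cache flush above (illustrative, assuming
   MIN_CACHELINE_SIZE is 32): if the trampoline lands at 0x7f001234,
   start_addr becomes 0x7f001234 & -32 == 0x7f001220 and end_addr is
   the line base of the last code byte, so fdc/fic step through whole
   cache lines instead of arbitrary byte addresses. */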
10128 /* Perform any machine-specific adjustment in the address of the trampoline.
10129 ADDR contains the address that was passed to pa_trampoline_init.
10130 Adjust the trampoline address to point to the plabel at offset 44. */
10132 static rtx
10133 pa_trampoline_adjust_address (rtx addr)
10135 if (!TARGET_64BIT)
10136 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10137 return addr;
10140 static rtx
10141 pa_delegitimize_address (rtx orig_x)
10143 rtx x = delegitimize_mem_from_attrs (orig_x);
10145 if (GET_CODE (x) == LO_SUM
10146 && GET_CODE (XEXP (x, 1)) == UNSPEC
10147 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10148 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10149 return x;
10152 static rtx
10153 pa_internal_arg_pointer (void)
10155 /* The argument pointer and the hard frame pointer are the same in
10156 the 32-bit runtime, so we don't need a copy. */
10157 if (TARGET_64BIT)
10158 return copy_to_reg (virtual_incoming_args_rtx);
10159 else
10160 return virtual_incoming_args_rtx;
10163 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10164 Frame pointer elimination is automatically handled. */
10166 static bool
10167 pa_can_eliminate (const int from, const int to)
10169 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10170 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10171 return false;
10173 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10174 ? ! frame_pointer_needed
10175 : true);
10178 /* Define the offset between two registers, FROM to be eliminated and its
10179 replacement TO, at the start of a routine. */
10180 HOST_WIDE_INT
10181 pa_initial_elimination_offset (int from, int to)
10183 HOST_WIDE_INT offset;
10185 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10186 && to == STACK_POINTER_REGNUM)
10187 offset = -pa_compute_frame_size (get_frame_size (), 0);
10188 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10189 offset = 0;
10190 else
10191 gcc_unreachable ();
10193 return offset;
10196 static void
10197 pa_conditional_register_usage (void)
10199 int i;
10201 if (!TARGET_64BIT && !TARGET_PA_11)
10203 for (i = 56; i <= FP_REG_LAST; i++)
10204 fixed_regs[i] = call_used_regs[i] = 1;
10205 for (i = 33; i < 56; i += 2)
10206 fixed_regs[i] = call_used_regs[i] = 1;
10208 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10210 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10211 fixed_regs[i] = call_used_regs[i] = 1;
10213 if (flag_pic)
10214 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10217 /* Target hook for c_mode_for_suffix. */
10219 static enum machine_mode
10220 pa_c_mode_for_suffix (char suffix)
10222 if (HPUX_LONG_DOUBLE_LIBRARY)
10224 if (suffix == 'q')
10225 return TFmode;
10228 return VOIDmode;
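/* Usage note for pa_c_mode_for_suffix above (illustrative): with an
   HP-UX long-double library, a literal such as 1.5q is given TFmode
   (128 bits); any other suffix returns VOIDmode and is rejected by
   the front end. */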
10231 /* Target hook for function_section. */
10233 static section *
10234 pa_function_section (tree decl, enum node_frequency freq,
10235 bool startup, bool exit)
10237 /* Put functions in the text section if the target doesn't have named sections. */
10238 if (!targetm_common.have_named_sections)
10239 return text_section;
10241 /* Force nested functions into the same section as the containing
10242 function. */
10243 if (decl
10244 && DECL_SECTION_NAME (decl) == NULL
10245 && DECL_CONTEXT (decl) != NULL_TREE
10246 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10247 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10248 return function_section (DECL_CONTEXT (decl));
10250 /* Otherwise, use the default function section. */
10251 return default_function_section (decl, freq, startup, exit);
10254 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10256 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10257 that need more than three instructions to load prior to reload. This
10258 limit is somewhat arbitrary. It takes three instructions to load a
10259 CONST_INT from memory but two are memory accesses. It may be better
10260 to increase the allowed range for CONST_INTS. We may also be able
10261 to handle CONST_DOUBLES. */
10263 static bool
10264 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10266 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10267 return false;
10269 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10270 return false;
10272 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10273 legitimate constants. The other variants can't be handled by
10274 the move patterns after reload starts. */
10275 if (tls_referenced_p (x))
10276 return false;
10278 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10279 return false;
10281 if (TARGET_64BIT
10282 && HOST_BITS_PER_WIDE_INT > 32
10283 && GET_CODE (x) == CONST_INT
10284 && !reload_in_progress
10285 && !reload_completed
10286 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10287 && !pa_cint_ok_for_move (INTVAL (x)))
10288 return false;
10290 if (function_label_operand (x, mode))
10291 return false;
10293 return true;
10296 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10298 static unsigned int
10299 pa_section_type_flags (tree decl, const char *name, int reloc)
10301 unsigned int flags;
10303 flags = default_section_type_flags (decl, name, reloc);
10305 /* Function labels are placed in the constant pool. This can
10306 cause a section conflict if decls are put in ".data.rel.ro"
10307 or ".data.rel.ro.local" using the __attribute__ construct. */
10308 if (strcmp (name, ".data.rel.ro") == 0
10309 || strcmp (name, ".data.rel.ro.local") == 0)
10310 flags |= SECTION_WRITE | SECTION_RELRO;
10312 return flags;
10315 /* pa_legitimate_address_p recognizes an RTL expression that is a
10316 valid memory address for an instruction. The MODE argument is the
10317 machine mode for the MEM expression that wants to use this address.
10319 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10320 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10321 available with floating point loads and stores, and integer loads.
10322 We get better code by allowing indexed addresses in the initial
10323 RTL generation.
10325 The acceptance of indexed addresses as legitimate implies that we
10326 must provide patterns for doing indexed integer stores, or the move
10327 expanders must force the address of an indexed store to a register.
10328 We have adopted the latter approach.
10330 Another function of pa_legitimate_address_p is to ensure that
10331 the base register is a valid pointer for indexed instructions.
10332 On targets that have non-equivalent space registers, we have to
10333 know at the time of assembler output which register in a REG+REG
10334 pair is the base register. The REG_POINTER flag is sometimes lost
10335 in reload and the following passes, so it can't be relied on during
10336 code generation. Thus, we either have to canonicalize the order
10337 of the registers in REG+REG indexed addresses, or treat REG+REG
10338 addresses separately and provide patterns for both permutations.
10340 The latter approach requires several hundred additional lines of
10341 code in pa.md. The downside to canonicalizing is that a PLUS
10342 in the wrong order can't combine to form a scaled indexed
10343 memory operand. As we won't need to canonicalize the operands if
10344 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10346 We initially break out scaled indexed addresses in canonical order
10347 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10348 scaled indexed addresses during RTL generation. However, fold_rtx
10349 has its own opinion on how the operands of a PLUS should be ordered.
10350 If one of the operands is equivalent to a constant, it will make
10351 that operand the second operand. As the base register is likely to
10352 be equivalent to a SYMBOL_REF, we have made it the second operand.
10354 pa_legitimate_address_p accepts REG+REG as legitimate when the
10355 operands are in the order INDEX+BASE on targets with non-equivalent
10356 space registers, and in any order on targets with equivalent space
10357 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10359 We treat a SYMBOL_REF as legitimate if it is part of the current
10360 function's constant-pool, because such addresses can actually be
10361 output as REG+SMALLINT. */
10363 static bool
10364 pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
10366 if ((REG_P (x)
10367 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10368 : REG_OK_FOR_BASE_P (x)))
10369 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10370 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10371 && REG_P (XEXP (x, 0))
10372 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10373 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10374 return true;
10376 if (GET_CODE (x) == PLUS)
10378 rtx base, index;
10380 /* For REG+REG, the base register should be in XEXP (x, 1),
10381 so check it first. */
10382 if (REG_P (XEXP (x, 1))
10383 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10384 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10385 base = XEXP (x, 1), index = XEXP (x, 0);
10386 else if (REG_P (XEXP (x, 0))
10387 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10388 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10389 base = XEXP (x, 0), index = XEXP (x, 1);
10390 else
10391 return false;
10393 if (GET_CODE (index) == CONST_INT)
10395 if (INT_5_BITS (index))
10396 return true;
10398 /* When INT14_OK_STRICT is false, a secondary reload is needed
10399 to adjust the displacement of SImode and DImode floating point
10400 instructions, but this may fail when the register also needs
10401 reloading. So, we return false when STRICT is true. We
10402 also reject long displacements for float mode addresses since
10403 the majority of accesses will use floating point instructions
10404 that don't support 14-bit offsets. */
10405 if (!INT14_OK_STRICT
10406 && (strict || !(reload_in_progress || reload_completed))
10407 && mode != QImode
10408 && mode != HImode)
10409 return false;
10411 return base14_operand (index, mode);
10414 if (!TARGET_DISABLE_INDEXING
10415 /* Only accept the "canonical" INDEX+BASE operand order
10416 on targets with non-equivalent space registers. */
10417 && (TARGET_NO_SPACE_REGS
10418 ? REG_P (index)
10419 : (base == XEXP (x, 1) && REG_P (index)
10420 && (reload_completed
10421 || (reload_in_progress && HARD_REGISTER_P (base))
10422 || REG_POINTER (base))
10423 && (reload_completed
10424 || (reload_in_progress && HARD_REGISTER_P (index))
10425 || !REG_POINTER (index))))
10426 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10427 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10428 : REG_OK_FOR_INDEX_P (index))
10429 && borx_reg_operand (base, Pmode)
10430 && borx_reg_operand (index, Pmode))
10431 return true;
10433 if (!TARGET_DISABLE_INDEXING
10434 && GET_CODE (index) == MULT
10435 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10436 && REG_P (XEXP (index, 0))
10437 && GET_MODE (XEXP (index, 0)) == Pmode
10438 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10439 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10440 && GET_CODE (XEXP (index, 1)) == CONST_INT
10441 && INTVAL (XEXP (index, 1))
10442 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10443 && borx_reg_operand (base, Pmode))
10444 return true;
10446 return false;
10449 if (GET_CODE (x) == LO_SUM)
10451 rtx y = XEXP (x, 0);
10453 if (GET_CODE (y) == SUBREG)
10454 y = SUBREG_REG (y);
10456 if (REG_P (y)
10457 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10458 : REG_OK_FOR_BASE_P (y)))
10460 /* Needed for -fPIC */
10461 if (mode == Pmode
10462 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10463 return true;
10465 if (!INT14_OK_STRICT
10466 && (strict || !(reload_in_progress || reload_completed))
10467 && mode != QImode
10468 && mode != HImode)
10469 return false;
10471 if (CONSTANT_P (XEXP (x, 1)))
10472 return true;
10474 return false;
10477 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10478 return true;
10480 return false;
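/* Examples of addresses accepted by pa_legitimate_address_p above
   (illustrative): a bare base register (reg); a short displacement
   (plus (reg) (const_int 8)); unscaled indexing in the canonical
   INDEX+BASE order (plus (reg index) (reg base)); scaled indexing
   such as (plus (mult (reg) (const_int 4)) (reg)) for a 4-byte mode;
   and (lo_sum (reg) (symbol_ref)) pairs created when symbolic
   addresses are split. */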
10483 /* Look for machine dependent ways to make the invalid address AD a
10484 valid address.
10486 For the PA, transform:
10488 memory(X + <large int>)
10490 into:
10492 if (<large int> & mask) >= (mask + 1) / 2
10493 Y = (<large int> & ~mask) + mask + 1 Round up.
10494 else
10495 Y = (<large int> & ~mask) Round down.
10496 Z = X + Y
10497 memory (Z + (<large int> - Y));
10499 This makes reload inheritance and reload_cse work better since Z
10500 can be reused.
10502 There may be more opportunities to improve code with this hook. */
10504 rtx
10505 pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
10506 int opnum, int type,
10507 int ind_levels ATTRIBUTE_UNUSED)
10509 long offset, newoffset, mask;
10510 rtx new_rtx, temp = NULL_RTX;
10512 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10513 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10515 if (optimize && GET_CODE (ad) == PLUS)
10516 temp = simplify_binary_operation (PLUS, Pmode,
10517 XEXP (ad, 0), XEXP (ad, 1));
10519 new_rtx = temp ? temp : ad;
10521 if (optimize
10522 && GET_CODE (new_rtx) == PLUS
10523 && GET_CODE (XEXP (new_rtx, 0)) == REG
10524 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10526 offset = INTVAL (XEXP ((new_rtx), 1));
10528 /* Choose rounding direction. Round up if we are >= halfway. */
10529 if ((offset & mask) >= ((mask + 1) / 2))
10530 newoffset = (offset & ~mask) + mask + 1;
10531 else
10532 newoffset = offset & ~mask;
10534 /* Ensure that long displacements are aligned. */
10535 if (mask == 0x3fff
10536 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10537 || (TARGET_64BIT && (mode) == DImode)))
10538 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10540 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10542 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10543 GEN_INT (newoffset));
10544 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10545 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10546 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10547 opnum, (enum reload_type) type);
10548 return ad;
10552 return NULL_RTX;
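/* Worked example for pa_legitimize_reload_address above
   (illustrative): for a DFmode access to X + 0x124 when
   INT14_OK_STRICT is false, mask is 0x1f and 0x124 & 0x1f == 4 is
   below the halfway point, so we round down to Y == 0x120. The
   address is rewritten as (X + 0x120) + 4; the inner sum is pushed
   as a reload whose result nearby accesses can inherit, and the
   residual 4 fits the 5-bit FP displacement. */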
10555 /* Output address vector. */
10557 void
10558 pa_output_addr_vec (rtx lab, rtx body)
10560 int idx, vlen = XVECLEN (body, 0);
10562 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10563 if (TARGET_GAS)
10564 fputs ("\t.begin_brtab\n", asm_out_file);
10565 for (idx = 0; idx < vlen; idx++)
10567 ASM_OUTPUT_ADDR_VEC_ELT
10568 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10570 if (TARGET_GAS)
10571 fputs ("\t.end_brtab\n", asm_out_file);
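/* Example output for pa_output_addr_vec above (illustrative; the
   exact label spelling depends on the target macros) for a
   three-entry table when TARGET_GAS is true:

	L$0012:
		.begin_brtab
		.word L$0034
		.word L$0035
		.word L$0036
		.end_brtab  */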
10574 /* Output address difference vector. */
10576 void
10577 pa_output_addr_diff_vec (rtx lab, rtx body)
10579 rtx base = XEXP (XEXP (body, 0), 0);
10580 int idx, vlen = XVECLEN (body, 1);
10582 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10583 if (TARGET_GAS)
10584 fputs ("\t.begin_brtab\n", asm_out_file);
10585 for (idx = 0; idx < vlen; idx++)
10587 ASM_OUTPUT_ADDR_DIFF_ELT
10588 (asm_out_file,
10589 body,
10590 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10591 CODE_LABEL_NUMBER (base));
10593 if (TARGET_GAS)
10594 fputs ("\t.end_brtab\n", asm_out_file);
10597 #include "gt-pa.h"