/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2016 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "except.h"
#include "explow.h"
#include "expr.h"
#include "reload.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "opts.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  machine_mode store_mode;
  machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static inline rtx force_mode (machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
			     rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx_insn *);
static int compute_clrmem_length (rtx_insn *);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, machine_mode,
			    const_tree, bool);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
					      machine_mode, int *,
					      const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (machine_mode, rtx);
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}*, where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = const_double_from_real_value (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
{
  unsigned HOST_WIDE_INT x;

  x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
  return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
}

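/* Editor's worked example (illustration only, not in the original
   source; assumes a 64-bit HOST_WIDE_INT):

     ival = 0x7ffff800:  low 11 bits clear and bit 31 clear, so
       x == 0 and ldil can load the value.
     ival = 0xfffffffffffff800:  low 11 bits clear and bits 31..63
       all one, so x == (unsigned HOST_WIDE_INT) -1 << 31 and ldil
       also works.
     ival = 0x0000000080000000:  rejected; the value is positive as
       64 bits but negative as 32 bits, so x is nonzero and not the
       all-ones pattern.  */
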
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}

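/* Editor's worked example (illustration only, not in the original
   source):

     x = 0x76 (0b1110110):  lsb_mask = 2, x >> 4 = 7, so
       t = (7 + 2) & ~1 = 8, a power of two; indeed 0x76 is the
       sign-extended 5-bit field 11011 deposited one bit to the left.
     x = 0x41 (0b1000001):  lsb_mask = 1, x >> 4 = 4, so t = 5, not a
       power of two; no sign-extended 5-bit field produces this
       pattern, and it is correctly rejected.  */
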
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

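/* Editor's worked example (illustration only, not in the original
   source): the test accepts exactly the masks whose complement is a
   single contiguous run of ones.

     mask = 0xffffffffffff00ff (1...10...01...1):  ~mask = 0xff00;
       adding its lowest set bit (0x100) gives 0x10000, a power of
       two, so the mask is accepted.
     mask = 0xfffffffffffff0f0:  ~mask = 0x0f0f has two runs of ones;
       0x0f0f + 0x1 = 0x0f10 is not a power of two, so the mask is
       rejected.  */
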
/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

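/* Editor's worked example (illustration only, not in the original
   source): here the mask itself must be one contiguous run of ones.
   mask = 0x00f0 gives 0xf0 + 0x10 = 0x100, a power of two, so depi
   can set bits 4..7 in one insn; mask = 0x0090 gives 0x90 + 0x10 =
   0xa0, not a power of two, so it is rejected.  */
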
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx_insn *insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx_insn *insn;
      rtx tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, tmp, t1, t2, tp;
  rtx_insn *insn;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Helper for hppa_legitimize_address.  Given X, return true if it
   is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.

   These respectively represent canonical shift-add rtxs or scaled
   memory addresses.  */
static bool
mem_shadd_or_shadd_rtx_p (rtx x)
{
  return ((GET_CODE (x) == ASHIFT
	   || GET_CODE (x) == MULT)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && ((GET_CODE (x) == ASHIFT
	       && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
	      || (GET_CODE (x) == MULT
		  && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
}

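/* Editor's illustration (not in the original source): for a scaled
   access like a[i] with 4-byte elements, the incoming address is
   (plus (mult (reg i) (const_int 4)) (reg a)), and the equivalent
   canonical shift-add form is (plus (ashift (reg i) (const_int 2))
   (reg a)).  This predicate accepts the MULT with constants 2, 4 or
   8 and the ASHIFT with shift counts 1, 2 or 3.  */
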
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Note that the addresses passed into hppa_legitimize_address always
   come from a MEM, so we only have to match the MULT form on incoming
   addresses.  But to be future proof we also match the ASHIFT form.

   However, this routine always places those shift-add sequences into
   registers, so we have to generate the ASHIFT form as our output.

   Put X and Z into registers.  Then put the entire expression into
   a register.  */

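/* Editor's worked example of the rounding (illustration only, not in
   the original source): with a MODE_FLOAT reference and mask 0x1f,
   X + 100 has 100 & 0x1f = 4, which is < 16, so Y rounds down to 96
   and the access becomes memory ((X + 96) + 4).  A nearby X + 104
   likewise yields ((X + 96) + 8), so both references share the single
   base Z = X + 96 and CSE can reuse it.  */
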
static rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (mem_shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      /* If we were given a MULT, we must fix the constant
	 as we're going to create the ASHIFT form.  */
      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
      if (GET_CODE (XEXP (x, 0)) == MULT)
	shift_val = exact_log2 (shift_val);

      rtx reg1, reg2;
      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_ASHIFT (Pmode, reg2,
						      GEN_INT (shift_val)),
				      reg1));
    }

  /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
      && (mode == SFmode || mode == DFmode))
    {
      int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));

      /* If we were given a MULT, we must fix the constant
	 as we're going to create the ASHIFT form.  */
      if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
	shift_val = exact_log2 (shift_val);

      /* Try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_ASHIFT (Pmode,
					      XEXP (XEXP (XEXP (x, 0), 0), 0),
					      GEN_INT (shift_val)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));
	  val /= (1 << shift_val);

	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_ASHIFT (Pmode, reg1,
						    GEN_INT (shift_val)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode,
			    gen_rtx_PLUS (Pmode,
					  gen_rtx_ASHIFT (Pmode, reg2,
							  GEN_INT (shift_val)),
					  reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_ASHIFT (Pmode, reg1,
						      GEN_INT (shift_val)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (mem_shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

	      /* If we were given a MULT, we must fix the constant
		 as we're going to create the ASHIFT form.  */
	      if (GET_CODE (XEXP (x, 0)) == MULT)
		shift_val = exact_log2 (shift_val);

	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_ASHIFT (Pmode,
							 reg2,
							 GEN_INT (shift_val)),
					 reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
	    {
	      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

	      /* If we were given a MULT, we must fix the constant
		 as we're going to create the ASHIFT form.  */
	      if (GET_CODE (XEXP (x, 0)) == MULT)
		shift_val = exact_log2 (shift_val);

	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_ASHIFT (Pmode, regx2,
							 GEN_INT (shift_val)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}

1597 /* Emit insns to move operands[1] into operands[0].
1599 Return 1 if we have written out everything that needs to be done to
1600 do the move. Otherwise, return 0 and the caller will emit the move
1601 normally.
1603 Note SCRATCH_REG may not be in the proper mode depending on how it
1604 will be used. This routine is responsible for creating a new copy
1605 of SCRATCH_REG in the proper mode. */
1608 pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
1610 register rtx operand0 = operands[0];
1611 register rtx operand1 = operands[1];
1612 register rtx tem;
1614 /* We can only handle indexed addresses in the destination operand
1615 of floating point stores. Thus, we need to break out indexed
1616 addresses from the destination operand. */
1617 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1619 gcc_assert (can_create_pseudo_p ());
1621 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1622 operand0 = replace_equiv_address (operand0, tem);
1625 /* On targets with non-equivalent space registers, break out unscaled
1626 indexed addresses from the source operand before the final CSE.
1627 We have to do this because the REG_POINTER flag is not correctly
1628 carried through various optimization passes and CSE may substitute
1629 a pseudo without the pointer set for one with the pointer set. As
1630 a result, we loose various opportunities to create insns with
1631 unscaled indexed addresses. */
1632 if (!TARGET_NO_SPACE_REGS
1633 && !cse_not_expected
1634 && GET_CODE (operand1) == MEM
1635 && GET_CODE (XEXP (operand1, 0)) == PLUS
1636 && REG_P (XEXP (XEXP (operand1, 0), 0))
1637 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1638 operand1
1639 = replace_equiv_address (operand1,
1640 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1642 if (scratch_reg
1643 && reload_in_progress && GET_CODE (operand0) == REG
1644 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1645 operand0 = reg_equiv_mem (REGNO (operand0));
1646 else if (scratch_reg
1647 && reload_in_progress && GET_CODE (operand0) == SUBREG
1648 && GET_CODE (SUBREG_REG (operand0)) == REG
1649 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1651 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1652 the code which tracks sets/uses for delete_output_reload. */
1653 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1654 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1655 SUBREG_BYTE (operand0));
1656 operand0 = alter_subreg (&temp, true);
1659 if (scratch_reg
1660 && reload_in_progress && GET_CODE (operand1) == REG
1661 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1662 operand1 = reg_equiv_mem (REGNO (operand1));
1663 else if (scratch_reg
1664 && reload_in_progress && GET_CODE (operand1) == SUBREG
1665 && GET_CODE (SUBREG_REG (operand1)) == REG
1666 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1668 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1669 the code which tracks sets/uses for delete_output_reload. */
1670 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1671 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1672 SUBREG_BYTE (operand1));
1673 operand1 = alter_subreg (&temp, true);
1676 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1677 && ((tem = find_replacement (&XEXP (operand0, 0)))
1678 != XEXP (operand0, 0)))
1679 operand0 = replace_equiv_address (operand0, tem);
1681 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1682 && ((tem = find_replacement (&XEXP (operand1, 0)))
1683 != XEXP (operand1, 0)))
1684 operand1 = replace_equiv_address (operand1, tem);
1686 /* Handle secondary reloads for loads/stores of FP registers from
1687 REG+D addresses where D does not fit in 5 or 14 bits, including
1688 (subreg (mem (addr))) cases, and reloads for other unsupported
1689 memory operands. */
1690 if (scratch_reg
1691 && FP_REG_P (operand0)
1692 && (MEM_P (operand1)
1693 || (GET_CODE (operand1) == SUBREG
1694 && MEM_P (XEXP (operand1, 0)))))
1696 rtx op1 = operand1;
1698 if (GET_CODE (op1) == SUBREG)
1699 op1 = XEXP (op1, 0);
1701 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1703 if (!(TARGET_PA_20
1704 && !TARGET_ELF32
1705 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1706 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1708 /* SCRATCH_REG will hold an address and maybe the actual data.
1709 We want it in WORD_MODE regardless of what mode it was
1710 originally given to us. */
1711 scratch_reg = force_mode (word_mode, scratch_reg);
1713 /* D might not fit in 14 bits either; for such cases load D
1714 into scratch reg. */
1715 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1717 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1718 emit_move_insn (scratch_reg,
1719 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1720 Pmode,
1721 XEXP (XEXP (op1, 0), 0),
1722 scratch_reg));
1724 else
1725 emit_move_insn (scratch_reg, XEXP (op1, 0));
1726 emit_insn (gen_rtx_SET (operand0,
1727 replace_equiv_address (op1, scratch_reg)));
1728 return 1;
1731 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1732 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1733 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1735 /* Load memory address into SCRATCH_REG. */
1736 scratch_reg = force_mode (word_mode, scratch_reg);
1737 emit_move_insn (scratch_reg, XEXP (op1, 0));
1738 emit_insn (gen_rtx_SET (operand0,
1739 replace_equiv_address (op1, scratch_reg)));
1740 return 1;
1743 else if (scratch_reg
1744 && FP_REG_P (operand1)
1745 && (MEM_P (operand0)
1746 || (GET_CODE (operand0) == SUBREG
1747 && MEM_P (XEXP (operand0, 0)))))
1749 rtx op0 = operand0;
1751 if (GET_CODE (op0) == SUBREG)
1752 op0 = XEXP (op0, 0);
1754 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1756 if (!(TARGET_PA_20
1757 && !TARGET_ELF32
1758 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1759 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1761 /* SCRATCH_REG will hold an address and maybe the actual data.
1762 We want it in WORD_MODE regardless of what mode it was
1763 originally given to us. */
1764 scratch_reg = force_mode (word_mode, scratch_reg);
1766 /* D might not fit in 14 bits either; for such cases load D
1767 into scratch reg. */
1768 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1770 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1771 emit_move_insn (scratch_reg,
1772 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1773 Pmode,
1774 XEXP (XEXP (op0, 0), 0),
1775 scratch_reg));
1777 else
1778 emit_move_insn (scratch_reg, XEXP (op0, 0));
1779 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1780 operand1));
1781 return 1;
1784 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1785 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1786 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1788 /* Load memory address into SCRATCH_REG. */
1789 scratch_reg = force_mode (word_mode, scratch_reg);
1790 emit_move_insn (scratch_reg, XEXP (op0, 0));
1791 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1792 operand1));
1793 return 1;
1796 /* Handle secondary reloads for loads of FP registers from constant
1797 expressions by forcing the constant into memory. For the most part,
1798 this is only necessary for SImode and DImode.
1800 Use scratch_reg to hold the address of the memory location. */
1801 else if (scratch_reg
1802 && CONSTANT_P (operand1)
1803 && FP_REG_P (operand0))
1805 rtx const_mem, xoperands[2];
1807 if (operand1 == CONST0_RTX (mode))
1809 emit_insn (gen_rtx_SET (operand0, operand1));
1810 return 1;
1813 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1814 it in WORD_MODE regardless of what mode it was originally given
1815 to us. */
1816 scratch_reg = force_mode (word_mode, scratch_reg);
1818 /* Force the constant into memory and put the address of the
1819 memory location into scratch_reg. */
1820 const_mem = force_const_mem (mode, operand1);
1821 xoperands[0] = scratch_reg;
1822 xoperands[1] = XEXP (const_mem, 0);
1823 pa_emit_move_sequence (xoperands, Pmode, 0);
1825 /* Now load the destination register. */
1826 emit_insn (gen_rtx_SET (operand0,
1827 replace_equiv_address (const_mem, scratch_reg)));
1828 return 1;
1830 /* Handle secondary reloads for SAR. These occur when trying to load
1831 the SAR from memory or a constant. */
1832 else if (scratch_reg
1833 && GET_CODE (operand0) == REG
1834 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1835 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1836 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1838 /* D might not fit in 14 bits either; for such cases load D into
1839 scratch reg. */
1840 if (GET_CODE (operand1) == MEM
1841 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1843 /* We are reloading the address into the scratch register, so we
1844 want to make sure the scratch register is a full register. */
1845 scratch_reg = force_mode (word_mode, scratch_reg);
1847 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1848 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1849 0)),
1850 Pmode,
1851 XEXP (XEXP (operand1, 0),
1852 0),
1853 scratch_reg));
1855 /* Now we are going to load the scratch register from memory,
1856 we want to load it in the same width as the original MEM,
1857 which must be the same as the width of the ultimate destination,
1858 OPERAND0. */
1859 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1861 emit_move_insn (scratch_reg,
1862 replace_equiv_address (operand1, scratch_reg));
1864 else
1866 /* We want to load the scratch register using the same mode as
1867 the ultimate destination. */
1868 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1870 emit_move_insn (scratch_reg, operand1);
1873 /* And emit the insn to set the ultimate destination. We know that
1874 the scratch register has the same mode as the destination at this
1875 point. */
1876 emit_move_insn (operand0, scratch_reg);
1877 return 1;
1880 /* Handle the most common case: storing into a register. */
1881 if (register_operand (operand0, mode))
1883 /* Legitimize TLS symbol references. This happens for references
1884 that aren't a legitimate constant. */
1885 if (PA_SYMBOL_REF_TLS_P (operand1))
1886 operand1 = legitimize_tls_address (operand1);
1888 if (register_operand (operand1, mode)
1889 || (GET_CODE (operand1) == CONST_INT
1890 && pa_cint_ok_for_move (UINTVAL (operand1)))
1891 || (operand1 == CONST0_RTX (mode))
1892 || (GET_CODE (operand1) == HIGH
1893 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1894 /* Only `general_operands' can come here, so MEM is ok. */
1895 || GET_CODE (operand1) == MEM)
1897 /* Various sets are created during RTL generation which don't
1898 have the REG_POINTER flag correctly set. After the CSE pass,
1899 instruction recognition can fail if we don't consistently
1900 set this flag when performing register copies. This should
1901 also improve the opportunities for creating insns that use
1902 unscaled indexing. */
1903 if (REG_P (operand0) && REG_P (operand1))
1905 if (REG_POINTER (operand1)
1906 && !REG_POINTER (operand0)
1907 && !HARD_REGISTER_P (operand0))
1908 copy_reg_pointer (operand0, operand1);
1911 /* When MEMs are broken out, the REG_POINTER flag doesn't
1912 get set. In some cases, we can set the REG_POINTER flag
1913 from the declaration for the MEM. */
1914 if (REG_P (operand0)
1915 && GET_CODE (operand1) == MEM
1916 && !REG_POINTER (operand0))
1918 tree decl = MEM_EXPR (operand1);
1920 /* Set the register pointer flag and register alignment
1921 if the declaration for this memory reference is a
1922 pointer type. */
1923 if (decl)
1925 tree type;
1927 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1928 tree operand 1. */
1929 if (TREE_CODE (decl) == COMPONENT_REF)
1930 decl = TREE_OPERAND (decl, 1);
1932 type = TREE_TYPE (decl);
1933 type = strip_array_types (type);
1935 if (POINTER_TYPE_P (type))
1936 mark_reg_pointer (operand0, BITS_PER_UNIT);
1940 emit_insn (gen_rtx_SET (operand0, operand1));
1941 return 1;
1944 else if (GET_CODE (operand0) == MEM)
1946 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1947 && !(reload_in_progress || reload_completed))
1949 rtx temp = gen_reg_rtx (DFmode);
1951 emit_insn (gen_rtx_SET (temp, operand1));
1952 emit_insn (gen_rtx_SET (operand0, temp));
1953 return 1;
1955 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1957 /* Run this case quickly. */
1958 emit_insn (gen_rtx_SET (operand0, operand1));
1959 return 1;
1961 if (! (reload_in_progress || reload_completed))
1963 operands[0] = validize_mem (operand0);
1964 operands[1] = operand1 = force_reg (mode, operand1);
1968 /* Simplify the source if we need to.
1969 Note we do have to handle function labels here, even though we do
1970 not consider them legitimate constants. Loop optimizations can
1971 call the emit_move_xxx routines with one as a source.  */
1972 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1973 || (GET_CODE (operand1) == HIGH
1974 && symbolic_operand (XEXP (operand1, 0), mode))
1975 || function_label_operand (operand1, VOIDmode)
1976 || tls_referenced_p (operand1))
1978 int ishighonly = 0;
1980 if (GET_CODE (operand1) == HIGH)
1982 ishighonly = 1;
1983 operand1 = XEXP (operand1, 0);
1985 if (symbolic_operand (operand1, mode))
1987 /* Argh. The assembler and linker can't handle arithmetic
1988 involving plabels.
1990 So we force the plabel into memory, load operand0 from
1991 the memory location, then add in the constant part. */
1992 if ((GET_CODE (operand1) == CONST
1993 && GET_CODE (XEXP (operand1, 0)) == PLUS
1994 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1995 VOIDmode))
1996 || function_label_operand (operand1, VOIDmode))
1998 rtx temp, const_part;
2000 /* Figure out what (if any) scratch register to use. */
2001 if (reload_in_progress || reload_completed)
2003 scratch_reg = scratch_reg ? scratch_reg : operand0;
2004 /* SCRATCH_REG will hold an address and maybe the actual
2005 data. We want it in WORD_MODE regardless of what mode it
2006 was originally given to us. */
2007 scratch_reg = force_mode (word_mode, scratch_reg);
2009 else if (flag_pic)
2010 scratch_reg = gen_reg_rtx (Pmode);
2012 if (GET_CODE (operand1) == CONST)
2014 /* Save away the constant part of the expression. */
2015 const_part = XEXP (XEXP (operand1, 0), 1);
2016 gcc_assert (GET_CODE (const_part) == CONST_INT);
2018 /* Force the function label into memory. */
2019 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2021 else
2023 /* No constant part. */
2024 const_part = NULL_RTX;
2026 /* Force the function label into memory. */
2027 temp = force_const_mem (mode, operand1);
2031 /* Get the address of the memory location. PIC-ify it if
2032 necessary. */
2033 temp = XEXP (temp, 0);
2034 if (flag_pic)
2035 temp = legitimize_pic_address (temp, mode, scratch_reg);
2037 /* Put the address of the memory location into our destination
2038 register. */
2039 operands[1] = temp;
2040 pa_emit_move_sequence (operands, mode, scratch_reg);
2042 /* Now load from the memory location into our destination
2043 register. */
2044 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2045 pa_emit_move_sequence (operands, mode, scratch_reg);
2047 /* And add back in the constant part. */
2048 if (const_part != NULL_RTX)
2049 expand_inc (operand0, const_part);
2051 return 1;
2054 if (flag_pic)
2056 rtx_insn *insn;
2057 rtx temp;
2059 if (reload_in_progress || reload_completed)
2061 temp = scratch_reg ? scratch_reg : operand0;
2062 /* TEMP will hold an address and maybe the actual
2063 data. We want it in WORD_MODE regardless of what mode it
2064 was originally given to us. */
2065 temp = force_mode (word_mode, temp);
2067 else
2068 temp = gen_reg_rtx (Pmode);
2070 /* Force (const (plus (symbol) (const_int))) to memory
2071 if the const_int will not fit in 14 bits. Although
2072 this requires a relocation, the instruction sequence
2073 needed to load the value is shorter. */
2074 if (GET_CODE (operand1) == CONST
2075 && GET_CODE (XEXP (operand1, 0)) == PLUS
2076 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2077 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2079 rtx x, m = force_const_mem (mode, operand1);
2081 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2082 x = replace_equiv_address (m, x);
2083 insn = emit_move_insn (operand0, x);
2085 else
2087 operands[1] = legitimize_pic_address (operand1, mode, temp);
2088 if (REG_P (operand0) && REG_P (operands[1]))
2089 copy_reg_pointer (operand0, operands[1]);
2090 insn = emit_move_insn (operand0, operands[1]);
2093 /* Put a REG_EQUAL note on this insn. */
2094 set_unique_reg_note (insn, REG_EQUAL, operand1);
2096 /* On the HPPA, references to data space are supposed to use dp,
2097 register 27, but showing it in the RTL inhibits various cse
2098 and loop optimizations. */
2099 else
2101 rtx temp, set;
2103 if (reload_in_progress || reload_completed)
2105 temp = scratch_reg ? scratch_reg : operand0;
2106 /* TEMP will hold an address and maybe the actual
2107 data. We want it in WORD_MODE regardless of what mode it
2108 was originally given to us. */
2109 temp = force_mode (word_mode, temp);
2111 else
2112 temp = gen_reg_rtx (mode);
2114 /* Loading a SYMBOL_REF into a register makes that register
2115 safe to be used as the base in an indexed address.
2117 Don't mark hard registers though. That loses. */
2118 if (GET_CODE (operand0) == REG
2119 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2120 mark_reg_pointer (operand0, BITS_PER_UNIT);
2121 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2122 mark_reg_pointer (temp, BITS_PER_UNIT);
2124 if (ishighonly)
2125 set = gen_rtx_SET (operand0, temp);
2126 else
2127 set = gen_rtx_SET (operand0,
2128 gen_rtx_LO_SUM (mode, temp, operand1));
2130 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2131 emit_insn (set);
2134 return 1;
2136 else if (tls_referenced_p (operand1))
2138 rtx tmp = operand1;
2139 rtx addend = NULL;
2141 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2143 addend = XEXP (XEXP (tmp, 0), 1);
2144 tmp = XEXP (XEXP (tmp, 0), 0);
2147 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2148 tmp = legitimize_tls_address (tmp);
2149 if (addend)
2151 tmp = gen_rtx_PLUS (mode, tmp, addend);
2152 tmp = force_operand (tmp, operands[0]);
2154 operands[1] = tmp;
2156 else if (GET_CODE (operand1) != CONST_INT
2157 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2159 rtx temp;
2160 rtx_insn *insn;
2161 rtx op1 = operand1;
2162 HOST_WIDE_INT value = 0;
2163 HOST_WIDE_INT insv = 0;
2164 int insert = 0;
2166 if (GET_CODE (operand1) == CONST_INT)
2167 value = INTVAL (operand1);
2169 if (TARGET_64BIT
2170 && GET_CODE (operand1) == CONST_INT
2171 && HOST_BITS_PER_WIDE_INT > 32
2172 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2174 HOST_WIDE_INT nval;
2176 /* Extract the low order 32 bits of the value and sign extend.
2177 If the new value is the same as the original value, we can
2178 use the original value as-is.  If the new value is
2179 different, we use it and insert the most-significant 32-bits
2180 of the original value into the final result. */
2181 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2182 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2183 if (value != nval)
2185 #if HOST_BITS_PER_WIDE_INT > 32
2186 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2187 #endif
2188 insert = 1;
2189 value = nval;
2190 operand1 = GEN_INT (nval);
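/* Worked example (illustrative, not from the original source): for
   value == 0x123456789 the low-order 32 bits sign extend to
   nval == 0x23456789.  Since value != nval, insv becomes
   value >> 32 == 0x1, the low part is loaded from nval, and the
   high 32 bits are inserted afterwards by the INSERT code below.  */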
2194 if (reload_in_progress || reload_completed)
2195 temp = scratch_reg ? scratch_reg : operand0;
2196 else
2197 temp = gen_reg_rtx (mode);
2199 /* We don't directly split DImode constants on 32-bit targets
2200 because PLUS uses an 11-bit immediate and the insn sequence
2201 generated is not as efficient as the one using HIGH/LO_SUM. */
2202 if (GET_CODE (operand1) == CONST_INT
2203 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2204 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2205 && !insert)
2207 /* Directly break constant into high and low parts. This
2208 provides better optimization opportunities because various
2209 passes recognize constants split with PLUS but not LO_SUM.
2210 We use a 14-bit signed low part except when the addition
2211 of 0x4000 to the high part might change the sign of the
2212 high part. */
2213 HOST_WIDE_INT low = value & 0x3fff;
2214 HOST_WIDE_INT high = value & ~ 0x3fff;
2216 if (low >= 0x2000)
2218 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2219 high += 0x2000;
2220 else
2221 high += 0x4000;
2224 low = value - high;
2226 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2227 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
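/* Worked example (illustrative, not from the original source): for
   value == 0x12347678, low == 0x3678 >= 0x2000, so high is bumped
   from 0x12344000 to 0x12348000 and low becomes
   0x12347678 - 0x12348000 == -0x988, which still fits in 14 signed
   bits; the high-part load plus the PLUS of low reconstructs the
   original value exactly.  */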
2229 else
2231 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2232 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2235 insn = emit_move_insn (operands[0], operands[1]);
2237 /* Now insert the most significant 32 bits of the value
2238 into the register. When we don't have a second register
2239 available, it could take up to nine instructions to load
2240 a 64-bit integer constant. Prior to reload, we force
2241 constants that would take more than three instructions
2242 to load to the constant pool. During and after reload,
2243 we have to handle all possible values. */
2244 if (insert)
2246 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2247 register and the value to be inserted is outside the
2248 range that can be loaded with three depdi instructions. */
2249 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2251 operand1 = GEN_INT (insv);
2253 emit_insn (gen_rtx_SET (temp,
2254 gen_rtx_HIGH (mode, operand1)));
2255 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2256 if (mode == DImode)
2257 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2258 const0_rtx, temp));
2259 else
2260 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2261 const0_rtx, temp));
2263 else
2265 int len = 5, pos = 27;
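/* Illustrative trace (not from the original source): for insv == 1
   the first pass computes v5 == 1 with sign == 0; the left-extension
   loop then absorbs all remaining zero bits, leaving len == 32 and
   pos == 0, so a single 32-bit-wide deposit covers the entire upper
   half in one instruction.  */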
2267 /* Insert the bits using the depdi instruction. */
2268 while (pos >= 0)
2270 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2271 HOST_WIDE_INT sign = v5 < 0;
2273 /* Left extend the insertion. */
2274 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2275 while (pos > 0 && (insv & 1) == sign)
2277 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2278 len += 1;
2279 pos -= 1;
2282 if (mode == DImode)
2283 insn = emit_insn (gen_insvdi (operand0,
2284 GEN_INT (len),
2285 GEN_INT (pos),
2286 GEN_INT (v5)));
2287 else
2288 insn = emit_insn (gen_insvsi (operand0,
2289 GEN_INT (len),
2290 GEN_INT (pos),
2291 GEN_INT (v5)));
2293 len = pos > 0 && pos < 5 ? pos : 5;
2294 pos -= len;
2299 set_unique_reg_note (insn, REG_EQUAL, op1);
2301 return 1;
2304 /* Now have insn-emit do whatever it normally does. */
2305 return 0;
2308 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2309 it will need a link/runtime reloc). */
2311 int
2312 pa_reloc_needed (tree exp)
2314 int reloc = 0;
2316 switch (TREE_CODE (exp))
2318 case ADDR_EXPR:
2319 return 1;
2321 case POINTER_PLUS_EXPR:
2322 case PLUS_EXPR:
2323 case MINUS_EXPR:
2324 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2325 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2326 break;
2328 CASE_CONVERT:
2329 case NON_LVALUE_EXPR:
2330 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2331 break;
2333 case CONSTRUCTOR:
2335 tree value;
2336 unsigned HOST_WIDE_INT ix;
2338 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2339 if (value)
2340 reloc |= pa_reloc_needed (value);
2342 break;
2344 case ERROR_MARK:
2345 break;
2347 default:
2348 break;
2350 return reloc;
2354 /* Return the best assembler insn template
2355 for moving operands[1] into operands[0] as a fullword. */
2356 const char *
2357 pa_singlemove_string (rtx *operands)
2359 HOST_WIDE_INT intval;
2361 if (GET_CODE (operands[0]) == MEM)
2362 return "stw %r1,%0";
2363 if (GET_CODE (operands[1]) == MEM)
2364 return "ldw %1,%0";
2365 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2367 long i;
2369 gcc_assert (GET_MODE (operands[1]) == SFmode);
2371 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2372 bit pattern. */
2373 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2375 operands[1] = GEN_INT (i);
2376 /* Fall through to CONST_INT case. */
2378 if (GET_CODE (operands[1]) == CONST_INT)
2380 intval = INTVAL (operands[1]);
2382 if (VAL_14_BITS_P (intval))
2383 return "ldi %1,%0";
2384 else if ((intval & 0x7ff) == 0)
2385 return "ldil L'%1,%0";
2386 else if (pa_zdepi_cint_p (intval))
2387 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2388 else
2389 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2391 return "copy %1,%0";
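/* Illustrative examples (not from the original source): a CONST_INT
   of 42 fits in 14 bits and yields "ldi 42,%0"; 0x7f000 has its low
   11 bits clear and yields "ldil L'0x7f000,%0"; a value that is
   neither, such as 0x12345, falls through to the two-instruction
   "ldil L'%1,%0\n\tldo R'%1(%0),%0" sequence.  */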
2395 /* Compute position (in OP[1]) and width (in OP[2])
2396 useful for copying IMM to a register using the zdepi
2397 instructions. Store the immediate value to insert in OP[0]. */
2398 static void
2399 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2401 int lsb, len;
2403 /* Find the least significant set bit in IMM. */
2404 for (lsb = 0; lsb < 32; lsb++)
2406 if ((imm & 1) != 0)
2407 break;
2408 imm >>= 1;
2411 /* Choose variants based on *sign* of the 5-bit field. */
2412 if ((imm & 0x10) == 0)
2413 len = (lsb <= 28) ? 4 : 32 - lsb;
2414 else
2416 /* Find the width of the bitstring in IMM. */
2417 for (len = 5; len < 32 - lsb; len++)
2419 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2420 break;
2423 /* Sign extend IMM as a 5-bit value. */
2424 imm = (imm & 0xf) - 0x10;
2427 op[0] = imm;
2428 op[1] = 31 - lsb;
2429 op[2] = len;
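/* Worked example (illustrative, not from the original source):

     unsigned op[3];
     compute_zdepwi_operands (0x3c0, op);

   finds lsb == 6, and since the resulting 5-bit field 0xf is
   non-negative, len == 4; the results op[0] == 0xf, op[1] == 25 and
   op[2] == 4 correspond to "zdepi 15,25,4", which deposits
   0xf << 6 == 0x3c0.  */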
2432 /* Compute position (in OP[1]) and width (in OP[2])
2433 useful for copying IMM to a register using the depdi,z
2434 instructions. Store the immediate value to insert in OP[0]. */
2436 static void
2437 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2439 int lsb, len, maxlen;
2441 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2443 /* Find the least significant set bit in IMM. */
2444 for (lsb = 0; lsb < maxlen; lsb++)
2446 if ((imm & 1) != 0)
2447 break;
2448 imm >>= 1;
2451 /* Choose variants based on *sign* of the 5-bit field. */
2452 if ((imm & 0x10) == 0)
2453 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2454 else
2456 /* Find the width of the bitstring in IMM. */
2457 for (len = 5; len < maxlen - lsb; len++)
2459 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2460 break;
2463 /* Extend length if host is narrow and IMM is negative. */
2464 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2465 len += 32;
2467 /* Sign extend IMM as a 5-bit value. */
2468 imm = (imm & 0xf) - 0x10;
2471 op[0] = imm;
2472 op[1] = 63 - lsb;
2473 op[2] = len;
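/* The 64-bit variant works the same way with 64-bit bit positions;
   e.g. (illustrative, not from the original source) imm == 0x3c0
   again gives op[0] == 0xf, op[1] == 63 - 6 == 57 and op[2] == 4,
   a "depdi,z 15,57,4" deposit.  */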
2476 /* Output assembler code to perform a doubleword move insn
2477 with operands OPERANDS. */
2479 const char *
2480 pa_output_move_double (rtx *operands)
2482 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2483 rtx latehalf[2];
2484 rtx addreg0 = 0, addreg1 = 0;
2485 int highonly = 0;
2487 /* First classify both operands. */
2489 if (REG_P (operands[0]))
2490 optype0 = REGOP;
2491 else if (offsettable_memref_p (operands[0]))
2492 optype0 = OFFSOP;
2493 else if (GET_CODE (operands[0]) == MEM)
2494 optype0 = MEMOP;
2495 else
2496 optype0 = RNDOP;
2498 if (REG_P (operands[1]))
2499 optype1 = REGOP;
2500 else if (CONSTANT_P (operands[1]))
2501 optype1 = CNSTOP;
2502 else if (offsettable_memref_p (operands[1]))
2503 optype1 = OFFSOP;
2504 else if (GET_CODE (operands[1]) == MEM)
2505 optype1 = MEMOP;
2506 else
2507 optype1 = RNDOP;
2509 /* Check for the cases that the operand constraints are not
2510 supposed to allow to happen. */
2511 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2513 /* Handle copies between general and floating registers. */
2515 if (optype0 == REGOP && optype1 == REGOP
2516 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2518 if (FP_REG_P (operands[0]))
2520 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2521 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2522 return "{fldds|fldd} -16(%%sp),%0";
2524 else
2526 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2527 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2528 return "{ldws|ldw} -12(%%sp),%R0";
2532 /* Handle auto decrementing and incrementing loads and stores
2533 specifically, since the structure of the function doesn't work
2534 for them without major modification.  Do it better when we teach
2535 this port about the general inc/dec addressing of PA.
2536 (This was written by tege. Chide him if it doesn't work.) */
2538 if (optype0 == MEMOP)
2540 /* We have to output the address syntax ourselves, since print_operand
2541 doesn't deal with the addresses we want to use. Fix this later. */
2543 rtx addr = XEXP (operands[0], 0);
2544 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2546 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2548 operands[0] = XEXP (addr, 0);
2549 gcc_assert (GET_CODE (operands[1]) == REG
2550 && GET_CODE (operands[0]) == REG);
2552 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2554 /* No overlap between high target register and address
2555 register. (We do this in a non-obvious way to
2556 save a register file writeback) */
2557 if (GET_CODE (addr) == POST_INC)
2558 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2559 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2561 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2563 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2565 operands[0] = XEXP (addr, 0);
2566 gcc_assert (GET_CODE (operands[1]) == REG
2567 && GET_CODE (operands[0]) == REG);
2569 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2570 /* No overlap between high target register and address
2571 register. (We do this in a non-obvious way to save a
2572 register file writeback) */
2573 if (GET_CODE (addr) == PRE_INC)
2574 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2575 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2578 if (optype1 == MEMOP)
2580 /* We have to output the address syntax ourselves, since print_operand
2581 doesn't deal with the addresses we want to use. Fix this later. */
2583 rtx addr = XEXP (operands[1], 0);
2584 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2586 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2588 operands[1] = XEXP (addr, 0);
2589 gcc_assert (GET_CODE (operands[0]) == REG
2590 && GET_CODE (operands[1]) == REG);
2592 if (!reg_overlap_mentioned_p (high_reg, addr))
2594 /* No overlap between high target register and address
2595 register. (We do this in a non-obvious way to
2596 save a register file writeback) */
2597 if (GET_CODE (addr) == POST_INC)
2598 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2599 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2601 else
2603 /* This is an undefined situation. We should load into the
2604 address register *and* update that register. Probably
2605 we don't need to handle this at all. */
2606 if (GET_CODE (addr) == POST_INC)
2607 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2608 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2611 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2613 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2615 operands[1] = XEXP (addr, 0);
2616 gcc_assert (GET_CODE (operands[0]) == REG
2617 && GET_CODE (operands[1]) == REG);
2619 if (!reg_overlap_mentioned_p (high_reg, addr))
2621 /* No overlap between high target register and address
2622 register. (We do this in a non-obvious way to
2623 save a register file writeback) */
2624 if (GET_CODE (addr) == PRE_INC)
2625 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2626 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2628 else
2630 /* This is an undefined situation. We should load into the
2631 address register *and* update that register. Probably
2632 we don't need to handle this at all. */
2633 if (GET_CODE (addr) == PRE_INC)
2634 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2635 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2638 else if (GET_CODE (addr) == PLUS
2639 && GET_CODE (XEXP (addr, 0)) == MULT)
2641 rtx xoperands[4];
2643 /* Load address into left half of destination register. */
2644 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2645 xoperands[1] = XEXP (addr, 1);
2646 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2647 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2648 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2649 xoperands);
2650 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2652 else if (GET_CODE (addr) == PLUS
2653 && REG_P (XEXP (addr, 0))
2654 && REG_P (XEXP (addr, 1)))
2656 rtx xoperands[3];
2658 /* Load address into left half of destination register. */
2659 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2660 xoperands[1] = XEXP (addr, 0);
2661 xoperands[2] = XEXP (addr, 1);
2662 output_asm_insn ("{addl|add,l} %1,%2,%0",
2663 xoperands);
2664 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2668 /* If an operand is an unoffsettable memory ref, find a register
2669 we can increment temporarily to make it refer to the second word. */
2671 if (optype0 == MEMOP)
2672 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2674 if (optype1 == MEMOP)
2675 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2677 /* Ok, we can do one word at a time.
2678 Normally we do the low-numbered word first.
2680 In either case, set up in LATEHALF the operands to use
2681 for the high-numbered word and in some cases alter the
2682 operands in OPERANDS to be suitable for the low-numbered word. */
2684 if (optype0 == REGOP)
2685 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2686 else if (optype0 == OFFSOP)
2687 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2688 else
2689 latehalf[0] = operands[0];
2691 if (optype1 == REGOP)
2692 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2693 else if (optype1 == OFFSOP)
2694 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2695 else if (optype1 == CNSTOP)
2697 if (GET_CODE (operands[1]) == HIGH)
2699 operands[1] = XEXP (operands[1], 0);
2700 highonly = 1;
2702 split_double (operands[1], &operands[1], &latehalf[1]);
2704 else
2705 latehalf[1] = operands[1];
2707 /* If the first move would clobber the source of the second one,
2708 do them in the other order.
2710 This can happen in two cases:
2712 mem -> register where the first half of the destination register
2713 is the same register used in the memory's address. Reload
2714 can create such insns.
2716 mem in this case will be either register indirect or register
2717 indirect plus a valid offset.
2719 register -> register move where REGNO(dst) == REGNO(src + 1)
2720 someone (Tim/Tege?) claimed this can happen for parameter loads.
2722 Handle mem -> register case first. */
2723 if (optype0 == REGOP
2724 && (optype1 == MEMOP || optype1 == OFFSOP)
2725 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2727 /* Do the late half first. */
2728 if (addreg1)
2729 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2730 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2732 /* Then clobber. */
2733 if (addreg1)
2734 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2735 return pa_singlemove_string (operands);
2738 /* Now handle register -> register case. */
2739 if (optype0 == REGOP && optype1 == REGOP
2740 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2742 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2743 return pa_singlemove_string (operands);
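/* Illustrative examples (not from the original source): loading
   (reg:DI 4) from (mem:DI (reg:SI 4)) must do the late half first,
   "ldw 4(%r4),%r5" then "ldw 0(%r4),%r4", or the base register would
   be clobbered; likewise a copy of the r4/r5 pair into r5/r6 must
   move r5 into r6 before r4 overwrites r5.  */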
2746 /* Normal case: do the two words, low-numbered first. */
2748 output_asm_insn (pa_singlemove_string (operands), operands);
2750 /* Make any unoffsettable addresses point at high-numbered word. */
2751 if (addreg0)
2752 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2753 if (addreg1)
2754 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2756 /* Do high-numbered word. */
2757 if (highonly)
2758 output_asm_insn ("ldil L'%1,%0", latehalf);
2759 else
2760 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2762 /* Undo the adds we just did. */
2763 if (addreg0)
2764 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2765 if (addreg1)
2766 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2768 return "";
2771 const char *
2772 pa_output_fp_move_double (rtx *operands)
2774 if (FP_REG_P (operands[0]))
2776 if (FP_REG_P (operands[1])
2777 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2778 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2779 else
2780 output_asm_insn ("fldd%F1 %1,%0", operands);
2782 else if (FP_REG_P (operands[1]))
2784 output_asm_insn ("fstd%F0 %1,%0", operands);
2786 else
2788 rtx xoperands[2];
2790 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2792 /* This is a pain. You have to be prepared to deal with an
2793 arbitrary address here including pre/post increment/decrement.
2795 So avoid this in the MD. */
2796 gcc_assert (GET_CODE (operands[0]) == REG);
2798 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2799 xoperands[0] = operands[0];
2800 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2802 return "";
2805 /* Return a REG that occurs in ADDR with coefficient 1.
2806 ADDR can be effectively incremented by incrementing REG. */
2808 static rtx
2809 find_addr_reg (rtx addr)
2811 while (GET_CODE (addr) == PLUS)
2813 if (GET_CODE (XEXP (addr, 0)) == REG)
2814 addr = XEXP (addr, 0);
2815 else if (GET_CODE (XEXP (addr, 1)) == REG)
2816 addr = XEXP (addr, 1);
2817 else if (CONSTANT_P (XEXP (addr, 0)))
2818 addr = XEXP (addr, 1);
2819 else if (CONSTANT_P (XEXP (addr, 1)))
2820 addr = XEXP (addr, 0);
2821 else
2822 gcc_unreachable ();
2824 gcc_assert (GET_CODE (addr) == REG);
2825 return addr;
2828 /* Emit code to perform a block move.
2830 OPERANDS[0] is the destination pointer as a REG, clobbered.
2831 OPERANDS[1] is the source pointer as a REG, clobbered.
2832 OPERANDS[2] is a register for temporary storage.
2833 OPERANDS[3] is a register for temporary storage.
2834 OPERANDS[4] is the size as a CONST_INT
2835 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2836 OPERANDS[6] is another temporary register. */
2838 const char *
2839 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2841 int align = INTVAL (operands[5]);
2842 unsigned long n_bytes = INTVAL (operands[4]);
2844 /* We can't move more than a word at a time because the PA
2845 has no integer move insns longer than a word.  (Could use fp mem ops?) */
2846 if (align > (TARGET_64BIT ? 8 : 4))
2847 align = (TARGET_64BIT ? 8 : 4);
2849 /* Note that we know each loop below will execute at least twice
2850 (else we would have open-coded the copy). */
2851 switch (align)
2853 case 8:
2854 /* Pre-adjust the loop counter. */
2855 operands[4] = GEN_INT (n_bytes - 16);
2856 output_asm_insn ("ldi %4,%2", operands);
2858 /* Copying loop. */
2859 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2860 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2861 output_asm_insn ("std,ma %3,8(%0)", operands);
2862 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2863 output_asm_insn ("std,ma %6,8(%0)", operands);
2865 /* Handle the residual.  There could be up to 15 bytes of
2866 residual to copy! */
2867 if (n_bytes % 16 != 0)
2869 operands[4] = GEN_INT (n_bytes % 8);
2870 if (n_bytes % 16 >= 8)
2871 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2872 if (n_bytes % 8 != 0)
2873 output_asm_insn ("ldd 0(%1),%6", operands);
2874 if (n_bytes % 16 >= 8)
2875 output_asm_insn ("std,ma %3,8(%0)", operands);
2876 if (n_bytes % 8 != 0)
2877 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2879 return "";
2881 case 4:
2882 /* Pre-adjust the loop counter. */
2883 operands[4] = GEN_INT (n_bytes - 8);
2884 output_asm_insn ("ldi %4,%2", operands);
2886 /* Copying loop. */
2887 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2888 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2889 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2890 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2891 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2893 /* Handle the residual. There could be up to 7 bytes of
2894 residual to copy! */
2895 if (n_bytes % 8 != 0)
2897 operands[4] = GEN_INT (n_bytes % 4);
2898 if (n_bytes % 8 >= 4)
2899 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2900 if (n_bytes % 4 != 0)
2901 output_asm_insn ("ldw 0(%1),%6", operands);
2902 if (n_bytes % 8 >= 4)
2903 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2904 if (n_bytes % 4 != 0)
2905 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2907 return "";
2909 case 2:
2910 /* Pre-adjust the loop counter. */
2911 operands[4] = GEN_INT (n_bytes - 4);
2912 output_asm_insn ("ldi %4,%2", operands);
2914 /* Copying loop. */
2915 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2916 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2917 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2918 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2919 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2921 /* Handle the residual. */
2922 if (n_bytes % 4 != 0)
2924 if (n_bytes % 4 >= 2)
2925 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2926 if (n_bytes % 2 != 0)
2927 output_asm_insn ("ldb 0(%1),%6", operands);
2928 if (n_bytes % 4 >= 2)
2929 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2930 if (n_bytes % 2 != 0)
2931 output_asm_insn ("stb %6,0(%0)", operands);
2933 return "";
2935 case 1:
2936 /* Pre-adjust the loop counter. */
2937 operands[4] = GEN_INT (n_bytes - 2);
2938 output_asm_insn ("ldi %4,%2", operands);
2940 /* Copying loop. */
2941 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2942 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2943 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2944 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2945 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2947 /* Handle the residual. */
2948 if (n_bytes % 2 != 0)
2950 output_asm_insn ("ldb 0(%1),%3", operands);
2951 output_asm_insn ("stb %3,0(%0)", operands);
2953 return "";
2955 default:
2956 gcc_unreachable ();
2960 /* Count the number of insns necessary to handle this block move.
2962 Basic structure is the same as emit_block_move, except that we
2963 count insns rather than emit them. */
2965 static int
2966 compute_movmem_length (rtx_insn *insn)
2968 rtx pat = PATTERN (insn);
2969 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2970 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2971 unsigned int n_insns = 0;
2973 /* We can't move more than a word at a time because the PA
2974 has no integer move insns longer than a word.  (Could use fp mem ops?) */
2975 if (align > (TARGET_64BIT ? 8 : 4))
2976 align = (TARGET_64BIT ? 8 : 4);
2978 /* The basic copying loop. */
2979 n_insns = 6;
2981 /* Residuals. */
2982 if (n_bytes % (2 * align) != 0)
2984 if ((n_bytes % (2 * align)) >= align)
2985 n_insns += 2;
2987 if ((n_bytes % align) != 0)
2988 n_insns += 2;
2991 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2992 return n_insns * 4;
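/* Worked example (illustrative, not from the original source): for a
   14-byte copy with 4-byte alignment, the loop above costs 6 insns,
   14 % 8 == 6 >= 4 adds a word copy (2 insns), and 14 % 4 == 2 adds
   the byte-tail load/store (2 insns): 10 insns, so the length is 40
   bytes -- matching the sequence pa_output_block_move emits.  */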
2995 /* Emit code to perform a block clear.
2997 OPERANDS[0] is the destination pointer as a REG, clobbered.
2998 OPERANDS[1] is a register for temporary storage.
2999 OPERANDS[2] is the size as a CONST_INT
3000 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3002 const char *
3003 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3005 int align = INTVAL (operands[3]);
3006 unsigned long n_bytes = INTVAL (operands[2]);
3008 /* We can't clear more than a word at a time because the PA
3009 has no integer move insns longer than a word. */
3010 if (align > (TARGET_64BIT ? 8 : 4))
3011 align = (TARGET_64BIT ? 8 : 4);
3013 /* Note that we know each loop below will execute at least twice
3014 (else we would have open-coded the clear). */
3015 switch (align)
3017 case 8:
3018 /* Pre-adjust the loop counter. */
3019 operands[2] = GEN_INT (n_bytes - 16);
3020 output_asm_insn ("ldi %2,%1", operands);
3022 /* Loop. */
3023 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3024 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3025 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3027 /* Handle the residual.  There could be up to 15 bytes of
3028 residual to clear! */
3029 if (n_bytes % 16 != 0)
3031 operands[2] = GEN_INT (n_bytes % 8);
3032 if (n_bytes % 16 >= 8)
3033 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3034 if (n_bytes % 8 != 0)
3035 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3037 return "";
3039 case 4:
3040 /* Pre-adjust the loop counter. */
3041 operands[2] = GEN_INT (n_bytes - 8);
3042 output_asm_insn ("ldi %2,%1", operands);
3044 /* Loop. */
3045 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3046 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3047 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3049 /* Handle the residual.  There could be up to 7 bytes of
3050 residual to clear! */
3051 if (n_bytes % 8 != 0)
3053 operands[2] = GEN_INT (n_bytes % 4);
3054 if (n_bytes % 8 >= 4)
3055 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3056 if (n_bytes % 4 != 0)
3057 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3059 return "";
3061 case 2:
3062 /* Pre-adjust the loop counter. */
3063 operands[2] = GEN_INT (n_bytes - 4);
3064 output_asm_insn ("ldi %2,%1", operands);
3066 /* Loop. */
3067 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3068 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3069 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3071 /* Handle the residual. */
3072 if (n_bytes % 4 != 0)
3074 if (n_bytes % 4 >= 2)
3075 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3076 if (n_bytes % 2 != 0)
3077 output_asm_insn ("stb %%r0,0(%0)", operands);
3079 return "";
3081 case 1:
3082 /* Pre-adjust the loop counter. */
3083 operands[2] = GEN_INT (n_bytes - 2);
3084 output_asm_insn ("ldi %2,%1", operands);
3086 /* Loop. */
3087 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3088 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3089 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3091 /* Handle the residual. */
3092 if (n_bytes % 2 != 0)
3093 output_asm_insn ("stb %%r0,0(%0)", operands);
3095 return "";
3097 default:
3098 gcc_unreachable ();
3102 /* Count the number of insns necessary to handle this block clear.
3104 Basic structure is the same as emit_block_move, except that we
3105 count insns rather than emit them. */
3107 static int
3108 compute_clrmem_length (rtx_insn *insn)
3110 rtx pat = PATTERN (insn);
3111 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3112 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3113 unsigned int n_insns = 0;
3115 /* We can't clear more than a word at a time because the PA
3116 has no integer move insns longer than a word. */
3117 if (align > (TARGET_64BIT ? 8 : 4))
3118 align = (TARGET_64BIT ? 8 : 4);
3120 /* The basic loop. */
3121 n_insns = 4;
3123 /* Residuals. */
3124 if (n_bytes % (2 * align) != 0)
3126 if ((n_bytes % (2 * align)) >= align)
3127 n_insns++;
3129 if ((n_bytes % align) != 0)
3130 n_insns++;
3133 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3134 return n_insns * 4;
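/* Worked example (illustrative, not from the original source): a
   14-byte clear with 4-byte alignment costs the 4-insn loop plus one
   word store (14 % 8 >= 4) plus one byte-tail store (14 % 4 != 0):
   6 insns, or 24 bytes.  */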
3138 const char *
3139 pa_output_and (rtx *operands)
3141 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3143 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3144 int ls0, ls1, ms0, p, len;
3146 for (ls0 = 0; ls0 < 32; ls0++)
3147 if ((mask & (1 << ls0)) == 0)
3148 break;
3150 for (ls1 = ls0; ls1 < 32; ls1++)
3151 if ((mask & (1 << ls1)) != 0)
3152 break;
3154 for (ms0 = ls1; ms0 < 32; ms0++)
3155 if ((mask & (1 << ms0)) == 0)
3156 break;
3158 gcc_assert (ms0 == 32);
3160 if (ls1 == 32)
3162 len = ls0;
3164 gcc_assert (len);
3166 operands[2] = GEN_INT (len);
3167 return "{extru|extrw,u} %1,31,%2,%0";
3169 else
3171 /* We could use this `depi' for the case above as well, but `depi'
3172 requires one more register file access than an `extru'. */
3174 p = 31 - ls0;
3175 len = ls1 - ls0;
3177 operands[2] = GEN_INT (p);
3178 operands[3] = GEN_INT (len);
3179 return "{depi|depwi} 0,%2,%3,%0";
3182 else
3183 return "and %1,%2,%0";
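/* Illustrative example (not from the original source): for the mask
   0xffff00ff the scan finds ls0 == 8, ls1 == 16 and ms0 == 32, so the
   single middle run of zeros is cleared with "depwi 0,23,8,%0"
   (p == 31 - 8, len == 16 - 8); a mask of 0x7f (ls1 == 32) instead
   extracts the low 7 bits with "extrw,u %1,31,7,%0".  */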
3186 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3187 storing the result in operands[0]. */
3188 const char *
3189 pa_output_64bit_and (rtx *operands)
3191 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3193 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3194 int ls0, ls1, ms0, p, len;
3196 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3197 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3198 break;
3200 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3201 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3202 break;
3204 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3205 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3206 break;
3208 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3210 if (ls1 == HOST_BITS_PER_WIDE_INT)
3212 len = ls0;
3214 gcc_assert (len);
3216 operands[2] = GEN_INT (len);
3217 return "extrd,u %1,63,%2,%0";
3219 else
3221 /* We could use this `depdi' for the case above as well, but `depdi'
3222 requires one more register file access than an `extrd,u'. */
3224 p = 63 - ls0;
3225 len = ls1 - ls0;
3227 operands[2] = GEN_INT (p);
3228 operands[3] = GEN_INT (len);
3229 return "depdi 0,%2,%3,%0";
3232 else
3233 return "and %1,%2,%0";
3236 const char *
3237 pa_output_ior (rtx *operands)
3239 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3240 int bs0, bs1, p, len;
3242 if (INTVAL (operands[2]) == 0)
3243 return "copy %1,%0";
3245 for (bs0 = 0; bs0 < 32; bs0++)
3246 if ((mask & (1 << bs0)) != 0)
3247 break;
3249 for (bs1 = bs0; bs1 < 32; bs1++)
3250 if ((mask & (1 << bs1)) == 0)
3251 break;
3253 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3255 p = 31 - bs0;
3256 len = bs1 - bs0;
3258 operands[2] = GEN_INT (p);
3259 operands[3] = GEN_INT (len);
3260 return "{depi|depwi} -1,%2,%3,%0";
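/* Illustrative example (not from the original source): operands[2] ==
   0x0ff0 is a single run of ones with bs0 == 4 and bs1 == 12, so the
   OR is done with "depwi -1,27,8,%0" (p == 31 - 4, len == 12 - 4).  */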
3263 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3264 storing the result in operands[0]. */
3265 const char *
3266 pa_output_64bit_ior (rtx *operands)
3268 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3269 int bs0, bs1, p, len;
3271 if (INTVAL (operands[2]) == 0)
3272 return "copy %1,%0";
3274 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3275 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3276 break;
3278 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3279 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3280 break;
3282 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3283 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3285 p = 63 - bs0;
3286 len = bs1 - bs0;
3288 operands[2] = GEN_INT (p);
3289 operands[3] = GEN_INT (len);
3290 return "depdi -1,%2,%3,%0";
3293 /* Target hook for assembling integer objects. This code handles
3294 aligned SI and DI integers specially since function references
3295 must be preceded by P%. */
3297 static bool
3298 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3300 if (size == UNITS_PER_WORD
3301 && aligned_p
3302 && function_label_operand (x, VOIDmode))
3304 fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);
3306 /* We don't want an OPD when generating fast indirect calls. */
3307 if (!TARGET_FAST_INDIRECT_CALLS)
3308 fputs ("P%", asm_out_file);
3310 output_addr_const (asm_out_file, x);
3311 fputc ('\n', asm_out_file);
3312 return true;
3314 return default_assemble_integer (x, size, aligned_p);
3317 /* Output an ascii string. */
3318 void
3319 pa_output_ascii (FILE *file, const char *p, int size)
3321 int i;
3322 int chars_output;
3323 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3325 /* The HP assembler can only take strings of 256 characters at one
3326 time. This is a limitation on input line length, *not* the
3327 length of the string. Sigh. Even worse, it seems that the
3328 restriction is in number of input characters (see \xnn &
3329 \whatever). So we have to do this very carefully. */
3331 fputs ("\t.STRING \"", file);
3333 chars_output = 0;
3334 for (i = 0; i < size; i += 4)
3336 int co = 0;
3337 int io = 0;
3338 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3340 register unsigned int c = (unsigned char) p[i + io];
3342 if (c == '\"' || c == '\\')
3343 partial_output[co++] = '\\';
3344 if (c >= ' ' && c < 0177)
3345 partial_output[co++] = c;
3346 else
3348 unsigned int hexd;
3349 partial_output[co++] = '\\';
3350 partial_output[co++] = 'x';
3351 hexd = c / 16 - 0 + '0';
3352 if (hexd > '9')
3353 hexd -= '9' - 'a' + 1;
3354 partial_output[co++] = hexd;
3355 hexd = c % 16 - 0 + '0';
3356 if (hexd > '9')
3357 hexd -= '9' - 'a' + 1;
3358 partial_output[co++] = hexd;
3361 if (chars_output + co > 243)
3363 fputs ("\"\n\t.STRING \"", file);
3364 chars_output = 0;
3366 fwrite (partial_output, 1, (size_t) co, file);
3367 chars_output += co;
3368 co = 0;
3370 fputs ("\"\n", file);
3373 /* Try to rewrite floating point comparisons & branches to avoid
3374 useless add,tr insns.
3376 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3377 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3378 first attempt to remove useless add,tr insns. It is zero
3379 for the second pass as reorg sometimes leaves bogus REG_DEAD
3380 notes lying around.
3382 When CHECK_NOTES is zero we can only eliminate add,tr insns
3383 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3384 instructions. */
3385 static void
3386 remove_useless_addtr_insns (int check_notes)
3388 rtx_insn *insn;
3389 static int pass = 0;
3391 /* This is fairly cheap, so always run it when optimizing. */
3392 if (optimize > 0)
3394 int fcmp_count = 0;
3395 int fbranch_count = 0;
3397 /* Walk all the insns in this function looking for fcmp & fbranch
3398 instructions. Keep track of how many of each we find. */
3399 for (insn = get_insns (); insn; insn = next_insn (insn))
3401 rtx tmp;
3403 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3404 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3405 continue;
3407 tmp = PATTERN (insn);
3409 /* It must be a set. */
3410 if (GET_CODE (tmp) != SET)
3411 continue;
3413 /* If the destination is CCFP, then we've found an fcmp insn. */
3414 tmp = SET_DEST (tmp);
3415 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3417 fcmp_count++;
3418 continue;
3421 tmp = PATTERN (insn);
3422 /* If this is an fbranch instruction, bump the fbranch counter. */
3423 if (GET_CODE (tmp) == SET
3424 && SET_DEST (tmp) == pc_rtx
3425 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3426 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3427 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3428 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3430 fbranch_count++;
3431 continue;
3436 /* Find all floating point compare + branch insns. If possible,
3437 reverse the comparison & the branch to avoid add,tr insns. */
3438 for (insn = get_insns (); insn; insn = next_insn (insn))
3440 rtx tmp;
3441 rtx_insn *next;
3443 /* Ignore anything that isn't an INSN. */
3444 if (! NONJUMP_INSN_P (insn))
3445 continue;
3447 tmp = PATTERN (insn);
3449 /* It must be a set. */
3450 if (GET_CODE (tmp) != SET)
3451 continue;
3453 /* The destination must be CCFP, which is register zero. */
3454 tmp = SET_DEST (tmp);
3455 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3456 continue;
3458 /* INSN should be a set of CCFP.
3460 See if the result of this insn is used in a reversed FP
3461 conditional branch. If so, reverse our condition and
3462 the branch. Doing so avoids useless add,tr insns. */
3463 next = next_insn (insn);
3464 while (next)
3466 /* Jumps, calls and labels stop our search. */
3467 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3468 break;
3470 /* As does another fcmp insn. */
3471 if (NONJUMP_INSN_P (next)
3472 && GET_CODE (PATTERN (next)) == SET
3473 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3474 && REGNO (SET_DEST (PATTERN (next))) == 0)
3475 break;
3477 next = next_insn (next);
3480 /* Is NEXT_INSN a branch? */
3481 if (next && JUMP_P (next))
3483 rtx pattern = PATTERN (next);
3485 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3486 and CCFP dies, then reverse our conditional and the branch
3487 to avoid the add,tr. */
3488 if (GET_CODE (pattern) == SET
3489 && SET_DEST (pattern) == pc_rtx
3490 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3491 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3492 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3493 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3494 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3495 && (fcmp_count == fbranch_count
3496 || (check_notes
3497 && find_regno_note (next, REG_DEAD, 0))))
3499 /* Reverse the branch. */
3500 tmp = XEXP (SET_SRC (pattern), 1);
3501 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3502 XEXP (SET_SRC (pattern), 2) = tmp;
3503 INSN_CODE (next) = -1;
3505 /* Reverse our condition. */
3506 tmp = PATTERN (insn);
3507 PUT_CODE (XEXP (tmp, 1),
3508 (reverse_condition_maybe_unordered
3509 (GET_CODE (XEXP (tmp, 1)))));
3515 pass = !pass;
3519 /* You may have trouble believing this, but this is the 32 bit HP-PA
3520 stack layout. Wow.
3522 Offset Contents
3524 Variable arguments (optional; any number may be allocated)
3526 SP-(4*(N+9)) arg word N
3528 SP-56 arg word 5
3529 SP-52 arg word 4
3531 Fixed arguments (must be allocated; may remain unused)
3533 SP-48 arg word 3
3534 SP-44 arg word 2
3535 SP-40 arg word 1
3536 SP-36 arg word 0
3538 Frame Marker
3540 SP-32 External Data Pointer (DP)
3541 SP-28 External sr4
3542 SP-24 External/stub RP (RP')
3543 SP-20 Current RP
3544 SP-16 Static Link
3545 SP-12 Clean up
3546 SP-8 Calling Stub RP (RP'')
3547 SP-4 Previous SP
3549 Top of Frame
3551 SP-0 Stack Pointer (points to next available address)
3555 /* This function saves registers as follows. Registers marked with ' are
3556 this function's registers (as opposed to the previous function's).
3557 If a frame_pointer isn't needed, r4 is saved as a general register;
3558 the space for the frame pointer is still allocated, though, to keep
3559 things simple.
3562 Top of Frame
3564 SP (FP') Previous FP
3565 SP + 4 Alignment filler (sigh)
3566 SP + 8 Space for locals reserved here.
3570 SP + n All call saved registers used.
3574 SP + o All call saved fp registers used.
3578 SP + p (SP') points to next available address.
3582 /* Global variables set by output_function_prologue(). */
3583 /* Size of frame. Need to know this to emit return insns from
3584 leaf procedures. */
3585 static HOST_WIDE_INT actual_fsize, local_fsize;
3586 static int save_fregs;
3588 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3589 Handle case where DISP > 8k by using the add_high_const patterns.
3591 Note in DISP > 8k case, we will leave the high part of the address
3592 in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this. */
3594 static void
3595 store_reg (int reg, HOST_WIDE_INT disp, int base)
3597 rtx dest, src, basereg;
3598 rtx_insn *insn;
3600 src = gen_rtx_REG (word_mode, reg);
3601 basereg = gen_rtx_REG (Pmode, base);
3602 if (VAL_14_BITS_P (disp))
3604 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3605 insn = emit_move_insn (dest, src);
3607 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3609 rtx delta = GEN_INT (disp);
3610 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3612 emit_move_insn (tmpreg, delta);
3613 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3614 if (DO_FRAME_NOTES)
3616 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3617 gen_rtx_SET (tmpreg,
3618 gen_rtx_PLUS (Pmode, basereg, delta)));
3619 RTX_FRAME_RELATED_P (insn) = 1;
3621 dest = gen_rtx_MEM (word_mode, tmpreg);
3622 insn = emit_move_insn (dest, src);
3624 else
3626 rtx delta = GEN_INT (disp);
3627 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3628 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3630 emit_move_insn (tmpreg, high);
3631 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3632 insn = emit_move_insn (dest, src);
3633 if (DO_FRAME_NOTES)
3634 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3635 gen_rtx_SET (gen_rtx_MEM (word_mode,
3636 gen_rtx_PLUS (word_mode,
3637 basereg,
3638 delta)),
3639 src));
3642 if (DO_FRAME_NOTES)
3643 RTX_FRAME_RELATED_P (insn) = 1;
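/* Illustrative example (not from the original source): store_reg (3,
   20000, STACK_POINTER_REGNUM) cannot use a 14-bit displacement, so
   on a 32-bit target it takes the final branch above: %r1 receives
   the high part via the add_high_const pattern (an addil) and the
   store goes through a LO_SUM address, leaving the high part in %r1
   as the comment promises.  */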
3646 /* Emit RTL to store REG at the memory location specified by BASE and then
3647 add MOD to BASE. MOD must be <= 8k. */
3649 static void
3650 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3652 rtx basereg, srcreg, delta;
3653 rtx_insn *insn;
3655 gcc_assert (VAL_14_BITS_P (mod));
3657 basereg = gen_rtx_REG (Pmode, base);
3658 srcreg = gen_rtx_REG (word_mode, reg);
3659 delta = GEN_INT (mod);
3661 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3662 if (DO_FRAME_NOTES)
3664 RTX_FRAME_RELATED_P (insn) = 1;
3666 /* RTX_FRAME_RELATED_P must be set on each frame related set
3667 in a parallel with more than one element. */
3668 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3669 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3673 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3674 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3675 whether to add a frame note or not.
3677 In the DISP > 8k case, we leave the high part of the address in %r1.
3678 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3680 static void
3681 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3683 rtx_insn *insn;
3685 if (VAL_14_BITS_P (disp))
3687 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3688 plus_constant (Pmode,
3689 gen_rtx_REG (Pmode, base), disp));
3691 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3693 rtx basereg = gen_rtx_REG (Pmode, base);
3694 rtx delta = GEN_INT (disp);
3695 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3697 emit_move_insn (tmpreg, delta);
3698 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3699 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3700 if (DO_FRAME_NOTES)
3701 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3702 gen_rtx_SET (tmpreg,
3703 gen_rtx_PLUS (Pmode, basereg, delta)));
3705 else
3707 rtx basereg = gen_rtx_REG (Pmode, base);
3708 rtx delta = GEN_INT (disp);
3709 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3711 emit_move_insn (tmpreg,
3712 gen_rtx_PLUS (Pmode, basereg,
3713 gen_rtx_HIGH (Pmode, delta)));
3714 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3715 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3718 if (DO_FRAME_NOTES && note)
3719 RTX_FRAME_RELATED_P (insn) = 1;
3722 HOST_WIDE_INT
3723 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3725 int freg_saved = 0;
3726 int i, j;
3728 /* The code in pa_expand_prologue and pa_expand_epilogue must
3729 be consistent with the rounding and size calculation done here.
3730 Change them at the same time. */
3732 /* We do our own stack alignment. First, round the size of the
3733 stack locals up to a word boundary. */
3734 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3736 /* Space for previous frame pointer + filler. If any frame is
3737 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3738 waste some space here for the sake of HP compatibility. The
3739 first slot is only used when the frame pointer is needed. */
3740 if (size || frame_pointer_needed)
3741 size += STARTING_FRAME_OFFSET;
3743 /* If the current function calls __builtin_eh_return, then we need
3744 to allocate stack space for registers that will hold data for
3745 the exception handler. */
3746 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3748 unsigned int i;
3750 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3751 continue;
3752 size += i * UNITS_PER_WORD;
3755 /* Account for space used by the callee general register saves. */
3756 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3757 if (df_regs_ever_live_p (i))
3758 size += UNITS_PER_WORD;
3760 /* Account for space used by the callee floating point register saves. */
3761 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3762 if (df_regs_ever_live_p (i)
3763 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3765 freg_saved = 1;
3767 /* We always save both halves of the FP register, so always
3768 increment the frame size by 8 bytes. */
3769 size += 8;
3772 /* If any of the floating registers are saved, account for the
3773 alignment needed for the floating point register save block. */
3774 if (freg_saved)
3776 size = (size + 7) & ~7;
3777 if (fregs_live)
3778 *fregs_live = 1;
3781 /* The various ABIs include space for the outgoing parameters in the
3782 size of the current function's stack frame. We don't need to align
3783 for the outgoing arguments as their alignment is set by the final
3784 rounding for the frame as a whole. */
3785 size += crtl->outgoing_args_size;
3787 /* Allocate space for the fixed frame marker. This space must be
3788 allocated for any function that makes calls or allocates
3789 stack space. */
3790 if (!crtl->is_leaf || size)
3791 size += TARGET_64BIT ? 48 : 32;
3793 /* Finally, round to the preferred stack boundary. */
3794 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3795 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
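/* Worked example for a hypothetical non-leaf 32-bit function, assuming
   the usual values STARTING_FRAME_OFFSET = 8 and a 64-byte preferred
   stack boundary: 10 bytes of locals round up to 12; the frame pointer
   slot and filler raise that to 20; with no callee saves, 16 bytes of
   outgoing arguments and the 32-byte frame marker bring the running
   total to 68, which the final rounding raises to 128.  */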
3798 /* Generate the assembly code for function entry. FILE is a stdio
3799 stream to output the code to. SIZE is a HOST_WIDE_INT: how many units of
3800 temporary storage to allocate.
3802 Refer to the array `regs_ever_live' to determine which registers to
3803 save; `regs_ever_live[I]' is nonzero if register number I is ever
3804 used in the function. This function is responsible for knowing
3805 which registers should not be saved even if used. */
3807 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3808 of memory. If any fpu reg is used in the function, we allocate
3809 such a block here, at the bottom of the frame, just in case it's needed.
3811 If this function is a leaf procedure, then we may choose not
3812 to do a "save" insn. The decision about whether or not
3813 to do this is made in regclass.c. */
3815 static void
3816 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3818 /* The function's label and associated .PROC must never be
3819 separated and must be output *after* any profiling declarations
3820 to avoid changing spaces/subspaces within a procedure. */
3821 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3822 fputs ("\t.PROC\n", file);
3824 /* pa_expand_prologue does the dirty work now. We just need
3825 to output the assembler directives which denote the start
3826 of a function. */
3827 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3828 if (crtl->is_leaf)
3829 fputs (",NO_CALLS", file);
3830 else
3831 fputs (",CALLS", file);
3832 if (rp_saved)
3833 fputs (",SAVE_RP", file);
3835 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3836 at the beginning of the frame and that it is used as the frame
3837 pointer for the frame. We do this because our current frame
3838 layout doesn't conform to that specified in the HP runtime
3839 documentation and we need a way to indicate to programs such as
3840 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3841 isn't used by HP compilers but is supported by the assembler.
3842 However, SAVE_SP is supposed to indicate that the previous stack
3843 pointer has been saved in the frame marker. */
3844 if (frame_pointer_needed)
3845 fputs (",SAVE_SP", file);
3847 /* Pass on information about the number of callee register saves
3848 performed in the prologue.
3850 The compiler is supposed to pass the highest register number
3851 saved, the assembler then has to adjust that number before
3852 entering it into the unwind descriptor (to account for any
3853 caller saved registers with lower register numbers than the
3854 first callee saved register). */
3855 if (gr_saved)
3856 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3858 if (fr_saved)
3859 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3861 fputs ("\n\t.ENTRY\n", file);
3863 remove_useless_addtr_insns (0);
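/* For a typical non-leaf 32-bit function that saves %rp and uses a
   frame pointer, the directives emitted above look roughly like

       .CALLINFO FRAME=128,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3
       .ENTRY

   where ENTRY_GR=3 reflects a gr_saved of 1 biased by 2, per the
   comment above.  The FRAME value here is illustrative.  */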
3866 void
3867 pa_expand_prologue (void)
3869 int merge_sp_adjust_with_store = 0;
3870 HOST_WIDE_INT size = get_frame_size ();
3871 HOST_WIDE_INT offset;
3872 int i;
3873 rtx tmpreg;
3874 rtx_insn *insn;
3876 gr_saved = 0;
3877 fr_saved = 0;
3878 save_fregs = 0;
3880 /* Compute total size for frame pointer, filler, locals and rounding to
3881 the next word boundary. Similar code appears in pa_compute_frame_size
3882 and must be changed in tandem with this code. */
3883 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3884 if (local_fsize || frame_pointer_needed)
3885 local_fsize += STARTING_FRAME_OFFSET;
3887 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3888 if (flag_stack_usage_info)
3889 current_function_static_stack_size = actual_fsize;
3891 /* Compute a few things we will use often. */
3892 tmpreg = gen_rtx_REG (word_mode, 1);
3894 /* Save RP first. The calling conventions manual states RP will
3895 always be stored into the caller's frame at sp - 20 or sp - 16
3896 depending on which ABI is in use. */
3897 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3899 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3900 rp_saved = true;
3902 else
3903 rp_saved = false;
3905 /* Allocate the local frame and set up the frame pointer if needed. */
3906 if (actual_fsize != 0)
3908 if (frame_pointer_needed)
3910 /* Copy the old frame pointer temporarily into %r1. Set up the
3911 new stack pointer, then store away the saved old frame pointer
3912 into the stack at sp and at the same time update the stack
3913 pointer by actual_fsize bytes. Two versions, first
3914 handles small (<8k) frames. The second handles large (>=8k)
3915 frames. */
3916 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3917 if (DO_FRAME_NOTES)
3918 RTX_FRAME_RELATED_P (insn) = 1;
3920 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3921 if (DO_FRAME_NOTES)
3922 RTX_FRAME_RELATED_P (insn) = 1;
3924 if (VAL_14_BITS_P (actual_fsize))
3925 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3926 else
3928 /* It is incorrect to store the saved frame pointer at *sp,
3929 then increment sp (writes beyond the current stack boundary).
3931 So instead use stwm to store at *sp and post-increment the
3932 stack pointer as an atomic operation. Then increment sp to
3933 finish allocating the new frame. */
3934 HOST_WIDE_INT adjust1 = 8192 - 64;
3935 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3937 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3938 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3939 adjust2, 1);
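/* Worked example of the split for a hypothetical actual_fsize of
   20000: adjust1 = 8192 - 64 = 8128, the largest multiple of 64 that
   still fits the 14-bit displacement of stwm, and adjust2 =
   20000 - 8128 = 11872, which is added with the addil/ldo sequence
   from set_reg_plus_d.  */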
3942 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3943 we need to store the previous stack pointer (frame pointer)
3944 into the frame marker on targets that use the HP unwind
3945 library. This allows the HP unwind library to be used to
3946 unwind GCC frames. However, we are not fully compatible
3947 with the HP library because our frame layout differs from
3948 that specified in the HP runtime specification.
3950 We don't want a frame note on this instruction as the frame
3951 marker moves during dynamic stack allocation.
3953 This instruction also serves as a blockage to prevent
3954 register spills from being scheduled before the stack
3955 pointer is raised. This is necessary as we store
3956 registers using the frame pointer as a base register,
3957 and the frame pointer is set before sp is raised. */
3958 if (TARGET_HPUX_UNWIND_LIBRARY)
3960 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3961 GEN_INT (TARGET_64BIT ? -8 : -4));
3963 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3964 hard_frame_pointer_rtx);
3966 else
3967 emit_insn (gen_blockage ());
3969 /* no frame pointer needed. */
3970 else
3972 /* In some cases we can perform the first callee register save
3973 and allocate the stack frame at the same time. If so, just
3974 make a note of it and defer allocating the frame until saving
3975 the callee registers. */
3976 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3977 merge_sp_adjust_with_store = 1;
3978 /* Cannot optimize. Adjust the stack frame by actual_fsize
3979 bytes. */
3980 else
3981 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3982 actual_fsize, 1);
3986 /* Normal register save.
3988 Do not save the frame pointer in the frame_pointer_needed case. It
3989 was done earlier. */
3990 if (frame_pointer_needed)
3992 offset = local_fsize;
3994 /* Saving the EH return data registers in the frame is the simplest
3995 way to get the frame unwind information emitted. We put them
3996 just before the general registers. */
3997 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3999 unsigned int i, regno;
4001 for (i = 0; ; ++i)
4003 regno = EH_RETURN_DATA_REGNO (i);
4004 if (regno == INVALID_REGNUM)
4005 break;
4007 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4008 offset += UNITS_PER_WORD;
4012 for (i = 18; i >= 4; i--)
4013 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4015 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4016 offset += UNITS_PER_WORD;
4017 gr_saved++;
4019 /* Account for %r3 which is saved in a special place. */
4020 gr_saved++;
4022 /* No frame pointer needed. */
4023 else
4025 offset = local_fsize - actual_fsize;
4027 /* Saving the EH return data registers in the frame is the simplest
4028 way to get the frame unwind information emitted. */
4029 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4031 unsigned int i, regno;
4033 for (i = 0; ; ++i)
4035 regno = EH_RETURN_DATA_REGNO (i);
4036 if (regno == INVALID_REGNUM)
4037 break;
4039 /* If merge_sp_adjust_with_store is nonzero, then we can
4040 optimize the first save. */
4041 if (merge_sp_adjust_with_store)
4043 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4044 merge_sp_adjust_with_store = 0;
4046 else
4047 store_reg (regno, offset, STACK_POINTER_REGNUM);
4048 offset += UNITS_PER_WORD;
4052 for (i = 18; i >= 3; i--)
4053 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4055 /* If merge_sp_adjust_with_store is nonzero, then we can
4056 optimize the first GR save. */
4057 if (merge_sp_adjust_with_store)
4059 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4060 merge_sp_adjust_with_store = 0;
4062 else
4063 store_reg (i, offset, STACK_POINTER_REGNUM);
4064 offset += UNITS_PER_WORD;
4065 gr_saved++;
4068 /* If we wanted to merge the SP adjustment with a GR save, but we never
4069 did any GR saves, then just emit the adjustment here. */
4070 if (merge_sp_adjust_with_store)
4071 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4072 actual_fsize, 1);
4075 /* The hppa calling conventions say that %r19, the pic offset
4076 register, is saved at sp - 32 (in this function's frame)
4077 when generating PIC code. FIXME: What is the correct thing
4078 to do for functions which make no calls and allocate no
4079 frame? Do we need to allocate a frame, or can we just omit
4080 the save? For now we'll just omit the save.
4082 We don't want a note on this insn as the frame marker can
4083 move if there is a dynamic stack allocation. */
4084 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4086 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4088 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4092 /* Align pointer properly (doubleword boundary). */
4093 offset = (offset + 7) & ~7;
4095 /* Floating point register store. */
4096 if (save_fregs)
4098 rtx base;
4100 /* First get the frame or stack pointer to the start of the FP register
4101 save area. */
4102 if (frame_pointer_needed)
4104 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4105 base = hard_frame_pointer_rtx;
4107 else
4109 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4110 base = stack_pointer_rtx;
4113 /* Now actually save the FP registers. */
4114 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4116 if (df_regs_ever_live_p (i)
4117 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4119 rtx addr, reg;
4120 rtx_insn *insn;
4121 addr = gen_rtx_MEM (DFmode,
4122 gen_rtx_POST_INC (word_mode, tmpreg));
4123 reg = gen_rtx_REG (DFmode, i);
4124 insn = emit_move_insn (addr, reg);
4125 if (DO_FRAME_NOTES)
4127 RTX_FRAME_RELATED_P (insn) = 1;
4128 if (TARGET_64BIT)
4130 rtx mem = gen_rtx_MEM (DFmode,
4131 plus_constant (Pmode, base,
4132 offset));
4133 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4134 gen_rtx_SET (mem, reg));
4136 else
4138 rtx meml = gen_rtx_MEM (SFmode,
4139 plus_constant (Pmode, base,
4140 offset));
4141 rtx memr = gen_rtx_MEM (SFmode,
4142 plus_constant (Pmode, base,
4143 offset + 4));
4144 rtx regl = gen_rtx_REG (SFmode, i);
4145 rtx regr = gen_rtx_REG (SFmode, i + 1);
4146 rtx setl = gen_rtx_SET (meml, regl);
4147 rtx setr = gen_rtx_SET (memr, regr);
4148 rtvec vec;
4150 RTX_FRAME_RELATED_P (setl) = 1;
4151 RTX_FRAME_RELATED_P (setr) = 1;
4152 vec = gen_rtvec (2, setl, setr);
4153 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4154 gen_rtx_SEQUENCE (VOIDmode, vec));
4157 offset += GET_MODE_SIZE (DFmode);
4158 fr_saved++;
4164 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4165 Handle case where DISP > 8k by using the add_high_const patterns. */
4167 static void
4168 load_reg (int reg, HOST_WIDE_INT disp, int base)
4170 rtx dest = gen_rtx_REG (word_mode, reg);
4171 rtx basereg = gen_rtx_REG (Pmode, base);
4172 rtx src;
4174 if (VAL_14_BITS_P (disp))
4175 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4176 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4178 rtx delta = GEN_INT (disp);
4179 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4181 emit_move_insn (tmpreg, delta);
4182 if (TARGET_DISABLE_INDEXING)
4184 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4185 src = gen_rtx_MEM (word_mode, tmpreg);
4187 else
4188 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4190 else
4192 rtx delta = GEN_INT (disp);
4193 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4194 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4196 emit_move_insn (tmpreg, high);
4197 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4200 emit_move_insn (dest, src);
4203 /* Update the total code bytes output to the text section. */
4205 static void
4206 update_total_code_bytes (unsigned int nbytes)
4208 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4209 && !IN_NAMED_SECTION_P (cfun->decl))
4211 unsigned int old_total = total_code_bytes;
4213 total_code_bytes += nbytes;
4215 /* Be prepared to handle overflows. */
4216 if (old_total > total_code_bytes)
4217 total_code_bytes = UINT_MAX;
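/* Example of the overflow handling above: if total_code_bytes is
   within NBYTES of UINT_MAX, the unsigned addition wraps around,
   old_total > total_code_bytes detects the wrap, and the running
   total saturates at UINT_MAX instead of silently restarting.  */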
4221 /* This function generates the assembly code for function exit.
4222 Args are as for output_function_prologue ().
4224 The function epilogue should not depend on the current stack
4225 pointer! It should use the frame pointer only. This is mandatory
4226 because of alloca; we also take advantage of it to omit stack
4227 adjustments before returning. */
4229 static void
4230 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4232 rtx_insn *insn = get_last_insn ();
4233 bool extra_nop;
4235 /* pa_expand_epilogue does the dirty work now. We just need
4236 to output the assembler directives which denote the end
4237 of a function.
4239 To make debuggers happy, emit a nop if the epilogue was completely
4240 eliminated due to a volatile call as the last insn in the
4241 current function. That way the return address (in %r2) will
4242 always point to a valid instruction in the current function. */
4244 /* Get the last real insn. */
4245 if (NOTE_P (insn))
4246 insn = prev_real_insn (insn);
4248 /* If it is a sequence, then look inside. */
4249 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4250 insn = as_a <rtx_sequence *> (PATTERN (insn))-> insn (0);
4252 /* If insn is a CALL_INSN, then it must be a call to a volatile
4253 function (otherwise there would be epilogue insns). */
4254 if (insn && CALL_P (insn))
4256 fputs ("\tnop\n", file);
4257 extra_nop = true;
4259 else
4260 extra_nop = false;
4262 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4264 if (TARGET_SOM && TARGET_GAS)
4266 /* We are done with this subspace except possibly for some additional
4267 debug information. Forget that we are in this subspace to ensure
4268 that the next function is output in its own subspace. */
4269 in_section = NULL;
4270 cfun->machine->in_nsubspa = 2;
4273 /* Thunks do their own insn accounting. */
4274 if (cfun->is_thunk)
4275 return;
4277 if (INSN_ADDRESSES_SET_P ())
4279 last_address = extra_nop ? 4 : 0;
4280 insn = get_last_nonnote_insn ();
4281 if (insn)
4283 last_address += INSN_ADDRESSES (INSN_UID (insn));
4284 if (INSN_P (insn))
4285 last_address += insn_default_length (insn);
4287 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4288 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4290 else
4291 last_address = UINT_MAX;
4293 /* Finally, update the total number of code bytes output so far. */
4294 update_total_code_bytes (last_address);
4297 void
4298 pa_expand_epilogue (void)
4300 rtx tmpreg;
4301 HOST_WIDE_INT offset;
4302 HOST_WIDE_INT ret_off = 0;
4303 int i;
4304 int merge_sp_adjust_with_load = 0;
4306 /* We will use this often. */
4307 tmpreg = gen_rtx_REG (word_mode, 1);
4309 /* Try to restore RP early to avoid load/use interlocks when
4310 RP gets used in the return (bv) instruction. This appears to still
4311 be necessary even when we schedule the prologue and epilogue. */
4312 if (rp_saved)
4314 ret_off = TARGET_64BIT ? -16 : -20;
4315 if (frame_pointer_needed)
4317 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4318 ret_off = 0;
4320 else
4322 /* No frame pointer, and stack is smaller than 8k. */
4323 if (VAL_14_BITS_P (ret_off - actual_fsize))
4325 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4326 ret_off = 0;
4331 /* General register restores. */
4332 if (frame_pointer_needed)
4334 offset = local_fsize;
4336 /* If the current function calls __builtin_eh_return, then we need
4337 to restore the saved EH data registers. */
4338 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4340 unsigned int i, regno;
4342 for (i = 0; ; ++i)
4344 regno = EH_RETURN_DATA_REGNO (i);
4345 if (regno == INVALID_REGNUM)
4346 break;
4348 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4349 offset += UNITS_PER_WORD;
4353 for (i = 18; i >= 4; i--)
4354 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4356 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4357 offset += UNITS_PER_WORD;
4360 else
4362 offset = local_fsize - actual_fsize;
4364 /* If the current function calls __builtin_eh_return, then we need
4365 to restore the saved EH data registers. */
4366 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4368 unsigned int i, regno;
4370 for (i = 0; ; ++i)
4372 regno = EH_RETURN_DATA_REGNO (i);
4373 if (regno == INVALID_REGNUM)
4374 break;
4376 /* Only for the first load.
4377 merge_sp_adjust_with_load holds the register load
4378 with which we will merge the sp adjustment. */
4379 if (merge_sp_adjust_with_load == 0
4380 && local_fsize == 0
4381 && VAL_14_BITS_P (-actual_fsize))
4382 merge_sp_adjust_with_load = regno;
4383 else
4384 load_reg (regno, offset, STACK_POINTER_REGNUM);
4385 offset += UNITS_PER_WORD;
4389 for (i = 18; i >= 3; i--)
4391 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4393 /* Only for the first load.
4394 merge_sp_adjust_with_load holds the register load
4395 with which we will merge the sp adjustment. */
4396 if (merge_sp_adjust_with_load == 0
4397 && local_fsize == 0
4398 && VAL_14_BITS_P (-actual_fsize))
4399 merge_sp_adjust_with_load = i;
4400 else
4401 load_reg (i, offset, STACK_POINTER_REGNUM);
4402 offset += UNITS_PER_WORD;
4407 /* Align pointer properly (doubleword boundary). */
4408 offset = (offset + 7) & ~7;
4410 /* FP register restores. */
4411 if (save_fregs)
4413 /* Adjust the register to index off of. */
4414 if (frame_pointer_needed)
4415 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4416 else
4417 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4419 /* Actually do the restores now. */
4420 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4421 if (df_regs_ever_live_p (i)
4422 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4424 rtx src = gen_rtx_MEM (DFmode,
4425 gen_rtx_POST_INC (word_mode, tmpreg));
4426 rtx dest = gen_rtx_REG (DFmode, i);
4427 emit_move_insn (dest, src);
4431 /* Emit a blockage insn here to keep these insns from being moved to
4432 an earlier spot in the epilogue, or into the main instruction stream.
4434 This is necessary as we must not cut the stack back before all the
4435 restores are finished. */
4436 emit_insn (gen_blockage ());
4438 /* Reset stack pointer (and possibly frame pointer). The stack
4439 pointer is initially set to fp + 64 to avoid a race condition. */
4440 if (frame_pointer_needed)
4442 rtx delta = GEN_INT (-64);
4444 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4445 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4446 stack_pointer_rtx, delta));
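/* Roughly, the frame pointer case above emits (operand syntax
   approximate; %r3 is the 32-bit frame pointer):

       ldo 64(%r3),%sp        ; sp = fp + 64, never below live data
       ldw,mb -64(%sp),%r3    ; sp = fp, reload the saved %r3

   so the stack pointer never drops below the memory still being
   read.  */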
4448 /* If we were deferring a callee register restore, do it now. */
4449 else if (merge_sp_adjust_with_load)
4451 rtx delta = GEN_INT (-actual_fsize);
4452 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4454 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4456 else if (actual_fsize != 0)
4457 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4458 - actual_fsize, 0);
4460 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4461 frame greater than 8k), do so now. */
4462 if (ret_off != 0)
4463 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4465 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4467 rtx sa = EH_RETURN_STACKADJ_RTX;
4469 emit_insn (gen_blockage ());
4470 emit_insn (TARGET_64BIT
4471 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4472 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4476 bool
4477 pa_can_use_return_insn (void)
4479 if (!reload_completed)
4480 return false;
4482 if (frame_pointer_needed)
4483 return false;
4485 if (df_regs_ever_live_p (2))
4486 return false;
4488 if (crtl->profile)
4489 return false;
4491 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4494 rtx
4495 hppa_pic_save_rtx (void)
4497 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4500 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4501 #define NO_DEFERRED_PROFILE_COUNTERS 0
4502 #endif
4505 /* Vector of funcdef numbers. */
4506 static vec<int> funcdef_nos;
4508 /* Output deferred profile counters. */
4509 static void
4510 output_deferred_profile_counters (void)
4512 unsigned int i;
4513 int align, n;
4515 if (funcdef_nos.is_empty ())
4516 return;
4518 switch_to_section (data_section);
4519 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4520 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4522 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4524 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4525 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4528 funcdef_nos.release ();
4531 void
4532 hppa_profile_hook (int label_no)
4534 /* We use SImode for the address of the function in both 32 and
4535 64-bit code to avoid having to provide DImode versions of the
4536 lcla2 and load_offset_label_address insn patterns. */
4537 rtx reg = gen_reg_rtx (SImode);
4538 rtx_code_label *label_rtx = gen_label_rtx ();
4539 rtx mcount = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "_mcount"));
4540 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4541 rtx arg_bytes, begin_label_rtx;
4542 rtx_insn *call_insn;
4543 char begin_label_name[16];
4544 bool use_mcount_pcrel_call;
4546 /* If we can reach _mcount with a pc-relative call, we can optimize
4547 loading the address of the current function. This requires linker
4548 long branch stub support. */
4549 if (!TARGET_PORTABLE_RUNTIME
4550 && !TARGET_LONG_CALLS
4551 && (TARGET_SOM || flag_function_sections))
4552 use_mcount_pcrel_call = TRUE;
4553 else
4554 use_mcount_pcrel_call = FALSE;
4556 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4557 label_no);
4558 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4560 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4562 if (!use_mcount_pcrel_call)
4564 /* The address of the function is loaded into %r25 with an instruction-
4565 relative sequence that avoids the use of relocations. The sequence
4566 is split so that the load_offset_label_address instruction can
4567 occupy the delay slot of the call to _mcount. */
4568 if (TARGET_PA_20)
4569 emit_insn (gen_lcla2 (reg, label_rtx));
4570 else
4571 emit_insn (gen_lcla1 (reg, label_rtx));
4573 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4574 reg,
4575 begin_label_rtx,
4576 label_rtx));
4579 if (!NO_DEFERRED_PROFILE_COUNTERS)
4581 rtx count_label_rtx, addr, r24;
4582 char count_label_name[16];
4584 funcdef_nos.safe_push (label_no);
4585 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4586 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4587 ggc_strdup (count_label_name));
4589 addr = force_reg (Pmode, count_label_rtx);
4590 r24 = gen_rtx_REG (Pmode, 24);
4591 emit_move_insn (r24, addr);
4593 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4594 if (use_mcount_pcrel_call)
4595 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4596 begin_label_rtx));
4597 else
4598 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4600 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4602 else
4604 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4605 if (use_mcount_pcrel_call)
4606 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4607 begin_label_rtx));
4608 else
4609 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4612 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4613 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4615 /* Indicate the _mcount call cannot throw, nor will it execute a
4616 non-local goto. */
4617 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4619 /* Allocate space for fixed arguments. */
4620 if (reg_parm_stack_space > crtl->outgoing_args_size)
4621 crtl->outgoing_args_size = reg_parm_stack_space;
4624 /* Fetch the return address for the frame COUNT steps up from
4625 the current frame, after the prologue. FRAMEADDR is the
4626 frame pointer of the COUNT frame.
4628 We want to ignore any export stub remnants here. To handle this,
4629 we examine the code at the return address, and if it is an export
4630 stub, we return a memory rtx for the stub return address stored
4631 at frame-24.
4633 The value returned is used in two different ways:
4635 1. To find a function's caller.
4637 2. To change the return address for a function.
4639 This function handles most instances of case 1; however, it will
4640 fail if there are two levels of stubs to execute on the return
4641 path. The only way I believe that can happen is if the return value
4642 needs a parameter relocation, which never happens for C code.
4644 This function handles most instances of case 2; however, it will
4645 fail if we did not originally have stub code on the return path
4646 but will need stub code on the new return path. This can happen if
4647 the caller & callee are both in the main program, but the new
4648 return location is in a shared library. */
4650 rtx
4651 pa_return_addr_rtx (int count, rtx frameaddr)
4653 rtx label;
4654 rtx rp;
4655 rtx saved_rp;
4656 rtx ins;
4658 /* The instruction stream at the return address of a PA1.X export stub is:
4660 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4661 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4662 0x00011820 | stub+16: mtsp r1,sr0
4663 0xe0400002 | stub+20: be,n 0(sr0,rp)
4665 0xe0400002 must be specified as -532676606 so that it won't be
4666 rejected as an invalid immediate operand on 64-bit hosts.
4668 The instruction stream at the return address of a PA2.0 export stub is:
4670 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4671 0xe840d002 | stub+12: bve,n (rp)
4674 HOST_WIDE_INT insns[4];
4675 int i, len;
4677 if (count != 0)
4678 return NULL_RTX;
4680 rp = get_hard_reg_initial_val (Pmode, 2);
4682 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4683 return rp;
4685 /* If there is no export stub then just use the value saved from
4686 the return pointer register. */
4688 saved_rp = gen_reg_rtx (Pmode);
4689 emit_move_insn (saved_rp, rp);
4691 /* Get pointer to the instruction stream. We have to mask out the
4692 privilege level from the two low order bits of the return address
4693 pointer here so that ins will point to the start of the first
4694 instruction that would have been executed if we returned. */
4695 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4696 label = gen_label_rtx ();
4698 if (TARGET_PA_20)
4700 insns[0] = 0x4bc23fd1;
4701 insns[1] = -398405630;
4702 len = 2;
4704 else
4706 insns[0] = 0x4bc23fd1;
4707 insns[1] = 0x004010a1;
4708 insns[2] = 0x00011820;
4709 insns[3] = -532676606;
4710 len = 4;
4713 /* Check the instruction stream at the normal return address for the
4714 export stub. If it is an export stub, then our return address is
4715 really in -24[frameaddr]. */
4717 for (i = 0; i < len; i++)
4719 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4720 rtx op1 = GEN_INT (insns[i]);
4721 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4724 /* Here we know that our return address points to an export
4725 stub. We don't want to return the address of the export stub,
4726 but rather the return address of the export stub. That return
4727 address is stored at -24[frameaddr]. */
4729 emit_move_insn (saved_rp,
4730 gen_rtx_MEM (Pmode,
4731 memory_address (Pmode,
4732 plus_constant (Pmode, frameaddr,
4733 -24))));
4735 emit_label (label);
4737 return saved_rp;
4740 void
4741 pa_emit_bcond_fp (rtx operands[])
4743 enum rtx_code code = GET_CODE (operands[0]);
4744 rtx operand0 = operands[1];
4745 rtx operand1 = operands[2];
4746 rtx label = operands[3];
4748 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4749 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4751 emit_jump_insn (gen_rtx_SET (pc_rtx,
4752 gen_rtx_IF_THEN_ELSE (VOIDmode,
4753 gen_rtx_fmt_ee (NE,
4754 VOIDmode,
4755 gen_rtx_REG (CCFPmode, 0),
4756 const0_rtx),
4757 gen_rtx_LABEL_REF (VOIDmode, label),
4758 pc_rtx)));
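/* Sketch of the expansion above: the first insn sets the CCFPmode
   "flag" register 0 from the comparison, and the second is a jump
   taken when that register is nonzero; the two later match the
   floating-point compare and branch patterns in pa.md.  */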
4762 /* Adjust the cost of a scheduling dependency. Return the new cost of
4763 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4765 static int
4766 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4767 unsigned int)
4769 enum attr_type attr_type;
4771 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4772 true dependencies as they are described with bypasses now. */
4773 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4774 return cost;
4776 if (! recog_memoized (insn))
4777 return 0;
4779 attr_type = get_attr_type (insn);
4781 switch (dep_type)
4783 case REG_DEP_ANTI:
4784 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4785 cycles later. */
4787 if (attr_type == TYPE_FPLOAD)
4789 rtx pat = PATTERN (insn);
4790 rtx dep_pat = PATTERN (dep_insn);
4791 if (GET_CODE (pat) == PARALLEL)
4793 /* This happens for the fldXs,mb patterns. */
4794 pat = XVECEXP (pat, 0, 0);
4796 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4797 /* If this happens, we have to extend this to schedule
4798 optimally. Return 0 for now. */
4799 return 0;
4801 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4803 if (! recog_memoized (dep_insn))
4804 return 0;
4805 switch (get_attr_type (dep_insn))
4807 case TYPE_FPALU:
4808 case TYPE_FPMULSGL:
4809 case TYPE_FPMULDBL:
4810 case TYPE_FPDIVSGL:
4811 case TYPE_FPDIVDBL:
4812 case TYPE_FPSQRTSGL:
4813 case TYPE_FPSQRTDBL:
4814 /* A fpload can't be issued until one cycle before a
4815 preceding arithmetic operation has finished if
4816 the target of the fpload is any of the sources
4817 (or destination) of the arithmetic operation. */
4818 return insn_default_latency (dep_insn) - 1;
4820 default:
4821 return 0;
4825 else if (attr_type == TYPE_FPALU)
4827 rtx pat = PATTERN (insn);
4828 rtx dep_pat = PATTERN (dep_insn);
4829 if (GET_CODE (pat) == PARALLEL)
4831 /* This happens for the fldXs,mb patterns. */
4832 pat = XVECEXP (pat, 0, 0);
4834 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4835 /* If this happens, we have to extend this to schedule
4836 optimally. Return 0 for now. */
4837 return 0;
4839 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4841 if (! recog_memoized (dep_insn))
4842 return 0;
4843 switch (get_attr_type (dep_insn))
4845 case TYPE_FPDIVSGL:
4846 case TYPE_FPDIVDBL:
4847 case TYPE_FPSQRTSGL:
4848 case TYPE_FPSQRTDBL:
4849 /* An ALU flop can't be issued until two cycles before a
4850 preceding divide or sqrt operation has finished if
4851 the target of the ALU flop is any of the sources
4852 (or destination) of the divide or sqrt operation. */
4853 return insn_default_latency (dep_insn) - 2;
4855 default:
4856 return 0;
4861 /* For other anti dependencies, the cost is 0. */
4862 return 0;
4864 case REG_DEP_OUTPUT:
4865 /* Output dependency; DEP_INSN writes a register that INSN writes some
4866 cycles later. */
4867 if (attr_type == TYPE_FPLOAD)
4869 rtx pat = PATTERN (insn);
4870 rtx dep_pat = PATTERN (dep_insn);
4871 if (GET_CODE (pat) == PARALLEL)
4873 /* This happens for the fldXs,mb patterns. */
4874 pat = XVECEXP (pat, 0, 0);
4876 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4877 /* If this happens, we have to extend this to schedule
4878 optimally. Return 0 for now. */
4879 return 0;
4881 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4883 if (! recog_memoized (dep_insn))
4884 return 0;
4885 switch (get_attr_type (dep_insn))
4887 case TYPE_FPALU:
4888 case TYPE_FPMULSGL:
4889 case TYPE_FPMULDBL:
4890 case TYPE_FPDIVSGL:
4891 case TYPE_FPDIVDBL:
4892 case TYPE_FPSQRTSGL:
4893 case TYPE_FPSQRTDBL:
4894 /* A fpload can't be issued until one cycle before a
4895 preceding arithmetic operation has finished if
4896 the target of the fpload is the destination of the
4897 arithmetic operation.
4899 Exception: For PA7100LC, PA7200 and PA7300, the cost
4900 is 3 cycles, unless they bundle together. We also
4901 pay the penalty if the second insn is a fpload. */
4902 return insn_default_latency (dep_insn) - 1;
4904 default:
4905 return 0;
4909 else if (attr_type == TYPE_FPALU)
4911 rtx pat = PATTERN (insn);
4912 rtx dep_pat = PATTERN (dep_insn);
4913 if (GET_CODE (pat) == PARALLEL)
4915 /* This happens for the fldXs,mb patterns. */
4916 pat = XVECEXP (pat, 0, 0);
4918 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4919 /* If this happens, we have to extend this to schedule
4920 optimally. Return 0 for now. */
4921 return 0;
4923 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4925 if (! recog_memoized (dep_insn))
4926 return 0;
4927 switch (get_attr_type (dep_insn))
4929 case TYPE_FPDIVSGL:
4930 case TYPE_FPDIVDBL:
4931 case TYPE_FPSQRTSGL:
4932 case TYPE_FPSQRTDBL:
4933 /* An ALU flop can't be issued until two cycles before a
4934 preceding divide or sqrt operation has finished if
4935 the target of the ALU flop is also the target of
4936 the divide or sqrt operation. */
4937 return insn_default_latency (dep_insn) - 2;
4939 default:
4940 return 0;
4945 /* For other output dependencies, the cost is 0. */
4946 return 0;
4948 default:
4949 gcc_unreachable ();
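/* Worked example of the anti-dependency case above: if an FP multiply
   (the dep_insn) has a default latency of 3 and is followed by an
   fpload whose target overlaps the multiply's operands, the adjusted
   cost is 3 - 1 = 2, so the load cannot issue until one cycle before
   the multiply finishes.  The latency here is illustrative, not taken
   from the pipeline descriptions.  */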
4953 /* Adjust scheduling priorities. We use this to try to keep addil
4954 and the next use of %r1 close together. */
4955 static int
4956 pa_adjust_priority (rtx_insn *insn, int priority)
4958 rtx set = single_set (insn);
4959 rtx src, dest;
4960 if (set)
4962 src = SET_SRC (set);
4963 dest = SET_DEST (set);
4964 if (GET_CODE (src) == LO_SUM
4965 && symbolic_operand (XEXP (src, 1), VOIDmode)
4966 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4967 priority >>= 3;
4969 else if (GET_CODE (src) == MEM
4970 && GET_CODE (XEXP (src, 0)) == LO_SUM
4971 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4972 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4973 priority >>= 1;
4975 else if (GET_CODE (dest) == MEM
4976 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4977 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4978 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4979 priority >>= 3;
4981 return priority;
4984 /* The 700 can only issue a single insn at a time.
4985 The 7XXX processors can issue two insns at a time.
4986 The 8000 can issue 4 insns at a time. */
4987 static int
4988 pa_issue_rate (void)
4990 switch (pa_cpu)
4992 case PROCESSOR_700: return 1;
4993 case PROCESSOR_7100: return 2;
4994 case PROCESSOR_7100LC: return 2;
4995 case PROCESSOR_7200: return 2;
4996 case PROCESSOR_7300: return 2;
4997 case PROCESSOR_8000: return 4;
4999 default:
5000 gcc_unreachable ();
5006 /* Return any length plus adjustment needed by INSN which already has
5007 its length computed as LENGTH. Return LENGTH if no adjustment is
5008 necessary.
5010 Also compute the length of an inline block move here as it is too
5011 complicated to express as a length attribute in pa.md. */
5012 int
5013 pa_adjust_insn_length (rtx_insn *insn, int length)
5015 rtx pat = PATTERN (insn);
5017 /* If length is negative or undefined, provide the initial length; the unsigned cast maps any negative value above INT_MAX. */
5018 if ((unsigned int) length >= INT_MAX)
5020 if (GET_CODE (pat) == SEQUENCE)
5021 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5023 switch (get_attr_type (insn))
5025 case TYPE_MILLI:
5026 length = pa_attr_length_millicode_call (insn);
5027 break;
5028 case TYPE_CALL:
5029 length = pa_attr_length_call (insn, 0);
5030 break;
5031 case TYPE_SIBCALL:
5032 length = pa_attr_length_call (insn, 1);
5033 break;
5034 case TYPE_DYNCALL:
5035 length = pa_attr_length_indirect_call (insn);
5036 break;
5037 case TYPE_SH_FUNC_ADRS:
5038 length = pa_attr_length_millicode_call (insn) + 20;
5039 break;
5040 default:
5041 gcc_unreachable ();
5045 /* Block move pattern. */
5046 if (NONJUMP_INSN_P (insn)
5047 && GET_CODE (pat) == PARALLEL
5048 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5049 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5050 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5051 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5052 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5053 length += compute_movmem_length (insn) - 4;
5054 /* Block clear pattern. */
5055 else if (NONJUMP_INSN_P (insn)
5056 && GET_CODE (pat) == PARALLEL
5057 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5058 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5059 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5060 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5061 length += compute_clrmem_length (insn) - 4;
5062 /* Conditional branch with an unfilled delay slot. */
5063 else if (JUMP_P (insn) && ! simplejump_p (insn))
5065 /* Adjust a short backwards conditional with an unfilled delay slot. */
5066 if (GET_CODE (pat) == SET
5067 && length == 4
5068 && JUMP_LABEL (insn) != NULL_RTX
5069 && ! forward_branch_p (insn))
5070 length += 4;
5071 else if (GET_CODE (pat) == PARALLEL
5072 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5073 && length == 4)
5074 length += 4;
5075 /* Adjust dbra insn with short backwards conditional branch with
5076 unfilled delay slot -- only for case where counter is in a
5077 general register. */
5078 else if (GET_CODE (pat) == PARALLEL
5079 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5080 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5081 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5082 && length == 4
5083 && ! forward_branch_p (insn))
5084 length += 4;
5086 return length;
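/* Example: a short backwards conditional branch with an unfilled
   delay slot comes in with a length of 4 from the attribute, and the
   adjustment above raises it to 8 to cover the nop emitted for the
   slot.  */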
5089 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5091 static bool
5092 pa_print_operand_punct_valid_p (unsigned char code)
5094 if (code == '@'
5095 || code == '#'
5096 || code == '*'
5097 || code == '^')
5098 return true;
5100 return false;
5103 /* Print operand X (an rtx) in assembler syntax to file FILE.
5104 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5105 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5107 void
5108 pa_print_operand (FILE *file, rtx x, int code)
5110 switch (code)
5112 case '#':
5113 /* Output a 'nop' if there's nothing for the delay slot. */
5114 if (dbr_sequence_length () == 0)
5115 fputs ("\n\tnop", file);
5116 return;
5117 case '*':
5118 /* Output a nullification completer if there's nothing for the
5119 delay slot or nullification is requested. */
5120 if (dbr_sequence_length () == 0 ||
5121 (final_sequence &&
5122 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5123 fputs (",n", file);
5124 return;
5125 case 'R':
5126 /* Print out the second register name of a register pair.
5127 I.e., R (6) => 7. */
5128 fputs (reg_names[REGNO (x) + 1], file);
5129 return;
5130 case 'r':
5131 /* A register or zero. */
5132 if (x == const0_rtx
5133 || (x == CONST0_RTX (DFmode))
5134 || (x == CONST0_RTX (SFmode)))
5136 fputs ("%r0", file);
5137 return;
5139 else
5140 break;
5141 case 'f':
5142 /* A register or zero (floating point). */
5143 if (x == const0_rtx
5144 || (x == CONST0_RTX (DFmode))
5145 || (x == CONST0_RTX (SFmode)))
5147 fputs ("%fr0", file);
5148 return;
5150 else
5151 break;
5152 case 'A':
5154 rtx xoperands[2];
5156 xoperands[0] = XEXP (XEXP (x, 0), 0);
5157 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5158 pa_output_global_address (file, xoperands[1], 0);
5159 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5160 return;
5163 case 'C': /* Plain (C)ondition */
5164 case 'X':
5165 switch (GET_CODE (x))
5167 case EQ:
5168 fputs ("=", file); break;
5169 case NE:
5170 fputs ("<>", file); break;
5171 case GT:
5172 fputs (">", file); break;
5173 case GE:
5174 fputs (">=", file); break;
5175 case GEU:
5176 fputs (">>=", file); break;
5177 case GTU:
5178 fputs (">>", file); break;
5179 case LT:
5180 fputs ("<", file); break;
5181 case LE:
5182 fputs ("<=", file); break;
5183 case LEU:
5184 fputs ("<<=", file); break;
5185 case LTU:
5186 fputs ("<<", file); break;
5187 default:
5188 gcc_unreachable ();
5190 return;
5191 case 'N': /* Condition, (N)egated */
5192 switch (GET_CODE (x))
5194 case EQ:
5195 fputs ("<>", file); break;
5196 case NE:
5197 fputs ("=", file); break;
5198 case GT:
5199 fputs ("<=", file); break;
5200 case GE:
5201 fputs ("<", file); break;
5202 case GEU:
5203 fputs ("<<", file); break;
5204 case GTU:
5205 fputs ("<<=", file); break;
5206 case LT:
5207 fputs (">=", file); break;
5208 case LE:
5209 fputs (">", file); break;
5210 case LEU:
5211 fputs (">>", file); break;
5212 case LTU:
5213 fputs (">>=", file); break;
5214 default:
5215 gcc_unreachable ();
5217 return;
5218 /* For floating point comparisons. Note that the output
5219 predicates are the complement of the desired condition. The
5220 conditions for GT, GE, LT, LE and LTGT cause an invalid
5221 operation exception if the result is unordered and this
5222 exception is enabled in the floating-point status register. */
5223 case 'Y':
5224 switch (GET_CODE (x))
5226 case EQ:
5227 fputs ("!=", file); break;
5228 case NE:
5229 fputs ("=", file); break;
5230 case GT:
5231 fputs ("!>", file); break;
5232 case GE:
5233 fputs ("!>=", file); break;
5234 case LT:
5235 fputs ("!<", file); break;
5236 case LE:
5237 fputs ("!<=", file); break;
5238 case LTGT:
5239 fputs ("!<>", file); break;
5240 case UNLE:
5241 fputs ("!?<=", file); break;
5242 case UNLT:
5243 fputs ("!?<", file); break;
5244 case UNGE:
5245 fputs ("!?>=", file); break;
5246 case UNGT:
5247 fputs ("!?>", file); break;
5248 case UNEQ:
5249 fputs ("!?=", file); break;
5250 case UNORDERED:
5251 fputs ("!?", file); break;
5252 case ORDERED:
5253 fputs ("?", file); break;
5254 default:
5255 gcc_unreachable ();
5257 return;
5258 case 'S': /* Condition, operands are (S)wapped. */
5259 switch (GET_CODE (x))
5261 case EQ:
5262 fputs ("=", file); break;
5263 case NE:
5264 fputs ("<>", file); break;
5265 case GT:
5266 fputs ("<", file); break;
5267 case GE:
5268 fputs ("<=", file); break;
5269 case GEU:
5270 fputs ("<<=", file); break;
5271 case GTU:
5272 fputs ("<<", file); break;
5273 case LT:
5274 fputs (">", file); break;
5275 case LE:
5276 fputs (">=", file); break;
5277 case LEU:
5278 fputs (">>=", file); break;
5279 case LTU:
5280 fputs (">>", file); break;
5281 default:
5282 gcc_unreachable ();
5284 return;
5285 case 'B': /* Condition, (B)oth swapped and negate. */
5286 switch (GET_CODE (x))
5288 case EQ:
5289 fputs ("<>", file); break;
5290 case NE:
5291 fputs ("=", file); break;
5292 case GT:
5293 fputs (">=", file); break;
5294 case GE:
5295 fputs (">", file); break;
5296 case GEU:
5297 fputs (">>", file); break;
5298 case GTU:
5299 fputs (">>=", file); break;
5300 case LT:
5301 fputs ("<=", file); break;
5302 case LE:
5303 fputs ("<", file); break;
5304 case LEU:
5305 fputs ("<<", file); break;
5306 case LTU:
5307 fputs ("<<=", file); break;
5308 default:
5309 gcc_unreachable ();
5311 return;
5312 case 'k':
5313 gcc_assert (GET_CODE (x) == CONST_INT);
5314 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5315 return;
5316 case 'Q':
5317 gcc_assert (GET_CODE (x) == CONST_INT);
5318 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5319 return;
5320 case 'L':
5321 gcc_assert (GET_CODE (x) == CONST_INT);
5322 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5323 return;
5324 case 'o':
5325 gcc_assert (GET_CODE (x) == CONST_INT
5326 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5327 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5328 return;
5329 case 'O':
5330 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5331 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5332 return;
5333 case 'p':
5334 gcc_assert (GET_CODE (x) == CONST_INT);
5335 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5336 return;
5337 case 'P':
5338 gcc_assert (GET_CODE (x) == CONST_INT);
5339 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5340 return;
5341 case 'I':
5342 if (GET_CODE (x) == CONST_INT)
5343 fputs ("i", file);
5344 return;
5345 case 'M':
5346 case 'F':
5347 switch (GET_CODE (XEXP (x, 0)))
5349 case PRE_DEC:
5350 case PRE_INC:
5351 if (ASSEMBLER_DIALECT == 0)
5352 fputs ("s,mb", file);
5353 else
5354 fputs (",mb", file);
5355 break;
5356 case POST_DEC:
5357 case POST_INC:
5358 if (ASSEMBLER_DIALECT == 0)
5359 fputs ("s,ma", file);
5360 else
5361 fputs (",ma", file);
5362 break;
5363 case PLUS:
5364 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5365 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5367 if (ASSEMBLER_DIALECT == 0)
5368 fputs ("x", file);
5370 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5371 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5373 if (ASSEMBLER_DIALECT == 0)
5374 fputs ("x,s", file);
5375 else
5376 fputs (",s", file);
5378 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5379 fputs ("s", file);
5380 break;
5381 default:
5382 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5383 fputs ("s", file);
5384 break;
5386 return;
5387 case 'G':
5388 pa_output_global_address (file, x, 0);
5389 return;
5390 case 'H':
5391 pa_output_global_address (file, x, 1);
5392 return;
5393 case 0: /* Don't do anything special */
5394 break;
5395 case 'Z':
5397 unsigned op[3];
5398 compute_zdepwi_operands (INTVAL (x), op);
5399 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5400 return;
5402 case 'z':
5404 unsigned op[3];
5405 compute_zdepdi_operands (INTVAL (x), op);
5406 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5407 return;
5409 case 'c':
5410 /* We can get here from a .vtable_inherit due to our
5411 CONSTANT_ADDRESS_P rejecting perfectly good constant
5412 addresses. */
5413 break;
5414 default:
5415 gcc_unreachable ();
5417 if (GET_CODE (x) == REG)
5419 fputs (reg_names [REGNO (x)], file);
5420 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5422 fputs ("R", file);
5423 return;
5425 if (FP_REG_P (x)
5426 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5427 && (REGNO (x) & 1) == 0)
5428 fputs ("L", file);
5430 else if (GET_CODE (x) == MEM)
5432 int size = GET_MODE_SIZE (GET_MODE (x));
5433 rtx base = NULL_RTX;
5434 switch (GET_CODE (XEXP (x, 0)))
5436 case PRE_DEC:
5437 case POST_DEC:
5438 base = XEXP (XEXP (x, 0), 0);
5439 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5440 break;
5441 case PRE_INC:
5442 case POST_INC:
5443 base = XEXP (XEXP (x, 0), 0);
5444 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5445 break;
5446 case PLUS:
5447 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5448 fprintf (file, "%s(%s)",
5449 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5450 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5451 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5452 fprintf (file, "%s(%s)",
5453 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5454 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5455 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5456 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5458 /* Because the REG_POINTER flag can get lost during reload,
5459 pa_legitimate_address_p canonicalizes the order of the
5460 index and base registers in the combined move patterns. */
5461 rtx base = XEXP (XEXP (x, 0), 1);
5462 rtx index = XEXP (XEXP (x, 0), 0);
5464 fprintf (file, "%s(%s)",
5465 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5467 else
5468 output_address (GET_MODE (x), XEXP (x, 0));
5469 break;
5470 default:
5471 output_address (GET_MODE (x), XEXP (x, 0));
5472 break;
5475 else
5476 output_addr_const (file, x);
5479 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5481 void
5482 pa_output_global_address (FILE *file, rtx x, int round_constant)
5485 /* Imagine (high (const (plus ...))). */
5486 if (GET_CODE (x) == HIGH)
5487 x = XEXP (x, 0);
5489 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5490 output_addr_const (file, x);
5491 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5493 output_addr_const (file, x);
5494 fputs ("-$global$", file);
5496 else if (GET_CODE (x) == CONST)
5498 const char *sep = "";
5499 int offset = 0; /* assembler wants -$global$ at end */
5500 rtx base = NULL_RTX;
5502 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5504 case LABEL_REF:
5505 case SYMBOL_REF:
5506 base = XEXP (XEXP (x, 0), 0);
5507 output_addr_const (file, base);
5508 break;
5509 case CONST_INT:
5510 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5511 break;
5512 default:
5513 gcc_unreachable ();
5516 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5518 case LABEL_REF:
5519 case SYMBOL_REF:
5520 base = XEXP (XEXP (x, 0), 1);
5521 output_addr_const (file, base);
5522 break;
5523 case CONST_INT:
5524 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5525 break;
5526 default:
5527 gcc_unreachable ();
5530 /* How bogus. The compiler is apparently responsible for
5531 rounding the constant if it uses an LR field selector.
5533 The linker and/or assembler seem a better place since
5534 they have to do this kind of thing already.
5536 If we fail to do this, HP's optimizing linker may eliminate
5537 an addil, but not update the ldw/stw/ldo instruction that
5538 uses the result of the addil. */
5539 if (round_constant)
5540 offset = ((offset + 0x1000) & ~0x1fff);
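/* Worked example: an offset of 0x1234 becomes
   (0x1234 + 0x1000) & ~0x1fff = 0x2000, i.e. the offset is rounded to
   the nearest multiple of 8k (0x2000) to match the assembler's LR
   field-selector arithmetic.  */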
5542 switch (GET_CODE (XEXP (x, 0)))
5544 case PLUS:
5545 if (offset < 0)
5547 offset = -offset;
5548 sep = "-";
5550 else
5551 sep = "+";
5552 break;
5554 case MINUS:
5555 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5556 sep = "-";
5557 break;
5559 default:
5560 gcc_unreachable ();
5563 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5564 fputs ("-$global$", file);
5565 if (offset)
5566 fprintf (file, "%s%d", sep, offset);
5568 else
5569 output_addr_const (file, x);
5572 /* Output boilerplate text to appear at the beginning of the file.
5573 There are several possible versions. */
5574 #define aputs(x) fputs(x, asm_out_file)
5575 static inline void
5576 pa_file_start_level (void)
5578 if (TARGET_64BIT)
5579 aputs ("\t.LEVEL 2.0w\n");
5580 else if (TARGET_PA_20)
5581 aputs ("\t.LEVEL 2.0\n");
5582 else if (TARGET_PA_11)
5583 aputs ("\t.LEVEL 1.1\n");
5584 else
5585 aputs ("\t.LEVEL 1.0\n");
5588 static inline void
5589 pa_file_start_space (int sortspace)
5591 aputs ("\t.SPACE $PRIVATE$");
5592 if (sortspace)
5593 aputs (",SORT=16");
5594 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5595 if (flag_tm)
5596 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5597 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5598 "\n\t.SPACE $TEXT$");
5599 if (sortspace)
5600 aputs (",SORT=8");
5601 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5602 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5605 static inline void
5606 pa_file_start_file (int want_version)
5608 if (write_symbols != NO_DEBUG)
5610 output_file_directive (asm_out_file, main_input_filename);
5611 if (want_version)
5612 aputs ("\t.version\t\"01.01\"\n");
5616 static inline void
5617 pa_file_start_mcount (const char *aswhat)
5619 if (profile_flag)
5620 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5623 static void
5624 pa_elf_file_start (void)
5626 pa_file_start_level ();
5627 pa_file_start_mcount ("ENTRY");
5628 pa_file_start_file (0);
5631 static void
5632 pa_som_file_start (void)
5634 pa_file_start_level ();
5635 pa_file_start_space (0);
5636 aputs ("\t.IMPORT $global$,DATA\n"
5637 "\t.IMPORT $$dyncall,MILLICODE\n");
5638 pa_file_start_mcount ("CODE");
5639 pa_file_start_file (0);
5642 static void
5643 pa_linux_file_start (void)
5645 pa_file_start_file (1);
5646 pa_file_start_level ();
5647 pa_file_start_mcount ("CODE");
5650 static void
5651 pa_hpux64_gas_file_start (void)
5653 pa_file_start_level ();
5654 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5655 if (profile_flag)
5656 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5657 #endif
5658 pa_file_start_file (1);
5661 static void
5662 pa_hpux64_hpas_file_start (void)
5664 pa_file_start_level ();
5665 pa_file_start_space (1);
5666 pa_file_start_mcount ("CODE");
5667 pa_file_start_file (0);
5669 #undef aputs
5671 /* Search the deferred plabel list for SYMBOL and return its internal
5672 label. If an entry for SYMBOL is not found, a new entry is created. */
5674 rtx
5675 pa_get_deferred_plabel (rtx symbol)
5677 const char *fname = XSTR (symbol, 0);
5678 size_t i;
5680 /* See if we have already put this function on the list of deferred
5681 plabels. This list is generally small, so a linear search is not
5682 too ugly. If it proves too slow, replace it with something faster. */
5683 for (i = 0; i < n_deferred_plabels; i++)
5684 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5685 break;
5687 /* If the deferred plabel list is empty, or this entry was not found
5688 on the list, create a new entry on the list. */
5689 if (deferred_plabels == NULL || i == n_deferred_plabels)
5691 tree id;
5693 if (deferred_plabels == 0)
5694 deferred_plabels = ggc_alloc<deferred_plabel> ();
5695 else
5696 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5697 deferred_plabels,
5698 n_deferred_plabels + 1);
5700 i = n_deferred_plabels++;
5701 deferred_plabels[i].internal_label = gen_label_rtx ();
5702 deferred_plabels[i].symbol = symbol;
5704 /* Gross. We have just implicitly taken the address of this
5705 function. Mark it in the same manner as assemble_name. */
5706 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5707 if (id)
5708 mark_referenced (id);
5711 return deferred_plabels[i].internal_label;
5714 static void
5715 output_deferred_plabels (void)
5717 size_t i;
5719 /* If we have some deferred plabels, then we need to switch into the
5720 data or readonly data section, and align it to a 4 byte boundary
5721 before outputting the deferred plabels. */
5722 if (n_deferred_plabels)
5724 switch_to_section (flag_pic ? data_section : readonly_data_section);
5725 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5728 /* Now output the deferred plabels. */
5729 for (i = 0; i < n_deferred_plabels; i++)
5731 targetm.asm_out.internal_label (asm_out_file, "L",
5732 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5733 assemble_integer (deferred_plabels[i].symbol,
5734 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5738 /* Initialize optabs to point to emulation routines. */
5740 static void
5741 pa_init_libfuncs (void)
5743 if (HPUX_LONG_DOUBLE_LIBRARY)
5745 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5746 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5747 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5748 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5749 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5750 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5751 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5752 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5753 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5755 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5756 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5757 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5758 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5759 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5760 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5761 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5763 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5764 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5765 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5766 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5768 set_conv_libfunc (sfix_optab, SImode, TFmode,
5769 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5770 : "_U_Qfcnvfxt_quad_to_sgl");
5771 set_conv_libfunc (sfix_optab, DImode, TFmode,
5772 "_U_Qfcnvfxt_quad_to_dbl");
5773 set_conv_libfunc (ufix_optab, SImode, TFmode,
5774 "_U_Qfcnvfxt_quad_to_usgl");
5775 set_conv_libfunc (ufix_optab, DImode, TFmode,
5776 "_U_Qfcnvfxt_quad_to_udbl");
5778 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5779 "_U_Qfcnvxf_sgl_to_quad");
5780 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5781 "_U_Qfcnvxf_dbl_to_quad");
5782 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5783 "_U_Qfcnvxf_usgl_to_quad");
5784 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5785 "_U_Qfcnvxf_udbl_to_quad");
5788 if (TARGET_SYNC_LIBCALL)
5789 init_sync_libfuncs (8);
5792 /* HP's millicode routines mean something special to the assembler.
5793 Keep track of which ones we have used. */
5795 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5796 static void import_milli (enum millicodes);
5797 static char imported[(int) end1000];
5798 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5799 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5800 #define MILLI_START 10
5802 static void
5803 import_milli (enum millicodes code)
5805 char str[sizeof (import_string)];
5807 if (!imported[(int) code])
5809 imported[(int) code] = 1;
5810 strcpy (str, import_string);
5811 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5812 output_asm_insn (str, 0);
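/* Illustrative sketch, not from the sources: MILLI_START (10) is the
   offset of the four-character name field after the "$$" prefix in
   import_string, so for code == mulI the strncpy above rewrites the
   template into

	.IMPORT $$mulI,MILLICODE

   and the imported[] flag ensures the directive is emitted at most
   once per millicode routine.  */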
5816 /* The register constraints have put the operands and return value in
5817 the proper registers. */
5819 const char *
5820 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5822 import_milli (mulI);
5823 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5826 /* Emit the rtl for doing a division by a constant. */
5828 /* Do magic division millicodes exist for this value? */
5829 const int pa_magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5831 /* We'll use an array to keep track of the magic millicodes and
5832 whether or not we've used them already. [n][0] is signed, [n][1] is
5833 unsigned. */
5835 static int div_milli[16][2];
5838 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5840 if (GET_CODE (operands[2]) == CONST_INT
5841 && INTVAL (operands[2]) > 0
5842 && INTVAL (operands[2]) < 16
5843 && pa_magic_milli[INTVAL (operands[2])])
5845 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5847 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5848 emit
5849 (gen_rtx_PARALLEL
5850 (VOIDmode,
5851 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5852 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5853 SImode,
5854 gen_rtx_REG (SImode, 26),
5855 operands[2])),
5856 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5857 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5858 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5859 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5860 gen_rtx_CLOBBER (VOIDmode, ret))));
5861 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5862 return 1;
5864 return 0;
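/* Illustrative sketch, not from the sources: for a division such as
   x / 7 with !unsignedp, the expander above copies x into %r26, emits
   a PARALLEL computing %r29 = %r26 / 7 while clobbering %r25, %r26,
   the two pattern scratch operands, and the millicode return register
   (%r31, or %r2 for TARGET_64BIT), then copies %r29 into the
   destination.  pa_output_div_insn later renders the PARALLEL as a
   call to the magic millicode routine $$divI_7, per the table in
   pa_magic_milli above.  */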
5867 const char *
5868 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5870 int divisor;
5872 /* If the divisor is a constant, try to use one of the special
5873 opcodes. */
5874 if (GET_CODE (operands[0]) == CONST_INT)
5876 static char buf[100];
5877 divisor = INTVAL (operands[0]);
5878 if (!div_milli[divisor][unsignedp])
5880 div_milli[divisor][unsignedp] = 1;
5881 if (unsignedp)
5882 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5883 else
5884 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5886 if (unsignedp)
5888 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5889 INTVAL (operands[0]));
5890 return pa_output_millicode_call (insn,
5891 gen_rtx_SYMBOL_REF (SImode, buf));
5893 else
5895 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5896 INTVAL (operands[0]));
5897 return pa_output_millicode_call (insn,
5898 gen_rtx_SYMBOL_REF (SImode, buf));
5901 /* Divisor isn't a special constant. */
5902 else
5904 if (unsignedp)
5906 import_milli (divU);
5907 return pa_output_millicode_call (insn,
5908 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5910 else
5912 import_milli (divI);
5913 return pa_output_millicode_call (insn,
5914 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5919 /* Output a $$rem millicode to do mod. */
5921 const char *
5922 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5924 if (unsignedp)
5926 import_milli (remU);
5927 return pa_output_millicode_call (insn,
5928 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5930 else
5932 import_milli (remI);
5933 return pa_output_millicode_call (insn,
5934 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5938 void
5939 pa_output_arg_descriptor (rtx_insn *call_insn)
5941 const char *arg_regs[4];
5942 machine_mode arg_mode;
5943 rtx link;
5944 int i, output_flag = 0;
5945 int regno;
5947 /* We neither need nor want argument location descriptors for the
5948 64-bit runtime environment or the ELF32 environment. */
5949 if (TARGET_64BIT || TARGET_ELF32)
5950 return;
5952 for (i = 0; i < 4; i++)
5953 arg_regs[i] = 0;
5955 /* Specify explicitly that no argument relocations should take place
5956 if using the portable runtime calling conventions. */
5957 if (TARGET_PORTABLE_RUNTIME)
5959 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5960 asm_out_file);
5961 return;
5964 gcc_assert (CALL_P (call_insn));
5965 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5966 link; link = XEXP (link, 1))
5968 rtx use = XEXP (link, 0);
5970 if (! (GET_CODE (use) == USE
5971 && GET_CODE (XEXP (use, 0)) == REG
5972 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5973 continue;
5975 arg_mode = GET_MODE (XEXP (use, 0));
5976 regno = REGNO (XEXP (use, 0));
5977 if (regno >= 23 && regno <= 26)
5979 arg_regs[26 - regno] = "GR";
5980 if (arg_mode == DImode)
5981 arg_regs[25 - regno] = "GR";
5983 else if (regno >= 32 && regno <= 39)
5985 if (arg_mode == SFmode)
5986 arg_regs[(regno - 32) / 2] = "FR";
5987 else
5989 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5990 arg_regs[(regno - 34) / 2] = "FR";
5991 arg_regs[(regno - 34) / 2 + 1] = "FU";
5992 #else
5993 arg_regs[(regno - 34) / 2] = "FU";
5994 arg_regs[(regno - 34) / 2 + 1] = "FR";
5995 #endif
5999 fputs ("\t.CALL ", asm_out_file);
6000 for (i = 0; i < 4; i++)
6002 if (arg_regs[i])
6004 if (output_flag++)
6005 fputc (',', asm_out_file);
6006 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6009 fputc ('\n', asm_out_file);
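/* Illustrative sketch, not from the sources: for a call that passes
   two ints in %r26 and %r25 plus a float homed in GCC register 36
   (argument word 2), the loop above would emit

	.CALL ARGW0=GR,ARGW1=GR,ARGW2=FR

   telling the linker which argument words may need argument
   relocation stubs when caller and callee disagree on location.  */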
6012 /* Inform reload about cases where moving X with a mode MODE to or from
6013 a register in RCLASS requires an extra scratch or immediate register.
6014 Return the class needed for the immediate register. */
6016 static reg_class_t
6017 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6018 machine_mode mode, secondary_reload_info *sri)
6020 int regno;
6021 enum reg_class rclass = (enum reg_class) rclass_i;
6023 /* Handle the easy stuff first. */
6024 if (rclass == R1_REGS)
6025 return NO_REGS;
6027 if (REG_P (x))
6029 regno = REGNO (x);
6030 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6031 return NO_REGS;
6033 else
6034 regno = -1;
6036 /* If we have something like (mem (mem (...)), we can safely assume the
6037 inner MEM will end up in a general register after reloading, so there's
6038 no need for a secondary reload. */
6039 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6040 return NO_REGS;
6042 /* Trying to load a constant into a FP register during PIC code
6043 generation requires %r1 as a scratch register. For float modes,
6044 the only legitimate constant is CONST0_RTX. However, there are
6045 a few patterns that accept constant double operands. */
6046 if (flag_pic
6047 && FP_REG_CLASS_P (rclass)
6048 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6050 switch (mode)
6052 case SImode:
6053 sri->icode = CODE_FOR_reload_insi_r1;
6054 break;
6056 case DImode:
6057 sri->icode = CODE_FOR_reload_indi_r1;
6058 break;
6060 case SFmode:
6061 sri->icode = CODE_FOR_reload_insf_r1;
6062 break;
6064 case DFmode:
6065 sri->icode = CODE_FOR_reload_indf_r1;
6066 break;
6068 default:
6069 gcc_unreachable ();
6071 return NO_REGS;
6074 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6075 register when we're generating PIC code or when the operand isn't
6076 readonly. */
6077 if (pa_symbolic_expression_p (x))
6079 if (GET_CODE (x) == HIGH)
6080 x = XEXP (x, 0);
6082 if (flag_pic || !read_only_operand (x, VOIDmode))
6084 switch (mode)
6086 case SImode:
6087 sri->icode = CODE_FOR_reload_insi_r1;
6088 break;
6090 case DImode:
6091 sri->icode = CODE_FOR_reload_indi_r1;
6092 break;
6094 default:
6095 gcc_unreachable ();
6097 return NO_REGS;
6101 /* Profiling showed the PA port spends about 1.3% of its compilation
6102 time in true_regnum from calls inside pa_secondary_reload_class. */
6103 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6104 regno = true_regnum (x);
6106 /* Handle reloads for floating point loads and stores. */
6107 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6108 && FP_REG_CLASS_P (rclass))
6110 if (MEM_P (x))
6112 x = XEXP (x, 0);
6114 /* We don't need a secondary reload for indexed memory addresses.
6116 When INT14_OK_STRICT is true, it might appear that we could
6117 directly allow register indirect memory addresses. However,
6118 this doesn't work because we don't support SUBREGs in
6119 floating-point register copies and reload doesn't tell us
6120 when it's going to use a SUBREG. */
6121 if (IS_INDEX_ADDR_P (x))
6122 return NO_REGS;
6125 /* Request a secondary reload with a general scratch register
6126 for everything else. ??? Could symbolic operands be handled
6127 directly when generating non-pic PA 2.0 code? */
6128 sri->icode = (in_p
6129 ? direct_optab_handler (reload_in_optab, mode)
6130 : direct_optab_handler (reload_out_optab, mode));
6131 return NO_REGS;
6134 /* A SAR<->FP register copy requires an intermediate general register
6135 and secondary memory. We need a secondary reload with a general
6136 scratch register for spills. */
6137 if (rclass == SHIFT_REGS)
6139 /* Handle spill. */
6140 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6142 sri->icode = (in_p
6143 ? direct_optab_handler (reload_in_optab, mode)
6144 : direct_optab_handler (reload_out_optab, mode));
6145 return NO_REGS;
6148 /* Handle FP copy. */
6149 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6150 return GENERAL_REGS;
6153 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6154 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6155 && FP_REG_CLASS_P (rclass))
6156 return GENERAL_REGS;
6158 return NO_REGS;
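/* Illustrative sketch, not from the sources, of two cases handled
   above.  Reloading a symbolic address under flag_pic sets sri->icode
   to CODE_FOR_reload_insi_r1 (or the DImode variant) so %r1 is
   available as a scratch, and NO_REGS is returned because no extra
   register class is needed.  Copying between SAR (SHIFT_REGS) and an
   FP register returns GENERAL_REGS, so reload routes the value
   SAR <-> general register <-> memory <-> FP register.  */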
6161 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6162 is only marked as live on entry by df-scan when it is a fixed
6163 register. It isn't a fixed register in the 64-bit runtime,
6164 so we need to mark it here. */
6166 static void
6167 pa_extra_live_on_entry (bitmap regs)
6169 if (TARGET_64BIT)
6170 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6173 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6174 to prevent it from being deleted. */
6177 pa_eh_return_handler_rtx (void)
6179 rtx tmp;
6181 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6182 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6183 tmp = gen_rtx_MEM (word_mode, tmp);
6184 tmp->volatil = 1;
6185 return tmp;
6188 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6189 by invisible reference. As a GCC extension, we also pass anything
6190 with a zero or variable size by reference.
6192 The 64-bit runtime does not describe passing any types by invisible
6193 reference. The internals of GCC can't currently handle passing
6194 empty structures, and zero or variable length arrays when they are
6195 not passed entirely on the stack or by reference. Thus, as a GCC
6196 extension, we pass these types by reference. The HP compiler doesn't
6197 support these types, so hopefully there shouldn't be any compatibility
6198 issues. This may have to be revisited when HP releases a C99 compiler
6199 or updates the ABI. */
6201 static bool
6202 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6203 machine_mode mode, const_tree type,
6204 bool named ATTRIBUTE_UNUSED)
6206 HOST_WIDE_INT size;
6208 if (type)
6209 size = int_size_in_bytes (type);
6210 else
6211 size = GET_MODE_SIZE (mode);
6213 if (TARGET_64BIT)
6214 return size <= 0;
6215 else
6216 return size <= 0 || size > 8;
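/* Illustrative sketch, not from the sources: under the 32-bit runtime
   a 12-byte struct (size > 8) is passed by invisible reference, while
   an 8-byte struct is passed by value; under the 64-bit runtime only
   zero-sized and variable-sized types (size <= 0) go by reference.  */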
6219 enum direction
6220 pa_function_arg_padding (machine_mode mode, const_tree type)
6222 if (mode == BLKmode
6223 || (TARGET_64BIT
6224 && type
6225 && (AGGREGATE_TYPE_P (type)
6226 || TREE_CODE (type) == COMPLEX_TYPE
6227 || TREE_CODE (type) == VECTOR_TYPE)))
6229 /* Return none if justification is not required. */
6230 if (type
6231 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6232 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6233 return none;
6235 /* The directions set here are ignored when a BLKmode argument larger
6236 than a word is placed in a register. Different code is used for
6237 the stack and registers. This makes it difficult to have a
6238 consistent data representation for both the stack and registers.
6239 For both runtimes, the justification and padding for arguments on
6240 the stack and in registers should be identical. */
6241 if (TARGET_64BIT)
6242 /* The 64-bit runtime specifies left justification for aggregates. */
6243 return upward;
6244 else
6245 /* The 32-bit runtime architecture specifies right justification.
6246 When the argument is passed on the stack, the argument is padded
6247 with garbage on the left. The HP compiler pads with zeros. */
6248 return downward;
6251 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6252 return downward;
6253 else
6254 return none;
6258 /* Do what is necessary for `va_start'. We look at the current function
6259 to determine if stdargs or varargs is used and fill in an initial
6260 va_list. A pointer to this constructor is returned. */
6262 static rtx
6263 hppa_builtin_saveregs (void)
6265 rtx offset, dest;
6266 tree fntype = TREE_TYPE (current_function_decl);
6267 int argadj = ((!stdarg_p (fntype))
6268 ? UNITS_PER_WORD : 0);
6270 if (argadj)
6271 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6272 else
6273 offset = crtl->args.arg_offset_rtx;
6275 if (TARGET_64BIT)
6277 int i, off;
6279 /* Adjust for varargs/stdarg differences. */
6280 if (argadj)
6281 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6282 else
6283 offset = crtl->args.arg_offset_rtx;
6285 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6286 from the incoming arg pointer and growing to larger addresses. */
6287 for (i = 26, off = -64; i >= 19; i--, off += 8)
6288 emit_move_insn (gen_rtx_MEM (word_mode,
6289 plus_constant (Pmode,
6290 arg_pointer_rtx, off)),
6291 gen_rtx_REG (word_mode, i));
6293 /* The incoming args pointer points just beyond the flushback area;
6294 normally this is not a serious concern. However, when we are doing
6295 varargs/stdargs we want to make the arg pointer point to the start
6296 of the incoming argument area. */
6297 emit_move_insn (virtual_incoming_args_rtx,
6298 plus_constant (Pmode, arg_pointer_rtx, -64));
6300 /* Now return a pointer to the first anonymous argument. */
6301 return copy_to_reg (expand_binop (Pmode, add_optab,
6302 virtual_incoming_args_rtx,
6303 offset, 0, 0, OPTAB_LIB_WIDEN));
6306 /* Store general registers on the stack. */
6307 dest = gen_rtx_MEM (BLKmode,
6308 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6309 -16));
6310 set_mem_alias_set (dest, get_varargs_alias_set ());
6311 set_mem_align (dest, BITS_PER_WORD);
6312 move_block_from_reg (23, dest, 4);
6314 /* move_block_from_reg will emit code to store the argument registers
6315 individually as scalar stores.
6317 However, other insns may later load from the same addresses for
6318 a structure load (passing a struct to a varargs routine).
6320 The alias code assumes that such aliasing can never happen, so we
6321 have to keep memory referencing insns from moving up beyond the
6322 last argument register store. So we emit a blockage insn here. */
6323 emit_insn (gen_blockage ());
6325 return copy_to_reg (expand_binop (Pmode, add_optab,
6326 crtl->args.internal_arg_pointer,
6327 offset, 0, 0, OPTAB_LIB_WIDEN));
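/* Illustrative sketch, not from the sources, of the register save
   areas built above.  For TARGET_64BIT the loop homes %r26 at AP-64
   up through %r19 at AP-8, where AP is the incoming arg pointer.  For
   the 32-bit runtime, move_block_from_reg stores %r23 at IAP-16,
   %r24 at IAP-12, %r25 at IAP-8 and %r26 at IAP-4, matching the
   caller-side homes of argument words 3..0.  */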
6330 static void
6331 hppa_va_start (tree valist, rtx nextarg)
6333 nextarg = expand_builtin_saveregs ();
6334 std_expand_builtin_va_start (valist, nextarg);
6337 static tree
6338 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6339 gimple_seq *post_p)
6341 if (TARGET_64BIT)
6343 /* Args grow upward. We can use the generic routines. */
6344 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6346 else /* !TARGET_64BIT */
6348 tree ptr = build_pointer_type (type);
6349 tree valist_type;
6350 tree t, u;
6351 unsigned int size, ofs;
6352 bool indirect;
6354 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6355 if (indirect)
6357 type = ptr;
6358 ptr = build_pointer_type (type);
6360 size = int_size_in_bytes (type);
6361 valist_type = TREE_TYPE (valist);
6363 /* Args grow down. Not handled by generic routines. */
6365 u = fold_convert (sizetype, size_in_bytes (type));
6366 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6367 t = fold_build_pointer_plus (valist, u);
6369 /* Align to 4 or 8 byte boundary depending on argument size. */
6371 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6372 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6373 t = fold_convert (valist_type, t);
6375 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6377 ofs = (8 - size) % 4;
6378 if (ofs != 0)
6379 t = fold_build_pointer_plus_hwi (t, ofs);
6381 t = fold_convert (ptr, t);
6382 t = build_va_arg_indirect_ref (t);
6384 if (indirect)
6385 t = build_va_arg_indirect_ref (t);
6387 return t;
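/* Illustrative sketch, not from the sources, of the 32-bit pointer
   arithmetic above.  For a 2-byte short: valist -= 2; the mask is -4
   since size <= 4, rounding down to a word boundary; then
   ofs = (8 - 2) % 4 = 2 right-justifies the halfword within its
   word.  For an 8-byte double: valist -= 8; the mask is -8; and
   ofs = (8 - 8) % 4 = 0, so no further adjustment is made.  */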
6391 /* True if MODE is valid for the target. By "valid", we mean able to
6392 be manipulated in non-trivial ways. In particular, this means all
6393 the arithmetic is supported.
6395 Currently, TImode is not valid as the HP 64-bit runtime documentation
6396 doesn't document the alignment and calling conventions for this type.
6397 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6398 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6400 static bool
6401 pa_scalar_mode_supported_p (machine_mode mode)
6403 int precision = GET_MODE_PRECISION (mode);
6405 switch (GET_MODE_CLASS (mode))
6407 case MODE_PARTIAL_INT:
6408 case MODE_INT:
6409 if (precision == CHAR_TYPE_SIZE)
6410 return true;
6411 if (precision == SHORT_TYPE_SIZE)
6412 return true;
6413 if (precision == INT_TYPE_SIZE)
6414 return true;
6415 if (precision == LONG_TYPE_SIZE)
6416 return true;
6417 if (precision == LONG_LONG_TYPE_SIZE)
6418 return true;
6419 return false;
6421 case MODE_FLOAT:
6422 if (precision == FLOAT_TYPE_SIZE)
6423 return true;
6424 if (precision == DOUBLE_TYPE_SIZE)
6425 return true;
6426 if (precision == LONG_DOUBLE_TYPE_SIZE)
6427 return true;
6428 return false;
6430 case MODE_DECIMAL_FLOAT:
6431 return false;
6433 default:
6434 gcc_unreachable ();
6438 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6439 it branches into the delay slot. Otherwise, return FALSE. */
6441 static bool
6442 branch_to_delay_slot_p (rtx_insn *insn)
6444 rtx_insn *jump_insn;
6446 if (dbr_sequence_length ())
6447 return FALSE;
6449 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6450 while (insn)
6452 insn = next_active_insn (insn);
6453 if (jump_insn == insn)
6454 return TRUE;
6456 /* We can't rely on the length of asms. So, we return FALSE when
6457 the branch is followed by an asm. */
6458 if (!insn
6459 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6460 || asm_noperands (PATTERN (insn)) >= 0
6461 || get_attr_length (insn) > 0)
6462 break;
6465 return FALSE;
6468 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6470 This occurs when INSN has an unfilled delay slot and is followed
6471 by an asm. Disaster can occur if the asm is empty and the jump
6472 branches into the delay slot. So, we add a nop in the delay slot
6473 when this occurs. */
6475 static bool
6476 branch_needs_nop_p (rtx_insn *insn)
6478 rtx_insn *jump_insn;
6480 if (dbr_sequence_length ())
6481 return FALSE;
6483 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6484 while (insn)
6486 insn = next_active_insn (insn);
6487 if (!insn || jump_insn == insn)
6488 return TRUE;
6490 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6491 || asm_noperands (PATTERN (insn)) >= 0)
6492 && get_attr_length (insn) > 0)
6493 break;
6496 return FALSE;
6499 /* Return TRUE if INSN, a forward jump insn, can use nullification
6500 to skip the following instruction. This avoids an extra cycle due
6501 to a mis-predicted branch when we fall through. */
6503 static bool
6504 use_skip_p (rtx_insn *insn)
6506 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6508 while (insn)
6510 insn = next_active_insn (insn);
6512 /* We can't rely on the length of asms, so we can't skip asms. */
6513 if (!insn
6514 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6515 || asm_noperands (PATTERN (insn)) >= 0)
6516 break;
6517 if (get_attr_length (insn) == 4
6518 && jump_insn == next_active_insn (insn))
6519 return TRUE;
6520 if (get_attr_length (insn) > 0)
6521 break;
6524 return FALSE;
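/* Illustrative sketch, not from the sources (operand registers chosen
   arbitrarily): when use_skip_p holds, pa_output_cbranch replaces a
   forward branch over one 4-byte insn,

	cmpb,<>,n %r26,%r25,.+8
	ldo 1(%r3),%r3

   with a conditional-nullify compare that skips the insn without a
   taken branch,

	cmpclr,<> %r26,%r25,%r0
	ldo 1(%r3),%r3

   avoiding the mis-predict penalty on the fall-through path.  */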
6527 /* This routine handles all the normal conditional branch sequences we
6528 might need to generate. It handles compare immediate vs compare
6529 register, nullification of delay slots, varying length branches,
6530 negated branches, and all combinations of the above. It returns the
6531 output appropriate to emit the branch corresponding to all given
6532 parameters. */
6534 const char *
6535 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6537 static char buf[100];
6538 bool useskip;
6539 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6540 int length = get_attr_length (insn);
6541 int xdelay;
6543 /* A conditional branch to the following instruction (e.g. the delay slot)
6544 is asking for a disaster. This can happen when not optimizing and
6545 when jump optimization fails.
6547 While it is usually safe to emit nothing, this can fail if the
6548 preceding instruction is a nullified branch with an empty delay
6549 slot and the same branch target as this branch. We could check
6550 for this but jump optimization should eliminate nop jumps. It
6551 is always safe to emit a nop. */
6552 if (branch_to_delay_slot_p (insn))
6553 return "nop";
6555 /* The doubleword form of the cmpib instruction doesn't have the LEU
6556 and GTU conditions while the cmpb instruction does. Since we accept
6557 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6558 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6559 operands[2] = gen_rtx_REG (DImode, 0);
6560 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6561 operands[1] = gen_rtx_REG (DImode, 0);
6563 /* If this is a long branch with its delay slot unfilled, set `nullify'
6564 as it can nullify the delay slot and save a nop. */
6565 if (length == 8 && dbr_sequence_length () == 0)
6566 nullify = 1;
6568 /* If this is a short forward conditional branch which did not get
6569 its delay slot filled, the delay slot can still be nullified. */
6570 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6571 nullify = forward_branch_p (insn);
6573 /* A forward branch over a single nullified insn can be done with a
6574 comclr instruction. This avoids a single cycle penalty due to
6575 mis-predicted branch if we fall through (branch not taken). */
6576 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6578 switch (length)
6580 /* All short conditional branches except backwards with an unfilled
6581 delay slot. */
6582 case 4:
6583 if (useskip)
6584 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6585 else
6586 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6587 if (GET_MODE (operands[1]) == DImode)
6588 strcat (buf, "*");
6589 if (negated)
6590 strcat (buf, "%B3");
6591 else
6592 strcat (buf, "%S3");
6593 if (useskip)
6594 strcat (buf, " %2,%r1,%%r0");
6595 else if (nullify)
6597 if (branch_needs_nop_p (insn))
6598 strcat (buf, ",n %2,%r1,%0%#");
6599 else
6600 strcat (buf, ",n %2,%r1,%0");
6602 else
6603 strcat (buf, " %2,%r1,%0");
6604 break;
6606 /* All long conditionals. Note a short backward branch with an
6607 unfilled delay slot is treated just like a long backward branch
6608 with an unfilled delay slot. */
6609 case 8:
6610 /* Handle weird backwards branch with a filled delay slot
6611 which is nullified. */
6612 if (dbr_sequence_length () != 0
6613 && ! forward_branch_p (insn)
6614 && nullify)
6616 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6617 if (GET_MODE (operands[1]) == DImode)
6618 strcat (buf, "*");
6619 if (negated)
6620 strcat (buf, "%S3");
6621 else
6622 strcat (buf, "%B3");
6623 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6625 /* Handle short backwards branch with an unfilled delay slot.
6626 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6627 taken and untaken branches. */
6628 else if (dbr_sequence_length () == 0
6629 && ! forward_branch_p (insn)
6630 && INSN_ADDRESSES_SET_P ()
6631 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6632 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6634 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6635 if (GET_MODE (operands[1]) == DImode)
6636 strcat (buf, "*");
6637 if (negated)
6638 strcat (buf, "%B3 %2,%r1,%0%#");
6639 else
6640 strcat (buf, "%S3 %2,%r1,%0%#");
6642 else
6644 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6645 if (GET_MODE (operands[1]) == DImode)
6646 strcat (buf, "*");
6647 if (negated)
6648 strcat (buf, "%S3");
6649 else
6650 strcat (buf, "%B3");
6651 if (nullify)
6652 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6653 else
6654 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6656 break;
6658 default:
6659 /* The reversed conditional branch must branch over one additional
6660 instruction if the delay slot is filled and needs to be extracted
6661 by pa_output_lbranch. If the delay slot is empty or this is a
6662 nullified forward branch, the instruction after the reversed
6663 condition branch must be nullified. */
6664 if (dbr_sequence_length () == 0
6665 || (nullify && forward_branch_p (insn)))
6667 nullify = 1;
6668 xdelay = 0;
6669 operands[4] = GEN_INT (length);
6671 else
6673 xdelay = 1;
6674 operands[4] = GEN_INT (length + 4);
6677 /* Create a reversed conditional branch which branches around
6678 the following insns. */
6679 if (GET_MODE (operands[1]) != DImode)
6681 if (nullify)
6683 if (negated)
6684 strcpy (buf,
6685 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6686 else
6687 strcpy (buf,
6688 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6690 else
6692 if (negated)
6693 strcpy (buf,
6694 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6695 else
6696 strcpy (buf,
6697 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6700 else
6702 if (nullify)
6704 if (negated)
6705 strcpy (buf,
6706 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6707 else
6708 strcpy (buf,
6709 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6711 else
6713 if (negated)
6714 strcpy (buf,
6715 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6716 else
6717 strcpy (buf,
6718 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6722 output_asm_insn (buf, operands);
6723 return pa_output_lbranch (operands[0], insn, xdelay);
6725 return buf;
6728 /* Output a PIC pc-relative instruction sequence to load the address of
6729 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6730 or a code label. OPERANDS[1] specifies the register to use to load
6731 the program counter. OPERANDS[3] may be used for label generation.
6732 The sequence is always three instructions in length. The program
6733 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6734 Register %r1 is clobbered. */
6736 static void
6737 pa_output_pic_pcrel_sequence (rtx *operands)
6739 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6740 if (TARGET_PA_20)
6742 /* We can use mfia to determine the current program counter. */
6743 if (TARGET_SOM || !TARGET_GAS)
6745 operands[3] = gen_label_rtx ();
6746 targetm.asm_out.internal_label (asm_out_file, "L",
6747 CODE_LABEL_NUMBER (operands[3]));
6748 output_asm_insn ("mfia %1", operands);
6749 output_asm_insn ("addil L'%0-%l3,%1", operands);
6750 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6752 else
6754 output_asm_insn ("mfia %1", operands);
6755 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6756 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6759 else
6761 /* We need to use a branch to determine the current program counter. */
6762 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6763 if (TARGET_SOM || !TARGET_GAS)
6765 operands[3] = gen_label_rtx ();
6766 output_asm_insn ("addil L'%0-%l3,%1", operands);
6767 targetm.asm_out.internal_label (asm_out_file, "L",
6768 CODE_LABEL_NUMBER (operands[3]));
6769 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6771 else
6773 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6774 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
6779 /* This routine handles output of long unconditional branches that
6780 exceed the maximum range of a simple branch instruction. Since
6781 we don't have a register available for the branch, we save register
6782 %r1 in the frame marker, load the branch destination DEST into %r1,
6783 execute the branch, and restore %r1 in the delay slot of the branch.
6785 Since long branches may have an insn in the delay slot and the
6786 delay slot is used to restore %r1, we in general need to extract
6787 this insn and execute it before the branch. However, to facilitate
6788 use of this function by conditional branches, we also provide an
6789 option to not extract the delay insn so that it will be emitted
6790 after the long branch. So, if there is an insn in the delay slot,
6791 it is extracted if XDELAY is nonzero.
6793 The lengths of the various long-branch sequences are 20, 16 and 24
6794 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6796 const char *
6797 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6799 rtx xoperands[4];
6801 xoperands[0] = dest;
6803 /* First, free up the delay slot. */
6804 if (xdelay && dbr_sequence_length () != 0)
6806 /* We can't handle a jump in the delay slot. */
6807 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6809 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6810 optimize, 0, NULL);
6812 /* Now delete the delay insn. */
6813 SET_INSN_DELETED (NEXT_INSN (insn));
6816 /* Output an insn to save %r1. The runtime documentation doesn't
6817 specify whether the "Clean Up" slot in the caller's frame can
6818 be clobbered by the callee. It isn't copied by HP's builtin
6819 alloca, so this suggests that it can be clobbered if necessary.
6820 The "Static Link" location is copied by HP builtin alloca, so
6821 we avoid using it. Using the cleanup slot might be a problem
6822 if we have to interoperate with languages that pass cleanup
6823 information. However, it should be possible to handle these
6824 situations with GCC's asm feature.
6826 The "Current RP" slot is reserved for the called procedure, so
6827 we try to use it when we don't have a frame of our own. It's
6828 rather unlikely that we won't have a frame when we need to emit
6829 a very long branch.
6831 Really the way to go long term is a register scavenger; go to
6832 the target of the jump and find a register which we can use
6833 as a scratch to hold the value in %r1. Then, we wouldn't have
6834 to free up the delay slot or clobber a slot that may be needed
6835 for other purposes. */
6836 if (TARGET_64BIT)
6838 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6839 /* Use the return pointer slot in the frame marker. */
6840 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6841 else
6842 /* Use the slot at -40 in the frame marker since HP builtin
6843 alloca doesn't copy it. */
6844 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6846 else
6848 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6849 /* Use the return pointer slot in the frame marker. */
6850 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6851 else
6852 /* Use the "Clean Up" slot in the frame marker. In GCC,
6853 the only other use of this location is for copying a
6854 floating point double argument from a floating-point
6855 register to two general registers. The copy is done
6856 as an "atomic" operation when outputting a call, so it
6857 won't interfere with our using the location here. */
6858 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6861 if (TARGET_PORTABLE_RUNTIME)
6863 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6864 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6865 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6867 else if (flag_pic)
6869 xoperands[1] = gen_rtx_REG (Pmode, 1);
6870 xoperands[2] = xoperands[1];
6871 pa_output_pic_pcrel_sequence (xoperands);
6872 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6874 else
6875 /* Now output a very long branch to the original target. */
6876 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6878 /* Now restore the value of %r1 in the delay slot. */
6879 if (TARGET_64BIT)
6881 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6882 return "ldd -16(%%r30),%%r1";
6883 else
6884 return "ldd -40(%%r30),%%r1";
6886 else
6888 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6889 return "ldw -20(%%r30),%%r1";
6890 else
6891 return "ldw -12(%%r30),%%r1";
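/* Illustrative sketch, not from the sources: the 16-byte non-PIC
   32-bit sequence produced above when the function has no frame and
   %r2 is dead, for a hypothetical target L$far:

	stw %r1,-20(%r30)	; save %r1 in the Current RP slot
	ldil L'L$far,%r1
	be R'L$far(%sr4,%r1)	; interspace branch through %r1
	ldw -20(%r30),%r1	; delay slot: restore %r1
   */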
6895 /* This routine handles all the branch-on-bit conditional branch sequences we
6896 might need to generate. It handles nullification of delay slots,
6897 varying length branches, negated branches and all combinations of the
6898 above. It returns the appropriate output template to emit the branch. */
6900 const char *
6901 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6903 static char buf[100];
6904 bool useskip;
6905 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6906 int length = get_attr_length (insn);
6907 int xdelay;
6909 /* A conditional branch to the following instruction (e.g. the delay slot) is
6910 asking for a disaster. I do not think this can happen as this pattern
6911 is only used when optimizing; jump optimization should eliminate the
6912 jump. But be prepared just in case. */
6914 if (branch_to_delay_slot_p (insn))
6915 return "nop";
6917 /* If this is a long branch with its delay slot unfilled, set `nullify'
6918 as it can nullify the delay slot and save a nop. */
6919 if (length == 8 && dbr_sequence_length () == 0)
6920 nullify = 1;
6922 /* If this is a short forward conditional branch which did not get
6923 its delay slot filled, the delay slot can still be nullified. */
6924 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6925 nullify = forward_branch_p (insn);
6927 /* A forward branch over a single nullified insn can be done with an
6928 extrs instruction. This avoids a single cycle penalty due to
6929 mis-predicted branch if we fall through (branch not taken). */
6930 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6932 switch (length)
6935 /* All short conditional branches except backwards with an unfilled
6936 delay slot. */
6937 case 4:
6938 if (useskip)
6939 strcpy (buf, "{extrs,|extrw,s,}");
6940 else
6941 strcpy (buf, "bb,");
6942 if (useskip && GET_MODE (operands[0]) == DImode)
6943 strcpy (buf, "extrd,s,*");
6944 else if (GET_MODE (operands[0]) == DImode)
6945 strcpy (buf, "bb,*");
6946 if ((which == 0 && negated)
6947 || (which == 1 && ! negated))
6948 strcat (buf, ">=");
6949 else
6950 strcat (buf, "<");
6951 if (useskip)
6952 strcat (buf, " %0,%1,1,%%r0");
6953 else if (nullify && negated)
6955 if (branch_needs_nop_p (insn))
6956 strcat (buf, ",n %0,%1,%3%#");
6957 else
6958 strcat (buf, ",n %0,%1,%3");
6960 else if (nullify && ! negated)
6962 if (branch_needs_nop_p (insn))
6963 strcat (buf, ",n %0,%1,%2%#");
6964 else
6965 strcat (buf, ",n %0,%1,%2");
6967 else if (! nullify && negated)
6968 strcat (buf, " %0,%1,%3");
6969 else if (! nullify && ! negated)
6970 strcat (buf, " %0,%1,%2");
6971 break;
6973 /* All long conditionals. Note a short backward branch with an
6974 unfilled delay slot is treated just like a long backward branch
6975 with an unfilled delay slot. */
6976 case 8:
6977 /* Handle weird backwards branch with a filled delay slot
6978 which is nullified. */
6979 if (dbr_sequence_length () != 0
6980 && ! forward_branch_p (insn)
6981 && nullify)
6983 strcpy (buf, "bb,");
6984 if (GET_MODE (operands[0]) == DImode)
6985 strcat (buf, "*");
6986 if ((which == 0 && negated)
6987 || (which == 1 && ! negated))
6988 strcat (buf, "<");
6989 else
6990 strcat (buf, ">=");
6991 if (negated)
6992 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6993 else
6994 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6996 /* Handle short backwards branch with an unfilled delay slot.
6997 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6998 taken and untaken branches. */
6999 else if (dbr_sequence_length () == 0
7000 && ! forward_branch_p (insn)
7001 && INSN_ADDRESSES_SET_P ()
7002 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7003 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7005 strcpy (buf, "bb,");
7006 if (GET_MODE (operands[0]) == DImode)
7007 strcat (buf, "*");
7008 if ((which == 0 && negated)
7009 || (which == 1 && ! negated))
7010 strcat (buf, ">=");
7011 else
7012 strcat (buf, "<");
7013 if (negated)
7014 strcat (buf, " %0,%1,%3%#");
7015 else
7016 strcat (buf, " %0,%1,%2%#");
7018 else
7020 if (GET_MODE (operands[0]) == DImode)
7021 strcpy (buf, "extrd,s,*");
7022 else
7023 strcpy (buf, "{extrs,|extrw,s,}");
7024 if ((which == 0 && negated)
7025 || (which == 1 && ! negated))
7026 strcat (buf, "<");
7027 else
7028 strcat (buf, ">=");
7029 if (nullify && negated)
7030 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7031 else if (nullify && ! negated)
7032 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7033 else if (negated)
7034 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7035 else
7036 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7038 break;
7040 default:
7041 /* The reversed conditional branch must branch over one additional
7042 instruction if the delay slot is filled and needs to be extracted
7043 by pa_output_lbranch. If the delay slot is empty or this is a
7044 nullified forward branch, the instruction after the reversed
7045 condition branch must be nullified. */
7046 if (dbr_sequence_length () == 0
7047 || (nullify && forward_branch_p (insn)))
7049 nullify = 1;
7050 xdelay = 0;
7051 operands[4] = GEN_INT (length);
7053 else
7055 xdelay = 1;
7056 operands[4] = GEN_INT (length + 4);
7059 if (GET_MODE (operands[0]) == DImode)
7060 strcpy (buf, "bb,*");
7061 else
7062 strcpy (buf, "bb,");
7063 if ((which == 0 && negated)
7064 || (which == 1 && !negated))
7065 strcat (buf, "<");
7066 else
7067 strcat (buf, ">=");
7068 if (nullify)
7069 strcat (buf, ",n %0,%1,.+%4");
7070 else
7071 strcat (buf, " %0,%1,.+%4");
7072 output_asm_insn (buf, operands);
7073 return pa_output_lbranch (negated ? operands[3] : operands[2],
7074 insn, xdelay);
7076 return buf;
7079 /* This routine handles all the branch-on-variable-bit conditional branch
7080 sequences we might need to generate. It handles nullification of delay
7081 slots, varying length branches, negated branches and all combinations
7082 of the above. It returns the appropriate output template to emit the
7083 branch. */
7085 const char *
7086 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7087 int which)
7089 static char buf[100];
7090 bool useskip;
7091 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7092 int length = get_attr_length (insn);
7093 int xdelay;
7095 /* A conditional branch to the following instruction (e.g. the delay slot) is
7096 asking for a disaster. I do not think this can happen as this pattern
7097 is only used when optimizing; jump optimization should eliminate the
7098 jump. But be prepared just in case. */
7100 if (branch_to_delay_slot_p (insn))
7101 return "nop";
7103 /* If this is a long branch with its delay slot unfilled, set `nullify'
7104 as it can nullify the delay slot and save a nop. */
7105 if (length == 8 && dbr_sequence_length () == 0)
7106 nullify = 1;
7108 /* If this is a short forward conditional branch which did not get
7109 its delay slot filled, the delay slot can still be nullified. */
7110 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7111 nullify = forward_branch_p (insn);
7113 /* A forward branch over a single nullified insn can be done with an
7114 extrs instruction. This avoids a single cycle penalty due to
7115 mis-predicted branch if we fall through (branch not taken). */
7116 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7118 switch (length)
7121 /* All short conditional branches except backwards with an unfilled
7122 delay slot. */
7123 case 4:
7124 if (useskip)
7125 strcpy (buf, "{vextrs,|extrw,s,}");
7126 else
7127 strcpy (buf, "{bvb,|bb,}");
7128 if (useskip && GET_MODE (operands[0]) == DImode)
7129 strcpy (buf, "extrd,s,*");
7130 else if (GET_MODE (operands[0]) == DImode)
7131 strcpy (buf, "bb,*");
7132 if ((which == 0 && negated)
7133 || (which == 1 && ! negated))
7134 strcat (buf, ">=");
7135 else
7136 strcat (buf, "<");
7137 if (useskip)
7138 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7139 else if (nullify && negated)
7141 if (branch_needs_nop_p (insn))
7142 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7143 else
7144 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7146 else if (nullify && ! negated)
7148 if (branch_needs_nop_p (insn))
7149 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7150 else
7151 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7153 else if (! nullify && negated)
7154 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7155 else if (! nullify && ! negated)
7156 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7157 break;
7159 /* All long conditionals. Note a short backward branch with an
7160 unfilled delay slot is treated just like a long backward branch
7161 with an unfilled delay slot. */
7162 case 8:
7163 /* Handle weird backwards branch with a filled delay slot
7164 which is nullified. */
7165 if (dbr_sequence_length () != 0
7166 && ! forward_branch_p (insn)
7167 && nullify)
7169 strcpy (buf, "{bvb,|bb,}");
7170 if (GET_MODE (operands[0]) == DImode)
7171 strcat (buf, "*");
7172 if ((which == 0 && negated)
7173 || (which == 1 && ! negated))
7174 strcat (buf, "<");
7175 else
7176 strcat (buf, ">=");
7177 if (negated)
7178 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7179 else
7180 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7182 /* Handle short backwards branch with an unfilled delay slot.
7183 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7184 taken and untaken branches. */
7185 else if (dbr_sequence_length () == 0
7186 && ! forward_branch_p (insn)
7187 && INSN_ADDRESSES_SET_P ()
7188 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7189 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7191 strcpy (buf, "{bvb,|bb,}");
7192 if (GET_MODE (operands[0]) == DImode)
7193 strcat (buf, "*");
7194 if ((which == 0 && negated)
7195 || (which == 1 && ! negated))
7196 strcat (buf, ">=");
7197 else
7198 strcat (buf, "<");
7199 if (negated)
7200 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7201 else
7202 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7204 else
7206 strcpy (buf, "{vextrs,|extrw,s,}");
7207 if (GET_MODE (operands[0]) == DImode)
7208 strcpy (buf, "extrd,s,*");
7209 if ((which == 0 && negated)
7210 || (which == 1 && ! negated))
7211 strcat (buf, "<");
7212 else
7213 strcat (buf, ">=");
7214 if (nullify && negated)
7215 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7216 else if (nullify && ! negated)
7217 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7218 else if (negated)
7219 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7220 else
7221 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7223 break;
7225 default:
7226 /* The reversed conditional branch must branch over one additional
7227 instruction if the delay slot is filled and needs to be extracted
7228 by pa_output_lbranch. If the delay slot is empty or this is a
7229 nullified forward branch, the instruction after the reversed
7230 condition branch must be nullified. */
7231 if (dbr_sequence_length () == 0
7232 || (nullify && forward_branch_p (insn)))
7234 nullify = 1;
7235 xdelay = 0;
7236 operands[4] = GEN_INT (length);
7238 else
7240 xdelay = 1;
7241 operands[4] = GEN_INT (length + 4);
7244 if (GET_MODE (operands[0]) == DImode)
7245 strcpy (buf, "bb,*");
7246 else
7247 strcpy (buf, "{bvb,|bb,}");
7248 if ((which == 0 && negated)
7249 || (which == 1 && !negated))
7250 strcat (buf, "<");
7251 else
7252 strcat (buf, ">=");
7253 if (nullify)
7254 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7255 else
7256 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7257 output_asm_insn (buf, operands);
7258 return pa_output_lbranch (negated ? operands[3] : operands[2],
7259 insn, xdelay);
7261 return buf;
7264 /* Return the output template for emitting a dbra type insn.
7266 Note it may perform some output operations on its own before
7267 returning the final output string. */
7268 const char *
7269 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7271 int length = get_attr_length (insn);
7273 /* A conditional branch to the following instruction (e.g. the delay slot) is
7274 asking for a disaster. Be prepared! */
7276 if (branch_to_delay_slot_p (insn))
7278 if (which_alternative == 0)
7279 return "ldo %1(%0),%0";
7280 else if (which_alternative == 1)
7282 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7283 output_asm_insn ("ldw -16(%%r30),%4", operands);
7284 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7285 return "{fldws|fldw} -16(%%r30),%0";
7287 else
7289 output_asm_insn ("ldw %0,%4", operands);
7290 return "ldo %1(%4),%4\n\tstw %4,%0";
7294 if (which_alternative == 0)
7296 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7297 int xdelay;
7299 /* If this is a long branch with its delay slot unfilled, set `nullify'
7300 as it can nullify the delay slot and save a nop. */
7301 if (length == 8 && dbr_sequence_length () == 0)
7302 nullify = 1;
7304 /* If this is a short forward conditional branch which did not get
7305 its delay slot filled, the delay slot can still be nullified. */
7306 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7307 nullify = forward_branch_p (insn);
7309 switch (length)
7311 case 4:
7312 if (nullify)
7314 if (branch_needs_nop_p (insn))
7315 return "addib,%C2,n %1,%0,%3%#";
7316 else
7317 return "addib,%C2,n %1,%0,%3";
7319 else
7320 return "addib,%C2 %1,%0,%3";
7322 case 8:
7323 /* Handle weird backwards branch with a filled delay slot
7324 which is nullified. */
7325 if (dbr_sequence_length () != 0
7326 && ! forward_branch_p (insn)
7327 && nullify)
7328 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7329 /* Handle short backwards branch with an unfilled delay slot.
7330 Using an addb;nop rather than addi;bl saves 1 cycle for both
7331 taken and untaken branches. */
7332 else if (dbr_sequence_length () == 0
7333 && ! forward_branch_p (insn)
7334 && INSN_ADDRESSES_SET_P ()
7335 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7336 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7337 return "addib,%C2 %1,%0,%3%#";
7339 /* Handle normal cases. */
7340 if (nullify)
7341 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7342 else
7343 return "addi,%N2 %1,%0,%0\n\tb %3";
7345 default:
7346 /* The reversed conditional branch must branch over one additional
7347 instruction if the delay slot is filled and needs to be extracted
7348 by pa_output_lbranch. If the delay slot is empty or this is a
7349 nullified forward branch, the instruction after the reversed
7350 condition branch must be nullified. */
7351 if (dbr_sequence_length () == 0
7352 || (nullify && forward_branch_p (insn)))
7354 nullify = 1;
7355 xdelay = 0;
7356 operands[4] = GEN_INT (length);
7358 else
7360 xdelay = 1;
7361 operands[4] = GEN_INT (length + 4);
7364 if (nullify)
7365 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7366 else
7367 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7369 return pa_output_lbranch (operands[3], insn, xdelay);
7373 /* Deal with gross reload from FP register case. */
7374 else if (which_alternative == 1)
7376 /* Move loop counter from FP register to MEM then into a GR,
7377 increment the GR, store the GR into MEM, and finally reload
7378 the FP register from MEM from within the branch's delay slot. */
7379 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7380 operands);
7381 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7382 if (length == 24)
7383 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7384 else if (length == 28)
7385 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7386 else
7388 operands[5] = GEN_INT (length - 16);
7389 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7390 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7391 return pa_output_lbranch (operands[3], insn, 0);
7394 /* Deal with gross reload from memory case. */
7395 else
7397 /* Reload loop counter from memory, the store back to memory
7398 happens in the branch's delay slot. */
7399 output_asm_insn ("ldw %0,%4", operands);
7400 if (length == 12)
7401 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7402 else if (length == 16)
7403 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7404 else
7406 operands[5] = GEN_INT (length - 4);
7407 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7408 return pa_output_lbranch (operands[3], insn, 0);
7413 /* Return the output template for emitting a movb type insn.
7415 Note it may perform some output operations on its own before
7416 returning the final output string. */
7417 const char *
7418 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7419 int reverse_comparison)
7421 int length = get_attr_length (insn);
7423 /* A conditional branch to the following instruction (e.g. the delay slot) is
7424 asking for a disaster. Be prepared! */
7426 if (branch_to_delay_slot_p (insn))
7428 if (which_alternative == 0)
7429 return "copy %1,%0";
7430 else if (which_alternative == 1)
7432 output_asm_insn ("stw %1,-16(%%r30)", operands);
7433 return "{fldws|fldw} -16(%%r30),%0";
7435 else if (which_alternative == 2)
7436 return "stw %1,%0";
7437 else
7438 return "mtsar %r1";
7441 /* Support the second variant. */
7442 if (reverse_comparison)
7443 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7445 if (which_alternative == 0)
7447 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7448 int xdelay;
7450 /* If this is a long branch with its delay slot unfilled, set `nullify'
7451 as it can nullify the delay slot and save a nop. */
7452 if (length == 8 && dbr_sequence_length () == 0)
7453 nullify = 1;
7455 /* If this is a short forward conditional branch which did not get
7456 its delay slot filled, the delay slot can still be nullified. */
7457 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7458 nullify = forward_branch_p (insn);
7460 switch (length)
7462 case 4:
7463 if (nullify)
7465 if (branch_needs_nop_p (insn))
7466 return "movb,%C2,n %1,%0,%3%#";
7467 else
7468 return "movb,%C2,n %1,%0,%3";
7470 else
7471 return "movb,%C2 %1,%0,%3";
7473 case 8:
7474 /* Handle weird backwards branch with a filled delay slot
7475 which is nullified. */
7476 if (dbr_sequence_length () != 0
7477 && ! forward_branch_p (insn)
7478 && nullify)
7479 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7481 /* Handle short backwards branch with an unfilled delay slot.
7482 Using a movb;nop rather than or;bl saves 1 cycle for both
7483 taken and untaken branches. */
7484 else if (dbr_sequence_length () == 0
7485 && ! forward_branch_p (insn)
7486 && INSN_ADDRESSES_SET_P ()
7487 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7488 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7489 return "movb,%C2 %1,%0,%3%#";
7490 /* Handle normal cases. */
7491 if (nullify)
7492 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7493 else
7494 return "or,%N2 %1,%%r0,%0\n\tb %3";
7496 default:
7497 /* The reversed conditional branch must branch over one additional
7498 instruction if the delay slot is filled and needs to be extracted
7499 by pa_output_lbranch. If the delay slot is empty or this is a
7500 nullified forward branch, the instruction after the reversed
7501 condition branch must be nullified. */
7502 if (dbr_sequence_length () == 0
7503 || (nullify && forward_branch_p (insn)))
7505 nullify = 1;
7506 xdelay = 0;
7507 operands[4] = GEN_INT (length);
7509 else
7511 xdelay = 1;
7512 operands[4] = GEN_INT (length + 4);
7515 if (nullify)
7516 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7517 else
7518 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7520 return pa_output_lbranch (operands[3], insn, xdelay);
7523 /* Deal with gross reload for FP destination register case. */
7524 else if (which_alternative == 1)
7526 /* Move source register to MEM, perform the branch test, then
7527 finally load the FP register from MEM from within the branch's
7528 delay slot. */
7529 output_asm_insn ("stw %1,-16(%%r30)", operands);
7530 if (length == 12)
7531 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7532 else if (length == 16)
7533 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7534 else
7536 operands[4] = GEN_INT (length - 4);
7537 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7538 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7539 return pa_output_lbranch (operands[3], insn, 0);
7542 /* Deal with gross reload from memory case. */
7543 else if (which_alternative == 2)
7545 /* Reload loop counter from memory, the store back to memory
7546 happens in the branch's delay slot. */
7547 if (length == 8)
7548 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7549 else if (length == 12)
7550 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7551 else
7553 operands[4] = GEN_INT (length);
7554 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7555 operands);
7556 return pa_output_lbranch (operands[3], insn, 0);
7559 /* Handle SAR as a destination. */
7560 else
7562 if (length == 8)
7563 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7564 else if (length == 12)
7565 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7566 else
7568 operands[4] = GEN_INT (length);
7569 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7570 operands);
7571 return pa_output_lbranch (operands[3], insn, 0);
7576 /* Copy any FP arguments in INSN into integer registers. */
7577 static void
7578 copy_fp_args (rtx_insn *insn)
7580 rtx link;
7581 rtx xoperands[2];
7583 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7585 int arg_mode, regno;
7586 rtx use = XEXP (link, 0);
7588 if (! (GET_CODE (use) == USE
7589 && GET_CODE (XEXP (use, 0)) == REG
7590 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7591 continue;
7593 arg_mode = GET_MODE (XEXP (use, 0));
7594 regno = REGNO (XEXP (use, 0));
7596 /* Is it a floating point register? */
7597 if (regno >= 32 && regno <= 39)
7599 /* Copy the FP register into an integer register via memory. */
7600 if (arg_mode == SFmode)
7602 xoperands[0] = XEXP (use, 0);
7603 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7604 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7605 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7607 else
7609 xoperands[0] = XEXP (use, 0);
7610 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7611 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7612 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7613 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7619 /* Compute length of the FP argument copy sequence for INSN. */
7620 static int
7621 length_fp_args (rtx_insn *insn)
7623 int length = 0;
7624 rtx link;
7626 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7628 int arg_mode, regno;
7629 rtx use = XEXP (link, 0);
7631 if (! (GET_CODE (use) == USE
7632 && GET_CODE (XEXP (use, 0)) == REG
7633 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7634 continue;
7636 arg_mode = GET_MODE (XEXP (use, 0));
7637 regno = REGNO (XEXP (use, 0));
7639 /* Is it a floating point register? */
7640 if (regno >= 32 && regno <= 39)
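/* These counts mirror copy_fp_args above: an SFmode copy takes two
   insns (fstw + ldw, 8 bytes); anything wider takes three
   (fstd plus two ldw, 12 bytes).  */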
7642 if (arg_mode == SFmode)
7643 length += 8;
7644 else
7645 length += 12;
7649 return length;
7652 /* Return the attribute length for the millicode call instruction INSN.
7653 The length must match the code generated by pa_output_millicode_call.
7654 We include the delay slot in the returned length as it is better to
7655 overestimate the length than to underestimate it. */
7658 pa_attr_length_millicode_call (rtx_insn *insn)
7660 unsigned long distance = -1;
7661 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7663 if (INSN_ADDRESSES_SET_P ())
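/* The unsigned sum may wrap around; if it does, fall back to the
   unknown distance (-1) so a long sequence is selected.  */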
7665 distance = (total + insn_current_reference_address (insn));
7666 if (distance < total)
7667 distance = -1;
7670 if (TARGET_64BIT)
7672 if (!TARGET_LONG_CALLS && distance < 7600000)
7673 return 8;
7675 return 20;
7677 else if (TARGET_PORTABLE_RUNTIME)
7678 return 24;
7679 else
7681 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7682 return 8;
7684 if (!flag_pic)
7685 return 12;
7687 return 24;
7691 /* INSN is a function call.
7693 CALL_DEST is the routine we are calling. */
7695 const char *
7696 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7698 int attr_length = get_attr_length (insn);
7699 int seq_length = dbr_sequence_length ();
7700 rtx xoperands[4];
7702 xoperands[0] = call_dest;
7704 /* Handle the common case where we are sure that the branch will
7705 reach the beginning of the $CODE$ subspace. The within reach
7706 form of the $$sh_func_adrs call has a length of 28. Because it
7707 has an attribute type of sh_func_adrs, it never has a nonzero
7708 sequence length (i.e., the delay slot is never filled). */
7709 if (!TARGET_LONG_CALLS
7710 && (attr_length == 8
7711 || (attr_length == 28
7712 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
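/* Millicode calls return through %r31 (%r2 in the 64-bit runtime);
   see pa_insn_refs_are_delayed below.  */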
7714 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7715 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7717 else
7719 if (TARGET_64BIT)
7721 /* It might seem that one insn could be saved by accessing
7722 the millicode function using the linkage table. However,
7723 this doesn't work in shared libraries and other dynamically
7724 loaded objects. Using a pc-relative sequence also avoids
7725 problems related to the implicit use of the gp register. */
7726 xoperands[1] = gen_rtx_REG (Pmode, 1);
7727 xoperands[2] = xoperands[1];
7728 pa_output_pic_pcrel_sequence (xoperands);
7729 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7731 else if (TARGET_PORTABLE_RUNTIME)
7733 /* Pure portable runtime doesn't allow be/ble; we also don't
7734 have PIC support in the assembler/linker, so this sequence
7735 is needed. */
7737 /* Get the address of our target into %r1. */
7738 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7739 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7741 /* Get our return address into %r31. */
7742 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7743 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7745 /* Jump to our target address in %r1. */
7746 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7748 else if (!flag_pic)
7750 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7751 if (TARGET_PA_20)
7752 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7753 else
7754 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7756 else
7758 xoperands[1] = gen_rtx_REG (Pmode, 31);
7759 xoperands[2] = gen_rtx_REG (Pmode, 1);
7760 pa_output_pic_pcrel_sequence (xoperands);
7762 /* Adjust return address. */
7763 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7765 /* Jump to our target address in %r1. */
7766 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7770 if (seq_length == 0)
7771 output_asm_insn ("nop", xoperands);
7773 return "";
7776 /* Return the attribute length of the call instruction INSN. The SIBCALL
7777 flag indicates whether INSN is a regular call or a sibling call. The
7778 length returned must be longer than the code actually generated by
7779 pa_output_call. Since branch shortening is done before delay branch
7780 sequencing, there is no way to determine whether or not the delay
7781 slot will be filled during branch shortening. Even when the delay
7782 slot is filled, we may have to add a nop if the delay slot contains
7783 a branch that can't reach its target. Thus, we always have to include
7784 the delay slot in the length estimate. This used to be done in
7785 pa_adjust_insn_length but we do it here now as some sequences always
7786 fill the delay slot and we can save four bytes in the estimate for
7787 these sequences. */
7790 pa_attr_length_call (rtx_insn *insn, int sibcall)
7792 int local_call;
7793 rtx call, call_dest;
7794 tree call_decl;
7795 int length = 0;
7796 rtx pat = PATTERN (insn);
7797 unsigned long distance = -1;
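/* DISTANCE stays -1 (the largest unsigned value) when insn addresses
   are unavailable, so every range check below fails and a long
   sequence is assumed.  */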
7799 gcc_assert (CALL_P (insn));
7801 if (INSN_ADDRESSES_SET_P ())
7803 unsigned long total;
7805 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7806 distance = (total + insn_current_reference_address (insn));
7807 if (distance < total)
7808 distance = -1;
7811 gcc_assert (GET_CODE (pat) == PARALLEL);
7813 /* Get the call rtx. */
7814 call = XVECEXP (pat, 0, 0);
7815 if (GET_CODE (call) == SET)
7816 call = SET_SRC (call);
7818 gcc_assert (GET_CODE (call) == CALL);
7820 /* Determine if this is a local call. */
7821 call_dest = XEXP (XEXP (call, 0), 0);
7822 call_decl = SYMBOL_REF_DECL (call_dest);
7823 local_call = call_decl && targetm.binds_local_p (call_decl);
7825 /* pc-relative branch. */
7826 if (!TARGET_LONG_CALLS
7827 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7828 || distance < MAX_PCREL17F_OFFSET))
7829 length += 8;
7831 /* 64-bit plabel sequence. */
7832 else if (TARGET_64BIT && !local_call)
7833 length += sibcall ? 28 : 24;
7835 /* non-pic long absolute branch sequence. */
7836 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7837 length += 12;
7839 /* long pc-relative branch sequence. */
7840 else if (TARGET_LONG_PIC_SDIFF_CALL
7841 || (TARGET_GAS && !TARGET_SOM && local_call))
7843 length += 20;
7845 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7846 length += 8;
7849 /* 32-bit plabel sequence. */
7850 else
7852 length += 32;
7854 if (TARGET_SOM)
7855 length += length_fp_args (insn);
7857 if (flag_pic)
7858 length += 4;
7860 if (!TARGET_PA_20)
7862 if (!sibcall)
7863 length += 8;
7865 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7866 length += 8;
7870 return length;
7873 /* INSN is a function call.
7875 CALL_DEST is the routine we are calling. */
7877 const char *
7878 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7880 int seq_length = dbr_sequence_length ();
7881 tree call_decl = SYMBOL_REF_DECL (call_dest);
7882 int local_call = call_decl && targetm.binds_local_p (call_decl);
7883 rtx xoperands[4];
7885 xoperands[0] = call_dest;
7887 /* Handle the common case where we're sure that the branch will reach
7888 the beginning of the "$CODE$" subspace. This is the beginning of
7889 the current function if we are in a named section. */
7890 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7892 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7893 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7895 else
7897 if (TARGET_64BIT && !local_call)
7899 /* ??? As far as I can tell, the HP linker doesn't support the
7900 long pc-relative sequence described in the 64-bit runtime
7901 architecture. So, we use a slightly longer indirect call. */
7902 xoperands[0] = pa_get_deferred_plabel (call_dest);
7903 xoperands[1] = gen_label_rtx ();
7905 /* If this isn't a sibcall, we put the load of %r27 into the
7906 delay slot. We can't do this in a sibcall as we don't
7907 have a second call-clobbered scratch register available.
7908 We don't need to do anything when generating fast indirect
7909 calls. */
7910 if (seq_length != 0 && !sibcall)
7912 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7913 optimize, 0, NULL);
7915 /* Now delete the delay insn. */
7916 SET_INSN_DELETED (NEXT_INSN (insn));
7917 seq_length = 0;
7920 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7921 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7922 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7924 if (sibcall)
7926 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7927 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7928 output_asm_insn ("bve (%%r1)", xoperands);
7930 else
7932 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7933 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7934 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7935 seq_length = 1;
7938 else
7940 int indirect_call = 0;
7942 /* Emit a long call. There are several different sequences
7943 of increasing length and complexity. In most cases,
7944 they don't allow an instruction in the delay slot. */
7945 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7946 && !TARGET_LONG_PIC_SDIFF_CALL
7947 && !(TARGET_GAS && !TARGET_SOM && local_call)
7948 && !TARGET_64BIT)
7949 indirect_call = 1;
7951 if (seq_length != 0
7952 && !sibcall
7953 && (!TARGET_PA_20
7954 || indirect_call
7955 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7957 /* A non-jump insn in the delay slot. By definition we can
7958 emit this insn before the call (and in fact before argument
7959 relocating). */
7960 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7961 NULL);
7963 /* Now delete the delay insn. */
7964 SET_INSN_DELETED (NEXT_INSN (insn));
7965 seq_length = 0;
7968 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7970 /* This is the best sequence for making long calls in
7971 non-pic code. Unfortunately, GNU ld doesn't provide
7972 the stub needed for external calls, and GAS's support
7973 for this with the SOM linker is buggy. It is safe
7974 to use this for local calls. */
7975 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7976 if (sibcall)
7977 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7978 else
7980 if (TARGET_PA_20)
7981 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7982 xoperands);
7983 else
7984 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7986 output_asm_insn ("copy %%r31,%%r2", xoperands);
7987 seq_length = 1;
7990 else
7992 /* The HP assembler and linker can handle relocations for
7993 the difference of two symbols. The HP assembler
7994 recognizes the sequence as a pc-relative call and
7995 the linker provides stubs when needed. */
7997 /* GAS currently can't generate the relocations that
7998 are needed for the SOM linker under HP-UX using this
7999 sequence. The GNU linker doesn't generate the stubs
8000 that are needed for external calls on TARGET_ELF32
8001 with this sequence. For now, we have to use a longer
8002 plabel sequence when using GAS for non local calls. */
8003 if (TARGET_LONG_PIC_SDIFF_CALL
8004 || (TARGET_GAS && !TARGET_SOM && local_call))
8006 xoperands[1] = gen_rtx_REG (Pmode, 1);
8007 xoperands[2] = xoperands[1];
8008 pa_output_pic_pcrel_sequence (xoperands);
8010 else
8012 /* Emit a long plabel-based call sequence. This is
8013 essentially an inline implementation of $$dyncall.
8014 We don't actually try to call $$dyncall as this is
8015 as difficult as calling the function itself. */
8016 xoperands[0] = pa_get_deferred_plabel (call_dest);
8017 xoperands[1] = gen_label_rtx ();
8019 /* Since the call is indirect, FP arguments in registers
8020 need to be copied to the general registers. Then, the
8021 argument relocation stub will copy them back. */
8022 if (TARGET_SOM)
8023 copy_fp_args (insn);
8025 if (flag_pic)
8027 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8028 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8029 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8031 else
8033 output_asm_insn ("addil LR'%0-$global$,%%r27",
8034 xoperands);
8035 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8036 xoperands);
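/* Check for a plabel: if bit 30 of %r1 is set, the value is a pointer
   to a function descriptor, so clear the two low bits and load the
   actual target and GP from the descriptor.  This mirrors the inline
   $$dyncall sequences in pa_output_indirect_call below.  */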
8039 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8040 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8041 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8042 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8044 if (!sibcall && !TARGET_PA_20)
8046 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8047 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8048 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8049 else
8050 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8054 if (TARGET_PA_20)
8056 if (sibcall)
8057 output_asm_insn ("bve (%%r1)", xoperands);
8058 else
8060 if (indirect_call)
8062 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8063 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8064 seq_length = 1;
8066 else
8067 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8070 else
8072 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8073 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8074 xoperands);
8076 if (sibcall)
8078 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8079 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8080 else
8081 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8083 else
8085 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8086 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8087 else
8088 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8090 if (indirect_call)
8091 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8092 else
8093 output_asm_insn ("copy %%r31,%%r2", xoperands);
8094 seq_length = 1;
8101 if (seq_length == 0)
8102 output_asm_insn ("nop", xoperands);
8104 return "";
8107 /* Return the attribute length of the indirect call instruction INSN.
8108 The length must match the code generated by pa_output_indirect_call.
8109 The returned length includes the delay slot. Currently, the delay
8110 slot of an indirect call sequence is not exposed and it is used by
8111 the sequence itself. */
8114 pa_attr_length_indirect_call (rtx_insn *insn)
8116 unsigned long distance = -1;
8117 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8119 if (INSN_ADDRESSES_SET_P ())
8121 distance = (total + insn_current_reference_address (insn));
8122 if (distance < total)
8123 distance = -1;
8126 if (TARGET_64BIT)
8127 return 12;
8129 if (TARGET_FAST_INDIRECT_CALLS)
8130 return 8;
8132 if (TARGET_PORTABLE_RUNTIME)
8133 return 16;
8135 /* Inline version of $$dyncall. */
8136 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8137 return 20;
8139 if (!TARGET_LONG_CALLS
8140 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8141 || distance < MAX_PCREL17F_OFFSET))
8142 return 8;
8144 /* Out of reach, can use ble. */
8145 if (!flag_pic)
8146 return 12;
8148 /* Inline version of $$dyncall. */
8149 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8150 return 20;
8152 if (!optimize_size)
8153 return 36;
8155 /* Long PIC pc-relative call. */
8156 return 20;
8159 const char *
8160 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8162 rtx xoperands[4];
8163 int length;
8165 if (TARGET_64BIT)
8167 xoperands[0] = call_dest;
8168 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8169 "bve,l (%%r2),%%r2\n\t"
8170 "ldd 24(%0),%%r27", xoperands);
8171 return "";
8174 /* First the special case for kernels, level 0 systems, etc. */
8175 if (TARGET_FAST_INDIRECT_CALLS)
8177 pa_output_arg_descriptor (insn);
8178 if (TARGET_PA_20)
8179 return "bve,l,n (%%r22),%%r2\n\tnop";
8180 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8183 if (TARGET_PORTABLE_RUNTIME)
8185 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8186 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8187 pa_output_arg_descriptor (insn);
8188 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8191 /* Maybe emit a fast inline version of $$dyncall. */
8192 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
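/* The plabel check from $$dyncall, inlined: when bit 30 of %r22 is
   set, %r22 addresses a function descriptor; load the GP and the code
   address (the 2/-2 offsets appear to compensate for the set bit
   rather than clearing it).  */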
8194 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8195 "ldw 2(%%r22),%%r19\n\t"
8196 "ldw -2(%%r22),%%r22", xoperands);
8197 pa_output_arg_descriptor (insn);
8198 if (TARGET_NO_SPACE_REGS)
8200 if (TARGET_PA_20)
8201 return "bve,l,n (%%r22),%%r2\n\tnop";
8202 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8204 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8207 /* Now the normal case -- we can reach $$dyncall directly or
8208 we're sure that we can get there via a long-branch stub.
8210 No need to check target flags as the length uniquely identifies
8211 the remaining cases. */
8212 length = pa_attr_length_indirect_call (insn);
8213 if (length == 8)
8215 pa_output_arg_descriptor (insn);
8217 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8218 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8219 variant of the B,L instruction can't be used on the SOM target. */
8220 if (TARGET_PA_20 && !TARGET_SOM)
8221 return "b,l,n $$dyncall,%%r2\n\tnop";
8222 else
8223 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8226 /* Long millicode call, but we are not generating PIC or portable runtime
8227 code. */
8228 if (length == 12)
8230 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8231 pa_output_arg_descriptor (insn);
8232 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8235 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8236 pc-relative call sequence is five instructions. The inline PA 2.0
8237 version of $$dyncall is also five instructions. The PA 1.X versions
8238 are longer but still an overall win. */
8239 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8241 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8242 "ldw 2(%%r22),%%r19\n\t"
8243 "ldw -2(%%r22),%%r22", xoperands);
8244 if (TARGET_NO_SPACE_REGS)
8246 pa_output_arg_descriptor (insn);
8247 if (TARGET_PA_20)
8248 return "bve,l,n (%%r22),%%r2\n\tnop";
8249 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8251 if (TARGET_PA_20)
8253 pa_output_arg_descriptor (insn);
8254 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8256 output_asm_insn ("bl .+8,%%r2\n\t"
8257 "ldo 16(%%r2),%%r2\n\t"
8258 "ldsid (%%r22),%%r1\n\t"
8259 "mtsp %%r1,%%sr0", xoperands);
8260 pa_output_arg_descriptor (insn);
8261 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8264 /* We need a long PIC call to $$dyncall. */
8265 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8266 xoperands[1] = gen_rtx_REG (Pmode, 2);
8267 xoperands[2] = gen_rtx_REG (Pmode, 1);
8268 pa_output_pic_pcrel_sequence (xoperands);
8269 pa_output_arg_descriptor (insn);
8270 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8273 /* In HPUX 8.0's shared library scheme, special relocations are needed
8274 for function labels if they might be passed to a function
8275 in a shared library (because shared libraries don't live in code
8276 space), and special magic is needed to construct their address. */
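/* For example, a symbol named "foo" leaves pa_encode_label as "@foo";
   pa_strip_name_encoding below undoes this.  */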
8278 void
8279 pa_encode_label (rtx sym)
8281 const char *str = XSTR (sym, 0);
8282 int len = strlen (str) + 1;
8283 char *newstr, *p;
8285 p = newstr = XALLOCAVEC (char, len + 1);
8286 *p++ = '@';
8287 strcpy (p, str);
8289 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8292 static void
8293 pa_encode_section_info (tree decl, rtx rtl, int first)
8295 int old_referenced = 0;
8297 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8298 old_referenced
8299 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8301 default_encode_section_info (decl, rtl, first);
8303 if (first && TEXT_SPACE_P (decl))
8305 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8306 if (TREE_CODE (decl) == FUNCTION_DECL)
8307 pa_encode_label (XEXP (rtl, 0));
8309 else if (old_referenced)
8310 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8313 /* This is sort of inverse to pa_encode_section_info. */
8315 static const char *
8316 pa_strip_name_encoding (const char *str)
8318 str += (*str == '@');
8319 str += (*str == '*');
8320 return str;
8323 /* Returns 1 if OP is a function label involved in a simple addition
8324 with a constant. Used to keep certain patterns from matching
8325 during instruction combination. */
8327 pa_is_function_label_plus_const (rtx op)
8329 /* Strip off any CONST. */
8330 if (GET_CODE (op) == CONST)
8331 op = XEXP (op, 0);
8333 return (GET_CODE (op) == PLUS
8334 && function_label_operand (XEXP (op, 0), VOIDmode)
8335 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8338 /* Output assembly code for a thunk to FUNCTION. */
8340 static void
8341 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8342 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8343 tree function)
8345 static unsigned int current_thunk_number;
8346 int val_14 = VAL_14_BITS_P (delta);
8347 unsigned int old_last_address = last_address, nbytes = 0;
8348 char label[17];
8349 rtx xoperands[4];
8351 xoperands[0] = XEXP (DECL_RTL (function), 0);
8352 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8353 xoperands[2] = GEN_INT (delta);
8355 final_start_function (emit_barrier (), file, 1);
8357 /* Output the thunk. We know that the function is in the same
8358 translation unit (i.e., the same space) as the thunk, and that
8359 thunks are output after their method. Thus, we don't need an
8360 external branch to reach the function. With SOM and GAS,
8361 functions and thunks are effectively in different sections.
8362 Thus, we can always use an IA-relative branch and the linker
8363 will add a long branch stub if necessary.
8365 However, we have to be careful when generating PIC code on the
8366 SOM port to ensure that the sequence does not transfer to an
8367 import stub for the target function as this could clobber the
8368 return value saved at SP-24. This would also apply to the
8369 32-bit linux port if the multi-space model is implemented. */
8370 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8371 && !(flag_pic && TREE_PUBLIC (function))
8372 && (TARGET_GAS || last_address < 262132))
8373 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8374 && ((targetm_common.have_named_sections
8375 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8376 /* The GNU 64-bit linker has rather poor stub management.
8377 So, we use a long branch from thunks that aren't in
8378 the same section as the target function. */
8379 && ((!TARGET_64BIT
8380 && (DECL_SECTION_NAME (thunk_fndecl)
8381 != DECL_SECTION_NAME (function)))
8382 || ((DECL_SECTION_NAME (thunk_fndecl)
8383 == DECL_SECTION_NAME (function))
8384 && last_address < 262132)))
8385 /* In this case, we need to be able to reach the start of
8386 the stub table even though the function is likely closer
8387 and can be jumped to directly. */
8388 || (targetm_common.have_named_sections
8389 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8390 && DECL_SECTION_NAME (function) == NULL
8391 && total_code_bytes < MAX_PCREL17F_OFFSET)
8392 /* Likewise. */
8393 || (!targetm_common.have_named_sections
8394 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8396 if (!val_14)
8397 output_asm_insn ("addil L'%2,%%r26", xoperands);
8399 output_asm_insn ("b %0", xoperands);
8401 if (val_14)
8403 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8404 nbytes += 8;
8406 else
8408 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8409 nbytes += 12;
8412 else if (TARGET_64BIT)
8414 rtx xop[4];
8416 /* We only have one call-clobbered scratch register, so we can't
8417 make use of the delay slot if delta doesn't fit in 14 bits. */
8418 if (!val_14)
8420 output_asm_insn ("addil L'%2,%%r26", xoperands);
8421 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8424 /* Load function address into %r1. */
8425 xop[0] = xoperands[0];
8426 xop[1] = gen_rtx_REG (Pmode, 1);
8427 xop[2] = xop[1];
8428 pa_output_pic_pcrel_sequence (xop);
8430 if (val_14)
8432 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8433 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8434 nbytes += 20;
8436 else
8438 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8439 nbytes += 24;
8442 else if (TARGET_PORTABLE_RUNTIME)
8444 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8445 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8447 if (!val_14)
8448 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8450 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8452 if (val_14)
8454 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8455 nbytes += 16;
8457 else
8459 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8460 nbytes += 20;
8463 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8465 /* The function is accessible from outside this module. The only
8466 way to avoid an import stub between the thunk and function is to
8467 call the function directly with an indirect sequence similar to
8468 that used by $$dyncall. This is possible because $$dyncall acts
8469 as the import stub in an indirect call. */
8470 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8471 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8472 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8473 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8474 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8475 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8476 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8477 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8478 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8480 if (!val_14)
8482 output_asm_insn ("addil L'%2,%%r26", xoperands);
8483 nbytes += 4;
8486 if (TARGET_PA_20)
8488 output_asm_insn ("bve (%%r22)", xoperands);
8489 nbytes += 36;
8491 else if (TARGET_NO_SPACE_REGS)
8493 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8494 nbytes += 36;
8496 else
8498 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8499 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8500 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8501 nbytes += 44;
8504 if (val_14)
8505 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8506 else
8507 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8509 else if (flag_pic)
8511 rtx xop[4];
8513 /* Load function address into %r22. */
8514 xop[0] = xoperands[0];
8515 xop[1] = gen_rtx_REG (Pmode, 1);
8516 xop[2] = gen_rtx_REG (Pmode, 22);
8517 pa_output_pic_pcrel_sequence (xop);
8519 if (!val_14)
8520 output_asm_insn ("addil L'%2,%%r26", xoperands);
8522 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8524 if (val_14)
8526 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8527 nbytes += 20;
8529 else
8531 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8532 nbytes += 24;
8535 else
8537 if (!val_14)
8538 output_asm_insn ("addil L'%2,%%r26", xoperands);
8540 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8541 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8543 if (val_14)
8545 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8546 nbytes += 12;
8548 else
8550 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8551 nbytes += 16;
8555 final_end_function ();
8557 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8559 switch_to_section (data_section);
8560 output_asm_insn (".align 4", xoperands);
8561 ASM_OUTPUT_LABEL (file, label);
8562 output_asm_insn (".word P'%0", xoperands);
8565 current_thunk_number++;
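/* Round the thunk size up to the function alignment, e.g. to a
   multiple of 4 bytes when FUNCTION_BOUNDARY is 32 bits.  */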
8566 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8567 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8568 last_address += nbytes;
8569 if (old_last_address > last_address)
8570 last_address = UINT_MAX;
8571 update_total_code_bytes (nbytes);
8574 /* Only direct calls to static functions are allowed to be sibling (tail)
8575 call optimized.
8577 This restriction is necessary because some linker-generated stubs will
8578 store return pointers into rp' in some cases, which might clobber a
8579 live value already in rp'.
8581 In a sibcall the current function and the target function share stack
8582 space. Thus if the path to the current function and the path to the
8583 target function save a value in rp', they save the value into the
8584 same stack slot, which has undesirable consequences.
8586 Because of the deferred binding nature of shared libraries any function
8587 with external scope could be in a different load module and thus require
8588 rp' to be saved when calling that function. So sibcall optimizations
8589 can only be safe for static functions.
8591 Note that GCC never needs return value relocations, so we don't have to
8592 worry about static calls with return value relocations (which require
8593 saving rp').
8595 It is safe to perform a sibcall optimization when the target function
8596 will never return. */
8597 static bool
8598 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8600 if (TARGET_PORTABLE_RUNTIME)
8601 return false;
8603 /* Sibcalls are not ok because the arg pointer register is not a fixed
8604 register. This prevents the sibcall optimization from occurring. In
8605 addition, there are problems with stub placement using GNU ld. This
8606 is because a normal sibcall branch uses a 17-bit relocation while
8607 a regular call branch uses a 22-bit relocation. As a result, more
8608 care needs to be taken in the placement of long-branch stubs. */
8609 if (TARGET_64BIT)
8610 return false;
8612 /* Sibcalls are only ok within a translation unit. */
8613 return (decl && !TREE_PUBLIC (decl));
8616 /* ??? Addition is not commutative on the PA due to the weird implicit
8617 space register selection rules for memory addresses. Therefore, we
8618 don't consider a + b == b + a, as this might be inside a MEM. */
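/* That is, the space register for a memory access is selected
   implicitly from the address operands, so swapping the operands of
   a PLUS inside a MEM could change which space is referenced.  */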
8619 static bool
8620 pa_commutative_p (const_rtx x, int outer_code)
8622 return (COMMUTATIVE_P (x)
8623 && (TARGET_NO_SPACE_REGS
8624 || (outer_code != UNKNOWN && outer_code != MEM)
8625 || GET_CODE (x) != PLUS));
8628 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8629 use in fmpyadd instructions. */
8631 pa_fmpyaddoperands (rtx *operands)
8633 machine_mode mode = GET_MODE (operands[0]);
8635 /* Must be a floating point mode. */
8636 if (mode != SFmode && mode != DFmode)
8637 return 0;
8639 /* All modes must be the same. */
8640 if (! (mode == GET_MODE (operands[1])
8641 && mode == GET_MODE (operands[2])
8642 && mode == GET_MODE (operands[3])
8643 && mode == GET_MODE (operands[4])
8644 && mode == GET_MODE (operands[5])))
8645 return 0;
8647 /* All operands must be registers. */
8648 if (! (GET_CODE (operands[1]) == REG
8649 && GET_CODE (operands[2]) == REG
8650 && GET_CODE (operands[3]) == REG
8651 && GET_CODE (operands[4]) == REG
8652 && GET_CODE (operands[5]) == REG))
8653 return 0;
8655 /* Only 2 real operands to the addition. One of the input operands must
8656 be the same as the output operand. */
8657 if (! rtx_equal_p (operands[3], operands[4])
8658 && ! rtx_equal_p (operands[3], operands[5]))
8659 return 0;
8661 /* Inout operand of add cannot conflict with any operands from multiply. */
8662 if (rtx_equal_p (operands[3], operands[0])
8663 || rtx_equal_p (operands[3], operands[1])
8664 || rtx_equal_p (operands[3], operands[2]))
8665 return 0;
8667 /* multiply cannot feed into addition operands. */
8668 if (rtx_equal_p (operands[4], operands[0])
8669 || rtx_equal_p (operands[5], operands[0]))
8670 return 0;
8672 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8673 if (mode == SFmode
8674 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8675 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8676 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8677 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8678 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8679 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8680 return 0;
8682 /* Passed. Operands are suitable for fmpyadd. */
8683 return 1;
8686 #if !defined(USE_COLLECT2)
8687 static void
8688 pa_asm_out_constructor (rtx symbol, int priority)
8690 if (!function_label_operand (symbol, VOIDmode))
8691 pa_encode_label (symbol);
8693 #ifdef CTORS_SECTION_ASM_OP
8694 default_ctor_section_asm_out_constructor (symbol, priority);
8695 #else
8696 # ifdef TARGET_ASM_NAMED_SECTION
8697 default_named_section_asm_out_constructor (symbol, priority);
8698 # else
8699 default_stabs_asm_out_constructor (symbol, priority);
8700 # endif
8701 #endif
8704 static void
8705 pa_asm_out_destructor (rtx symbol, int priority)
8707 if (!function_label_operand (symbol, VOIDmode))
8708 pa_encode_label (symbol);
8710 #ifdef DTORS_SECTION_ASM_OP
8711 default_dtor_section_asm_out_destructor (symbol, priority);
8712 #else
8713 # ifdef TARGET_ASM_NAMED_SECTION
8714 default_named_section_asm_out_destructor (symbol, priority);
8715 # else
8716 default_stabs_asm_out_destructor (symbol, priority);
8717 # endif
8718 #endif
8720 #endif
8722 /* This function places uninitialized global data in the bss section.
8723 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8724 function on the SOM port to prevent uninitialized global data from
8725 being placed in the data section. */
8727 void
8728 pa_asm_output_aligned_bss (FILE *stream,
8729 const char *name,
8730 unsigned HOST_WIDE_INT size,
8731 unsigned int align)
8733 switch_to_section (bss_section);
8734 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8736 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8737 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8738 #endif
8740 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8741 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8742 #endif
8744 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8745 ASM_OUTPUT_LABEL (stream, name);
8746 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8749 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8750 that doesn't allow the alignment of global common storage to be directly
8751 specified. The SOM linker aligns common storage based on the rounded
8752 value of the NUM_BYTES parameter in the .comm directive. It's not
8753 possible to use the .align directive as it doesn't affect the alignment
8754 of the label associated with a .comm directive. */
8756 void
8757 pa_asm_output_aligned_common (FILE *stream,
8758 const char *name,
8759 unsigned HOST_WIDE_INT size,
8760 unsigned int align)
8762 unsigned int max_common_align;
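/* These limits are in bits: 128 on 64-bit targets; otherwise 256 for
   objects of 4096 bytes or more and 64 for smaller ones.  */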
8764 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8765 if (align > max_common_align)
8767 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8768 "for global common data. Using %u",
8769 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8770 align = max_common_align;
8773 switch_to_section (bss_section);
8775 assemble_name (stream, name);
8776 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8777 MAX (size, align / BITS_PER_UNIT));
8780 /* We can't use .comm for local common storage as the SOM linker effectively
8781 treats the symbol as universal and uses the same storage for local symbols
8782 with the same name in different object files. The .block directive
8783 reserves an uninitialized block of storage. However, it's not common
8784 storage. Fortunately, GCC never requests common storage with the same
8785 name in any given translation unit. */
8787 void
8788 pa_asm_output_aligned_local (FILE *stream,
8789 const char *name,
8790 unsigned HOST_WIDE_INT size,
8791 unsigned int align)
8793 switch_to_section (bss_section);
8794 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8796 #ifdef LOCAL_ASM_OP
8797 fprintf (stream, "%s", LOCAL_ASM_OP);
8798 assemble_name (stream, name);
8799 fprintf (stream, "\n");
8800 #endif
8802 ASM_OUTPUT_LABEL (stream, name);
8803 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8806 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8807 use in fmpysub instructions. */
8809 pa_fmpysuboperands (rtx *operands)
8811 machine_mode mode = GET_MODE (operands[0]);
8813 /* Must be a floating point mode. */
8814 if (mode != SFmode && mode != DFmode)
8815 return 0;
8817 /* All modes must be the same. */
8818 if (! (mode == GET_MODE (operands[1])
8819 && mode == GET_MODE (operands[2])
8820 && mode == GET_MODE (operands[3])
8821 && mode == GET_MODE (operands[4])
8822 && mode == GET_MODE (operands[5])))
8823 return 0;
8825 /* All operands must be registers. */
8826 if (! (GET_CODE (operands[1]) == REG
8827 && GET_CODE (operands[2]) == REG
8828 && GET_CODE (operands[3]) == REG
8829 && GET_CODE (operands[4]) == REG
8830 && GET_CODE (operands[5]) == REG))
8831 return 0;
8833 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8834 operation, so operands[4] must be the same as operands[3].
8835 if (! rtx_equal_p (operands[3], operands[4]))
8836 return 0;
8838 /* multiply cannot feed into subtraction. */
8839 if (rtx_equal_p (operands[5], operands[0]))
8840 return 0;
8842 /* Inout operand of sub cannot conflict with any operands from multiply. */
8843 if (rtx_equal_p (operands[3], operands[0])
8844 || rtx_equal_p (operands[3], operands[1])
8845 || rtx_equal_p (operands[3], operands[2]))
8846 return 0;
8848 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8849 if (mode == SFmode
8850 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8851 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8852 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8853 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8854 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8855 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8856 return 0;
8858 /* Passed. Operands are suitable for fmpysub. */
8859 return 1;
8862 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8863 constants for a MULT embedded inside a memory address. */
8865 pa_mem_shadd_constant_p (int val)
8867 if (val == 2 || val == 4 || val == 8)
8868 return 1;
8869 else
8870 return 0;
8873 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8874 constants for shadd instructions. */
8876 pa_shadd_constant_p (int val)
8878 if (val == 1 || val == 2 || val == 3)
8879 return 1;
8880 else
8881 return 0;
8884 /* Return TRUE if INSN branches forward. */
8886 static bool
8887 forward_branch_p (rtx_insn *insn)
8889 rtx lab = JUMP_LABEL (insn);
8891 /* The INSN must have a jump label. */
8892 gcc_assert (lab != NULL_RTX);
8894 if (INSN_ADDRESSES_SET_P ())
8895 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
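/* Insn addresses are unavailable; scan forward from INSN and treat
   the branch as forward if we reach LAB.  */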
8897 while (insn)
8899 if (insn == lab)
8900 return true;
8901 else
8902 insn = NEXT_INSN (insn);
8905 return false;
8908 /* Output an unconditional move and branch insn. */
8910 const char *
8911 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8913 int length = get_attr_length (insn);
8915 /* These are the cases in which we win. */
8916 if (length == 4)
8917 return "mov%I1b,tr %1,%0,%2";
8919 /* None of the following cases win, but they don't lose either. */
8920 if (length == 8)
8922 if (dbr_sequence_length () == 0)
8924 /* Nothing in the delay slot, fake it by putting the combined
8925 insn (the ldi or copy) in the delay slot of a bl. */
8926 if (GET_CODE (operands[1]) == CONST_INT)
8927 return "b %2\n\tldi %1,%0";
8928 else
8929 return "b %2\n\tcopy %1,%0";
8931 else
8933 /* Something in the delay slot, but we've got a long branch. */
8934 if (GET_CODE (operands[1]) == CONST_INT)
8935 return "ldi %1,%0\n\tb %2";
8936 else
8937 return "copy %1,%0\n\tb %2";
8941 if (GET_CODE (operands[1]) == CONST_INT)
8942 output_asm_insn ("ldi %1,%0", operands);
8943 else
8944 output_asm_insn ("copy %1,%0", operands);
8945 return pa_output_lbranch (operands[2], insn, 1);
8948 /* Output an unconditional add and branch insn. */
8950 const char *
8951 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8953 int length = get_attr_length (insn);
8955 /* To make life easy we want operand0 to be the shared input/output
8956 operand and operand1 to be the readonly operand. */
8957 if (operands[0] == operands[1])
8958 operands[1] = operands[2];
8960 /* These are the cases in which we win. */
8961 if (length == 4)
8962 return "add%I1b,tr %1,%0,%3";
8964 /* None of the following cases win, but they don't lose either. */
8965 if (length == 8)
8967 if (dbr_sequence_length () == 0)
8968 /* Nothing in the delay slot, fake it by putting the combined
8969 insn (the add) in the delay slot of a bl. */
8970 return "b %3\n\tadd%I1 %1,%0,%0";
8971 else
8972 /* Something in the delay slot, but we've got a long branch. */
8973 return "add%I1 %1,%0,%0\n\tb %3";
8976 output_asm_insn ("add%I1 %1,%0,%0", operands);
8977 return pa_output_lbranch (operands[3], insn, 1);
8980 /* We use this hook to perform a PA specific optimization which is difficult
8981 to do in earlier passes. */
8983 static void
8984 pa_reorg (void)
8986 remove_useless_addtr_insns (1);
8988 if (pa_cpu < PROCESSOR_8000)
8989 pa_combine_instructions ();
8992 /* The PA has a number of odd instructions which can perform multiple
8993 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8994 it may be profitable to combine two instructions into one instruction
8995 with two outputs. It's not profitable on PA2.0 machines because the
8996 two outputs would take two slots in the reorder buffers.
8998 This routine finds instructions which can be combined and combines
8999 them. We only support some of the potential combinations, and we
9000 only try common ways to find suitable instructions.
9002 * addb can add two registers or a register and a small integer
9003 and jump to a nearby (+-8k) location. Normally the jump to the
9004 nearby location is conditional on the result of the add, but by
9005 using the "true" condition we can make the jump unconditional.
9006 Thus addb can perform two independent operations in one insn.
9008 * movb is similar to addb in that it can perform a reg->reg
9009 or small immediate->reg copy and jump to a nearby (+-8k) location.
9011 * fmpyadd and fmpysub can perform a FP multiply and either an
9012 FP add or FP sub if the operands of the multiply and add/sub are
9013 independent (there are other minor restrictions). Note both
9014 the fmpy and fadd/fsub can in theory move to better spots according
9015 to data dependencies, but for now we require the fmpy stay at a
9016 fixed location.
9018 * Many of the memory operations can perform pre & post updates
9019 of index registers. GCC's pre/post increment/decrement addressing
9020 is far too simple to take advantage of all the possibilities. This
9021 pass may not be suitable since those insns may not be independent.
9023 * comclr can compare two ints or an int and a register, nullify
9024 the following instruction and zero some other register. This
9025 is more difficult to use as it's harder to find an insn which
9026 will generate a comclr than finding something like an unconditional
9027 branch. (conditional moves & long branches create comclr insns).
9029 * Most arithmetic operations can conditionally skip the next
9030 instruction. They can be viewed as "perform this operation
9031 and conditionally jump to this nearby location" (where nearby
9032 is an insn away). These are difficult to use due to the
9033 branch length restrictions. */
9035 static void
9036 pa_combine_instructions (void)
9038 rtx_insn *anchor;
9040 /* This can get expensive since the basic algorithm is on the
9041 order of O(n^2) (or worse). Only do it for -O2 or higher
9042 levels of optimization. */
9043 if (optimize < 2)
9044 return;
9046 /* Walk down the list of insns looking for "anchor" insns which
9047 may be combined with "floating" insns. As the name implies,
9048 "anchor" instructions don't move, while "floating" insns may
9049 move around. */
9050 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9051 rtx_insn *new_rtx = make_insn_raw (par);
9053 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9055 enum attr_pa_combine_type anchor_attr;
9056 enum attr_pa_combine_type floater_attr;
9058 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9059 Also ignore any special USE insns. */
9060 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9061 || GET_CODE (PATTERN (anchor)) == USE
9062 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9063 continue;
9065 anchor_attr = get_attr_pa_combine_type (anchor);
9066 /* See if anchor is an insn suitable for combination. */
9067 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9068 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9069 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9070 && ! forward_branch_p (anchor)))
9072 rtx_insn *floater;
9074 for (floater = PREV_INSN (anchor);
9075 floater;
9076 floater = PREV_INSN (floater))
9078 if (NOTE_P (floater)
9079 || (NONJUMP_INSN_P (floater)
9080 && (GET_CODE (PATTERN (floater)) == USE
9081 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9082 continue;
9084 /* Anything except a regular INSN will stop our search. */
9085 if (! NONJUMP_INSN_P (floater))
9087 floater = NULL;
9088 break;
9091 /* See if FLOATER is suitable for combination with the
9092 anchor. */
9093 floater_attr = get_attr_pa_combine_type (floater);
9094 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9095 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9096 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9097 && floater_attr == PA_COMBINE_TYPE_FMPY))
9099 /* If ANCHOR and FLOATER can be combined, then we're
9100 done with this pass. */
9101 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9102 SET_DEST (PATTERN (floater)),
9103 XEXP (SET_SRC (PATTERN (floater)), 0),
9104 XEXP (SET_SRC (PATTERN (floater)), 1)))
9105 break;
9108 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9109 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9111 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9113 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9114 SET_DEST (PATTERN (floater)),
9115 XEXP (SET_SRC (PATTERN (floater)), 0),
9116 XEXP (SET_SRC (PATTERN (floater)), 1)))
9117 break;
9119 else
9121 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9122 SET_DEST (PATTERN (floater)),
9123 SET_SRC (PATTERN (floater)),
9124 SET_SRC (PATTERN (floater))))
9125 break;
9130 /* If we didn't find anything on the backwards scan try forwards. */
9131 if (!floater
9132 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9133 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9135 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9137 if (NOTE_P (floater)
9138 || (NONJUMP_INSN_P (floater)
9139 && (GET_CODE (PATTERN (floater)) == USE
9140 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9142 continue;
9144 /* Anything except a regular INSN will stop our search. */
9145 if (! NONJUMP_INSN_P (floater))
9147 floater = NULL;
9148 break;
9151 /* See if FLOATER is suitable for combination with the
9152 anchor. */
9153 floater_attr = get_attr_pa_combine_type (floater);
9154 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9155 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9156 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9157 && floater_attr == PA_COMBINE_TYPE_FMPY))
9159 /* If ANCHOR and FLOATER can be combined, then we're
9160 done with this pass. */
9161 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9162 SET_DEST (PATTERN (floater)),
9163 XEXP (SET_SRC (PATTERN (floater)),
9165 XEXP (SET_SRC (PATTERN (floater)),
9166 1)))
9167 break;
9172 /* FLOATER will be nonzero if we found a suitable floating
9173 insn for combination with ANCHOR. */
9174 if (floater
9175 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9176 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9178 /* Emit the new instruction and delete the old anchor. */
9179 emit_insn_before (gen_rtx_PARALLEL
9180 (VOIDmode,
9181 gen_rtvec (2, PATTERN (anchor),
9182 PATTERN (floater))),
9183 anchor);
9185 SET_INSN_DELETED (anchor);
9187 /* Emit a special USE insn for FLOATER, then delete
9188 the floating insn. */
9189 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9190 delete_insn (floater);
9192 continue;
9194 else if (floater
9195 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9197 rtx temp;
9198 /* Emit the new_jump instruction and delete the old anchor. */
9199 temp
9200 = emit_jump_insn_before (gen_rtx_PARALLEL
9201 (VOIDmode,
9202 gen_rtvec (2, PATTERN (anchor),
9203 PATTERN (floater))),
9204 anchor);
9206 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9207 SET_INSN_DELETED (anchor);
9209 /* Emit a special USE insn for FLOATER, then delete
9210 the floating insn. */
9211 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9212 delete_insn (floater);
9213 continue;
9219 static int
9220 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9221 int reversed, rtx dest,
9222 rtx src1, rtx src2)
9224 int insn_code_number;
9225 rtx_insn *start, *end;
9227 /* Create a PARALLEL with the patterns of ANCHOR and
9228 FLOATER, try to recognize it, then test constraints
9229 for the resulting pattern.
9231 If the pattern doesn't match or the constraints
9232 aren't met keep searching for a suitable floater
9233 insn. */
9234 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9235 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9236 INSN_CODE (new_rtx) = -1;
9237 insn_code_number = recog_memoized (new_rtx);
9238 basic_block bb = BLOCK_FOR_INSN (anchor);
9239 if (insn_code_number < 0
9240 || (extract_insn (new_rtx),
9241 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9242 return 0;
9244 if (reversed)
9246 start = anchor;
9247 end = floater;
9249 else
9251 start = floater;
9252 end = anchor;
9255 /* There are up to three operands to consider. One
9256 output and two inputs.
9258 The output must not be used between FLOATER & ANCHOR
9259 exclusive. The inputs must not be set between
9260 FLOATER and ANCHOR exclusive. */
9262 if (reg_used_between_p (dest, start, end))
9263 return 0;
9265 if (reg_set_between_p (src1, start, end))
9266 return 0;
9268 if (reg_set_between_p (src2, start, end))
9269 return 0;
9271 /* If we get here, then everything is good. */
9272 return 1;
9275 /* Return nonzero if references for INSN are delayed.
9277 Millicode insns are actually function calls with some special
9278 constraints on arguments and register usage.
9280 Millicode calls always expect their arguments in the integer argument
9281 registers, and always return their result in %r29 (ret1). They
9282 are expected to clobber their arguments, %r1, %r29, and the return
9283 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9285 This function tells reorg that the references to arguments and
9286 millicode calls do not appear to happen until after the millicode call.
9287 This allows reorg to put insns which set the argument registers into the
9288 delay slot of the millicode call -- thus they act more like traditional
9289 CALL_INSNs.
9291 Note we cannot consider side effects of the insn to be delayed because
9292 the branch and link insn will clobber the return pointer. If we happened
9293 to use the return pointer in the delay slot of the call, then we lose.
9295 get_attr_type will try to recognize the given insn, so make sure to
9296 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9297 in particular. */
9299 pa_insn_refs_are_delayed (rtx_insn *insn)
9301 return ((NONJUMP_INSN_P (insn)
9302 && GET_CODE (PATTERN (insn)) != SEQUENCE
9303 && GET_CODE (PATTERN (insn)) != USE
9304 && GET_CODE (PATTERN (insn)) != CLOBBER
9305 && get_attr_type (insn) == TYPE_MILLI));
9308 /* Promote the return value, but not the arguments. */
9310 static machine_mode
9311 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9312 machine_mode mode,
9313 int *punsignedp ATTRIBUTE_UNUSED,
9314 const_tree fntype ATTRIBUTE_UNUSED,
9315 int for_return)
9317 if (for_return == 0)
9318 return mode;
9319 return promote_mode (type, mode, punsignedp);
9322 /* On the HP-PA the value is found in register(s) 28(-29), unless
9323 the mode is SF or DF. Then the value is returned in fr4 (32).
9325 This must perform the same promotions as PROMOTE_MODE, else promoting
9326 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9328 Small structures must be returned in a PARALLEL on PA64 in order
9329 to match the HP Compiler ABI. */
9331 static rtx
9332 pa_function_value (const_tree valtype,
9333 const_tree func ATTRIBUTE_UNUSED,
9334 bool outgoing ATTRIBUTE_UNUSED)
9336 machine_mode valmode;
9338 if (AGGREGATE_TYPE_P (valtype)
9339 || TREE_CODE (valtype) == COMPLEX_TYPE
9340 || TREE_CODE (valtype) == VECTOR_TYPE)
9342 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9344 /* Handle aggregates that fit exactly in a word or double word. */
9345 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9346 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9348 if (TARGET_64BIT)
9350 /* Aggregates with a size less than or equal to 128 bits are
9351 returned in GR 28(-29). They are left justified. The pad
9352 bits are undefined. Larger aggregates are returned in
9353 memory. */
9354 rtx loc[2];
9355 int i, offset = 0;
9356 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9358 for (i = 0; i < ub; i++)
9360 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9361 gen_rtx_REG (DImode, 28 + i),
9362 GEN_INT (offset));
9363 offset += 8;
9366 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9368 else if (valsize > UNITS_PER_WORD)
9370 /* Aggregates 5 to 8 bytes in size are returned in general
9371 registers r28-r29 in the same manner as other
9372 non-floating-point objects. The data is right-justified and
9373 zero-extended to 64 bits. This is opposite to the normal
9374 justification used on big endian targets and requires
9375 special treatment. */
9376 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9377 gen_rtx_REG (DImode, 28), const0_rtx);
9378 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9382 if ((INTEGRAL_TYPE_P (valtype)
9383 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9384 || POINTER_TYPE_P (valtype))
9385 valmode = word_mode;
9386 else
9387 valmode = TYPE_MODE (valtype);
9389 if (TREE_CODE (valtype) == REAL_TYPE
9390 && !AGGREGATE_TYPE_P (valtype)
9391 && TYPE_MODE (valtype) != TFmode
9392 && !TARGET_SOFT_FLOAT)
9393 return gen_rtx_REG (valmode, 32);
9395 return gen_rtx_REG (valmode, 28);
9398 /* Implement the TARGET_LIBCALL_VALUE hook. */
9400 static rtx
9401 pa_libcall_value (machine_mode mode,
9402 const_rtx fun ATTRIBUTE_UNUSED)
9404 if (! TARGET_SOFT_FLOAT
9405 && (mode == SFmode || mode == DFmode))
9406 return gen_rtx_REG (mode, 32);
9407 else
9408 return gen_rtx_REG (mode, 28);
9411 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9413 static bool
9414 pa_function_value_regno_p (const unsigned int regno)
9416 if (regno == 28
9417 || (! TARGET_SOFT_FLOAT && regno == 32))
9418 return true;
9420 return false;
9423 /* Update the data in CUM to advance over an argument
9424 of mode MODE and data type TYPE.
9425 (TYPE is null for libcalls where that information may not be available.) */
9427 static void
9428 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9429 const_tree type, bool named ATTRIBUTE_UNUSED)
9431 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9432 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9434 cum->nargs_prototype--;
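/* Advance past the argument.  Multi-word arguments with a known type
   start on an even word boundary, so a word of padding is added when
   the current offset is odd.  */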
9435 cum->words += (arg_size
9436 + ((cum->words & 01)
9437 && type != NULL_TREE
9438 && arg_size > 1));
9441 /* Return the location of a parameter that is passed in a register or NULL
9442 if the parameter has any component that is passed in memory.
9444 This is new code and will be pushed into the net sources after
9445 further testing.
9447 ??? We might want to restructure this so that it looks more like other
9448 ports. */
9449 static rtx
9450 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9451 const_tree type, bool named ATTRIBUTE_UNUSED)
9453 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9454 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9455 int alignment = 0;
9456 int arg_size;
9457 int fpr_reg_base;
9458 int gpr_reg_base;
9459 rtx retval;
9461 if (mode == VOIDmode)
9462 return NULL_RTX;
9464 arg_size = FUNCTION_ARG_SIZE (mode, type);
9466 /* If this arg would be passed partially or totally on the stack, then
9467 this routine should return zero. pa_arg_partial_bytes will
9468 handle arguments which are split between regs and stack slots if
9469 the ABI mandates split arguments. */
9470 if (!TARGET_64BIT)
9472 /* The 32-bit ABI does not split arguments. */
9473 if (cum->words + arg_size > max_arg_words)
9474 return NULL_RTX;
9476 else
9478 if (arg_size > 1)
9479 alignment = cum->words & 1;
9480 if (cum->words + alignment >= max_arg_words)
9481 return NULL_RTX;
9484 /* The 32-bit and 64-bit ABIs are rather different,
9485 particularly in their handling of FP registers. We might
9486 be able to cleverly share code between them, but I'm not
9487 going to bother in the hope that splitting them up results
9488 in code that is more easily understood. */
9490 if (TARGET_64BIT)
9492 /* Advance the base registers to their current locations.
9494 Remember, gprs grow towards smaller register numbers while
9495 fprs grow towards higher register numbers. Also remember that
9496 although FP regs are 32-bit addressable, we pretend that
9497 the registers are 64 bits wide. */
9498 gpr_reg_base = 26 - cum->words;
9499 fpr_reg_base = 32 + cum->words;
9501 /* Arguments wider than one word and small aggregates need special
9502 treatment. */
9503 if (arg_size > 1
9504 || mode == BLKmode
9505 || (type && (AGGREGATE_TYPE_P (type)
9506 || TREE_CODE (type) == COMPLEX_TYPE
9507 || TREE_CODE (type) == VECTOR_TYPE)))
9509 /* Double-extended precision (80-bit), quad-precision (128-bit)
9510 and aggregates including complex numbers are aligned on
9511 128-bit boundaries. The first eight 64-bit argument slots
9512 are associated one-to-one with general registers r26
9513 through r19, and also with floating-point registers fr4
9514 through fr11. Arguments larger than one word are always
9515 passed in general registers.
9517 Using a PARALLEL with a word mode register results in left
9518 justified data on a big-endian target. */
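/* For example (a sketch of what the loop below constructs): a 16-byte
   aggregate with cum->words == 0 yields

     (parallel [(expr_list (reg:DI 26) (const_int 0))
                (expr_list (reg:DI 25) (const_int 8))])

   placing the first doubleword in %r26 and the second in %r25.  */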
9520 rtx loc[8];
9521 int i, offset = 0, ub = arg_size;
9523 /* Align the base register. */
9524 gpr_reg_base -= alignment;
9526 ub = MIN (ub, max_arg_words - cum->words - alignment);
9527 for (i = 0; i < ub; i++)
9529 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9530 gen_rtx_REG (DImode, gpr_reg_base),
9531 GEN_INT (offset));
9532 gpr_reg_base -= 1;
9533 offset += 8;
9536 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9539 else
9541 /* If the argument is larger than a word, then we know precisely
9542 which registers we must use. */
9543 if (arg_size > 1)
9545 if (cum->words)
9547 gpr_reg_base = 23;
9548 fpr_reg_base = 38;
9550 else
9552 gpr_reg_base = 25;
9553 fpr_reg_base = 34;
9556 /* Structures 5 to 8 bytes in size are passed in the general
9557 registers in the same manner as other non floating-point
9558 objects. The data is right-justified and zero-extended
9559 to 64 bits. This is opposite to the normal justification
9560 used on big endian targets and requires special treatment.
9561 We now define BLOCK_REG_PADDING to pad these objects.
9562 Aggregates, complex and vector types are passed in the same
9563 manner as structures. */
9564 if (mode == BLKmode
9565 || (type && (AGGREGATE_TYPE_P (type)
9566 || TREE_CODE (type) == COMPLEX_TYPE
9567 || TREE_CODE (type) == VECTOR_TYPE)))
9569 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9570 gen_rtx_REG (DImode, gpr_reg_base),
9571 const0_rtx);
9572 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9575 else
9577 /* We have a single word (32 bits). A simple computation
9578 will get us the register #s we need. */
9579 gpr_reg_base = 26 - cum->words;
9580 fpr_reg_base = 32 + 2 * cum->words;
9584 /* Determine if the argument needs to be passed in both general and
9585 floating point registers. */
9586 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9587 /* If we are doing soft-float with portable runtime, then there
9588 is no need to worry about FP regs. */
9589 && !TARGET_SOFT_FLOAT
9590 /* The parameter must be some kind of scalar float, else we just
9591 pass it in integer registers. */
9592 && GET_MODE_CLASS (mode) == MODE_FLOAT
9593 /* The target function must not have a prototype. */
9594 && cum->nargs_prototype <= 0
9595 /* libcalls do not need to pass items in both FP and general
9596 registers. */
9597 && type != NULL_TREE
9598 /* All this hair applies to "outgoing" args only. This includes
9599 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9600 && !cum->incoming)
9601 /* Also pass outgoing floating arguments in both registers in indirect
9602 calls with the 32-bit ABI and the HP assembler since there is no
9603 way to specify argument locations in static functions. */
9604 || (!TARGET_64BIT
9605 && !TARGET_GAS
9606 && !cum->incoming
9607 && cum->indirect
9608 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9610 retval
9611 = gen_rtx_PARALLEL
9612 (mode,
9613 gen_rtvec (2,
9614 gen_rtx_EXPR_LIST (VOIDmode,
9615 gen_rtx_REG (mode, fpr_reg_base),
9616 const0_rtx),
9617 gen_rtx_EXPR_LIST (VOIDmode,
9618 gen_rtx_REG (mode, gpr_reg_base),
9619 const0_rtx)));
9621 else
9623 /* See if we should pass this parameter in a general register. */
9624 if (TARGET_SOFT_FLOAT
9625 /* Indirect calls in the normal 32bit ABI require all arguments
9626 to be passed in general registers. */
9627 || (!TARGET_PORTABLE_RUNTIME
9628 && !TARGET_64BIT
9629 && !TARGET_ELF32
9630 && cum->indirect)
9631 /* If the parameter is not a scalar floating-point parameter,
9632 then it belongs in GPRs. */
9633 || GET_MODE_CLASS (mode) != MODE_FLOAT
9634 /* Structure with single SFmode field belongs in GPR. */
9635 || (type && AGGREGATE_TYPE_P (type)))
9636 retval = gen_rtx_REG (mode, gpr_reg_base);
9637 else
9638 retval = gen_rtx_REG (mode, fpr_reg_base);
9640 return retval;
9643 /* Arguments larger than one word are double word aligned. */
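/* For example (following the code below): a 4-byte int, a zero-sized
   type, or a type whose size is not constant gets PARM_BOUNDARY, while
   an 8-byte double or a 16-byte struct gets MAX_PARM_BOUNDARY.  */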
9645 static unsigned int
9646 pa_function_arg_boundary (machine_mode mode, const_tree type)
9648 bool singleword = (type
9649 ? (integer_zerop (TYPE_SIZE (type))
9650 || !TREE_CONSTANT (TYPE_SIZE (type))
9651 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9652 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9654 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9657 /* If this arg would be passed totally in registers or totally on the stack,
9658 then this routine should return zero. */
9660 static int
9661 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9662 tree type, bool named ATTRIBUTE_UNUSED)
9664 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9665 unsigned int max_arg_words = 8;
9666 unsigned int offset = 0;
9668 if (!TARGET_64BIT)
9669 return 0;
9671 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9672 offset = 1;
9674 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9675 /* Arg fits fully into registers. */
9676 return 0;
9677 else if (cum->words + offset >= max_arg_words)
9678 /* Arg fully on the stack. */
9679 return 0;
9680 else
9681 /* Arg is split. */
9682 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
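/* Worked example for the 64-bit ABI: with cum->words == 6 and a 4-word
   (32-byte) argument, the argument neither fits entirely in the eight
   register slots (6 + 4 > 8) nor starts on the stack (6 < 8), so
   (8 - 6) * UNITS_PER_WORD = 16 bytes go in registers and the rest is
   passed on the stack.  */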
9686 /* A get_unnamed_section callback for switching to the text section.
9688 This function is only used with SOM. Because we don't support
9689 named subspaces, we can only create a new subspace or switch back
9690 to the default text subspace. */
9692 static void
9693 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9695 gcc_assert (TARGET_SOM);
9696 if (TARGET_GAS)
9698 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9700 /* We only want to emit a .nsubspa directive once at the
9701 start of the function. */
9702 cfun->machine->in_nsubspa = 1;
9704 /* Create a new subspace for the text. This provides
9705 better stub placement and one-only functions. */
9706 if (cfun->decl
9707 && DECL_ONE_ONLY (cfun->decl)
9708 && !DECL_WEAK (cfun->decl))
9710 output_section_asm_op ("\t.SPACE $TEXT$\n"
9711 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9712 "ACCESS=44,SORT=24,COMDAT");
9713 return;
9716 else
9718 /* There isn't a current function or the body of the current
9719 function has been completed. So, we are changing to the
9720 text section to output debugging information. Thus, we
9721 need to forget that we are in the text section so that
9722 varasm.c will call us when text_section is selected again. */
9723 gcc_assert (!cfun || !cfun->machine
9724 || cfun->machine->in_nsubspa == 2);
9725 in_section = NULL;
9727 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9728 return;
9730 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9733 /* A get_unnamed_section callback for switching to comdat data
9734 sections. This function is only used with SOM. */
9736 static void
9737 som_output_comdat_data_section_asm_op (const void *data)
9739 in_section = NULL;
9740 output_section_asm_op (data);
9743 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9745 static void
9746 pa_som_asm_init_sections (void)
9748 text_section
9749 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9751 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9752 is not being generated. */
9753 som_readonly_data_section
9754 = get_unnamed_section (0, output_section_asm_op,
9755 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9757 /* When secondary definitions are not supported, SOM makes readonly
9758 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9759 the comdat flag. */
9760 som_one_only_readonly_data_section
9761 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9762 "\t.SPACE $TEXT$\n"
9763 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9764 "ACCESS=0x2c,SORT=16,COMDAT");
9767 /* When secondary definitions are not supported, SOM makes data one-only
9768 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9769 som_one_only_data_section
9770 = get_unnamed_section (SECTION_WRITE,
9771 som_output_comdat_data_section_asm_op,
9772 "\t.SPACE $PRIVATE$\n"
9773 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9774 "ACCESS=31,SORT=24,COMDAT");
9776 if (flag_tm)
9777 som_tm_clone_table_section
9778 = get_unnamed_section (0, output_section_asm_op,
9779 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9781 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9782 which reference data within the $TEXT$ space (for example constant
9783 strings in the $LIT$ subspace).
9785 The assemblers (GAS and HP as) both have problems with handling
9786 the difference of two symbols which is the other correct way to
9787 reference constant data during PIC code generation.
9789 So, there's no way to reference constant data which is in the
9790 $TEXT$ space during PIC generation. Instead place all constant
9791 data into the $PRIVATE$ subspace (this reduces sharing, but it
9792 works correctly). */
9793 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9795 /* We must not have a reference to an external symbol defined in a
9796 shared library in a readonly section, else the SOM linker will
9797 complain.
9799 So, we force exception information into the data section. */
9800 exception_section = data_section;
9803 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9805 static section *
9806 pa_som_tm_clone_table_section (void)
9808 return som_tm_clone_table_section;
9811 /* On hpux10, the linker will give an error if we have a reference
9812 in the read-only data section to a symbol defined in a shared
9813 library. Therefore, expressions that might require a reloc cannot
9814 be placed in the read-only data section. */
9816 static section *
9817 pa_select_section (tree exp, int reloc,
9818 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9820 if (TREE_CODE (exp) == VAR_DECL
9821 && TREE_READONLY (exp)
9822 && !TREE_THIS_VOLATILE (exp)
9823 && DECL_INITIAL (exp)
9824 && (DECL_INITIAL (exp) == error_mark_node
9825 || TREE_CONSTANT (DECL_INITIAL (exp)))
9826 && !reloc)
9828 if (TARGET_SOM
9829 && DECL_ONE_ONLY (exp)
9830 && !DECL_WEAK (exp))
9831 return som_one_only_readonly_data_section;
9832 else
9833 return readonly_data_section;
9835 else if (CONSTANT_CLASS_P (exp) && !reloc)
9836 return readonly_data_section;
9837 else if (TARGET_SOM
9838 && TREE_CODE (exp) == VAR_DECL
9839 && DECL_ONE_ONLY (exp)
9840 && !DECL_WEAK (exp))
9841 return som_one_only_data_section;
9842 else
9843 return data_section;
9846 /* Implement pa_reloc_rw_mask. */
9848 static int
9849 pa_reloc_rw_mask (void)
9851 /* We force (const (plus (symbol) (const_int))) to memory when the
9852 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9853 handle this construct in read-only memory and we want to avoid
9854 this for ELF. So, we always force an RTX needing relocation to
9855 the data section. */
9856 return 3;
9859 static void
9860 pa_globalize_label (FILE *stream, const char *name)
9862 /* We only handle DATA objects here; functions are globalized in
9863 ASM_DECLARE_FUNCTION_NAME. */
9864 if (! FUNCTION_NAME_P (name))
9866 fputs ("\t.EXPORT ", stream);
9867 assemble_name (stream, name);
9868 fputs (",DATA\n", stream);
9872 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9874 static rtx
9875 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9876 int incoming ATTRIBUTE_UNUSED)
9878 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9881 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9883 bool
9884 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9886 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9887 PA64 ABI says that objects larger than 128 bits are returned in memory.
9888 Note, int_size_in_bytes can return -1 if the size of the object is
9889 variable or larger than the maximum value that can be expressed as
9890 a HOST_WIDE_INT. It can also return zero for an empty type. The
9891 simplest way to handle variable and empty types is to pass them in
9892 memory. This avoids problems in defining the boundaries of argument
9893 slots, allocating registers, etc. */
9894 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9895 || int_size_in_bytes (type) <= 0);
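/* For example: a 12-byte struct is returned in memory on the 32-bit
   port (12 > 8) but in registers on the 64-bit port (12 <= 16).  A
   variable-sized type (int_size_in_bytes of -1) or an empty type
   (size 0) is always returned in memory via the second test.  */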
9898 /* Structure to hold declaration and name of external symbols that are
9899 emitted by GCC. We generate a vector of these symbols and output them
9900 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9901 This avoids putting out names that are never really used. */
9903 typedef struct GTY(()) extern_symbol
9905 tree decl;
9906 const char *name;
9907 } extern_symbol;
9909 /* Define gc'd vector type for extern_symbol. */
9911 /* Vector of extern_symbol entries. */
9912 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9914 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9915 /* Mark DECL (name NAME) as an external reference (assembler output
9916 file FILE). This saves the names to output at the end of the file
9917 if actually referenced. */
9919 void
9920 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9922 gcc_assert (file == asm_out_file);
9923 extern_symbol p = {decl, name};
9924 vec_safe_push (extern_symbols, p);
9927 /* Output text required at the end of an assembler file.
9928 This includes deferred plabels and .import directives for
9929 all external symbols that were actually referenced. */
9931 static void
9932 pa_hpux_file_end (void)
9934 unsigned int i;
9935 extern_symbol *p;
9937 if (!NO_DEFERRED_PROFILE_COUNTERS)
9938 output_deferred_profile_counters ();
9940 output_deferred_plabels ();
9942 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9944 tree decl = p->decl;
9946 if (!TREE_ASM_WRITTEN (decl)
9947 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9948 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9951 vec_free (extern_symbols);
9953 #endif
9955 /* Return true if a change from mode FROM to mode TO for a register
9956 in register class RCLASS is invalid. */
9958 bool
9959 pa_cannot_change_mode_class (machine_mode from, machine_mode to,
9960 enum reg_class rclass)
9962 if (from == to)
9963 return false;
9965 /* Reject changes to/from complex and vector modes. */
9966 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9967 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9968 return true;
9970 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9971 return false;
9973 /* There is no way to load QImode or HImode values directly from
9974 memory. SImode loads to the FP registers are not zero extended.
9975 On the 64-bit target, this conflicts with the definition of
9976 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9977 with different sizes in the floating-point registers. */
9978 if (MAYBE_FP_REG_CLASS_P (rclass))
9979 return true;
9981 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9982 in specific sets of registers. Thus, we cannot allow changing
9983 to a larger mode when it's larger than a word. */
9984 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9985 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9986 return true;
9988 return false;
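/* For example: pa_cannot_change_mode_class (SFmode, DFmode, FP_REGS)
   returns true because the mode sizes differ and the class may contain
   floating-point registers, while an SImode/SFmode change in
   GENERAL_REGS is allowed since the sizes match.  */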
9991 /* Returns TRUE if it is a good idea to tie two pseudo registers
9992 when one has mode MODE1 and one has mode MODE2.
9993 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9994 for any hard reg, then this must be FALSE for correct output.
9996 We should return FALSE for QImode and HImode because these modes
9997 are not ok in the floating-point registers. However, this prevents
9998 tying these modes to SImode and DImode in the general registers.
9999 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
10000 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
10001 in the floating-point registers. */
10003 bool
10004 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10006 /* Don't tie modes in different classes. */
10007 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10008 return false;
10010 return true;
10014 /* Length in units of the trampoline instruction code. */
10016 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10019 /* Output assembler code for a block containing the constant parts
10020 of a trampoline, leaving space for the variable parts.
10022 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10023 and then branches to the specified routine.
10025 This code template is copied from the text segment to a stack
10026 location, patched by pa_trampoline_init to contain valid values,
10027 and then entered as a subroutine.
10029 It is best to keep this as small as possible to avoid having to
10030 flush multiple lines in the cache. */
10032 static void
10033 pa_asm_trampoline_template (FILE *f)
10035 if (!TARGET_64BIT)
10037 fputs ("\tldw 36(%r22),%r21\n", f);
10038 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10039 if (ASSEMBLER_DIALECT == 0)
10040 fputs ("\tdepi 0,31,2,%r21\n", f);
10041 else
10042 fputs ("\tdepwi 0,31,2,%r21\n", f);
10043 fputs ("\tldw 4(%r21),%r19\n", f);
10044 fputs ("\tldw 0(%r21),%r21\n", f);
10045 if (TARGET_PA_20)
10047 fputs ("\tbve (%r21)\n", f);
10048 fputs ("\tldw 40(%r22),%r29\n", f);
10049 fputs ("\t.word 0\n", f);
10050 fputs ("\t.word 0\n", f);
10052 else
10054 fputs ("\tldsid (%r21),%r1\n", f);
10055 fputs ("\tmtsp %r1,%sr0\n", f);
10056 fputs ("\tbe 0(%sr0,%r21)\n", f);
10057 fputs ("\tldw 40(%r22),%r29\n", f);
10059 fputs ("\t.word 0\n", f);
10060 fputs ("\t.word 0\n", f);
10061 fputs ("\t.word 0\n", f);
10062 fputs ("\t.word 0\n", f);
10064 else
10066 fputs ("\t.dword 0\n", f);
10067 fputs ("\t.dword 0\n", f);
10068 fputs ("\t.dword 0\n", f);
10069 fputs ("\t.dword 0\n", f);
10070 fputs ("\tmfia %r31\n", f);
10071 fputs ("\tldd 24(%r31),%r1\n", f);
10072 fputs ("\tldd 24(%r1),%r27\n", f);
10073 fputs ("\tldd 16(%r1),%r1\n", f);
10074 fputs ("\tbve (%r1)\n", f);
10075 fputs ("\tldd 32(%r31),%r31\n", f);
10076 fputs ("\t.dword 0 ; fptr\n", f);
10077 fputs ("\t.dword 0 ; static link\n", f);
10081 /* Emit RTL insns to initialize the variable parts of a trampoline.
10082 FNADDR is an RTX for the address of the function's pure code.
10083 CXT is an RTX for the static chain value for the function.
10085 Move the function address to the trampoline template at offset 36.
10086 Move the static chain value to the trampoline template at offset 40.
10087 Move the trampoline address to the trampoline template at offset 44.
10088 Move r19 to the trampoline template at offset 48. The latter two
10089 words create a plabel for the indirect call to the trampoline.
10091 A similar sequence is used for the 64-bit port but the plabel is
10092 at the beginning of the trampoline.
10094 Finally, the cache entries for the trampoline code are flushed.
10095 This is necessary to ensure that the trampoline instruction sequence
10096 is written to memory prior to any attempts at prefetching the code
10097 sequence. */
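/* Schematic layout of the initialized 32-bit trampoline block (byte
   offsets per the comment above; a sketch, not normative):

      0-35   instruction template copied from the text segment
      36     function address
      40     static chain value
      44     trampoline address  \  plabel used for the indirect
      48     %r19 (PIC register) /  call to the trampoline  */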
10099 static void
10100 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10102 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10103 rtx start_addr = gen_reg_rtx (Pmode);
10104 rtx end_addr = gen_reg_rtx (Pmode);
10105 rtx line_length = gen_reg_rtx (Pmode);
10106 rtx r_tramp, tmp;
10108 emit_block_move (m_tramp, assemble_trampoline_template (),
10109 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10110 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10112 if (!TARGET_64BIT)
10114 tmp = adjust_address (m_tramp, Pmode, 36);
10115 emit_move_insn (tmp, fnaddr);
10116 tmp = adjust_address (m_tramp, Pmode, 40);
10117 emit_move_insn (tmp, chain_value);
10119 /* Create a fat pointer for the trampoline. */
10120 tmp = adjust_address (m_tramp, Pmode, 44);
10121 emit_move_insn (tmp, r_tramp);
10122 tmp = adjust_address (m_tramp, Pmode, 48);
10123 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10125 /* fdc and fic only use registers for the address to flush,
10126 they do not accept integer displacements. We align the
10127 start and end addresses to the beginning of their respective
10128 cache lines to minimize the number of lines flushed. */
10129 emit_insn (gen_andsi3 (start_addr, r_tramp,
10130 GEN_INT (-MIN_CACHELINE_SIZE)));
10131 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10132 TRAMPOLINE_CODE_SIZE-1));
10133 emit_insn (gen_andsi3 (end_addr, tmp,
10134 GEN_INT (-MIN_CACHELINE_SIZE)));
10135 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10136 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10137 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10138 gen_reg_rtx (Pmode),
10139 gen_reg_rtx (Pmode)));
10141 else
10143 tmp = adjust_address (m_tramp, Pmode, 56);
10144 emit_move_insn (tmp, fnaddr);
10145 tmp = adjust_address (m_tramp, Pmode, 64);
10146 emit_move_insn (tmp, chain_value);
10148 /* Create a fat pointer for the trampoline. */
10149 tmp = adjust_address (m_tramp, Pmode, 16);
10150 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10151 r_tramp, 32)));
10152 tmp = adjust_address (m_tramp, Pmode, 24);
10153 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10155 /* fdc and fic only use registers for the address to flush,
10156 they do not accept integer displacements. We align the
10157 start and end addresses to the beginning of their respective
10158 cache lines to minimize the number of lines flushed. */
10159 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10160 emit_insn (gen_anddi3 (start_addr, tmp,
10161 GEN_INT (-MIN_CACHELINE_SIZE)));
10162 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10163 TRAMPOLINE_CODE_SIZE - 1));
10164 emit_insn (gen_anddi3 (end_addr, tmp,
10165 GEN_INT (-MIN_CACHELINE_SIZE)));
10166 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10167 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10168 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10169 gen_reg_rtx (Pmode),
10170 gen_reg_rtx (Pmode)));
10173 #ifdef HAVE_ENABLE_EXECUTE_STACK
10174 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10175 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10176 #endif
10179 /* Perform any machine-specific adjustment in the address of the trampoline.
10180 ADDR contains the address that was passed to pa_trampoline_init.
10181 Adjust the trampoline address to point to the plabel at offset 44. The displacement of 46 used below is 44 plus 2; the extra 2 sets the plabel bit that the 32-bit trampoline code tests with its bb instruction. */
10183 static rtx
10184 pa_trampoline_adjust_address (rtx addr)
10186 if (!TARGET_64BIT)
10187 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10188 return addr;
10191 static rtx
10192 pa_delegitimize_address (rtx orig_x)
10194 rtx x = delegitimize_mem_from_attrs (orig_x);
10196 if (GET_CODE (x) == LO_SUM
10197 && GET_CODE (XEXP (x, 1)) == UNSPEC
10198 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10199 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10200 return x;
10203 static rtx
10204 pa_internal_arg_pointer (void)
10206 /* The argument pointer and the hard frame pointer are the same in
10207 the 32-bit runtime, so we don't need a copy. */
10208 if (TARGET_64BIT)
10209 return copy_to_reg (virtual_incoming_args_rtx);
10210 else
10211 return virtual_incoming_args_rtx;
10214 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10215 Frame pointer elimination is automatically handled. */
10217 static bool
10218 pa_can_eliminate (const int from, const int to)
10220 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10221 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10222 return false;
10224 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10225 ? ! frame_pointer_needed
10226 : true);
10229 /* Define the offset between two registers, FROM to be eliminated and its
10230 replacement TO, at the start of a routine. */
10231 HOST_WIDE_INT
10232 pa_initial_elimination_offset (int from, int to)
10234 HOST_WIDE_INT offset;
10236 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10237 && to == STACK_POINTER_REGNUM)
10238 offset = -pa_compute_frame_size (get_frame_size (), 0);
10239 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10240 offset = 0;
10241 else
10242 gcc_unreachable ();
10244 return offset;
10247 static void
10248 pa_conditional_register_usage (void)
10250 int i;
10252 if (!TARGET_64BIT && !TARGET_PA_11)
10254 for (i = 56; i <= FP_REG_LAST; i++)
10255 fixed_regs[i] = call_used_regs[i] = 1;
10256 for (i = 33; i < 56; i += 2)
10257 fixed_regs[i] = call_used_regs[i] = 1;
10259 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10261 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10262 fixed_regs[i] = call_used_regs[i] = 1;
10264 if (flag_pic)
10265 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10268 /* Target hook for c_mode_for_suffix. */
10270 static machine_mode
10271 pa_c_mode_for_suffix (char suffix)
10273 if (HPUX_LONG_DOUBLE_LIBRARY)
10275 if (suffix == 'q')
10276 return TFmode;
10279 return VOIDmode;
10282 /* Target hook for function_section. */
10284 static section *
10285 pa_function_section (tree decl, enum node_frequency freq,
10286 bool startup, bool exit)
10288 /* Put functions in text section if target doesn't have named sections. */
10289 if (!targetm_common.have_named_sections)
10290 return text_section;
10292 /* Force nested functions into the same section as the containing
10293 function. */
10294 if (decl
10295 && DECL_SECTION_NAME (decl) == NULL
10296 && DECL_CONTEXT (decl) != NULL_TREE
10297 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10298 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10299 return function_section (DECL_CONTEXT (decl));
10301 /* Otherwise, use the default function section. */
10302 return default_function_section (decl, freq, startup, exit);
10305 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10307 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10308 that need more than three instructions to load prior to reload. This
10309 limit is somewhat arbitrary. It takes three instructions to load a
10310 CONST_INT from memory but two are memory accesses. It may be better
10311 to increase the allowed range for CONST_INTS. We may also be able
10312 to handle CONST_DOUBLES. */
10314 static bool
10315 pa_legitimate_constant_p (machine_mode mode, rtx x)
10317 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10318 return false;
10320 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10321 return false;
10323 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10324 legitimate constants. The other variants can't be handled by
10325 the move patterns after reload starts. */
10326 if (tls_referenced_p (x))
10327 return false;
10329 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10330 return false;
10332 if (TARGET_64BIT
10333 && HOST_BITS_PER_WIDE_INT > 32
10334 && GET_CODE (x) == CONST_INT
10335 && !reload_in_progress
10336 && !reload_completed
10337 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10338 && !pa_cint_ok_for_move (UINTVAL (x)))
10339 return false;
10341 if (function_label_operand (x, mode))
10342 return false;
10344 return true;
10347 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10349 static unsigned int
10350 pa_section_type_flags (tree decl, const char *name, int reloc)
10352 unsigned int flags;
10354 flags = default_section_type_flags (decl, name, reloc);
10356 /* Function labels are placed in the constant pool. This can
10357 cause a section conflict if decls are put in ".data.rel.ro"
10358 or ".data.rel.ro.local" using the __attribute__ construct. */
10359 if (strcmp (name, ".data.rel.ro") == 0
10360 || strcmp (name, ".data.rel.ro.local") == 0)
10361 flags |= SECTION_WRITE | SECTION_RELRO;
10363 return flags;
10366 /* pa_legitimate_address_p recognizes an RTL expression that is a
10367 valid memory address for an instruction. The MODE argument is the
10368 machine mode for the MEM expression that wants to use this address.
10370 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10371 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10372 available with floating point loads and stores, and integer loads.
10373 We get better code by allowing indexed addresses in the initial
10374 RTL generation.
10376 The acceptance of indexed addresses as legitimate implies that we
10377 must provide patterns for doing indexed integer stores, or the move
10378 expanders must force the address of an indexed store to a register.
10379 We have adopted the latter approach.
10381 Another function of pa_legitimate_address_p is to ensure that
10382 the base register is a valid pointer for indexed instructions.
10383 On targets that have non-equivalent space registers, we have to
10384 know at the time of assembler output which register in a REG+REG
10385 pair is the base register. The REG_POINTER flag is sometimes lost
10386 in reload and the following passes, so it can't be relied on during
10387 code generation. Thus, we either have to canonicalize the order
10388 of the registers in REG+REG indexed addresses, or treat REG+REG
10389 addresses separately and provide patterns for both permutations.
10391 The latter approach requires several hundred additional lines of
10392 code in pa.md. The downside to canonicalizing is that a PLUS
10393 in the wrong order can't combine to form a scaled indexed
10394 memory operand. As we won't need to canonicalize the operands if
10395 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10397 We initially break out scaled indexed addresses in canonical order
10398 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10399 scaled indexed addresses during RTL generation. However, fold_rtx
10400 has its own opinion on how the operands of a PLUS should be ordered.
10401 If one of the operands is equivalent to a constant, it will make
10402 that operand the second operand. As the base register is likely to
10403 be equivalent to a SYMBOL_REF, we have made it the second operand.
10405 pa_legitimate_address_p accepts REG+REG as legitimate when the
10406 operands are in the order INDEX+BASE on targets with non-equivalent
10407 space registers, and in any order on targets with equivalent space
10408 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10410 We treat a SYMBOL_REF as legitimate if it is part of the current
10411 function's constant-pool, because such addresses can actually be
10412 output as REG+SMALLINT. */
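/* Some example address forms handled below (schematic RTL; register
   numbers are illustrative and acceptance also depends on the mode
   and on STRICT):

     (reg 26)                                    base register
     (plus (reg 26) (const_int 12))              REG+SMALLINT
     (plus (reg 19) (reg 26))                    REG+REG, base second
     (plus (mult (reg 19) (const_int 4))
           (reg 26))                             scaled index, SImode
     (lo_sum (reg 26) (symbol_ref))              low part of a symbol  */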
10414 static bool
10415 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10417 if ((REG_P (x)
10418 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10419 : REG_OK_FOR_BASE_P (x)))
10420 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10421 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10422 && REG_P (XEXP (x, 0))
10423 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10424 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10425 return true;
10427 if (GET_CODE (x) == PLUS)
10429 rtx base, index;
10431 /* For REG+REG, the base register should be in XEXP (x, 1),
10432 so check it first. */
10433 if (REG_P (XEXP (x, 1))
10434 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10435 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10436 base = XEXP (x, 1), index = XEXP (x, 0);
10437 else if (REG_P (XEXP (x, 0))
10438 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10439 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10440 base = XEXP (x, 0), index = XEXP (x, 1);
10441 else
10442 return false;
10444 if (GET_CODE (index) == CONST_INT)
10446 if (INT_5_BITS (index))
10447 return true;
10449 /* When INT14_OK_STRICT is false, a secondary reload is needed
10450 to adjust the displacement of SImode and DImode floating point
10451 instructions but this may fail when the register also needs
10452 reloading. So, we return false when STRICT is true. We
10453 also reject long displacements for float mode addresses since
10454 the majority of accesses will use floating point instructions
10455 that don't support 14-bit offsets. */
10456 if (!INT14_OK_STRICT
10457 && (strict || !(reload_in_progress || reload_completed))
10458 && mode != QImode
10459 && mode != HImode)
10460 return false;
10462 return base14_operand (index, mode);
10465 if (!TARGET_DISABLE_INDEXING
10466 /* Only accept the "canonical" INDEX+BASE operand order
10467 on targets with non-equivalent space registers. */
10468 && (TARGET_NO_SPACE_REGS
10469 ? REG_P (index)
10470 : (base == XEXP (x, 1) && REG_P (index)
10471 && (reload_completed
10472 || (reload_in_progress && HARD_REGISTER_P (base))
10473 || REG_POINTER (base))
10474 && (reload_completed
10475 || (reload_in_progress && HARD_REGISTER_P (index))
10476 || !REG_POINTER (index))))
10477 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10478 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10479 : REG_OK_FOR_INDEX_P (index))
10480 && borx_reg_operand (base, Pmode)
10481 && borx_reg_operand (index, Pmode))
10482 return true;
10484 if (!TARGET_DISABLE_INDEXING
10485 && GET_CODE (index) == MULT
10486 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10487 && REG_P (XEXP (index, 0))
10488 && GET_MODE (XEXP (index, 0)) == Pmode
10489 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10490 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10491 && GET_CODE (XEXP (index, 1)) == CONST_INT
10492 && INTVAL (XEXP (index, 1))
10493 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10494 && borx_reg_operand (base, Pmode))
10495 return true;
10497 return false;
10500 if (GET_CODE (x) == LO_SUM)
10502 rtx y = XEXP (x, 0);
10504 if (GET_CODE (y) == SUBREG)
10505 y = SUBREG_REG (y);
10507 if (REG_P (y)
10508 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10509 : REG_OK_FOR_BASE_P (y)))
10511 /* Needed for -fPIC */
10512 if (mode == Pmode
10513 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10514 return true;
10516 if (!INT14_OK_STRICT
10517 && (strict || !(reload_in_progress || reload_completed))
10518 && mode != QImode
10519 && mode != HImode)
10520 return false;
10522 if (CONSTANT_P (XEXP (x, 1)))
10523 return true;
10525 return false;
10528 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10529 return true;
10531 return false;
10534 /* Look for machine dependent ways to make the invalid address AD a
10535 valid address.
10537 For the PA, transform:
10539 memory(X + <large int>)
10541 into:
10543 if (<large int> & mask) >= (mask + 1) / 2
10544 Y = (<large int> & ~mask) + mask + 1 Round up.
10545 else
10546 Y = (<large int> & ~mask) Round down.
10547 Z = X + Y
10548 memory (Z + (<large int> - Y));
10550 This makes reload inheritance and reload_cse work better since Z
10551 can be reused.
10553 There may be more opportunities to improve code with this hook. */
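/* Worked example (floating-point mode without INT14_OK_STRICT, so mask
   is 0x1f): for memory (X + 0x123), the low bits 0x03 are below the
   halfway point 0x10, so we round down to Y = 0x120 and produce
   memory ((X + 0x120) + 0x3).  The inner PLUS is reloaded into a base
   register that reload inheritance can reuse, and the remainder 0x3 is
   a valid 5-bit displacement.  */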
10555 rtx
10556 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10557 int opnum, int type,
10558 int ind_levels ATTRIBUTE_UNUSED)
10560 long offset, newoffset, mask;
10561 rtx new_rtx, temp = NULL_RTX;
10563 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10564 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10566 if (optimize && GET_CODE (ad) == PLUS)
10567 temp = simplify_binary_operation (PLUS, Pmode,
10568 XEXP (ad, 0), XEXP (ad, 1));
10570 new_rtx = temp ? temp : ad;
10572 if (optimize
10573 && GET_CODE (new_rtx) == PLUS
10574 && GET_CODE (XEXP (new_rtx, 0)) == REG
10575 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10577 offset = INTVAL (XEXP ((new_rtx), 1));
10579 /* Choose rounding direction. Round up if we are >= halfway. */
10580 if ((offset & mask) >= ((mask + 1) / 2))
10581 newoffset = (offset & ~mask) + mask + 1;
10582 else
10583 newoffset = offset & ~mask;
10585 /* Ensure that long displacements are aligned. */
10586 if (mask == 0x3fff
10587 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10588 || (TARGET_64BIT && (mode) == DImode)))
10589 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10591 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10593 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10594 GEN_INT (newoffset));
10595 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10596 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10597 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10598 opnum, (enum reload_type) type);
10599 return ad;
10603 return NULL_RTX;
10606 /* Output address vector. */
10608 void
10609 pa_output_addr_vec (rtx lab, rtx body)
10611 int idx, vlen = XVECLEN (body, 0);
10613 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10614 if (TARGET_GAS)
10615 fputs ("\t.begin_brtab\n", asm_out_file);
10616 for (idx = 0; idx < vlen; idx++)
10618 ASM_OUTPUT_ADDR_VEC_ELT
10619 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10621 if (TARGET_GAS)
10622 fputs ("\t.end_brtab\n", asm_out_file);
10625 /* Output address difference vector. */
10627 void
10628 pa_output_addr_diff_vec (rtx lab, rtx body)
10630 rtx base = XEXP (XEXP (body, 0), 0);
10631 int idx, vlen = XVECLEN (body, 1);
10633 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10634 if (TARGET_GAS)
10635 fputs ("\t.begin_brtab\n", asm_out_file);
10636 for (idx = 0; idx < vlen; idx++)
10638 ASM_OUTPUT_ADDR_DIFF_ELT
10639 (asm_out_file,
10640 body,
10641 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10642 CODE_LABEL_NUMBER (base));
10644 if (TARGET_GAS)
10645 fputs ("\t.end_brtab\n", asm_out_file);
10648 /* This is a helper function for the other atomic operations. This function
10649 emits a loop that contains SEQ that iterates until a compare-and-swap
10650 operation at the end succeeds. MEM is the memory to be modified. SEQ is
10651 a set of instructions that takes a value from OLD_REG as an input and
10652 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
10653 set to the current contents of MEM. After SEQ, a compare-and-swap will
10654 attempt to update MEM with NEW_REG. The function returns true when the
10655 loop was generated successfully. */
10657 static bool
10658 pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
10660 machine_mode mode = GET_MODE (mem);
10661 rtx_code_label *label;
10662 rtx cmp_reg, success, oldval;
10664 /* The loop we want to generate looks like
10666 cmp_reg = mem;
10667 label:
10668 old_reg = cmp_reg;
10669 seq;
10670 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
10671 if (success)
10672 goto label;
10674 Note that we only do the plain load from memory once. Subsequent
10675 iterations use the value loaded by the compare-and-swap pattern. */
10677 label = gen_label_rtx ();
10678 cmp_reg = gen_reg_rtx (mode);
10680 emit_move_insn (cmp_reg, mem);
10681 emit_label (label);
10682 emit_move_insn (old_reg, cmp_reg);
10683 if (seq)
10684 emit_insn (seq);
10686 success = NULL_RTX;
10687 oldval = cmp_reg;
10688 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
10689 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
10690 MEMMODEL_RELAXED))
10691 return false;
10693 if (oldval != cmp_reg)
10694 emit_move_insn (cmp_reg, oldval);
10696 /* Mark this jump predicted not taken. */
10697 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
10698 GET_MODE (success), 1, label, 0);
10699 return true;
10702 /* This function tries to implement an atomic exchange operation using a
10703 compare_and_swap loop. VAL is written to *MEM. The previous contents of
10704 *MEM are returned, using TARGET if possible. No memory model is required
10705 since a compare_and_swap loop is seq-cst. */
10707 rtx
10708 pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
10710 machine_mode mode = GET_MODE (mem);
10712 if (can_compare_and_swap_p (mode, true))
10714 if (!target || !register_operand (target, mode))
10715 target = gen_reg_rtx (mode);
10716 if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
10717 return target;
10720 return NULL_RTX;
10723 #include "gt-pa.h"