/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
			    const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					enum machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
						   enum machine_mode, int *,
						   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}* where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
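
/* For illustration, with a hypothetical option string such as
   -mfixed-range=fr20-fr23,fr28-fr31 the loop above marks fr20..fr23
   and fr28..fr31 as both fixed and call-used.  If every register in
   FP_REG_FIRST..FP_REG_LAST ends up fixed, MASK_DISABLE_FPREGS is
   set as well.  */
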
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
	  && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
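
/* A sketch (illustrative, not code from this file) of how the TFmode
   builtins registered above are reached from user code when
   HPUX_LONG_DOUBLE_LIBRARY is true:

     __float128 x = __builtin_fabsq (y);         expands to _U_Qfabs
     __float128 s = __builtin_copysignq (a, b);  expands to _U_Qfcopysign

   __builtin_infq and __builtin_huge_valq take no arguments; they are
   expanded by pa_expand_builtin below into a load of the constant
   rather than a libcall.  */
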
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
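
/* Worked example (illustrative): for ival = 0x12345000 the mask
   ((HOST_WIDE_INT) -1 << 31) | 0x7ff selects bit 31 upward plus the
   low 11 bits; the masked value is 0, so ldil can load the constant.
   For ival = 0x80000000 on a 64-bit host the masked value is
   0x80000000, which matches neither 0 nor (-1 << 31), so the value
   would change sign when extended and ldil cannot be used.  */
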
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
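
/* Worked example (illustrative): x = 0x003c0000, a run of four ones.
   lsb_mask = x & -x = 0x00040000; (x >> 4) + lsb_mask = 0x0007c000;
   masking with ~(lsb_mask - 1) leaves t = 0x00040000, a power of two,
   so zdepi can generate the value.  For a value with bits spread over
   more than a 5-bit window, such as x = 0x00041000, t keeps more than
   one bit set and the test fails.  */
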
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
	0....01....1
	1....10....0
	1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
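
/* Both mask predicates above use the same trick: a value is a single
   contiguous run of ones iff adding its lowest set bit to it yields a
   power of two (or zero).  Illustrative trace for pa_ior_mask_p with
   mask = 0x00ff0000: mask & -mask = 0x00010000, the sum is 0x01000000,
   and 0x01000000 & 0x00ffffff == 0, so one depi can set the field.
   pa_and_mask_p applies the same test to ~mask.  */
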
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have a label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
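
/* A worked instance of the rounding above (illustrative): for a
   MODE_INT reference the mask is 0x3fff.  With <large int> = 0x11234,
   (0x11234 & 0x3fff) = 0x1234, which is less than halfway (0x2000),
   so Y rounds down to 0x10000.  Z = X + 0x10000 and the residual
   displacement 0x1234 fits in a 14-bit field.  */
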
static rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (pa_tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG, REG+CONST and REG+REG addresses are the cheapest
   (cost 1), a symbolic HIGH costs 2, and everything else, including
   PIC references, costs 4.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
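
/* Illustrative instance of the factor scaling above: a DImode multiply
   on a 32-bit target has GET_MODE_SIZE / 4 == 2, so it is costed at
   2 * 2 = 4 times the corresponding SImode multiply cost
   (COSTS_N_INSNS (8) when FP-assisted multiplies are available,
   COSTS_N_INSNS (20) otherwise).  */
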
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return pa_tls_referenced_p (x);
}

1575 /* Emit insns to move operands[1] into operands[0].
1577 Return 1 if we have written out everything that needs to be done to
1578 do the move. Otherwise, return 0 and the caller will emit the move
1579 normally.
1581 Note SCRATCH_REG may not be in the proper mode depending on how it
1582 will be used. This routine is responsible for creating a new copy
1583 of SCRATCH_REG in the proper mode. */
1586 pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
1588 register rtx operand0 = operands[0];
1589 register rtx operand1 = operands[1];
1590 register rtx tem;
1592 /* We can only handle indexed addresses in the destination operand
1593 of floating point stores. Thus, we need to break out indexed
1594 addresses from the destination operand. */
1595 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1597 gcc_assert (can_create_pseudo_p ());
1599 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1600 operand0 = replace_equiv_address (operand0, tem);
1603 /* On targets with non-equivalent space registers, break out unscaled
1604 indexed addresses from the source operand before the final CSE.
1605 We have to do this because the REG_POINTER flag is not correctly
1606 carried through various optimization passes and CSE may substitute
1607 a pseudo without the pointer set for one with the pointer set. As
1608 a result, we loose various opportunities to create insns with
1609 unscaled indexed addresses. */
1610 if (!TARGET_NO_SPACE_REGS
1611 && !cse_not_expected
1612 && GET_CODE (operand1) == MEM
1613 && GET_CODE (XEXP (operand1, 0)) == PLUS
1614 && REG_P (XEXP (XEXP (operand1, 0), 0))
1615 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1616 operand1
1617 = replace_equiv_address (operand1,
1618 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1620 if (scratch_reg
1621 && reload_in_progress && GET_CODE (operand0) == REG
1622 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1623 operand0 = reg_equiv_mem (REGNO (operand0));
1624 else if (scratch_reg
1625 && reload_in_progress && GET_CODE (operand0) == SUBREG
1626 && GET_CODE (SUBREG_REG (operand0)) == REG
1627 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1629 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1630 the code which tracks sets/uses for delete_output_reload. */
1631 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1632 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1633 SUBREG_BYTE (operand0));
1634 operand0 = alter_subreg (&temp, true);
1637 if (scratch_reg
1638 && reload_in_progress && GET_CODE (operand1) == REG
1639 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1640 operand1 = reg_equiv_mem (REGNO (operand1));
1641 else if (scratch_reg
1642 && reload_in_progress && GET_CODE (operand1) == SUBREG
1643 && GET_CODE (SUBREG_REG (operand1)) == REG
1644 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1646 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1647 the code which tracks sets/uses for delete_output_reload. */
1648 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1649 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1650 SUBREG_BYTE (operand1));
1651 operand1 = alter_subreg (&temp, true);
1654 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1655 && ((tem = find_replacement (&XEXP (operand0, 0)))
1656 != XEXP (operand0, 0)))
1657 operand0 = replace_equiv_address (operand0, tem);
1659 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1660 && ((tem = find_replacement (&XEXP (operand1, 0)))
1661 != XEXP (operand1, 0)))
1662 operand1 = replace_equiv_address (operand1, tem);
1664 /* Handle secondary reloads for loads/stores of FP registers from
1665 REG+D addresses where D does not fit in 5 or 14 bits, including
1666 (subreg (mem (addr))) cases. */
1667 if (scratch_reg
1668 && fp_reg_operand (operand0, mode)
1669 && (MEM_P (operand1)
1670 || (GET_CODE (operand1) == SUBREG
1671 && MEM_P (XEXP (operand1, 0))))
1672 && !floating_point_store_memory_operand (operand1, mode))
1674 if (GET_CODE (operand1) == SUBREG)
1675 operand1 = XEXP (operand1, 0);
1677 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1678 it in WORD_MODE regardless of what mode it was originally given
1679 to us. */
1680 scratch_reg = force_mode (word_mode, scratch_reg);
1682 /* D might not fit in 14 bits either; for such cases load D into
1683 scratch reg. */
1684 if (reg_plus_base_memory_operand (operand1, mode)
1685 && !(TARGET_PA_20
1686 && !TARGET_ELF32
1687 && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
1689 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1690 emit_move_insn (scratch_reg,
1691 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1692 Pmode,
1693 XEXP (XEXP (operand1, 0), 0),
1694 scratch_reg));
1696 else
1697 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1698 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1699 replace_equiv_address (operand1, scratch_reg)));
1700 return 1;
1702 else if (scratch_reg
1703 && fp_reg_operand (operand1, mode)
1704 && (MEM_P (operand0)
1705 || (GET_CODE (operand0) == SUBREG
1706 && MEM_P (XEXP (operand0, 0))))
1707 && !floating_point_store_memory_operand (operand0, mode))
1709 if (GET_CODE (operand0) == SUBREG)
1710 operand0 = XEXP (operand0, 0);
1712 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1713 it in WORD_MODE regardless of what mode it was originally given
1714 to us. */
1715 scratch_reg = force_mode (word_mode, scratch_reg);
1717 /* D might not fit in 14 bits either; for such cases load D into
1718 scratch reg. */
1719 if (reg_plus_base_memory_operand (operand0, mode)
1720 && !(TARGET_PA_20
1721 && !TARGET_ELF32
1722 && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
1724 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1725 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1726 0)),
1727 Pmode,
1728 XEXP (XEXP (operand0, 0),
1730 scratch_reg));
1732 else
1733 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1734 emit_insn (gen_rtx_SET (VOIDmode,
1735 replace_equiv_address (operand0, scratch_reg),
1736 operand1));
1737 return 1;
1739 /* Handle secondary reloads for loads of FP registers from constant
1740 expressions by forcing the constant into memory. For the most part,
1741 this is only necessary for SImode and DImode.
1743 Use scratch_reg to hold the address of the memory location. */
1744 else if (scratch_reg
1745 && CONSTANT_P (operand1)
1746 && fp_reg_operand (operand0, mode))
1748 rtx const_mem, xoperands[2];
1750 if (operand1 == CONST0_RTX (mode))
1752 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1753 return 1;
1756 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1757 it in WORD_MODE regardless of what mode it was originally given
1758 to us. */
1759 scratch_reg = force_mode (word_mode, scratch_reg);
1761 /* Force the constant into memory and put the address of the
1762 memory location into scratch_reg. */
1763 const_mem = force_const_mem (mode, operand1);
1764 xoperands[0] = scratch_reg;
1765 xoperands[1] = XEXP (const_mem, 0);
1766 pa_emit_move_sequence (xoperands, Pmode, 0);
1768 /* Now load the destination register. */
1769 emit_insn (gen_rtx_SET (mode, operand0,
1770 replace_equiv_address (const_mem, scratch_reg)));
1771 return 1;
1773 /* Handle secondary reloads for SAR. These occur when trying to load
1774 the SAR from memory or a constant. */
1775 else if (scratch_reg
1776 && GET_CODE (operand0) == REG
1777 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1778 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1779 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1781 /* D might not fit in 14 bits either; for such cases load D into
1782 scratch reg. */
1783 if (GET_CODE (operand1) == MEM
1784 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1786 /* We are reloading the address into the scratch register, so we
1787 want to make sure the scratch register is a full register. */
1788 scratch_reg = force_mode (word_mode, scratch_reg);
1790 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1791 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1792 0)),
1793 Pmode,
1794 XEXP (XEXP (operand1, 0),
1796 scratch_reg));
1798 /* Now we are going to load the scratch register from memory,
1799 we want to load it in the same width as the original MEM,
1800 which must be the same as the width of the ultimate destination,
1801 OPERAND0. */
1802 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1804 emit_move_insn (scratch_reg,
1805 replace_equiv_address (operand1, scratch_reg));
1807 else
1809 /* We want to load the scratch register using the same mode as
1810 the ultimate destination. */
1811 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1813 emit_move_insn (scratch_reg, operand1);
1816 /* And emit the insn to set the ultimate destination. We know that
1817 the scratch register has the same mode as the destination at this
1818 point. */
1819 emit_move_insn (operand0, scratch_reg);
1820 return 1;
1822 /* Handle the most common case: storing into a register. */
1823 else if (register_operand (operand0, mode))
1825 /* Legitimize TLS symbol references. This happens for references
1826 that aren't a legitimate constant. */
1827 if (PA_SYMBOL_REF_TLS_P (operand1))
1828 operand1 = legitimize_tls_address (operand1);
1830 if (register_operand (operand1, mode)
1831 || (GET_CODE (operand1) == CONST_INT
1832 && pa_cint_ok_for_move (INTVAL (operand1)))
1833 || (operand1 == CONST0_RTX (mode))
1834 || (GET_CODE (operand1) == HIGH
1835 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1836 /* Only `general_operands' can come here, so MEM is ok. */
1837 || GET_CODE (operand1) == MEM)
1839 /* Various sets are created during RTL generation which don't
1840 have the REG_POINTER flag correctly set. After the CSE pass,
1841 instruction recognition can fail if we don't consistently
1842 set this flag when performing register copies. This should
1843 also improve the opportunities for creating insns that use
1844 unscaled indexing. */
1845 if (REG_P (operand0) && REG_P (operand1))
1847 if (REG_POINTER (operand1)
1848 && !REG_POINTER (operand0)
1849 && !HARD_REGISTER_P (operand0))
1850 copy_reg_pointer (operand0, operand1);
1853 /* When MEMs are broken out, the REG_POINTER flag doesn't
1854 get set. In some cases, we can set the REG_POINTER flag
1855 from the declaration for the MEM. */
1856 if (REG_P (operand0)
1857 && GET_CODE (operand1) == MEM
1858 && !REG_POINTER (operand0))
1860 tree decl = MEM_EXPR (operand1);
1862 /* Set the register pointer flag and register alignment
1863 if the declaration for this memory reference is a
1864 pointer type. */
1865 if (decl)
1867 tree type;
1869 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1870 tree operand 1. */
1871 if (TREE_CODE (decl) == COMPONENT_REF)
1872 decl = TREE_OPERAND (decl, 1);
1874 type = TREE_TYPE (decl);
1875 type = strip_array_types (type);
1877 if (POINTER_TYPE_P (type))
1879 int align;
1881 type = TREE_TYPE (type);
1882 /* Using TYPE_ALIGN_OK is rather conservative as
1883 only the Ada front end actually sets it. */
1884 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1885 : BITS_PER_UNIT);
1886 mark_reg_pointer (operand0, align);
1891 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1892 return 1;
1895 else if (GET_CODE (operand0) == MEM)
1897 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1898 && !(reload_in_progress || reload_completed))
1900 rtx temp = gen_reg_rtx (DFmode);
1902 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1903 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1904 return 1;
1906 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1908 /* Run this case quickly. */
1909 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1910 return 1;
1912 if (! (reload_in_progress || reload_completed))
1914 operands[0] = validize_mem (operand0);
1915 operands[1] = operand1 = force_reg (mode, operand1);
1919 /* Simplify the source if we need to.
1920 Note we do have to handle function labels here, even though we do
1921 not consider them legitimate constants. Loop optimizations can
1922 call the emit_move_xxx routines with one as a source. */
1923 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1924 || (GET_CODE (operand1) == HIGH
1925 && symbolic_operand (XEXP (operand1, 0), mode))
1926 || function_label_operand (operand1, VOIDmode)
1927 || pa_tls_referenced_p (operand1))
1929 int ishighonly = 0;
1931 if (GET_CODE (operand1) == HIGH)
1933 ishighonly = 1;
1934 operand1 = XEXP (operand1, 0);
1936 if (symbolic_operand (operand1, mode))
1938 /* Argh. The assembler and linker can't handle arithmetic
1939 involving plabels.
1941 So we force the plabel into memory, load operand0 from
1942 the memory location, then add in the constant part. */
1943 if ((GET_CODE (operand1) == CONST
1944 && GET_CODE (XEXP (operand1, 0)) == PLUS
1945 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1946 VOIDmode))
1947 || function_label_operand (operand1, VOIDmode))
1949 rtx temp, const_part;
1951 /* Figure out what (if any) scratch register to use. */
1952 if (reload_in_progress || reload_completed)
1954 scratch_reg = scratch_reg ? scratch_reg : operand0;
1955 /* SCRATCH_REG will hold an address and maybe the actual
1956 data. We want it in WORD_MODE regardless of what mode it
1957 was originally given to us. */
1958 scratch_reg = force_mode (word_mode, scratch_reg);
1960 else if (flag_pic)
1961 scratch_reg = gen_reg_rtx (Pmode);
1963 if (GET_CODE (operand1) == CONST)
1965 /* Save away the constant part of the expression. */
1966 const_part = XEXP (XEXP (operand1, 0), 1);
1967 gcc_assert (GET_CODE (const_part) == CONST_INT);
1969 /* Force the function label into memory. */
1970 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1972 else
1974 /* No constant part. */
1975 const_part = NULL_RTX;
1977 /* Force the function label into memory. */
1978 temp = force_const_mem (mode, operand1);
1982 /* Get the address of the memory location. PIC-ify it if
1983 necessary. */
1984 temp = XEXP (temp, 0);
1985 if (flag_pic)
1986 temp = legitimize_pic_address (temp, mode, scratch_reg);
1988 /* Put the address of the memory location into our destination
1989 register. */
1990 operands[1] = temp;
1991 pa_emit_move_sequence (operands, mode, scratch_reg);
1993 /* Now load from the memory location into our destination
1994 register. */
1995 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1996 pa_emit_move_sequence (operands, mode, scratch_reg);
1998 /* And add back in the constant part. */
1999 if (const_part != NULL_RTX)
2000 expand_inc (operand0, const_part);
2002 return 1;
2005 if (flag_pic)
2007 rtx temp;
2009 if (reload_in_progress || reload_completed)
2011 temp = scratch_reg ? scratch_reg : operand0;
2012 /* TEMP will hold an address and maybe the actual
2013 data. We want it in WORD_MODE regardless of what mode it
2014 was originally given to us. */
2015 temp = force_mode (word_mode, temp);
2017 else
2018 temp = gen_reg_rtx (Pmode);
2020 /* (const (plus (symbol) (const_int))) must be forced to
2021 memory during/after reload if the const_int will not fit
2022 in 14 bits. */
2023 if (GET_CODE (operand1) == CONST
2024 && GET_CODE (XEXP (operand1, 0)) == PLUS
2025 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2026 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2027 && (reload_completed || reload_in_progress)
2028 && flag_pic)
2030 rtx const_mem = force_const_mem (mode, operand1);
2031 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2032 mode, temp);
2033 operands[1] = replace_equiv_address (const_mem, operands[1]);
2034 pa_emit_move_sequence (operands, mode, temp);
2036 else
2038 operands[1] = legitimize_pic_address (operand1, mode, temp);
2039 if (REG_P (operand0) && REG_P (operands[1]))
2040 copy_reg_pointer (operand0, operands[1]);
2041 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2044 /* On the HPPA, references to data space are supposed to use dp,
2045 register 27, but showing it in the RTL inhibits various cse
2046 and loop optimizations. */
2047 else
2049 rtx temp, set;
2051 if (reload_in_progress || reload_completed)
2053 temp = scratch_reg ? scratch_reg : operand0;
2054 /* TEMP will hold an address and maybe the actual
2055 data. We want it in WORD_MODE regardless of what mode it
2056 was originally given to us. */
2057 temp = force_mode (word_mode, temp);
2059 else
2060 temp = gen_reg_rtx (mode);
2062 /* Loading a SYMBOL_REF into a register makes that register
2063 safe to be used as the base in an indexed address.
2065 Don't mark hard registers though. That loses. */
2066 if (GET_CODE (operand0) == REG
2067 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2068 mark_reg_pointer (operand0, BITS_PER_UNIT);
2069 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2070 mark_reg_pointer (temp, BITS_PER_UNIT);
2072 if (ishighonly)
2073 set = gen_rtx_SET (mode, operand0, temp);
2074 else
2075 set = gen_rtx_SET (VOIDmode,
2076 operand0,
2077 gen_rtx_LO_SUM (mode, temp, operand1));
2079 emit_insn (gen_rtx_SET (VOIDmode,
2080 temp,
2081 gen_rtx_HIGH (mode, operand1)));
2082 emit_insn (set);
2085 return 1;
2087 else if (pa_tls_referenced_p (operand1))
2089 rtx tmp = operand1;
2090 rtx addend = NULL;
2092 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2094 addend = XEXP (XEXP (tmp, 0), 1);
2095 tmp = XEXP (XEXP (tmp, 0), 0);
2098 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2099 tmp = legitimize_tls_address (tmp);
2100 if (addend)
2102 tmp = gen_rtx_PLUS (mode, tmp, addend);
2103 tmp = force_operand (tmp, operands[0]);
2105 operands[1] = tmp;
2107 else if (GET_CODE (operand1) != CONST_INT
2108 || !pa_cint_ok_for_move (INTVAL (operand1)))
2110 rtx insn, temp;
2111 rtx op1 = operand1;
2112 HOST_WIDE_INT value = 0;
2113 HOST_WIDE_INT insv = 0;
2114 int insert = 0;
2116 if (GET_CODE (operand1) == CONST_INT)
2117 value = INTVAL (operand1);
2119 if (TARGET_64BIT
2120 && GET_CODE (operand1) == CONST_INT
2121 && HOST_BITS_PER_WIDE_INT > 32
2122 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2124 HOST_WIDE_INT nval;
2126 /* Extract the low order 32 bits of the value and sign extend.
2127 If the new value is the same as the original value, we can
2128 use the original value as-is. If the new value is
2129 different, we use it and insert the most-significant 32-bits
2130 of the original value into the final result. */
2131 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2132 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2133 if (value != nval)
2135 #if HOST_BITS_PER_WIDE_INT > 32
2136 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2137 #endif
2138 insert = 1;
2139 value = nval;
2140 operand1 = GEN_INT (nval);
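/* An illustrative trace of the split above (values hypothetical):
   for value = 0x123456789 the low-order 32 bits are 0x23456789;
   bit 31 is clear, so nval = 0x23456789 != value, and we arrange
   to insert insv = value >> 32 = 0x1 separately below.  For
   value = 0xffffffff80000000 the low word sign extends back to
   the original value, so no insertion is needed.  */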
2144 if (reload_in_progress || reload_completed)
2145 temp = scratch_reg ? scratch_reg : operand0;
2146 else
2147 temp = gen_reg_rtx (mode);
2149 /* We don't directly split DImode constants on 32-bit targets
2150 because PLUS uses an 11-bit immediate and the insn sequence
2151 generated is not as efficient as the one using HIGH/LO_SUM. */
2152 if (GET_CODE (operand1) == CONST_INT
2153 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2154 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2155 && !insert)
2157 /* Directly break constant into high and low parts. This
2158 provides better optimization opportunities because various
2159 passes recognize constants split with PLUS but not LO_SUM.
2160 We use a 14-bit signed low part except when the addition
2161 of 0x4000 to the high part might change the sign of the
2162 high part. */
2163 HOST_WIDE_INT low = value & 0x3fff;
2164 HOST_WIDE_INT high = value & ~ 0x3fff;
2166 if (low >= 0x2000)
2168 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2169 high += 0x2000;
2170 else
2171 high += 0x4000;
2174 low = value - high;
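/* A worked example of the split (values hypothetical): for
   value = 0x12345, low = 0x2345 and high = 0x10000.  Since
   low >= 0x2000 we bias high by 0x4000, giving high = 0x14000
   and low = 0x12345 - 0x14000 = -7355, which fits the 14-bit
   signed displacement.  The checks above keep the bias from
   overflowing the high part, e.g. when high == 0x7fffc000.  */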
2176 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2177 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2179 else
2181 emit_insn (gen_rtx_SET (VOIDmode, temp,
2182 gen_rtx_HIGH (mode, operand1)));
2183 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2186 insn = emit_move_insn (operands[0], operands[1]);
2188 /* Now insert the most significant 32 bits of the value
2189 into the register. When we don't have a second register
2190 available, it could take up to nine instructions to load
2191 a 64-bit integer constant. Prior to reload, we force
2192 constants that would take more than three instructions
2193 to load to the constant pool. During and after reload,
2194 we have to handle all possible values. */
2195 if (insert)
2197 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2198 register and the value to be inserted is outside the
2199 range that can be loaded with three depdi instructions. */
2200 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2202 operand1 = GEN_INT (insv);
2204 emit_insn (gen_rtx_SET (VOIDmode, temp,
2205 gen_rtx_HIGH (mode, operand1)));
2206 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2207 if (mode == DImode)
2208 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2209 const0_rtx, temp));
2210 else
2211 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2212 const0_rtx, temp));
2214 else
2216 int len = 5, pos = 27;
2218 /* Insert the bits using the depdi instruction. */
2219 while (pos >= 0)
2221 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2222 HOST_WIDE_INT sign = v5 < 0;
2224 /* Left extend the insertion. */
2225 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2226 while (pos > 0 && (insv & 1) == sign)
2228 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2229 len += 1;
2230 pos -= 1;
2233 if (mode == DImode)
2234 insn = emit_insn (gen_insvdi (operand0,
2235 GEN_INT (len),
2236 GEN_INT (pos),
2237 GEN_INT (v5)));
2238 else
2239 insn = emit_insn (gen_insvsi (operand0,
2240 GEN_INT (len),
2241 GEN_INT (pos),
2242 GEN_INT (v5)));
2244 len = pos > 0 && pos < 5 ? pos : 5;
2245 pos -= len;
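/* An illustrative trace of this loop (hypothetical input): with
   insv = 3, the first iteration computes v5 = ((3 & 31) ^ 16) - 16
   = 3 with sign = 0; shifting leaves insv = 0, so the inner loop
   widens the field until pos reaches 0, giving len = 32.  One
   insertion of the sign-extended immediate 3 into the 32-bit field
   at position 0 (the most significant word, as in the HIGH/LO_SUM
   branch above) then completes the constant.  */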
2250 set_unique_reg_note (insn, REG_EQUAL, op1);
2252 return 1;
2255 /* Now have insn-emit do whatever it normally does. */
2256 return 0;
2259 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2260 it will need a link/runtime reloc). */
2263 pa_reloc_needed (tree exp)
2265 int reloc = 0;
2267 switch (TREE_CODE (exp))
2269 case ADDR_EXPR:
2270 return 1;
2272 case POINTER_PLUS_EXPR:
2273 case PLUS_EXPR:
2274 case MINUS_EXPR:
2275 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2276 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2277 break;
2279 CASE_CONVERT:
2280 case NON_LVALUE_EXPR:
2281 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2282 break;
2284 case CONSTRUCTOR:
2286 tree value;
2287 unsigned HOST_WIDE_INT ix;
2289 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2290 if (value)
2291 reloc |= pa_reloc_needed (value);
2293 break;
2295 case ERROR_MARK:
2296 break;
2298 default:
2299 break;
2301 return reloc;
2305 /* Return the best assembler insn template
2306 for moving operands[1] into operands[0] as a fullword. */
2307 const char *
2308 pa_singlemove_string (rtx *operands)
2310 HOST_WIDE_INT intval;
2312 if (GET_CODE (operands[0]) == MEM)
2313 return "stw %r1,%0";
2314 if (GET_CODE (operands[1]) == MEM)
2315 return "ldw %1,%0";
2316 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2318 long i;
2319 REAL_VALUE_TYPE d;
2321 gcc_assert (GET_MODE (operands[1]) == SFmode);
2323 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2324 bit pattern. */
2325 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2326 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2328 operands[1] = GEN_INT (i);
2329 /* Fall through to CONST_INT case. */
2331 if (GET_CODE (operands[1]) == CONST_INT)
2333 intval = INTVAL (operands[1]);
2335 if (VAL_14_BITS_P (intval))
2336 return "ldi %1,%0";
2337 else if ((intval & 0x7ff) == 0)
2338 return "ldil L'%1,%0";
2339 else if (pa_zdepi_cint_p (intval))
2340 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2341 else
2342 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2344 return "copy %1,%0";
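/* Illustrative template choices (example values, not from the
   original comments): 4660 (0x1234) fits in 14 bits and selects
   "ldi"; 0x7f800 has its low 11 bits clear and selects "ldil";
   0x3ffc0 is a contiguous bit string accepted by pa_zdepi_cint_p
   and selects the zdepi/depwi,z form; anything else falls back to
   the two-insn ldil/ldo sequence.  */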
2348 /* Compute position (in OP[1]) and width (in OP[2])
2349 useful for copying IMM to a register using the zdepi
2350 instructions. Store the immediate value to insert in OP[0]. */
2351 static void
2352 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2354 int lsb, len;
2356 /* Find the least significant set bit in IMM. */
2357 for (lsb = 0; lsb < 32; lsb++)
2359 if ((imm & 1) != 0)
2360 break;
2361 imm >>= 1;
2364 /* Choose variants based on *sign* of the 5-bit field. */
2365 if ((imm & 0x10) == 0)
2366 len = (lsb <= 28) ? 4 : 32 - lsb;
2367 else
2369 /* Find the width of the bitstring in IMM. */
2370 for (len = 5; len < 32 - lsb; len++)
2372 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2373 break;
2376 /* Sign extend IMM as a 5-bit value. */
2377 imm = (imm & 0xf) - 0x10;
2380 op[0] = imm;
2381 op[1] = 31 - lsb;
2382 op[2] = len;
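/* Worked example (illustrative): imm = 0x00ff0000 has its least
   significant set bit at lsb = 16; the shifted field 0xff has bit 4
   set, so the bit-string path applies with len = 8, and the 5-bit
   immediate sign extends to op[0] = -1.  The result op[1] = 15,
   op[2] = 8 describes a depwi,z whose field ends at PA bit 15,
   i.e. bits 16..23 in LSB-0 numbering: the mask 0x00ff0000.  */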
2385 /* Compute position (in OP[1]) and width (in OP[2])
2386 useful for copying IMM to a register using the depdi,z
2387 instructions. Store the immediate value to insert in OP[0]. */
2389 static void
2390 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2392 int lsb, len, maxlen;
2394 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2396 /* Find the least significant set bit in IMM. */
2397 for (lsb = 0; lsb < maxlen; lsb++)
2399 if ((imm & 1) != 0)
2400 break;
2401 imm >>= 1;
2404 /* Choose variants based on *sign* of the 5-bit field. */
2405 if ((imm & 0x10) == 0)
2406 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2407 else
2409 /* Find the width of the bitstring in IMM. */
2410 for (len = 5; len < maxlen - lsb; len++)
2412 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2413 break;
2416 /* Extend length if host is narrow and IMM is negative. */
2417 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2418 len += 32;
2420 /* Sign extend IMM as a 5-bit value. */
2421 imm = (imm & 0xf) - 0x10;
2424 op[0] = imm;
2425 op[1] = 63 - lsb;
2426 op[2] = len;
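/* Worked example (illustrative, assuming a 64-bit host): for
   imm = 0xff00000000 we find lsb = 32 and a shifted field of 0xff,
   so len = 8 and op[0] = -1 as in the 32-bit case, with
   op[1] = 63 - 32 = 31 selecting the depdi,z field position.  */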
2429 /* Output assembler code to perform a doubleword move insn
2430 with operands OPERANDS. */
2432 const char *
2433 pa_output_move_double (rtx *operands)
2435 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2436 rtx latehalf[2];
2437 rtx addreg0 = 0, addreg1 = 0;
2439 /* First classify both operands. */
2441 if (REG_P (operands[0]))
2442 optype0 = REGOP;
2443 else if (offsettable_memref_p (operands[0]))
2444 optype0 = OFFSOP;
2445 else if (GET_CODE (operands[0]) == MEM)
2446 optype0 = MEMOP;
2447 else
2448 optype0 = RNDOP;
2450 if (REG_P (operands[1]))
2451 optype1 = REGOP;
2452 else if (CONSTANT_P (operands[1]))
2453 optype1 = CNSTOP;
2454 else if (offsettable_memref_p (operands[1]))
2455 optype1 = OFFSOP;
2456 else if (GET_CODE (operands[1]) == MEM)
2457 optype1 = MEMOP;
2458 else
2459 optype1 = RNDOP;
2461 /* Check for the cases that the operand constraints are not
2462 supposed to allow to happen. */
2463 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2465 /* Handle copies between general and floating registers. */
2467 if (optype0 == REGOP && optype1 == REGOP
2468 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2470 if (FP_REG_P (operands[0]))
2472 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2473 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2474 return "{fldds|fldd} -16(%%sp),%0";
2476 else
2478 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2479 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2480 return "{ldws|ldw} -12(%%sp),%R0";
2484 /* Handle auto decrementing and incrementing loads and stores
2485 specifically, since the structure of the function doesn't work
2486 for them without major modification. Do it better when we teach
2487 this port about the general inc/dec addressing of the PA.
2488 (This was written by tege. Chide him if it doesn't work.) */
2490 if (optype0 == MEMOP)
2492 /* We have to output the address syntax ourselves, since print_operand
2493 doesn't deal with the addresses we want to use. Fix this later. */
2495 rtx addr = XEXP (operands[0], 0);
2496 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2498 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2500 operands[0] = XEXP (addr, 0);
2501 gcc_assert (GET_CODE (operands[1]) == REG
2502 && GET_CODE (operands[0]) == REG);
2504 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2506 /* No overlap between high target register and address
2507 register. (We do this in a non-obvious way to
2508 save a register file writeback) */
2509 if (GET_CODE (addr) == POST_INC)
2510 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2511 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2513 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2515 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2517 operands[0] = XEXP (addr, 0);
2518 gcc_assert (GET_CODE (operands[1]) == REG
2519 && GET_CODE (operands[0]) == REG);
2521 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2522 /* No overlap between high target register and address
2523 register. (We do this in a non-obvious way to save a
2524 register file writeback) */
2525 if (GET_CODE (addr) == PRE_INC)
2526 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2527 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2530 if (optype1 == MEMOP)
2532 /* We have to output the address syntax ourselves, since print_operand
2533 doesn't deal with the addresses we want to use. Fix this later. */
2535 rtx addr = XEXP (operands[1], 0);
2536 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2538 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2540 operands[1] = XEXP (addr, 0);
2541 gcc_assert (GET_CODE (operands[0]) == REG
2542 && GET_CODE (operands[1]) == REG);
2544 if (!reg_overlap_mentioned_p (high_reg, addr))
2546 /* No overlap between high target register and address
2547 register. (We do this in a non-obvious way to
2548 save a register file writeback) */
2549 if (GET_CODE (addr) == POST_INC)
2550 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2551 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2553 else
2555 /* This is an undefined situation. We should load into the
2556 address register *and* update that register. Probably
2557 we don't need to handle this at all. */
2558 if (GET_CODE (addr) == POST_INC)
2559 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2560 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2563 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2565 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2567 operands[1] = XEXP (addr, 0);
2568 gcc_assert (GET_CODE (operands[0]) == REG
2569 && GET_CODE (operands[1]) == REG);
2571 if (!reg_overlap_mentioned_p (high_reg, addr))
2573 /* No overlap between high target register and address
2574 register. (We do this in a non-obvious way to
2575 save a register file writeback) */
2576 if (GET_CODE (addr) == PRE_INC)
2577 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2578 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2580 else
2582 /* This is an undefined situation. We should load into the
2583 address register *and* update that register. Probably
2584 we don't need to handle this at all. */
2585 if (GET_CODE (addr) == PRE_INC)
2586 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2587 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2590 else if (GET_CODE (addr) == PLUS
2591 && GET_CODE (XEXP (addr, 0)) == MULT)
2593 rtx xoperands[4];
2595 /* Load address into left half of destination register. */
2596 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2597 xoperands[1] = XEXP (addr, 1);
2598 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2599 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2600 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2601 xoperands);
2602 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2604 else if (GET_CODE (addr) == PLUS
2605 && REG_P (XEXP (addr, 0))
2606 && REG_P (XEXP (addr, 1)))
2608 rtx xoperands[3];
2610 /* Load address into left half of destination register. */
2611 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2612 xoperands[1] = XEXP (addr, 0);
2613 xoperands[2] = XEXP (addr, 1);
2614 output_asm_insn ("{addl|add,l} %1,%2,%0",
2615 xoperands);
2616 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2620 /* If an operand is an unoffsettable memory ref, find a register
2621 we can increment temporarily to make it refer to the second word. */
2623 if (optype0 == MEMOP)
2624 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2626 if (optype1 == MEMOP)
2627 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2629 /* Ok, we can do one word at a time.
2630 Normally we do the low-numbered word first.
2632 In either case, set up in LATEHALF the operands to use
2633 for the high-numbered word and in some cases alter the
2634 operands in OPERANDS to be suitable for the low-numbered word. */
2636 if (optype0 == REGOP)
2637 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2638 else if (optype0 == OFFSOP)
2639 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2640 else
2641 latehalf[0] = operands[0];
2643 if (optype1 == REGOP)
2644 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2645 else if (optype1 == OFFSOP)
2646 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2647 else if (optype1 == CNSTOP)
2648 split_double (operands[1], &operands[1], &latehalf[1]);
2649 else
2650 latehalf[1] = operands[1];
2652 /* If the first move would clobber the source of the second one,
2653 do them in the other order.
2655 This can happen in two cases:
2657 mem -> register where the first half of the destination register
2658 is the same register used in the memory's address. Reload
2659 can create such insns.
2661 mem in this case will be either register indirect or register
2662 indirect plus a valid offset.
2664 register -> register move where REGNO(dst) == REGNO(src + 1)
2665 someone (Tim/Tege?) claimed this can happen for parameter loads.
2667 Handle mem -> register case first. */
2668 if (optype0 == REGOP
2669 && (optype1 == MEMOP || optype1 == OFFSOP)
2670 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2671 operands[1], 0))
2673 /* Do the late half first. */
2674 if (addreg1)
2675 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2676 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2678 /* Then clobber. */
2679 if (addreg1)
2680 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2681 return pa_singlemove_string (operands);
2684 /* Now handle register -> register case. */
2685 if (optype0 == REGOP && optype1 == REGOP
2686 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2688 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2689 return pa_singlemove_string (operands);
2692 /* Normal case: do the two words, low-numbered first. */
2694 output_asm_insn (pa_singlemove_string (operands), operands);
2696 /* Make any unoffsettable addresses point at high-numbered word. */
2697 if (addreg0)
2698 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2699 if (addreg1)
2700 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2702 /* Do that word. */
2703 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2705 /* Undo the adds we just did. */
2706 if (addreg0)
2707 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2708 if (addreg1)
2709 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2711 return "";
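/* An illustrative instance of the ordering logic (operands are
   hypothetical): copying the doubleword at 4(%r4) into %r4/%r5
   would clobber the address register if done low word first, so
   the overlap test flips the order and emits "ldw 8(%r4),%r5"
   followed by "ldw 4(%r4),%r4".  */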
2714 const char *
2715 pa_output_fp_move_double (rtx *operands)
2717 if (FP_REG_P (operands[0]))
2719 if (FP_REG_P (operands[1])
2720 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2721 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2722 else
2723 output_asm_insn ("fldd%F1 %1,%0", operands);
2725 else if (FP_REG_P (operands[1]))
2727 output_asm_insn ("fstd%F0 %1,%0", operands);
2729 else
2731 rtx xoperands[2];
2733 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2735 /* This is a pain. You have to be prepared to deal with an
2736 arbitrary address here, including pre/post increment/decrement,
2738 so avoid this in the MD. */
2739 gcc_assert (GET_CODE (operands[0]) == REG);
2741 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2742 xoperands[0] = operands[0];
2743 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2745 return "";
2748 /* Return a REG that occurs in ADDR with coefficient 1.
2749 ADDR can be effectively incremented by incrementing REG. */
2751 static rtx
2752 find_addr_reg (rtx addr)
2754 while (GET_CODE (addr) == PLUS)
2756 if (GET_CODE (XEXP (addr, 0)) == REG)
2757 addr = XEXP (addr, 0);
2758 else if (GET_CODE (XEXP (addr, 1)) == REG)
2759 addr = XEXP (addr, 1);
2760 else if (CONSTANT_P (XEXP (addr, 0)))
2761 addr = XEXP (addr, 1);
2762 else if (CONSTANT_P (XEXP (addr, 1)))
2763 addr = XEXP (addr, 0);
2764 else
2765 gcc_unreachable ();
2767 gcc_assert (GET_CODE (addr) == REG);
2768 return addr;
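/* For example (illustrative), ADDR = (plus (reg %r3) (const_int 8))
   walks to the REG side and returns %r3; nested PLUS expressions
   are walked the same way until the register with coefficient 1 is
   found.  */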
2771 /* Emit code to perform a block move.
2773 OPERANDS[0] is the destination pointer as a REG, clobbered.
2774 OPERANDS[1] is the source pointer as a REG, clobbered.
2775 OPERANDS[2] is a register for temporary storage.
2776 OPERANDS[3] is a register for temporary storage.
2777 OPERANDS[4] is the size as a CONST_INT
2778 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2779 OPERANDS[6] is another temporary register. */
2781 const char *
2782 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2784 int align = INTVAL (operands[5]);
2785 unsigned long n_bytes = INTVAL (operands[4]);
2787 /* We can't move more than a word at a time because the PA
2788 has no move insns for integers longer than a word. (Could use fp mem ops?) */
2789 if (align > (TARGET_64BIT ? 8 : 4))
2790 align = (TARGET_64BIT ? 8 : 4);
2792 /* Note that we know each loop below will execute at least twice
2793 (else we would have open-coded the copy). */
2794 switch (align)
2796 case 8:
2797 /* Pre-adjust the loop counter. */
2798 operands[4] = GEN_INT (n_bytes - 16);
2799 output_asm_insn ("ldi %4,%2", operands);
2801 /* Copying loop. */
2802 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2803 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2804 output_asm_insn ("std,ma %3,8(%0)", operands);
2805 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2806 output_asm_insn ("std,ma %6,8(%0)", operands);
2808 /* Handle the residual. There could be up to 15 bytes of
2809 residual to copy! */
2810 if (n_bytes % 16 != 0)
2812 operands[4] = GEN_INT (n_bytes % 8);
2813 if (n_bytes % 16 >= 8)
2814 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2815 if (n_bytes % 8 != 0)
2816 output_asm_insn ("ldd 0(%1),%6", operands);
2817 if (n_bytes % 16 >= 8)
2818 output_asm_insn ("std,ma %3,8(%0)", operands);
2819 if (n_bytes % 8 != 0)
2820 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2822 return "";
2824 case 4:
2825 /* Pre-adjust the loop counter. */
2826 operands[4] = GEN_INT (n_bytes - 8);
2827 output_asm_insn ("ldi %4,%2", operands);
2829 /* Copying loop. */
2830 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2831 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2832 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2833 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2834 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2836 /* Handle the residual. There could be up to 7 bytes of
2837 residual to copy! */
2838 if (n_bytes % 8 != 0)
2840 operands[4] = GEN_INT (n_bytes % 4);
2841 if (n_bytes % 8 >= 4)
2842 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2843 if (n_bytes % 4 != 0)
2844 output_asm_insn ("ldw 0(%1),%6", operands);
2845 if (n_bytes % 8 >= 4)
2846 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2847 if (n_bytes % 4 != 0)
2848 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2850 return "";
2852 case 2:
2853 /* Pre-adjust the loop counter. */
2854 operands[4] = GEN_INT (n_bytes - 4);
2855 output_asm_insn ("ldi %4,%2", operands);
2857 /* Copying loop. */
2858 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2859 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2860 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2861 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2862 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2864 /* Handle the residual. */
2865 if (n_bytes % 4 != 0)
2867 if (n_bytes % 4 >= 2)
2868 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2869 if (n_bytes % 2 != 0)
2870 output_asm_insn ("ldb 0(%1),%6", operands);
2871 if (n_bytes % 4 >= 2)
2872 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2873 if (n_bytes % 2 != 0)
2874 output_asm_insn ("stb %6,0(%0)", operands);
2876 return "";
2878 case 1:
2879 /* Pre-adjust the loop counter. */
2880 operands[4] = GEN_INT (n_bytes - 2);
2881 output_asm_insn ("ldi %4,%2", operands);
2883 /* Copying loop. */
2884 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2885 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2886 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2887 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2888 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2890 /* Handle the residual. */
2891 if (n_bytes % 2 != 0)
2893 output_asm_insn ("ldb 0(%1),%3", operands);
2894 output_asm_insn ("stb %3,0(%0)", operands);
2896 return "";
2898 default:
2899 gcc_unreachable ();
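/* For exposition, a hypothetical expansion (shown with the ldw,ma
   spelling of the {ldws|ldw},ma alternatives): align = 4 and
   n_bytes = 20 produce

	ldi 12,%2
	ldw,ma 4(%1),%3
	ldw,ma 4(%1),%6
	stw,ma %3,4(%0)
	addib,>= -8,%2,.-12
	stw,ma %6,4(%0)
	ldw,ma 4(%1),%3
	stw,ma %3,4(%0)

   The loop runs twice (counter 12 -> 4 -> -4), moving 16 bytes,
   and the residual code moves the final word.  */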
2903 /* Count the number of insns necessary to handle this block move.
2905 Basic structure is the same as pa_output_block_move, except that we
2906 count insns rather than emit them. */
2908 static int
2909 compute_movmem_length (rtx insn)
2911 rtx pat = PATTERN (insn);
2912 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2913 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2914 unsigned int n_insns = 0;
2916 /* We can't move more than a word at a time because the PA
2917 has no move insns for integers longer than a word. (Could use fp mem ops?) */
2918 if (align > (TARGET_64BIT ? 8 : 4))
2919 align = (TARGET_64BIT ? 8 : 4);
2921 /* The basic copying loop. */
2922 n_insns = 6;
2924 /* Residuals. */
2925 if (n_bytes % (2 * align) != 0)
2927 if ((n_bytes % (2 * align)) >= align)
2928 n_insns += 2;
2930 if ((n_bytes % align) != 0)
2931 n_insns += 2;
2934 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2935 return n_insns * 4;
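/* A quick sanity check of the count (illustrative): for align = 4
   and n_bytes = 20, the loop accounts for 6 insns and the residual
   word (20 % 8 >= 4) adds 2 more, so the length is 8 * 4 = 32
   bytes, matching the eight instructions in the hypothetical
   expansion shown after pa_output_block_move above.  */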
2938 /* Emit code to perform a block clear.
2940 OPERANDS[0] is the destination pointer as a REG, clobbered.
2941 OPERANDS[1] is a register for temporary storage.
2942 OPERANDS[2] is the size as a CONST_INT
2943 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2945 const char *
2946 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2948 int align = INTVAL (operands[3]);
2949 unsigned long n_bytes = INTVAL (operands[2]);
2951 /* We can't clear more than a word at a time because the PA
2952 has no move insns for integers longer than a word. */
2953 if (align > (TARGET_64BIT ? 8 : 4))
2954 align = (TARGET_64BIT ? 8 : 4);
2956 /* Note that we know each loop below will execute at least twice
2957 (else we would have open-coded the copy). */
2958 switch (align)
2960 case 8:
2961 /* Pre-adjust the loop counter. */
2962 operands[2] = GEN_INT (n_bytes - 16);
2963 output_asm_insn ("ldi %2,%1", operands);
2965 /* Loop. */
2966 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2967 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2968 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2970 /* Handle the residual. There could be up to 15 bytes of
2971 residual to clear! */
2972 if (n_bytes % 16 != 0)
2974 operands[2] = GEN_INT (n_bytes % 8);
2975 if (n_bytes % 16 >= 8)
2976 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2977 if (n_bytes % 8 != 0)
2978 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2980 return "";
2982 case 4:
2983 /* Pre-adjust the loop counter. */
2984 operands[2] = GEN_INT (n_bytes - 8);
2985 output_asm_insn ("ldi %2,%1", operands);
2987 /* Loop. */
2988 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2989 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2990 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2992 /* Handle the residual. There could be up to 7 bytes of
2993 residual to clear! */
2994 if (n_bytes % 8 != 0)
2996 operands[2] = GEN_INT (n_bytes % 4);
2997 if (n_bytes % 8 >= 4)
2998 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2999 if (n_bytes % 4 != 0)
3000 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3002 return "";
3004 case 2:
3005 /* Pre-adjust the loop counter. */
3006 operands[2] = GEN_INT (n_bytes - 4);
3007 output_asm_insn ("ldi %2,%1", operands);
3009 /* Loop. */
3010 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3011 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3012 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3014 /* Handle the residual. */
3015 if (n_bytes % 4 != 0)
3017 if (n_bytes % 4 >= 2)
3018 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3019 if (n_bytes % 2 != 0)
3020 output_asm_insn ("stb %%r0,0(%0)", operands);
3022 return "";
3024 case 1:
3025 /* Pre-adjust the loop counter. */
3026 operands[2] = GEN_INT (n_bytes - 2);
3027 output_asm_insn ("ldi %2,%1", operands);
3029 /* Loop. */
3030 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3031 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3032 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3034 /* Handle the residual. */
3035 if (n_bytes % 2 != 0)
3036 output_asm_insn ("stb %%r0,0(%0)", operands);
3038 return "";
3040 default:
3041 gcc_unreachable ();
3045 /* Count the number of insns necessary to handle this block clear.
3047 Basic structure is the same as pa_output_block_clear, except that we
3048 count insns rather than emit them. */
3050 static int
3051 compute_clrmem_length (rtx insn)
3053 rtx pat = PATTERN (insn);
3054 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3055 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3056 unsigned int n_insns = 0;
3058 /* We can't clear more than a word at a time because the PA
3059 has no move insns for integers longer than a word. */
3060 if (align > (TARGET_64BIT ? 8 : 4))
3061 align = (TARGET_64BIT ? 8 : 4);
3063 /* The basic loop. */
3064 n_insns = 4;
3066 /* Residuals. */
3067 if (n_bytes % (2 * align) != 0)
3069 if ((n_bytes % (2 * align)) >= align)
3070 n_insns++;
3072 if ((n_bytes % align) != 0)
3073 n_insns++;
3076 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3077 return n_insns * 4;
3081 const char *
3082 pa_output_and (rtx *operands)
3084 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3086 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3087 int ls0, ls1, ms0, p, len;
3089 for (ls0 = 0; ls0 < 32; ls0++)
3090 if ((mask & (1 << ls0)) == 0)
3091 break;
3093 for (ls1 = ls0; ls1 < 32; ls1++)
3094 if ((mask & (1 << ls1)) != 0)
3095 break;
3097 for (ms0 = ls1; ms0 < 32; ms0++)
3098 if ((mask & (1 << ms0)) == 0)
3099 break;
3101 gcc_assert (ms0 == 32);
3103 if (ls1 == 32)
3105 len = ls0;
3107 gcc_assert (len);
3109 operands[2] = GEN_INT (len);
3110 return "{extru|extrw,u} %1,31,%2,%0";
3112 else
3114 /* We could use this `depi' for the case above as well, but `depi'
3115 requires one more register file access than an `extru'. */
3117 p = 31 - ls0;
3118 len = ls1 - ls0;
3120 operands[2] = GEN_INT (p);
3121 operands[3] = GEN_INT (len);
3122 return "{depi|depwi} 0,%2,%3,%0";
3125 else
3126 return "and %1,%2,%0";
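/* Illustrative masks (hypothetical values): 0x000000ff is a run of
   low-order ones (ls1 == 32), so the low 8 bits are extracted with
   extru/extrw,u; 0xffff00ff has a single hole at bits 8..15, so
   depi/depwi zeros that field in place (p = 23, len = 8).  Any
   other mask falls through to the plain "and".  */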
3129 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3130 storing the result in operands[0]. */
3131 const char *
3132 pa_output_64bit_and (rtx *operands)
3134 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3136 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3137 int ls0, ls1, ms0, p, len;
3139 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3140 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3141 break;
3143 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3144 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3145 break;
3147 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3148 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3149 break;
3151 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3153 if (ls1 == HOST_BITS_PER_WIDE_INT)
3155 len = ls0;
3157 gcc_assert (len);
3159 operands[2] = GEN_INT (len);
3160 return "extrd,u %1,63,%2,%0";
3162 else
3164 /* We could use this `depdi' for the case above as well, but `depdi'
3165 requires one more register file access than an `extrd,u'. */
3167 p = 63 - ls0;
3168 len = ls1 - ls0;
3170 operands[2] = GEN_INT (p);
3171 operands[3] = GEN_INT (len);
3172 return "depdi 0,%2,%3,%0";
3175 else
3176 return "and %1,%2,%0";
3179 const char *
3180 pa_output_ior (rtx *operands)
3182 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3183 int bs0, bs1, p, len;
3185 if (INTVAL (operands[2]) == 0)
3186 return "copy %1,%0";
3188 for (bs0 = 0; bs0 < 32; bs0++)
3189 if ((mask & (1 << bs0)) != 0)
3190 break;
3192 for (bs1 = bs0; bs1 < 32; bs1++)
3193 if ((mask & (1 << bs1)) == 0)
3194 break;
3196 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3198 p = 31 - bs0;
3199 len = bs1 - bs0;
3201 operands[2] = GEN_INT (p);
3202 operands[3] = GEN_INT (len);
3203 return "{depi|depwi} -1,%2,%3,%0";
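/* Worked example (hypothetical mask): 0x00007800 is the bit string
   at bits 11..14, giving bs0 = 11, bs1 = 15 and therefore
   "depi -1,20,4,%0", which ORs four one bits into that field.  A
   non-contiguous mask would trip the assertion; the insn predicates
   are expected never to let one through.  */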
3206 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3207 storing the result in operands[0]. */
3208 const char *
3209 pa_output_64bit_ior (rtx *operands)
3211 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3212 int bs0, bs1, p, len;
3214 if (INTVAL (operands[2]) == 0)
3215 return "copy %1,%0";
3217 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3218 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3219 break;
3221 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3222 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3223 break;
3225 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3226 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3228 p = 63 - bs0;
3229 len = bs1 - bs0;
3231 operands[2] = GEN_INT (p);
3232 operands[3] = GEN_INT (len);
3233 return "depdi -1,%2,%3,%0";
3236 /* Target hook for assembling integer objects. This code handles
3237 aligned SI and DI integers specially since function references
3238 must be preceded by P%. */
3240 static bool
3241 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3243 if (size == UNITS_PER_WORD
3244 && aligned_p
3245 && function_label_operand (x, VOIDmode))
3247 fputs (size == 8 ? "\t.dword\t" : "\t.word\t", asm_out_file);
3249 /* We don't want an OPD when generating fast indirect calls. */
3250 if (!TARGET_FAST_INDIRECT_CALLS)
3251 fputs ("P%", asm_out_file);
3253 output_addr_const (asm_out_file, x);
3254 fputc ('\n', asm_out_file);
3255 return true;
3257 return default_assemble_integer (x, size, aligned_p);
3260 /* Output an ascii string. */
3261 void
3262 pa_output_ascii (FILE *file, const char *p, int size)
3264 int i;
3265 int chars_output;
3266 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3268 /* The HP assembler can only take strings of 256 characters at one
3269 time. This is a limitation on input line length, *not* the
3270 length of the string. Sigh. Even worse, it seems that the
3271 restriction is in number of input characters (see \xnn &
3272 \whatever). So we have to do this very carefully. */
3274 fputs ("\t.STRING \"", file);
3276 chars_output = 0;
3277 for (i = 0; i < size; i += 4)
3279 int co = 0;
3280 int io = 0;
3281 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3283 register unsigned int c = (unsigned char) p[i + io];
3285 if (c == '\"' || c == '\\')
3286 partial_output[co++] = '\\';
3287 if (c >= ' ' && c < 0177)
3288 partial_output[co++] = c;
3289 else
3291 unsigned int hexd;
3292 partial_output[co++] = '\\';
3293 partial_output[co++] = 'x';
3294 hexd = c / 16 - 0 + '0';
3295 if (hexd > '9')
3296 hexd -= '9' - 'a' + 1;
3297 partial_output[co++] = hexd;
3298 hexd = c % 16 - 0 + '0';
3299 if (hexd > '9')
3300 hexd -= '9' - 'a' + 1;
3301 partial_output[co++] = hexd;
3304 if (chars_output + co > 243)
3306 fputs ("\"\n\t.STRING \"", file);
3307 chars_output = 0;
3309 fwrite (partial_output, 1, (size_t) co, file);
3310 chars_output += co;
3311 co = 0;
3313 fputs ("\"\n", file);
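/* Example output (illustrative): the three bytes "hi\n" emit

	.STRING "hi\x0a"

   Printable characters pass through (with " and \ escaped) and
   everything else becomes a \xNN escape; the 243-character
   threshold splits long strings across several .STRING directives
   to stay under the assembler's input line limit.  */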
3316 /* Try to rewrite floating point comparisons & branches to avoid
3317 useless add,tr insns.
3319 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3320 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3321 first attempt to remove useless add,tr insns. It is zero
3322 for the second pass as reorg sometimes leaves bogus REG_DEAD
3323 notes lying around.
3325 When CHECK_NOTES is zero we can only eliminate add,tr insns
3326 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3327 instructions. */
3328 static void
3329 remove_useless_addtr_insns (int check_notes)
3331 rtx insn;
3332 static int pass = 0;
3334 /* This is fairly cheap, so always run it when optimizing. */
3335 if (optimize > 0)
3337 int fcmp_count = 0;
3338 int fbranch_count = 0;
3340 /* Walk all the insns in this function looking for fcmp & fbranch
3341 instructions. Keep track of how many of each we find. */
3342 for (insn = get_insns (); insn; insn = next_insn (insn))
3344 rtx tmp;
3346 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3347 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3348 continue;
3350 tmp = PATTERN (insn);
3352 /* It must be a set. */
3353 if (GET_CODE (tmp) != SET)
3354 continue;
3356 /* If the destination is CCFP, then we've found an fcmp insn. */
3357 tmp = SET_DEST (tmp);
3358 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3360 fcmp_count++;
3361 continue;
3364 tmp = PATTERN (insn);
3365 /* If this is an fbranch instruction, bump the fbranch counter. */
3366 if (GET_CODE (tmp) == SET
3367 && SET_DEST (tmp) == pc_rtx
3368 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3369 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3370 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3371 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3373 fbranch_count++;
3374 continue;
3379 /* Find all floating point compare + branch insns. If possible,
3380 reverse the comparison & the branch to avoid add,tr insns. */
3381 for (insn = get_insns (); insn; insn = next_insn (insn))
3383 rtx tmp, next;
3385 /* Ignore anything that isn't an INSN. */
3386 if (! NONJUMP_INSN_P (insn))
3387 continue;
3389 tmp = PATTERN (insn);
3391 /* It must be a set. */
3392 if (GET_CODE (tmp) != SET)
3393 continue;
3395 /* The destination must be CCFP, which is register zero. */
3396 tmp = SET_DEST (tmp);
3397 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3398 continue;
3400 /* INSN should be a set of CCFP.
3402 See if the result of this insn is used in a reversed FP
3403 conditional branch. If so, reverse our condition and
3404 the branch. Doing so avoids useless add,tr insns. */
3405 next = next_insn (insn);
3406 while (next)
3408 /* Jumps, calls and labels stop our search. */
3409 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3410 break;
3412 /* As does another fcmp insn. */
3413 if (NONJUMP_INSN_P (next)
3414 && GET_CODE (PATTERN (next)) == SET
3415 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3416 && REGNO (SET_DEST (PATTERN (next))) == 0)
3417 break;
3419 next = next_insn (next);
3422 /* Is NEXT_INSN a branch? */
3423 if (next && JUMP_P (next))
3425 rtx pattern = PATTERN (next);
3427 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3428 and CCFP dies, then reverse our conditional and the branch
3429 to avoid the add,tr. */
3430 if (GET_CODE (pattern) == SET
3431 && SET_DEST (pattern) == pc_rtx
3432 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3433 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3434 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3435 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3436 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3437 && (fcmp_count == fbranch_count
3438 || (check_notes
3439 && find_regno_note (next, REG_DEAD, 0))))
3441 /* Reverse the branch. */
3442 tmp = XEXP (SET_SRC (pattern), 1);
3443 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3444 XEXP (SET_SRC (pattern), 2) = tmp;
3445 INSN_CODE (next) = -1;
3447 /* Reverse our condition. */
3448 tmp = PATTERN (insn);
3449 PUT_CODE (XEXP (tmp, 1),
3450 (reverse_condition_maybe_unordered
3451 (GET_CODE (XEXP (tmp, 1)))));
3457 pass = !pass;
3461 /* You may have trouble believing this, but this is the 32-bit HP-PA
3462 stack layout. Wow.
3464 Offset Contents
3466 Variable arguments (optional; any number may be allocated)
3468 SP-(4*(N+9)) arg word N
3470 SP-56 arg word 5
3471 SP-52 arg word 4
3473 Fixed arguments (must be allocated; may remain unused)
3475 SP-48 arg word 3
3476 SP-44 arg word 2
3477 SP-40 arg word 1
3478 SP-36 arg word 0
3480 Frame Marker
3482 SP-32 External Data Pointer (DP)
3483 SP-28 External sr4
3484 SP-24 External/stub RP (RP')
3485 SP-20 Current RP
3486 SP-16 Static Link
3487 SP-12 Clean up
3488 SP-8 Calling Stub RP (RP'')
3489 SP-4 Previous SP
3491 Top of Frame
3493 SP-0 Stack Pointer (points to next available address)
3497 /* This function saves registers as follows. Registers marked with ' are
3498 this function's registers (as opposed to the previous function's).
3499 If a frame_pointer isn't needed, r4 is saved as a general register;
3500 the space for the frame pointer is still allocated, though, to keep
3501 things simple.
3504 Top of Frame
3506 SP (FP') Previous FP
3507 SP + 4 Alignment filler (sigh)
3508 SP + 8 Space for locals reserved here.
3512 SP + n All call saved registers used.
3516 SP + o All call saved fp registers used.
3520 SP + p (SP') points to next available address.
3524 /* Global variables set by output_function_prologue(). */
3525 /* Size of frame. Need to know this to emit return insns from
3526 leaf procedures. */
3527 static HOST_WIDE_INT actual_fsize, local_fsize;
3528 static int save_fregs;
3530 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3531 Handle case where DISP > 8k by using the add_high_const patterns.
3533 Note in DISP > 8k case, we will leave the high part of the address
3534 in %r1. There is code in pa_expand_{prologue,epilogue} that knows this. */
3536 static void
3537 store_reg (int reg, HOST_WIDE_INT disp, int base)
3539 rtx insn, dest, src, basereg;
3541 src = gen_rtx_REG (word_mode, reg);
3542 basereg = gen_rtx_REG (Pmode, base);
3543 if (VAL_14_BITS_P (disp))
3545 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3546 insn = emit_move_insn (dest, src);
3548 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3550 rtx delta = GEN_INT (disp);
3551 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3553 emit_move_insn (tmpreg, delta);
3554 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3555 if (DO_FRAME_NOTES)
3557 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3558 gen_rtx_SET (VOIDmode, tmpreg,
3559 gen_rtx_PLUS (Pmode, basereg, delta)));
3560 RTX_FRAME_RELATED_P (insn) = 1;
3562 dest = gen_rtx_MEM (word_mode, tmpreg);
3563 insn = emit_move_insn (dest, src);
3565 else
3567 rtx delta = GEN_INT (disp);
3568 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3569 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3571 emit_move_insn (tmpreg, high);
3572 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3573 insn = emit_move_insn (dest, src);
3574 if (DO_FRAME_NOTES)
3575 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3576 gen_rtx_SET (VOIDmode,
3577 gen_rtx_MEM (word_mode,
3578 gen_rtx_PLUS (word_mode,
3579 basereg,
3580 delta)),
3581 src));
3584 if (DO_FRAME_NOTES)
3585 RTX_FRAME_RELATED_P (insn) = 1;
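/* Illustrative dispatch (hypothetical arguments, 32-bit port):
   store_reg (3, 16, STACK_POINTER_REGNUM) emits a single
   "stw %r3,16(%r30)" since 16 fits in 14 bits; a displacement such
   as 32768 instead takes the HIGH/LO_SUM path, building the
   address in %r1 as noted above.  */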
3588 /* Emit RTL to store REG at the memory location specified by BASE and then
3589 add MOD to BASE. MOD must be <= 8k. */
3591 static void
3592 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3594 rtx insn, basereg, srcreg, delta;
3596 gcc_assert (VAL_14_BITS_P (mod));
3598 basereg = gen_rtx_REG (Pmode, base);
3599 srcreg = gen_rtx_REG (word_mode, reg);
3600 delta = GEN_INT (mod);
3602 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3603 if (DO_FRAME_NOTES)
3605 RTX_FRAME_RELATED_P (insn) = 1;
3607 /* RTX_FRAME_RELATED_P must be set on each frame related set
3608 in a parallel with more than one element. */
3609 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3610 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3614 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3615 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3616 whether to add a frame note or not.
3618 In the DISP > 8k case, we leave the high part of the address in %r1.
3619 There is code in pa_expand_{prologue,epilogue} that knows about this. */
3621 static void
3622 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3624 rtx insn;
3626 if (VAL_14_BITS_P (disp))
3628 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3629 plus_constant (Pmode,
3630 gen_rtx_REG (Pmode, base), disp));
3632 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3634 rtx basereg = gen_rtx_REG (Pmode, base);
3635 rtx delta = GEN_INT (disp);
3636 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3638 emit_move_insn (tmpreg, delta);
3639 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3640 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3641 if (DO_FRAME_NOTES)
3642 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3643 gen_rtx_SET (VOIDmode, tmpreg,
3644 gen_rtx_PLUS (Pmode, basereg, delta)));
3646 else
3648 rtx basereg = gen_rtx_REG (Pmode, base);
3649 rtx delta = GEN_INT (disp);
3650 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3652 emit_move_insn (tmpreg,
3653 gen_rtx_PLUS (Pmode, basereg,
3654 gen_rtx_HIGH (Pmode, delta)));
3655 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3656 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3659 if (DO_FRAME_NOTES && note)
3660 RTX_FRAME_RELATED_P (insn) = 1;
3663 HOST_WIDE_INT
3664 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3666 int freg_saved = 0;
3667 int i, j;
3669 /* The code in pa_expand_prologue and pa_expand_epilogue must
3670 be consistent with the rounding and size calculation done here.
3671 Change them at the same time. */
3673 /* We do our own stack alignment. First, round the size of the
3674 stack locals up to a word boundary. */
3675 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3677 /* Space for previous frame pointer + filler. If any frame is
3678 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3679 waste some space here for the sake of HP compatibility. The
3680 first slot is only used when the frame pointer is needed. */
3681 if (size || frame_pointer_needed)
3682 size += STARTING_FRAME_OFFSET;
3684 /* If the current function calls __builtin_eh_return, then we need
3685 to allocate stack space for registers that will hold data for
3686 the exception handler. */
3687 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3689 unsigned int i;
3691 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3692 continue;
3693 size += i * UNITS_PER_WORD;
3696 /* Account for space used by the callee general register saves. */
3697 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3698 if (df_regs_ever_live_p (i))
3699 size += UNITS_PER_WORD;
3701 /* Account for space used by the callee floating point register saves. */
3702 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3703 if (df_regs_ever_live_p (i)
3704 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3706 freg_saved = 1;
3708 /* We always save both halves of the FP register, so always
3709 increment the frame size by 8 bytes. */
3710 size += 8;
3713 /* If any of the floating registers are saved, account for the
3714 alignment needed for the floating point register save block. */
3715 if (freg_saved)
3717 size = (size + 7) & ~7;
3718 if (fregs_live)
3719 *fregs_live = 1;
3722 /* The various ABIs include space for the outgoing parameters in the
3723 size of the current function's stack frame. We don't need to align
3724 for the outgoing arguments as their alignment is set by the final
3725 rounding for the frame as a whole. */
3726 size += crtl->outgoing_args_size;
3728 /* Allocate space for the fixed frame marker. This space must be
3729 allocated for any function that makes calls or allocates
3730 stack space. */
3731 if (!crtl->is_leaf || size)
3732 size += TARGET_64BIT ? 48 : 32;
3734 /* Finally, round to the preferred stack boundary. */
3735 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3736 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
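/* A worked sizing example (hypothetical, assuming the 32-bit ABI
   with STARTING_FRAME_OFFSET == 8 and a 64-bit preferred stack
   boundary): 40 bytes of locals round to 40; the frame pointer
   slot adds 8 (48); two callee-saved general registers add 8 (56);
   16 bytes of outgoing arguments make 72; the non-leaf frame
   marker adds 32 (104), already a multiple of 8, so the frame
   size is 104.  */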
3739 /* Generate the assembly code for function entry. FILE is a stdio
3740 stream to output the code to. SIZE is an int: how many units of
3741 temporary storage to allocate.
3743 Refer to the array `regs_ever_live' to determine which registers to
3744 save; `regs_ever_live[I]' is nonzero if register number I is ever
3745 used in the function. This function is responsible for knowing
3746 which registers should not be saved even if used. */
3748 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3749 of memory. If any fpu reg is used in the function, we allocate
3750 such a block here, at the bottom of the frame, just in case it's needed.
3752 If this function is a leaf procedure, then we may choose not
3753 to do a "save" insn. The decision about whether or not
3754 to do this is made in regclass.c. */
3756 static void
3757 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3759 /* The function's label and associated .PROC must never be
3760 separated and must be output *after* any profiling declarations
3761 to avoid changing spaces/subspaces within a procedure. */
3762 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3763 fputs ("\t.PROC\n", file);
3765 /* pa_expand_prologue does the dirty work now. We just need
3766 to output the assembler directives which denote the start
3767 of a function. */
3768 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3769 if (crtl->is_leaf)
3770 fputs (",NO_CALLS", file);
3771 else
3772 fputs (",CALLS", file);
3773 if (rp_saved)
3774 fputs (",SAVE_RP", file);
3776 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3777 at the beginning of the frame and that it is used as the frame
3778 pointer for the frame. We do this because our current frame
3779 layout doesn't conform to that specified in the HP runtime
3780 documentation and we need a way to indicate to programs such as
3781 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3782 isn't used by HP compilers but is supported by the assembler.
3783 However, SAVE_SP is supposed to indicate that the previous stack
3784 pointer has been saved in the frame marker. */
3785 if (frame_pointer_needed)
3786 fputs (",SAVE_SP", file);
3788 /* Pass on information about the number of callee register saves
3789 performed in the prologue.
3791 The compiler is supposed to pass the highest register number
3792 saved, the assembler then has to adjust that number before
3793 entering it into the unwind descriptor (to account for any
3794 caller saved registers with lower register numbers than the
3795 first callee saved register). */
3796 if (gr_saved)
3797 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3799 if (fr_saved)
3800 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3802 fputs ("\n\t.ENTRY\n", file);
3804 remove_useless_addtr_insns (0);
3807 void
3808 pa_expand_prologue (void)
3810 int merge_sp_adjust_with_store = 0;
3811 HOST_WIDE_INT size = get_frame_size ();
3812 HOST_WIDE_INT offset;
3813 int i;
3814 rtx insn, tmpreg;
3816 gr_saved = 0;
3817 fr_saved = 0;
3818 save_fregs = 0;
3820 /* Compute total size for frame pointer, filler, locals and rounding to
3821 the next word boundary. Similar code appears in pa_compute_frame_size
3822 and must be changed in tandem with this code. */
3823 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3824 if (local_fsize || frame_pointer_needed)
3825 local_fsize += STARTING_FRAME_OFFSET;
3827 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3828 if (flag_stack_usage_info)
3829 current_function_static_stack_size = actual_fsize;
3831 /* Compute a few things we will use often. */
3832 tmpreg = gen_rtx_REG (word_mode, 1);
3834 /* Save RP first. The calling conventions manual states RP will
3835 always be stored into the caller's frame at sp - 20 or sp - 16
3836 depending on which ABI is in use. */
3837 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3839 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3840 rp_saved = true;
3842 else
3843 rp_saved = false;
3845 /* Allocate the local frame and set up the frame pointer if needed. */
3846 if (actual_fsize != 0)
3848 if (frame_pointer_needed)
3850 /* Copy the old frame pointer temporarily into %r1. Set up the
3851 new stack pointer, then store away the saved old frame pointer
3852 into the stack at sp and at the same time update the stack
 3853 pointer by actual_fsize bytes. There are two versions: the
 3854 first handles small (<8k) frames, the second large (>=8k)
 3855 frames. */
3856 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3857 if (DO_FRAME_NOTES)
3858 RTX_FRAME_RELATED_P (insn) = 1;
3860 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3861 if (DO_FRAME_NOTES)
3862 RTX_FRAME_RELATED_P (insn) = 1;
3864 if (VAL_14_BITS_P (actual_fsize))
3865 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3866 else
3868 /* It is incorrect to store the saved frame pointer at *sp,
3869 then increment sp (writes beyond the current stack boundary).
3871 So instead use stwm to store at *sp and post-increment the
3872 stack pointer as an atomic operation. Then increment sp to
3873 finish allocating the new frame. */
3874 HOST_WIDE_INT adjust1 = 8192 - 64;
3875 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3877 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3878 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3879 adjust2, 1);
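	  /* Editor's sketch for a large frame, e.g. actual_fsize == 16384:
	     adjust1 == 8128 fits the 14-bit displacement of the stwm, and
	     adjust2 == 8256 completes the allocation, roughly:

		stwm %r1,8128(%sp)	; save old fp, sp += 8128
		addil L'8256,%sp	; sp += 8256, split via %r1
		ldo R'8256(%r1),%sp

	     The mnemonics are illustrative, not the exact output.  */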
3882 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3883 we need to store the previous stack pointer (frame pointer)
3884 into the frame marker on targets that use the HP unwind
3885 library. This allows the HP unwind library to be used to
3886 unwind GCC frames. However, we are not fully compatible
3887 with the HP library because our frame layout differs from
3888 that specified in the HP runtime specification.
3890 We don't want a frame note on this instruction as the frame
3891 marker moves during dynamic stack allocation.
3893 This instruction also serves as a blockage to prevent
3894 register spills from being scheduled before the stack
3895 pointer is raised. This is necessary as we store
3896 registers using the frame pointer as a base register,
3897 and the frame pointer is set before sp is raised. */
3898 if (TARGET_HPUX_UNWIND_LIBRARY)
3900 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3901 GEN_INT (TARGET_64BIT ? -8 : -4));
3903 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3904 hard_frame_pointer_rtx);
3906 else
3907 emit_insn (gen_blockage ());
3909 /* no frame pointer needed. */
3910 else
 3912 /* In some cases we can perform the first callee register save
 3913 and allocate the stack frame at the same time. If so, just
3914 make a note of it and defer allocating the frame until saving
3915 the callee registers. */
3916 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3917 merge_sp_adjust_with_store = 1;
 3918 /* Cannot optimize. Adjust the stack frame by actual_fsize
3919 bytes. */
3920 else
3921 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3922 actual_fsize, 1);
3926 /* Normal register save.
3928 Do not save the frame pointer in the frame_pointer_needed case. It
3929 was done earlier. */
3930 if (frame_pointer_needed)
3932 offset = local_fsize;
3934 /* Saving the EH return data registers in the frame is the simplest
3935 way to get the frame unwind information emitted. We put them
3936 just before the general registers. */
3937 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3939 unsigned int i, regno;
3941 for (i = 0; ; ++i)
3943 regno = EH_RETURN_DATA_REGNO (i);
3944 if (regno == INVALID_REGNUM)
3945 break;
3947 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3948 offset += UNITS_PER_WORD;
3952 for (i = 18; i >= 4; i--)
3953 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3955 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3956 offset += UNITS_PER_WORD;
3957 gr_saved++;
3959 /* Account for %r3 which is saved in a special place. */
3960 gr_saved++;
3962 /* No frame pointer needed. */
3963 else
3965 offset = local_fsize - actual_fsize;
3967 /* Saving the EH return data registers in the frame is the simplest
3968 way to get the frame unwind information emitted. */
3969 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3971 unsigned int i, regno;
3973 for (i = 0; ; ++i)
3975 regno = EH_RETURN_DATA_REGNO (i);
3976 if (regno == INVALID_REGNUM)
3977 break;
3979 /* If merge_sp_adjust_with_store is nonzero, then we can
3980 optimize the first save. */
3981 if (merge_sp_adjust_with_store)
3983 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3984 merge_sp_adjust_with_store = 0;
3986 else
3987 store_reg (regno, offset, STACK_POINTER_REGNUM);
3988 offset += UNITS_PER_WORD;
3992 for (i = 18; i >= 3; i--)
3993 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3995 /* If merge_sp_adjust_with_store is nonzero, then we can
3996 optimize the first GR save. */
3997 if (merge_sp_adjust_with_store)
3999 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4000 merge_sp_adjust_with_store = 0;
4002 else
4003 store_reg (i, offset, STACK_POINTER_REGNUM);
4004 offset += UNITS_PER_WORD;
4005 gr_saved++;
4008 /* If we wanted to merge the SP adjustment with a GR save, but we never
4009 did any GR saves, then just emit the adjustment here. */
4010 if (merge_sp_adjust_with_store)
4011 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4012 actual_fsize, 1);
4015 /* The hppa calling conventions say that %r19, the pic offset
4016 register, is saved at sp - 32 (in this function's frame)
4017 when generating PIC code. FIXME: What is the correct thing
4018 to do for functions which make no calls and allocate no
4019 frame? Do we need to allocate a frame, or can we just omit
4020 the save? For now we'll just omit the save.
4022 We don't want a note on this insn as the frame marker can
4023 move if there is a dynamic stack allocation. */
4024 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4026 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4028 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4032 /* Align pointer properly (doubleword boundary). */
4033 offset = (offset + 7) & ~7;
4035 /* Floating point register store. */
4036 if (save_fregs)
4038 rtx base;
4040 /* First get the frame or stack pointer to the start of the FP register
4041 save area. */
4042 if (frame_pointer_needed)
4044 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4045 base = hard_frame_pointer_rtx;
4047 else
4049 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4050 base = stack_pointer_rtx;
4053 /* Now actually save the FP registers. */
4054 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4056 if (df_regs_ever_live_p (i)
4057 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4059 rtx addr, insn, reg;
4060 addr = gen_rtx_MEM (DFmode,
4061 gen_rtx_POST_INC (word_mode, tmpreg));
4062 reg = gen_rtx_REG (DFmode, i);
4063 insn = emit_move_insn (addr, reg);
4064 if (DO_FRAME_NOTES)
4066 RTX_FRAME_RELATED_P (insn) = 1;
4067 if (TARGET_64BIT)
4069 rtx mem = gen_rtx_MEM (DFmode,
4070 plus_constant (Pmode, base,
4071 offset));
4072 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4073 gen_rtx_SET (VOIDmode, mem, reg));
4075 else
4077 rtx meml = gen_rtx_MEM (SFmode,
4078 plus_constant (Pmode, base,
4079 offset));
4080 rtx memr = gen_rtx_MEM (SFmode,
4081 plus_constant (Pmode, base,
4082 offset + 4));
4083 rtx regl = gen_rtx_REG (SFmode, i);
4084 rtx regr = gen_rtx_REG (SFmode, i + 1);
4085 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4086 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4087 rtvec vec;
4089 RTX_FRAME_RELATED_P (setl) = 1;
4090 RTX_FRAME_RELATED_P (setr) = 1;
4091 vec = gen_rtvec (2, setl, setr);
4092 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4093 gen_rtx_SEQUENCE (VOIDmode, vec));
4096 offset += GET_MODE_SIZE (DFmode);
4097 fr_saved++;
4103 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4104 Handle case where DISP > 8k by using the add_high_const patterns. */
4106 static void
4107 load_reg (int reg, HOST_WIDE_INT disp, int base)
4109 rtx dest = gen_rtx_REG (word_mode, reg);
4110 rtx basereg = gen_rtx_REG (Pmode, base);
4111 rtx src;
4113 if (VAL_14_BITS_P (disp))
4114 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4115 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4117 rtx delta = GEN_INT (disp);
4118 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4120 emit_move_insn (tmpreg, delta);
4121 if (TARGET_DISABLE_INDEXING)
4123 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4124 src = gen_rtx_MEM (word_mode, tmpreg);
4126 else
4127 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4129 else
4131 rtx delta = GEN_INT (disp);
4132 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4133 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4135 emit_move_insn (tmpreg, high);
4136 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4139 emit_move_insn (dest, src);
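  /* Editor's note: for a displacement outside the 14-bit range on a
     32-bit target, the HIGH/LO_SUM path above yields roughly

	addil L'20000,%sp	; %r1 = %sp + high part of 20000
	ldw R'20000(%r1),%r3	; add the low part in the load

     for a call such as load_reg (3, 20000, STACK_POINTER_REGNUM).
     Illustrative only; the real insns come from the patterns in
     pa.md.  */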
4142 /* Update the total code bytes output to the text section. */
4144 static void
4145 update_total_code_bytes (unsigned int nbytes)
4147 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4148 && !IN_NAMED_SECTION_P (cfun->decl))
4150 unsigned int old_total = total_code_bytes;
4152 total_code_bytes += nbytes;
4154 /* Be prepared to handle overflows. */
4155 if (old_total > total_code_bytes)
4156 total_code_bytes = UINT_MAX;
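      /* Editor's note: total_code_bytes is unsigned, so a wrapped sum
	 shows up as the new total comparing less than the old one; the
	 count then saturates at UINT_MAX instead of restarting at
	 zero.  */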
4160 /* This function generates the assembly code for function exit.
4161 Args are as for output_function_prologue ().
4163 The function epilogue should not depend on the current stack
4164 pointer! It should use the frame pointer only. This is mandatory
4165 because of alloca; we also take advantage of it to omit stack
4166 adjustments before returning. */
4168 static void
4169 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4171 rtx insn = get_last_insn ();
4172 bool extra_nop;
4174 /* pa_expand_epilogue does the dirty work now. We just need
4175 to output the assembler directives which denote the end
4176 of a function.
4178 To make debuggers happy, emit a nop if the epilogue was completely
4179 eliminated due to a volatile call as the last insn in the
4180 current function. That way the return address (in %r2) will
4181 always point to a valid instruction in the current function. */
4183 /* Get the last real insn. */
4184 if (NOTE_P (insn))
4185 insn = prev_real_insn (insn);
4187 /* If it is a sequence, then look inside. */
4188 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4189 insn = XVECEXP (PATTERN (insn), 0, 0);
4191 /* If insn is a CALL_INSN, then it must be a call to a volatile
4192 function (otherwise there would be epilogue insns). */
4193 if (insn && CALL_P (insn))
4195 fputs ("\tnop\n", file);
4196 extra_nop = true;
4198 else
4199 extra_nop = false;
4201 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4203 if (TARGET_SOM && TARGET_GAS)
4205 /* We are done with this subspace except possibly for some additional
4206 debug information. Forget that we are in this subspace to ensure
4207 that the next function is output in its own subspace. */
4208 in_section = NULL;
4209 cfun->machine->in_nsubspa = 2;
4212 /* Thunks do their own insn accounting. */
4213 if (cfun->is_thunk)
4214 return;
4216 if (INSN_ADDRESSES_SET_P ())
4218 last_address = extra_nop ? 4 : 0;
4219 insn = get_last_nonnote_insn ();
4220 if (insn)
4222 last_address += INSN_ADDRESSES (INSN_UID (insn));
4223 if (INSN_P (insn))
4224 last_address += insn_default_length (insn);
4226 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4227 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4229 else
4230 last_address = UINT_MAX;
4232 /* Finally, update the total number of code bytes output so far. */
4233 update_total_code_bytes (last_address);
4236 void
4237 pa_expand_epilogue (void)
4239 rtx tmpreg;
4240 HOST_WIDE_INT offset;
4241 HOST_WIDE_INT ret_off = 0;
4242 int i;
4243 int merge_sp_adjust_with_load = 0;
4245 /* We will use this often. */
4246 tmpreg = gen_rtx_REG (word_mode, 1);
4248 /* Try to restore RP early to avoid load/use interlocks when
4249 RP gets used in the return (bv) instruction. This appears to still
4250 be necessary even when we schedule the prologue and epilogue. */
4251 if (rp_saved)
4253 ret_off = TARGET_64BIT ? -16 : -20;
4254 if (frame_pointer_needed)
4256 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4257 ret_off = 0;
4259 else
4261 /* No frame pointer, and stack is smaller than 8k. */
4262 if (VAL_14_BITS_P (ret_off - actual_fsize))
4264 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4265 ret_off = 0;
4270 /* General register restores. */
4271 if (frame_pointer_needed)
4273 offset = local_fsize;
4275 /* If the current function calls __builtin_eh_return, then we need
4276 to restore the saved EH data registers. */
4277 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4279 unsigned int i, regno;
4281 for (i = 0; ; ++i)
4283 regno = EH_RETURN_DATA_REGNO (i);
4284 if (regno == INVALID_REGNUM)
4285 break;
4287 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4288 offset += UNITS_PER_WORD;
4292 for (i = 18; i >= 4; i--)
4293 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4295 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4296 offset += UNITS_PER_WORD;
4299 else
4301 offset = local_fsize - actual_fsize;
4303 /* If the current function calls __builtin_eh_return, then we need
4304 to restore the saved EH data registers. */
4305 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4307 unsigned int i, regno;
4309 for (i = 0; ; ++i)
4311 regno = EH_RETURN_DATA_REGNO (i);
4312 if (regno == INVALID_REGNUM)
4313 break;
4315 /* Only for the first load.
4316 merge_sp_adjust_with_load holds the register load
4317 with which we will merge the sp adjustment. */
4318 if (merge_sp_adjust_with_load == 0
4319 && local_fsize == 0
4320 && VAL_14_BITS_P (-actual_fsize))
4321 merge_sp_adjust_with_load = regno;
4322 else
4323 load_reg (regno, offset, STACK_POINTER_REGNUM);
4324 offset += UNITS_PER_WORD;
4328 for (i = 18; i >= 3; i--)
4330 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4332 /* Only for the first load.
4333 merge_sp_adjust_with_load holds the register load
4334 with which we will merge the sp adjustment. */
4335 if (merge_sp_adjust_with_load == 0
4336 && local_fsize == 0
4337 && VAL_14_BITS_P (-actual_fsize))
4338 merge_sp_adjust_with_load = i;
4339 else
4340 load_reg (i, offset, STACK_POINTER_REGNUM);
4341 offset += UNITS_PER_WORD;
4346 /* Align pointer properly (doubleword boundary). */
4347 offset = (offset + 7) & ~7;
4349 /* FP register restores. */
4350 if (save_fregs)
4352 /* Adjust the register to index off of. */
4353 if (frame_pointer_needed)
4354 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4355 else
4356 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4358 /* Actually do the restores now. */
4359 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4360 if (df_regs_ever_live_p (i)
4361 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4363 rtx src = gen_rtx_MEM (DFmode,
4364 gen_rtx_POST_INC (word_mode, tmpreg));
4365 rtx dest = gen_rtx_REG (DFmode, i);
4366 emit_move_insn (dest, src);
4370 /* Emit a blockage insn here to keep these insns from being moved to
4371 an earlier spot in the epilogue, or into the main instruction stream.
4373 This is necessary as we must not cut the stack back before all the
4374 restores are finished. */
4375 emit_insn (gen_blockage ());
4377 /* Reset stack pointer (and possibly frame pointer). The stack
4378 pointer is initially set to fp + 64 to avoid a race condition. */
4379 if (frame_pointer_needed)
4381 rtx delta = GEN_INT (-64);
4383 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4384 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4385 stack_pointer_rtx, delta));
4387 /* If we were deferring a callee register restore, do it now. */
4388 else if (merge_sp_adjust_with_load)
4390 rtx delta = GEN_INT (-actual_fsize);
4391 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4393 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4395 else if (actual_fsize != 0)
4396 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4397 - actual_fsize, 0);
4399 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4400 frame greater than 8k), do so now. */
4401 if (ret_off != 0)
4402 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4404 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4406 rtx sa = EH_RETURN_STACKADJ_RTX;
4408 emit_insn (gen_blockage ());
4409 emit_insn (TARGET_64BIT
4410 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4411 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4415 bool
4416 pa_can_use_return_insn (void)
4418 if (!reload_completed)
4419 return false;
4421 if (frame_pointer_needed)
4422 return false;
4424 if (df_regs_ever_live_p (2))
4425 return false;
4427 if (crtl->profile)
4428 return false;
4430 return pa_compute_frame_size (get_frame_size (), 0) == 0;
 4433 rtx
 4434 hppa_pic_save_rtx (void)
4436 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4439 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4440 #define NO_DEFERRED_PROFILE_COUNTERS 0
4441 #endif
4444 /* Vector of funcdef numbers. */
4445 static vec<int> funcdef_nos;
4447 /* Output deferred profile counters. */
4448 static void
4449 output_deferred_profile_counters (void)
4451 unsigned int i;
4452 int align, n;
4454 if (funcdef_nos.is_empty ())
4455 return;
4457 switch_to_section (data_section);
4458 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4459 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4461 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4463 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4464 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4467 funcdef_nos.release ();
4470 void
4471 hppa_profile_hook (int label_no)
4473 /* We use SImode for the address of the function in both 32 and
4474 64-bit code to avoid having to provide DImode versions of the
4475 lcla2 and load_offset_label_address insn patterns. */
4476 rtx reg = gen_reg_rtx (SImode);
4477 rtx label_rtx = gen_label_rtx ();
4478 rtx begin_label_rtx, call_insn;
4479 char begin_label_name[16];
4481 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4482 label_no);
4483 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4485 if (TARGET_64BIT)
4486 emit_move_insn (arg_pointer_rtx,
4487 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4488 GEN_INT (64)));
4490 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4492 /* The address of the function is loaded into %r25 with an instruction-
4493 relative sequence that avoids the use of relocations. The sequence
4494 is split so that the load_offset_label_address instruction can
4495 occupy the delay slot of the call to _mcount. */
4496 if (TARGET_PA_20)
4497 emit_insn (gen_lcla2 (reg, label_rtx));
4498 else
4499 emit_insn (gen_lcla1 (reg, label_rtx));
4501 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4502 reg, begin_label_rtx, label_rtx));
4504 #if !NO_DEFERRED_PROFILE_COUNTERS
4506 rtx count_label_rtx, addr, r24;
4507 char count_label_name[16];
4509 funcdef_nos.safe_push (label_no);
4510 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4511 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4513 addr = force_reg (Pmode, count_label_rtx);
4514 r24 = gen_rtx_REG (Pmode, 24);
4515 emit_move_insn (r24, addr);
4517 call_insn =
4518 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4519 gen_rtx_SYMBOL_REF (Pmode,
4520 "_mcount")),
4521 GEN_INT (TARGET_64BIT ? 24 : 12)));
4523 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4525 #else
4527 call_insn =
4528 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4529 gen_rtx_SYMBOL_REF (Pmode,
4530 "_mcount")),
4531 GEN_INT (TARGET_64BIT ? 16 : 8)));
4533 #endif
4535 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4536 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4538 /* Indicate the _mcount call cannot throw, nor will it execute a
4539 non-local goto. */
4540 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4543 /* Fetch the return address for the frame COUNT steps up from
4544 the current frame, after the prologue. FRAMEADDR is the
4545 frame pointer of the COUNT frame.
4547 We want to ignore any export stub remnants here. To handle this,
4548 we examine the code at the return address, and if it is an export
4549 stub, we return a memory rtx for the stub return address stored
4550 at frame-24.
4552 The value returned is used in two different ways:
4554 1. To find a function's caller.
4556 2. To change the return address for a function.
4558 This function handles most instances of case 1; however, it will
4559 fail if there are two levels of stubs to execute on the return
4560 path. The only way I believe that can happen is if the return value
4561 needs a parameter relocation, which never happens for C code.
4563 This function handles most instances of case 2; however, it will
4564 fail if we did not originally have stub code on the return path
4565 but will need stub code on the new return path. This can happen if
4566 the caller & callee are both in the main program, but the new
4567 return location is in a shared library. */
 4569 rtx
 4570 pa_return_addr_rtx (int count, rtx frameaddr)
4572 rtx label;
4573 rtx rp;
4574 rtx saved_rp;
4575 rtx ins;
4577 /* The instruction stream at the return address of a PA1.X export stub is:
4579 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4580 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4581 0x00011820 | stub+16: mtsp r1,sr0
4582 0xe0400002 | stub+20: be,n 0(sr0,rp)
4584 0xe0400002 must be specified as -532676606 so that it won't be
4585 rejected as an invalid immediate operand on 64-bit hosts.
4587 The instruction stream at the return address of a PA2.0 export stub is:
4589 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4590 0xe840d002 | stub+12: bve,n (rp)
4593 HOST_WIDE_INT insns[4];
4594 int i, len;
4596 if (count != 0)
4597 return NULL_RTX;
4599 rp = get_hard_reg_initial_val (Pmode, 2);
4601 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4602 return rp;
4604 /* If there is no export stub then just use the value saved from
4605 the return pointer register. */
4607 saved_rp = gen_reg_rtx (Pmode);
4608 emit_move_insn (saved_rp, rp);
4610 /* Get pointer to the instruction stream. We have to mask out the
4611 privilege level from the two low order bits of the return address
4612 pointer here so that ins will point to the start of the first
4613 instruction that would have been executed if we returned. */
4614 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4615 label = gen_label_rtx ();
4617 if (TARGET_PA_20)
4619 insns[0] = 0x4bc23fd1;
4620 insns[1] = -398405630;
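      /* Editor's note: -398405630 is 0xe840d002, the "bve,n (rp)" word
	 quoted above, written in signed form for the same reason as
	 -532676606.  */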
4621 len = 2;
4623 else
4625 insns[0] = 0x4bc23fd1;
4626 insns[1] = 0x004010a1;
4627 insns[2] = 0x00011820;
4628 insns[3] = -532676606;
4629 len = 4;
4632 /* Check the instruction stream at the normal return address for the
 4633 export stub. If it is an export stub, then our return address is
4634 really in -24[frameaddr]. */
4636 for (i = 0; i < len; i++)
4638 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4639 rtx op1 = GEN_INT (insns[i]);
4640 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4643 /* Here we know that our return address points to an export
4644 stub. We don't want to return the address of the export stub,
4645 but rather the return address of the export stub. That return
4646 address is stored at -24[frameaddr]. */
4648 emit_move_insn (saved_rp,
4649 gen_rtx_MEM (Pmode,
4650 memory_address (Pmode,
4651 plus_constant (Pmode, frameaddr,
4652 -24))));
4654 emit_label (label);
4656 return saved_rp;
4659 void
4660 pa_emit_bcond_fp (rtx operands[])
4662 enum rtx_code code = GET_CODE (operands[0]);
4663 rtx operand0 = operands[1];
4664 rtx operand1 = operands[2];
4665 rtx label = operands[3];
4667 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4668 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4670 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4671 gen_rtx_IF_THEN_ELSE (VOIDmode,
4672 gen_rtx_fmt_ee (NE,
4673 VOIDmode,
4674 gen_rtx_REG (CCFPmode, 0),
4675 const0_rtx),
4676 gen_rtx_LABEL_REF (VOIDmode, label),
4677 pc_rtx)));
4681 /* Adjust the cost of a scheduling dependency. Return the new cost of
4682 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4684 static int
4685 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4687 enum attr_type attr_type;
4689 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4690 true dependencies as they are described with bypasses now. */
4691 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4692 return cost;
4694 if (! recog_memoized (insn))
4695 return 0;
4697 attr_type = get_attr_type (insn);
4699 switch (REG_NOTE_KIND (link))
4701 case REG_DEP_ANTI:
4702 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4703 cycles later. */
4705 if (attr_type == TYPE_FPLOAD)
4707 rtx pat = PATTERN (insn);
4708 rtx dep_pat = PATTERN (dep_insn);
4709 if (GET_CODE (pat) == PARALLEL)
4711 /* This happens for the fldXs,mb patterns. */
4712 pat = XVECEXP (pat, 0, 0);
4714 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4715 /* If this happens, we have to extend this to schedule
4716 optimally. Return 0 for now. */
4717 return 0;
4719 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4721 if (! recog_memoized (dep_insn))
4722 return 0;
4723 switch (get_attr_type (dep_insn))
4725 case TYPE_FPALU:
4726 case TYPE_FPMULSGL:
4727 case TYPE_FPMULDBL:
4728 case TYPE_FPDIVSGL:
4729 case TYPE_FPDIVDBL:
4730 case TYPE_FPSQRTSGL:
4731 case TYPE_FPSQRTDBL:
4732 /* A fpload can't be issued until one cycle before a
4733 preceding arithmetic operation has finished if
4734 the target of the fpload is any of the sources
4735 (or destination) of the arithmetic operation. */
4736 return insn_default_latency (dep_insn) - 1;
4738 default:
4739 return 0;
4743 else if (attr_type == TYPE_FPALU)
4745 rtx pat = PATTERN (insn);
4746 rtx dep_pat = PATTERN (dep_insn);
4747 if (GET_CODE (pat) == PARALLEL)
4749 /* This happens for the fldXs,mb patterns. */
4750 pat = XVECEXP (pat, 0, 0);
4752 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4753 /* If this happens, we have to extend this to schedule
4754 optimally. Return 0 for now. */
4755 return 0;
4757 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4759 if (! recog_memoized (dep_insn))
4760 return 0;
4761 switch (get_attr_type (dep_insn))
4763 case TYPE_FPDIVSGL:
4764 case TYPE_FPDIVDBL:
4765 case TYPE_FPSQRTSGL:
4766 case TYPE_FPSQRTDBL:
4767 /* An ALU flop can't be issued until two cycles before a
4768 preceding divide or sqrt operation has finished if
4769 the target of the ALU flop is any of the sources
4770 (or destination) of the divide or sqrt operation. */
4771 return insn_default_latency (dep_insn) - 2;
4773 default:
4774 return 0;
4779 /* For other anti dependencies, the cost is 0. */
4780 return 0;
4782 case REG_DEP_OUTPUT:
4783 /* Output dependency; DEP_INSN writes a register that INSN writes some
4784 cycles later. */
4785 if (attr_type == TYPE_FPLOAD)
4787 rtx pat = PATTERN (insn);
4788 rtx dep_pat = PATTERN (dep_insn);
4789 if (GET_CODE (pat) == PARALLEL)
4791 /* This happens for the fldXs,mb patterns. */
4792 pat = XVECEXP (pat, 0, 0);
4794 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4795 /* If this happens, we have to extend this to schedule
4796 optimally. Return 0 for now. */
4797 return 0;
4799 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4801 if (! recog_memoized (dep_insn))
4802 return 0;
4803 switch (get_attr_type (dep_insn))
4805 case TYPE_FPALU:
4806 case TYPE_FPMULSGL:
4807 case TYPE_FPMULDBL:
4808 case TYPE_FPDIVSGL:
4809 case TYPE_FPDIVDBL:
4810 case TYPE_FPSQRTSGL:
4811 case TYPE_FPSQRTDBL:
4812 /* A fpload can't be issued until one cycle before a
4813 preceding arithmetic operation has finished if
4814 the target of the fpload is the destination of the
4815 arithmetic operation.
4817 Exception: For PA7100LC, PA7200 and PA7300, the cost
4818 is 3 cycles, unless they bundle together. We also
4819 pay the penalty if the second insn is a fpload. */
4820 return insn_default_latency (dep_insn) - 1;
4822 default:
4823 return 0;
4827 else if (attr_type == TYPE_FPALU)
4829 rtx pat = PATTERN (insn);
4830 rtx dep_pat = PATTERN (dep_insn);
4831 if (GET_CODE (pat) == PARALLEL)
4833 /* This happens for the fldXs,mb patterns. */
4834 pat = XVECEXP (pat, 0, 0);
4836 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4837 /* If this happens, we have to extend this to schedule
4838 optimally. Return 0 for now. */
4839 return 0;
4841 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4843 if (! recog_memoized (dep_insn))
4844 return 0;
4845 switch (get_attr_type (dep_insn))
4847 case TYPE_FPDIVSGL:
4848 case TYPE_FPDIVDBL:
4849 case TYPE_FPSQRTSGL:
4850 case TYPE_FPSQRTDBL:
4851 /* An ALU flop can't be issued until two cycles before a
4852 preceding divide or sqrt operation has finished if
4853 the target of the ALU flop is also the target of
4854 the divide or sqrt operation. */
4855 return insn_default_latency (dep_insn) - 2;
4857 default:
4858 return 0;
4863 /* For other output dependencies, the cost is 0. */
4864 return 0;
4866 default:
4867 gcc_unreachable ();
4871 /* Adjust scheduling priorities. We use this to try and keep addil
4872 and the next use of %r1 close together. */
4873 static int
4874 pa_adjust_priority (rtx insn, int priority)
4876 rtx set = single_set (insn);
4877 rtx src, dest;
4878 if (set)
4880 src = SET_SRC (set);
4881 dest = SET_DEST (set);
4882 if (GET_CODE (src) == LO_SUM
4883 && symbolic_operand (XEXP (src, 1), VOIDmode)
4884 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4885 priority >>= 3;
4887 else if (GET_CODE (src) == MEM
4888 && GET_CODE (XEXP (src, 0)) == LO_SUM
4889 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4890 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4891 priority >>= 1;
4893 else if (GET_CODE (dest) == MEM
4894 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4895 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4896 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4897 priority >>= 3;
4899 return priority;
4902 /* The 700 can only issue a single insn at a time.
4903 The 7XXX processors can issue two insns at a time.
4904 The 8000 can issue 4 insns at a time. */
4905 static int
4906 pa_issue_rate (void)
4908 switch (pa_cpu)
4910 case PROCESSOR_700: return 1;
4911 case PROCESSOR_7100: return 2;
4912 case PROCESSOR_7100LC: return 2;
4913 case PROCESSOR_7200: return 2;
4914 case PROCESSOR_7300: return 2;
4915 case PROCESSOR_8000: return 4;
4917 default:
4918 gcc_unreachable ();
4924 /* Return any length plus adjustment needed by INSN which already has
4925 its length computed as LENGTH. Return LENGTH if no adjustment is
4926 necessary.
4928 Also compute the length of an inline block move here as it is too
4929 complicated to express as a length attribute in pa.md. */
 4930 int
 4931 pa_adjust_insn_length (rtx insn, int length)
4933 rtx pat = PATTERN (insn);
4935 /* If length is negative or undefined, provide initial length. */
4936 if ((unsigned int) length >= INT_MAX)
4938 if (GET_CODE (pat) == SEQUENCE)
4939 insn = XVECEXP (pat, 0, 0);
4941 switch (get_attr_type (insn))
4943 case TYPE_MILLI:
4944 length = pa_attr_length_millicode_call (insn);
4945 break;
4946 case TYPE_CALL:
4947 length = pa_attr_length_call (insn, 0);
4948 break;
4949 case TYPE_SIBCALL:
4950 length = pa_attr_length_call (insn, 1);
4951 break;
4952 case TYPE_DYNCALL:
4953 length = pa_attr_length_indirect_call (insn);
4954 break;
4955 case TYPE_SH_FUNC_ADRS:
4956 length = pa_attr_length_millicode_call (insn) + 20;
4957 break;
4958 default:
4959 gcc_unreachable ();
4963 /* Block move pattern. */
4964 if (NONJUMP_INSN_P (insn)
4965 && GET_CODE (pat) == PARALLEL
4966 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4967 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4968 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4969 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4970 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4971 length += compute_movmem_length (insn) - 4;
4972 /* Block clear pattern. */
4973 else if (NONJUMP_INSN_P (insn)
4974 && GET_CODE (pat) == PARALLEL
4975 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4976 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4977 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4978 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4979 length += compute_clrmem_length (insn) - 4;
4980 /* Conditional branch with an unfilled delay slot. */
4981 else if (JUMP_P (insn) && ! simplejump_p (insn))
4983 /* Adjust a short backwards conditional with an unfilled delay slot. */
4984 if (GET_CODE (pat) == SET
4985 && length == 4
4986 && JUMP_LABEL (insn) != NULL_RTX
4987 && ! forward_branch_p (insn))
4988 length += 4;
4989 else if (GET_CODE (pat) == PARALLEL
4990 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4991 && length == 4)
4992 length += 4;
4993 /* Adjust dbra insn with short backwards conditional branch with
4994 unfilled delay slot -- only for case where counter is in a
 4995 general register. */
4996 else if (GET_CODE (pat) == PARALLEL
4997 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4998 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4999 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5000 && length == 4
5001 && ! forward_branch_p (insn))
5002 length += 4;
5004 return length;
5007 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5009 static bool
5010 pa_print_operand_punct_valid_p (unsigned char code)
5012 if (code == '@'
5013 || code == '#'
5014 || code == '*'
5015 || code == '^')
5016 return true;
5018 return false;
5021 /* Print operand X (an rtx) in assembler syntax to file FILE.
5022 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5023 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5025 void
5026 pa_print_operand (FILE *file, rtx x, int code)
5028 switch (code)
5030 case '#':
5031 /* Output a 'nop' if there's nothing for the delay slot. */
5032 if (dbr_sequence_length () == 0)
5033 fputs ("\n\tnop", file);
5034 return;
5035 case '*':
 5036 /* Output a nullification completer if there's nothing for the
 5037 delay slot or nullification is requested. */
5038 if (dbr_sequence_length () == 0 ||
5039 (final_sequence &&
5040 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5041 fputs (",n", file);
5042 return;
5043 case 'R':
5044 /* Print out the second register name of a register pair.
5045 I.e., R (6) => 7. */
5046 fputs (reg_names[REGNO (x) + 1], file);
5047 return;
5048 case 'r':
5049 /* A register or zero. */
5050 if (x == const0_rtx
5051 || (x == CONST0_RTX (DFmode))
5052 || (x == CONST0_RTX (SFmode)))
5054 fputs ("%r0", file);
5055 return;
5057 else
5058 break;
5059 case 'f':
5060 /* A register or zero (floating point). */
5061 if (x == const0_rtx
5062 || (x == CONST0_RTX (DFmode))
5063 || (x == CONST0_RTX (SFmode)))
5065 fputs ("%fr0", file);
5066 return;
5068 else
5069 break;
5070 case 'A':
5072 rtx xoperands[2];
5074 xoperands[0] = XEXP (XEXP (x, 0), 0);
5075 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5076 pa_output_global_address (file, xoperands[1], 0);
5077 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5078 return;
5081 case 'C': /* Plain (C)ondition */
5082 case 'X':
5083 switch (GET_CODE (x))
5085 case EQ:
5086 fputs ("=", file); break;
5087 case NE:
5088 fputs ("<>", file); break;
5089 case GT:
5090 fputs (">", file); break;
5091 case GE:
5092 fputs (">=", file); break;
5093 case GEU:
5094 fputs (">>=", file); break;
5095 case GTU:
5096 fputs (">>", file); break;
5097 case LT:
5098 fputs ("<", file); break;
5099 case LE:
5100 fputs ("<=", file); break;
5101 case LEU:
5102 fputs ("<<=", file); break;
5103 case LTU:
5104 fputs ("<<", file); break;
5105 default:
5106 gcc_unreachable ();
5108 return;
5109 case 'N': /* Condition, (N)egated */
5110 switch (GET_CODE (x))
5112 case EQ:
5113 fputs ("<>", file); break;
5114 case NE:
5115 fputs ("=", file); break;
5116 case GT:
5117 fputs ("<=", file); break;
5118 case GE:
5119 fputs ("<", file); break;
5120 case GEU:
5121 fputs ("<<", file); break;
5122 case GTU:
5123 fputs ("<<=", file); break;
5124 case LT:
5125 fputs (">=", file); break;
5126 case LE:
5127 fputs (">", file); break;
5128 case LEU:
5129 fputs (">>", file); break;
5130 case LTU:
5131 fputs (">>=", file); break;
5132 default:
5133 gcc_unreachable ();
5135 return;
5136 /* For floating point comparisons. Note that the output
5137 predicates are the complement of the desired mode. The
5138 conditions for GT, GE, LT, LE and LTGT cause an invalid
5139 operation exception if the result is unordered and this
5140 exception is enabled in the floating-point status register. */
5141 case 'Y':
5142 switch (GET_CODE (x))
5144 case EQ:
5145 fputs ("!=", file); break;
5146 case NE:
5147 fputs ("=", file); break;
5148 case GT:
5149 fputs ("!>", file); break;
5150 case GE:
5151 fputs ("!>=", file); break;
5152 case LT:
5153 fputs ("!<", file); break;
5154 case LE:
5155 fputs ("!<=", file); break;
5156 case LTGT:
5157 fputs ("!<>", file); break;
5158 case UNLE:
5159 fputs ("!?<=", file); break;
5160 case UNLT:
5161 fputs ("!?<", file); break;
5162 case UNGE:
5163 fputs ("!?>=", file); break;
5164 case UNGT:
5165 fputs ("!?>", file); break;
5166 case UNEQ:
5167 fputs ("!?=", file); break;
5168 case UNORDERED:
5169 fputs ("!?", file); break;
5170 case ORDERED:
5171 fputs ("?", file); break;
5172 default:
5173 gcc_unreachable ();
5175 return;
5176 case 'S': /* Condition, operands are (S)wapped. */
5177 switch (GET_CODE (x))
5179 case EQ:
5180 fputs ("=", file); break;
5181 case NE:
5182 fputs ("<>", file); break;
5183 case GT:
5184 fputs ("<", file); break;
5185 case GE:
5186 fputs ("<=", file); break;
5187 case GEU:
5188 fputs ("<<=", file); break;
5189 case GTU:
5190 fputs ("<<", file); break;
5191 case LT:
5192 fputs (">", file); break;
5193 case LE:
5194 fputs (">=", file); break;
5195 case LEU:
5196 fputs (">>=", file); break;
5197 case LTU:
5198 fputs (">>", file); break;
5199 default:
5200 gcc_unreachable ();
5202 return;
 5203 case 'B': /* Condition, (B)oth swapped and negated. */
5204 switch (GET_CODE (x))
5206 case EQ:
5207 fputs ("<>", file); break;
5208 case NE:
5209 fputs ("=", file); break;
5210 case GT:
5211 fputs (">=", file); break;
5212 case GE:
5213 fputs (">", file); break;
5214 case GEU:
5215 fputs (">>", file); break;
5216 case GTU:
5217 fputs (">>=", file); break;
5218 case LT:
5219 fputs ("<=", file); break;
5220 case LE:
5221 fputs ("<", file); break;
5222 case LEU:
5223 fputs ("<<", file); break;
5224 case LTU:
5225 fputs ("<<=", file); break;
5226 default:
5227 gcc_unreachable ();
5229 return;
5230 case 'k':
5231 gcc_assert (GET_CODE (x) == CONST_INT);
5232 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5233 return;
5234 case 'Q':
5235 gcc_assert (GET_CODE (x) == CONST_INT);
5236 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5237 return;
5238 case 'L':
5239 gcc_assert (GET_CODE (x) == CONST_INT);
5240 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5241 return;
5242 case 'O':
5243 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5244 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5245 return;
5246 case 'p':
5247 gcc_assert (GET_CODE (x) == CONST_INT);
5248 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5249 return;
5250 case 'P':
5251 gcc_assert (GET_CODE (x) == CONST_INT);
5252 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5253 return;
5254 case 'I':
5255 if (GET_CODE (x) == CONST_INT)
5256 fputs ("i", file);
5257 return;
5258 case 'M':
5259 case 'F':
5260 switch (GET_CODE (XEXP (x, 0)))
5262 case PRE_DEC:
5263 case PRE_INC:
5264 if (ASSEMBLER_DIALECT == 0)
5265 fputs ("s,mb", file);
5266 else
5267 fputs (",mb", file);
5268 break;
5269 case POST_DEC:
5270 case POST_INC:
5271 if (ASSEMBLER_DIALECT == 0)
5272 fputs ("s,ma", file);
5273 else
5274 fputs (",ma", file);
5275 break;
5276 case PLUS:
5277 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5278 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5280 if (ASSEMBLER_DIALECT == 0)
5281 fputs ("x", file);
5283 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5284 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5286 if (ASSEMBLER_DIALECT == 0)
5287 fputs ("x,s", file);
5288 else
5289 fputs (",s", file);
5291 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5292 fputs ("s", file);
5293 break;
5294 default:
5295 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5296 fputs ("s", file);
5297 break;
5299 return;
5300 case 'G':
5301 pa_output_global_address (file, x, 0);
5302 return;
5303 case 'H':
5304 pa_output_global_address (file, x, 1);
5305 return;
5306 case 0: /* Don't do anything special */
5307 break;
5308 case 'Z':
5310 unsigned op[3];
5311 compute_zdepwi_operands (INTVAL (x), op);
5312 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5313 return;
5315 case 'z':
5317 unsigned op[3];
5318 compute_zdepdi_operands (INTVAL (x), op);
5319 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5320 return;
5322 case 'c':
5323 /* We can get here from a .vtable_inherit due to our
5324 CONSTANT_ADDRESS_P rejecting perfectly good constant
5325 addresses. */
5326 break;
5327 default:
5328 gcc_unreachable ();
5330 if (GET_CODE (x) == REG)
5332 fputs (reg_names [REGNO (x)], file);
5333 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5335 fputs ("R", file);
5336 return;
5338 if (FP_REG_P (x)
5339 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5340 && (REGNO (x) & 1) == 0)
5341 fputs ("L", file);
5343 else if (GET_CODE (x) == MEM)
5345 int size = GET_MODE_SIZE (GET_MODE (x));
5346 rtx base = NULL_RTX;
5347 switch (GET_CODE (XEXP (x, 0)))
5349 case PRE_DEC:
5350 case POST_DEC:
5351 base = XEXP (XEXP (x, 0), 0);
5352 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5353 break;
5354 case PRE_INC:
5355 case POST_INC:
5356 base = XEXP (XEXP (x, 0), 0);
5357 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5358 break;
5359 case PLUS:
5360 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5361 fprintf (file, "%s(%s)",
5362 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5363 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5364 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5365 fprintf (file, "%s(%s)",
5366 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5367 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5368 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5369 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5371 /* Because the REG_POINTER flag can get lost during reload,
5372 pa_legitimate_address_p canonicalizes the order of the
5373 index and base registers in the combined move patterns. */
5374 rtx base = XEXP (XEXP (x, 0), 1);
5375 rtx index = XEXP (XEXP (x, 0), 0);
5377 fprintf (file, "%s(%s)",
5378 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5380 else
5381 output_address (XEXP (x, 0));
5382 break;
5383 default:
5384 output_address (XEXP (x, 0));
5385 break;
5388 else
5389 output_addr_const (file, x);
 5392 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5394 void
5395 pa_output_global_address (FILE *file, rtx x, int round_constant)
5398 /* Imagine (high (const (plus ...))). */
5399 if (GET_CODE (x) == HIGH)
5400 x = XEXP (x, 0);
5402 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5403 output_addr_const (file, x);
5404 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5406 output_addr_const (file, x);
5407 fputs ("-$global$", file);
5409 else if (GET_CODE (x) == CONST)
5411 const char *sep = "";
5412 int offset = 0; /* assembler wants -$global$ at end */
5413 rtx base = NULL_RTX;
5415 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5417 case LABEL_REF:
5418 case SYMBOL_REF:
5419 base = XEXP (XEXP (x, 0), 0);
5420 output_addr_const (file, base);
5421 break;
5422 case CONST_INT:
5423 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5424 break;
5425 default:
5426 gcc_unreachable ();
5429 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5431 case LABEL_REF:
5432 case SYMBOL_REF:
5433 base = XEXP (XEXP (x, 0), 1);
5434 output_addr_const (file, base);
5435 break;
5436 case CONST_INT:
5437 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5438 break;
5439 default:
5440 gcc_unreachable ();
5443 /* How bogus. The compiler is apparently responsible for
5444 rounding the constant if it uses an LR field selector.
5446 The linker and/or assembler seem a better place since
5447 they have to do this kind of thing already.
5449 If we fail to do this, HP's optimizing linker may eliminate
5450 an addil, but not update the ldw/stw/ldo instruction that
5451 uses the result of the addil. */
5452 if (round_constant)
5453 offset = ((offset + 0x1000) & ~0x1fff);
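	/* Editor's example: this rounds to the nearest multiple of 0x2000
	   (8k); an offset of 0x2345 becomes 0x2000 and 0x3345 becomes
	   0x4000, matching the rounding an LR field selector applies.  */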
5455 switch (GET_CODE (XEXP (x, 0)))
5457 case PLUS:
5458 if (offset < 0)
5460 offset = -offset;
5461 sep = "-";
5463 else
5464 sep = "+";
5465 break;
5467 case MINUS:
5468 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5469 sep = "-";
5470 break;
5472 default:
5473 gcc_unreachable ();
5476 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5477 fputs ("-$global$", file);
5478 if (offset)
5479 fprintf (file, "%s%d", sep, offset);
5481 else
5482 output_addr_const (file, x);
5485 /* Output boilerplate text to appear at the beginning of the file.
5486 There are several possible versions. */
5487 #define aputs(x) fputs(x, asm_out_file)
5488 static inline void
5489 pa_file_start_level (void)
5491 if (TARGET_64BIT)
5492 aputs ("\t.LEVEL 2.0w\n");
5493 else if (TARGET_PA_20)
5494 aputs ("\t.LEVEL 2.0\n");
5495 else if (TARGET_PA_11)
5496 aputs ("\t.LEVEL 1.1\n");
5497 else
5498 aputs ("\t.LEVEL 1.0\n");
5501 static inline void
5502 pa_file_start_space (int sortspace)
5504 aputs ("\t.SPACE $PRIVATE$");
5505 if (sortspace)
5506 aputs (",SORT=16");
5507 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5508 if (flag_tm)
5509 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5510 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5511 "\n\t.SPACE $TEXT$");
5512 if (sortspace)
5513 aputs (",SORT=8");
5514 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5515 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5518 static inline void
5519 pa_file_start_file (int want_version)
5521 if (write_symbols != NO_DEBUG)
5523 output_file_directive (asm_out_file, main_input_filename);
5524 if (want_version)
5525 aputs ("\t.version\t\"01.01\"\n");
5529 static inline void
5530 pa_file_start_mcount (const char *aswhat)
5532 if (profile_flag)
5533 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5536 static void
5537 pa_elf_file_start (void)
5539 pa_file_start_level ();
5540 pa_file_start_mcount ("ENTRY");
5541 pa_file_start_file (0);
5544 static void
5545 pa_som_file_start (void)
5547 pa_file_start_level ();
5548 pa_file_start_space (0);
5549 aputs ("\t.IMPORT $global$,DATA\n"
5550 "\t.IMPORT $$dyncall,MILLICODE\n");
5551 pa_file_start_mcount ("CODE");
5552 pa_file_start_file (0);
5555 static void
5556 pa_linux_file_start (void)
5558 pa_file_start_file (1);
5559 pa_file_start_level ();
5560 pa_file_start_mcount ("CODE");
5563 static void
5564 pa_hpux64_gas_file_start (void)
5566 pa_file_start_level ();
5567 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5568 if (profile_flag)
5569 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5570 #endif
5571 pa_file_start_file (1);
5574 static void
5575 pa_hpux64_hpas_file_start (void)
5577 pa_file_start_level ();
5578 pa_file_start_space (1);
5579 pa_file_start_mcount ("CODE");
5580 pa_file_start_file (0);
5582 #undef aputs
5584 /* Search the deferred plabel list for SYMBOL and return its internal
5585 label. If an entry for SYMBOL is not found, a new entry is created. */
 5587 rtx
 5588 pa_get_deferred_plabel (rtx symbol)
5590 const char *fname = XSTR (symbol, 0);
5591 size_t i;
5593 /* See if we have already put this function on the list of deferred
 5594 plabels. This list is generally small, so a linear search is not
 5595 too ugly. If it proves too slow, replace it with something faster. */
5596 for (i = 0; i < n_deferred_plabels; i++)
5597 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5598 break;
5600 /* If the deferred plabel list is empty, or this entry was not found
5601 on the list, create a new entry on the list. */
5602 if (deferred_plabels == NULL || i == n_deferred_plabels)
5604 tree id;
5606 if (deferred_plabels == 0)
5607 deferred_plabels = ggc_alloc_deferred_plabel ();
5608 else
5609 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5610 deferred_plabels,
5611 n_deferred_plabels + 1);
5613 i = n_deferred_plabels++;
5614 deferred_plabels[i].internal_label = gen_label_rtx ();
5615 deferred_plabels[i].symbol = symbol;
5617 /* Gross. We have just implicitly taken the address of this
5618 function. Mark it in the same manner as assemble_name. */
5619 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5620 if (id)
5621 mark_referenced (id);
5624 return deferred_plabels[i].internal_label;
5627 static void
5628 output_deferred_plabels (void)
5630 size_t i;
5632 /* If we have some deferred plabels, then we need to switch into the
 5633 data or readonly data section, and align it to a 4-byte (8-byte for TARGET_64BIT) boundary
5634 before outputting the deferred plabels. */
5635 if (n_deferred_plabels)
5637 switch_to_section (flag_pic ? data_section : readonly_data_section);
5638 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5641 /* Now output the deferred plabels. */
5642 for (i = 0; i < n_deferred_plabels; i++)
5644 targetm.asm_out.internal_label (asm_out_file, "L",
5645 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5646 assemble_integer (deferred_plabels[i].symbol,
5647 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5651 /* Initialize optabs to point to emulation routines. */
5653 static void
5654 pa_init_libfuncs (void)
5656 if (HPUX_LONG_DOUBLE_LIBRARY)
5658 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5659 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5660 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5661 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5662 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5663 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5664 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5665 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5666 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5668 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5669 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5670 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5671 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5672 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5673 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5674 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5676 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5677 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5678 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5679 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5681 set_conv_libfunc (sfix_optab, SImode, TFmode,
5682 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5683 : "_U_Qfcnvfxt_quad_to_sgl");
5684 set_conv_libfunc (sfix_optab, DImode, TFmode,
5685 "_U_Qfcnvfxt_quad_to_dbl");
5686 set_conv_libfunc (ufix_optab, SImode, TFmode,
5687 "_U_Qfcnvfxt_quad_to_usgl");
5688 set_conv_libfunc (ufix_optab, DImode, TFmode,
5689 "_U_Qfcnvfxt_quad_to_udbl");
5691 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5692 "_U_Qfcnvxf_sgl_to_quad");
5693 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5694 "_U_Qfcnvxf_dbl_to_quad");
5695 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5696 "_U_Qfcnvxf_usgl_to_quad");
5697 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5698 "_U_Qfcnvxf_udbl_to_quad");
5701 if (TARGET_SYNC_LIBCALL)
5702 init_sync_libfuncs (UNITS_PER_WORD);
5705 /* HP's millicode routines mean something special to the assembler.
5706 Keep track of which ones we have used. */
5708 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5709 static void import_milli (enum millicodes);
5710 static char imported[(int) end1000];
5711 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5712 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5713 #define MILLI_START 10
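/* Editor's note: ".IMPORT $$" is 10 characters, so MILLI_START is the
   offset of the "...." placeholder in import_string.  The strncpy in
   import_milli overwrites those four dots with a name such as "mulI",
   producing ".IMPORT $$mulI,MILLICODE".  */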
5715 static void
5716 import_milli (enum millicodes code)
5718 char str[sizeof (import_string)];
5720 if (!imported[(int) code])
5722 imported[(int) code] = 1;
5723 strcpy (str, import_string);
5724 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5725 output_asm_insn (str, 0);
5729 /* The register constraints have put the operands and return value in
5730 the proper registers. */
5732 const char *
5733 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5735 import_milli (mulI);
5736 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5739 /* Emit the rtl for doing a division by a constant. */
5741 /* Do magic division millicodes exist for this value? */
 5742 const int pa_magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
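/* Editor's note: the nonzero entries mark divisors 3, 5, 6, 7, 9, 10,
   12, 14 and 15 -- the values for which $$divI_<n> and $$divU_<n>
   millicode entry points are imported below.  */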
5744 /* We'll use an array to keep track of the magic millicodes and
5745 whether or not we've used them already. [n][0] is signed, [n][1] is
5746 unsigned. */
5748 static int div_milli[16][2];
5750 int
5751 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5753 if (GET_CODE (operands[2]) == CONST_INT
5754 && INTVAL (operands[2]) > 0
5755 && INTVAL (operands[2]) < 16
5756 && pa_magic_milli[INTVAL (operands[2])])
5758 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5760 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5761 emit
5762 (gen_rtx_PARALLEL
5763 (VOIDmode,
5764 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5765 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5766 SImode,
5767 gen_rtx_REG (SImode, 26),
5768 operands[2])),
5769 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5770 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5771 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5772 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5773 gen_rtx_CLOBBER (VOIDmode, ret))));
5774 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5775 return 1;
5777 return 0;
5780 const char *
5781 pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
5783 int divisor;
5785 /* If the divisor is a constant, try to use one of the special
5786 opcodes. */
5787 if (GET_CODE (operands[0]) == CONST_INT)
5789 static char buf[100];
5790 divisor = INTVAL (operands[0]);
5791 if (!div_milli[divisor][unsignedp])
5793 div_milli[divisor][unsignedp] = 1;
5794 if (unsignedp)
5795 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5796 else
5797 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5799 if (unsignedp)
5801 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5802 INTVAL (operands[0]));
5803 return pa_output_millicode_call (insn,
5804 gen_rtx_SYMBOL_REF (SImode, buf));
5806 else
5808 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5809 INTVAL (operands[0]));
5810 return pa_output_millicode_call (insn,
5811 gen_rtx_SYMBOL_REF (SImode, buf));
5814 /* Divisor isn't a special constant. */
5815 else
5817 if (unsignedp)
5819 import_milli (divU);
5820 return pa_output_millicode_call (insn,
5821 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5823 else
5825 import_milli (divI);
5826 return pa_output_millicode_call (insn,
5827 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
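/* Worked example (hypothetical operands): the first signed division by
   7 emits ".IMPORT $$divI_7,MILLICODE" followed by a millicode call to
   $$divI_7; per pa_emit_hpdiv_const above, the dividend is passed in
   %r26 and the quotient is copied out of %r29.  */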
5832 /* Output a $$rem millicode to do mod. */
5834 const char *
5835 pa_output_mod_insn (int unsignedp, rtx insn)
5837 if (unsignedp)
5839 import_milli (remU);
5840 return pa_output_millicode_call (insn,
5841 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5843 else
5845 import_milli (remI);
5846 return pa_output_millicode_call (insn,
5847 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5851 void
5852 pa_output_arg_descriptor (rtx call_insn)
5854 const char *arg_regs[4];
5855 enum machine_mode arg_mode;
5856 rtx link;
5857 int i, output_flag = 0;
5858 int regno;
5860 /* We neither need nor want argument location descriptors for the
5861 64-bit runtime environment or the ELF32 environment. */
5862 if (TARGET_64BIT || TARGET_ELF32)
5863 return;
5865 for (i = 0; i < 4; i++)
5866 arg_regs[i] = 0;
5868 /* Specify explicitly that no argument relocations should take place
5869 if using the portable runtime calling conventions. */
5870 if (TARGET_PORTABLE_RUNTIME)
5872 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5873 asm_out_file);
5874 return;
5877 gcc_assert (CALL_P (call_insn));
5878 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5879 link; link = XEXP (link, 1))
5881 rtx use = XEXP (link, 0);
5883 if (! (GET_CODE (use) == USE
5884 && GET_CODE (XEXP (use, 0)) == REG
5885 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5886 continue;
5888 arg_mode = GET_MODE (XEXP (use, 0));
5889 regno = REGNO (XEXP (use, 0));
5890 if (regno >= 23 && regno <= 26)
5892 arg_regs[26 - regno] = "GR";
5893 if (arg_mode == DImode)
5894 arg_regs[25 - regno] = "GR";
5896 else if (regno >= 32 && regno <= 39)
5898 if (arg_mode == SFmode)
5899 arg_regs[(regno - 32) / 2] = "FR";
5900 else
5902 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5903 arg_regs[(regno - 34) / 2] = "FR";
5904 arg_regs[(regno - 34) / 2 + 1] = "FU";
5905 #else
5906 arg_regs[(regno - 34) / 2] = "FU";
5907 arg_regs[(regno - 34) / 2 + 1] = "FR";
5908 #endif
5912 fputs ("\t.CALL ", asm_out_file);
5913 for (i = 0; i < 4; i++)
5915 if (arg_regs[i])
5917 if (output_flag++)
5918 fputc (',', asm_out_file);
5919 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5922 fputc ('\n', asm_out_file);
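/* For illustration (hypothetical call): passing two ints in %r26 and
   %r25 under the 32-bit SOM runtime would produce

       .CALL ARGW0=GR,ARGW1=GR

   since arg_regs[26 - regno] maps %r26 to ARGW0 and %r25 to ARGW1.  */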
5925 /* Inform reload about cases where moving X with a mode MODE to or from
5926 a register in RCLASS requires an extra scratch or immediate register.
5927 Return the class needed for the immediate register. */
5929 static reg_class_t
5930 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5931 enum machine_mode mode, secondary_reload_info *sri)
5933 int regno;
5934 enum reg_class rclass = (enum reg_class) rclass_i;
5936 /* Handle the easy stuff first. */
5937 if (rclass == R1_REGS)
5938 return NO_REGS;
5940 if (REG_P (x))
5942 regno = REGNO (x);
5943 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5944 return NO_REGS;
5946 else
5947 regno = -1;
5949 /* If we have something like (mem (mem (...))), we can safely assume the
5950 inner MEM will end up in a general register after reloading, so there's
5951 no need for a secondary reload. */
5952 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5953 return NO_REGS;
5955 /* Trying to load a constant into a FP register during PIC code
5956 generation requires %r1 as a scratch register. For float modes,
5957 the only legitimate constant is CONST0_RTX. However, there are
5958 a few patterns that accept constant double operands. */
5959 if (flag_pic
5960 && FP_REG_CLASS_P (rclass)
5961 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5963 switch (mode)
5965 case SImode:
5966 sri->icode = CODE_FOR_reload_insi_r1;
5967 break;
5969 case DImode:
5970 sri->icode = CODE_FOR_reload_indi_r1;
5971 break;
5973 case SFmode:
5974 sri->icode = CODE_FOR_reload_insf_r1;
5975 break;
5977 case DFmode:
5978 sri->icode = CODE_FOR_reload_indf_r1;
5979 break;
5981 default:
5982 gcc_unreachable ();
5984 return NO_REGS;
5987 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5988 register when we're generating PIC code or when the operand isn't
5989 read-only. */
5990 if (pa_symbolic_expression_p (x))
5992 if (GET_CODE (x) == HIGH)
5993 x = XEXP (x, 0);
5995 if (flag_pic || !read_only_operand (x, VOIDmode))
5997 switch (mode)
5999 case SImode:
6000 sri->icode = CODE_FOR_reload_insi_r1;
6001 break;
6003 case DImode:
6004 sri->icode = CODE_FOR_reload_indi_r1;
6005 break;
6007 default:
6008 gcc_unreachable ();
6010 return NO_REGS;
6014 /* Profiling showed the PA port spends about 1.3% of its compilation
6015 time in true_regnum from calls inside pa_secondary_reload_class. */
6016 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6017 regno = true_regnum (x);
6019 /* Handle reloads for floating point loads and stores. */
6020 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6021 && FP_REG_CLASS_P (rclass))
6023 if (MEM_P (x))
6025 x = XEXP (x, 0);
6027 /* We don't need a secondary reload for indexed memory addresses.
6029 When INT14_OK_STRICT is true, it might appear that we could
6030 directly allow register indirect memory addresses. However,
6031 this doesn't work because we don't support SUBREGs in
6032 floating-point register copies and reload doesn't tell us
6033 when it's going to use a SUBREG. */
6034 if (IS_INDEX_ADDR_P (x))
6035 return NO_REGS;
6038 /* Request a secondary reload with a general scratch register
6039 for everything else. ??? Could symbolic operands be handled
6040 directly when generating non-pic PA 2.0 code? */
6041 sri->icode = (in_p
6042 ? direct_optab_handler (reload_in_optab, mode)
6043 : direct_optab_handler (reload_out_optab, mode));
6044 return NO_REGS;
6047 /* A SAR<->FP register copy requires an intermediate general register
6048 and secondary memory. We need a secondary reload with a general
6049 scratch register for spills. */
6050 if (rclass == SHIFT_REGS)
6052 /* Handle spill. */
6053 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6055 sri->icode = (in_p
6056 ? direct_optab_handler (reload_in_optab, mode)
6057 : direct_optab_handler (reload_out_optab, mode));
6058 return NO_REGS;
6061 /* Handle FP copy. */
6062 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6063 return GENERAL_REGS;
6066 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6067 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6068 && FP_REG_CLASS_P (rclass))
6069 return GENERAL_REGS;
6071 return NO_REGS;
6074 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6075 is only marked as live on entry by df-scan when it is a fixed
6076 register. It isn't a fixed register in the 64-bit runtime,
6077 so we need to mark it here. */
6079 static void
6080 pa_extra_live_on_entry (bitmap regs)
6082 if (TARGET_64BIT)
6083 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6086 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6087 to prevent it from being deleted. */
6089 rtx
6090 pa_eh_return_handler_rtx (void)
6092 rtx tmp;
6094 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6095 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6096 tmp = gen_rtx_MEM (word_mode, tmp);
6097 tmp->volatil = 1;
6098 return tmp;
6101 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6102 by invisible reference. As a GCC extension, we also pass anything
6103 with a zero or variable size by reference.
6105 The 64-bit runtime does not describe passing any types by invisible
6106 reference. The internals of GCC can't currently handle passing
6107 empty structures, and zero or variable length arrays when they are
6108 not passed entirely on the stack or by reference. Thus, as a GCC
6109 extension, we pass these types by reference. The HP compiler doesn't
6110 support these types, so hopefully there shouldn't be any compatibility
6111 issues. This may have to be revisited when HP releases a C99 compiler
6112 or updates the ABI. */
6114 static bool
6115 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6116 enum machine_mode mode, const_tree type,
6117 bool named ATTRIBUTE_UNUSED)
6119 HOST_WIDE_INT size;
6121 if (type)
6122 size = int_size_in_bytes (type);
6123 else
6124 size = GET_MODE_SIZE (mode);
6126 if (TARGET_64BIT)
6127 return size <= 0;
6128 else
6129 return size <= 0 || size > 8;
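/* Examples of the rule above: in the 32-bit runtime, a 12-byte struct
   (size > 8) and a zero-sized struct are both passed by reference; in
   the 64-bit runtime, only zero and variable-sized objects are.  */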
6132 enum direction
6133 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6135 if (mode == BLKmode
6136 || (TARGET_64BIT
6137 && type
6138 && (AGGREGATE_TYPE_P (type)
6139 || TREE_CODE (type) == COMPLEX_TYPE
6140 || TREE_CODE (type) == VECTOR_TYPE)))
6142 /* Return none if justification is not required. */
6143 if (type
6144 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6145 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6146 return none;
6148 /* The directions set here are ignored when a BLKmode argument larger
6149 than a word is placed in a register. Different code is used for
6150 the stack and registers. This makes it difficult to have a
6151 consistent data representation for both the stack and registers.
6152 For both runtimes, the justification and padding for arguments on
6153 the stack and in registers should be identical. */
6154 if (TARGET_64BIT)
6155 /* The 64-bit runtime specifies left justification for aggregates. */
6156 return upward;
6157 else
6158 /* The 32-bit runtime architecture specifies right justification.
6159 When the argument is passed on the stack, the argument is padded
6160 with garbage on the left. The HP compiler pads with zeros. */
6161 return downward;
6164 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6165 return downward;
6166 else
6167 return none;
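/* Worked example: a 3-byte BLKmode struct is padded downward (right
   justified in its word) in the 32-bit runtime but upward (left
   justified) in the 64-bit runtime, per the branches above.  */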
6171 /* Do what is necessary for `va_start'. We look at the current function
6172 to determine if stdargs or varargs is used and fill in an initial
6173 va_list. A pointer to the first anonymous argument is returned. */
6175 static rtx
6176 hppa_builtin_saveregs (void)
6178 rtx offset, dest;
6179 tree fntype = TREE_TYPE (current_function_decl);
6180 int argadj = ((!stdarg_p (fntype))
6181 ? UNITS_PER_WORD : 0);
6183 if (argadj)
6184 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6185 else
6186 offset = crtl->args.arg_offset_rtx;
6188 if (TARGET_64BIT)
6190 int i, off;
6192 /* Adjust for varargs/stdarg differences. */
6193 if (argadj)
6194 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6195 else
6196 offset = crtl->args.arg_offset_rtx;
6198 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6199 from the incoming arg pointer and growing to larger addresses. */
6200 for (i = 26, off = -64; i >= 19; i--, off += 8)
6201 emit_move_insn (gen_rtx_MEM (word_mode,
6202 plus_constant (Pmode,
6203 arg_pointer_rtx, off)),
6204 gen_rtx_REG (word_mode, i));
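/* So %r26 lands at -64, %r25 at -56, ..., %r19 at -8 from the
   incoming arg pointer (i counts down while off steps up by 8).  */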
6206 /* The incoming args pointer points just beyond the flushback area;
6207 normally this is not a serious concern. However, when we are doing
6208 varargs/stdargs we want to make the arg pointer point to the start
6209 of the incoming argument area. */
6210 emit_move_insn (virtual_incoming_args_rtx,
6211 plus_constant (Pmode, arg_pointer_rtx, -64));
6213 /* Now return a pointer to the first anonymous argument. */
6214 return copy_to_reg (expand_binop (Pmode, add_optab,
6215 virtual_incoming_args_rtx,
6216 offset, 0, 0, OPTAB_LIB_WIDEN));
6219 /* Store general registers on the stack. */
6220 dest = gen_rtx_MEM (BLKmode,
6221 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6222 -16));
6223 set_mem_alias_set (dest, get_varargs_alias_set ());
6224 set_mem_align (dest, BITS_PER_WORD);
6225 move_block_from_reg (23, dest, 4);
6227 /* move_block_from_reg will emit code to store the argument registers
6228 individually as scalar stores.
6230 However, other insns may later load from the same addresses for
6231 a structure load (passing a struct to a varargs routine).
6233 The alias code assumes that such aliasing can never happen, so we
6234 have to keep memory referencing insns from moving up beyond the
6235 last argument register store. So we emit a blockage insn here. */
6236 emit_insn (gen_blockage ());
6238 return copy_to_reg (expand_binop (Pmode, add_optab,
6239 crtl->args.internal_arg_pointer,
6240 offset, 0, 0, OPTAB_LIB_WIDEN));
6243 static void
6244 hppa_va_start (tree valist, rtx nextarg)
6246 nextarg = expand_builtin_saveregs ();
6247 std_expand_builtin_va_start (valist, nextarg);
6250 static tree
6251 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6252 gimple_seq *post_p)
6254 if (TARGET_64BIT)
6256 /* Args grow upward. We can use the generic routines. */
6257 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6259 else /* !TARGET_64BIT */
6261 tree ptr = build_pointer_type (type);
6262 tree valist_type;
6263 tree t, u;
6264 unsigned int size, ofs;
6265 bool indirect;
6267 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6268 if (indirect)
6270 type = ptr;
6271 ptr = build_pointer_type (type);
6273 size = int_size_in_bytes (type);
6274 valist_type = TREE_TYPE (valist);
6276 /* Args grow down. Not handled by generic routines. */
6278 u = fold_convert (sizetype, size_in_bytes (type));
6279 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6280 t = fold_build_pointer_plus (valist, u);
6282 /* Align to 4 or 8 byte boundary depending on argument size. */
6284 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6285 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6286 t = fold_convert (valist_type, t);
6288 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6290 ofs = (8 - size) % 4;
6291 if (ofs != 0)
6292 t = fold_build_pointer_plus_hwi (t, ofs);
6294 t = fold_convert (ptr, t);
6295 t = build_va_arg_indirect_ref (t);
6297 if (indirect)
6298 t = build_va_arg_indirect_ref (t);
6300 return t;
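/* Worked example of the 32-bit path (assuming a word-aligned valist):
   fetching a 1-byte char computes valist - 1, rounds down to a 4-byte
   boundary, then adds ofs = (8 - 1) % 4 = 3, so the byte is read from
   the rightmost byte of the word below valist, matching the
   right-justified padding used for small arguments.  */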
6304 /* True if MODE is valid for the target. By "valid", we mean able to
6305 be manipulated in non-trivial ways. In particular, this means all
6306 the arithmetic is supported.
6308 Currently, TImode is not valid as the HP 64-bit runtime documentation
6309 doesn't document the alignment and calling conventions for this type.
6310 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6311 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6313 static bool
6314 pa_scalar_mode_supported_p (enum machine_mode mode)
6316 int precision = GET_MODE_PRECISION (mode);
6318 switch (GET_MODE_CLASS (mode))
6320 case MODE_PARTIAL_INT:
6321 case MODE_INT:
6322 if (precision == CHAR_TYPE_SIZE)
6323 return true;
6324 if (precision == SHORT_TYPE_SIZE)
6325 return true;
6326 if (precision == INT_TYPE_SIZE)
6327 return true;
6328 if (precision == LONG_TYPE_SIZE)
6329 return true;
6330 if (precision == LONG_LONG_TYPE_SIZE)
6331 return true;
6332 return false;
6334 case MODE_FLOAT:
6335 if (precision == FLOAT_TYPE_SIZE)
6336 return true;
6337 if (precision == DOUBLE_TYPE_SIZE)
6338 return true;
6339 if (precision == LONG_DOUBLE_TYPE_SIZE)
6340 return true;
6341 return false;
6343 case MODE_DECIMAL_FLOAT:
6344 return false;
6346 default:
6347 gcc_unreachable ();
6351 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6352 it branches into the delay slot. Otherwise, return FALSE. */
6354 static bool
6355 branch_to_delay_slot_p (rtx insn)
6357 rtx jump_insn;
6359 if (dbr_sequence_length ())
6360 return FALSE;
6362 jump_insn = next_active_insn (JUMP_LABEL (insn));
6363 while (insn)
6365 insn = next_active_insn (insn);
6366 if (jump_insn == insn)
6367 return TRUE;
6369 /* We can't rely on the length of asms. So, we return FALSE when
6370 the branch is followed by an asm. */
6371 if (!insn
6372 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6373 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6374 || get_attr_length (insn) > 0)
6375 break;
6378 return FALSE;
6381 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6383 This occurs when INSN has an unfilled delay slot and is followed
6384 by an asm. Disaster can occur if the asm is empty and the jump
6385 branches into the delay slot. So, we add a nop in the delay slot
6386 when this occurs. */
6388 static bool
6389 branch_needs_nop_p (rtx insn)
6391 rtx jump_insn;
6393 if (dbr_sequence_length ())
6394 return FALSE;
6396 jump_insn = next_active_insn (JUMP_LABEL (insn));
6397 while (insn)
6399 insn = next_active_insn (insn);
6400 if (!insn || jump_insn == insn)
6401 return TRUE;
6403 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6404 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6405 && get_attr_length (insn) > 0)
6406 break;
6409 return FALSE;
6412 /* Return TRUE if INSN, a forward jump insn, can use nullification
6413 to skip the following instruction. This avoids an extra cycle due
6414 to a mis-predicted branch when we fall through. */
6416 static bool
6417 use_skip_p (rtx insn)
6419 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6421 while (insn)
6423 insn = next_active_insn (insn);
6425 /* We can't rely on the length of asms, so we can't skip asms. */
6426 if (!insn
6427 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6428 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6429 break;
6430 if (get_attr_length (insn) == 4
6431 && jump_insn == next_active_insn (insn))
6432 return TRUE;
6433 if (get_attr_length (insn) > 0)
6434 break;
6437 return FALSE;
6440 /* This routine handles all the normal conditional branch sequences we
6441 might need to generate. It handles compare immediate vs compare
6442 register, nullification of delay slots, varying length branches,
6443 negated branches, and all combinations of the above. It returns the
6444 output appropriate to emit the branch corresponding to all given
6445 parameters. */
6447 const char *
6448 pa_output_cbranch (rtx *operands, int negated, rtx insn)
6450 static char buf[100];
6451 bool useskip;
6452 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6453 int length = get_attr_length (insn);
6454 int xdelay;
6456 /* A conditional branch to the following instruction (e.g. the delay slot)
6457 is asking for a disaster. This can happen when not optimizing and
6458 when jump optimization fails.
6460 While it is usually safe to emit nothing, this can fail if the
6461 preceding instruction is a nullified branch with an empty delay
6462 slot and the same branch target as this branch. We could check
6463 for this but jump optimization should eliminate nop jumps. It
6464 is always safe to emit a nop. */
6465 if (branch_to_delay_slot_p (insn))
6466 return "nop";
6468 /* The doubleword form of the cmpib instruction doesn't have the LEU
6469 and GTU conditions while the cmpb instruction does. Since we accept
6470 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6471 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6472 operands[2] = gen_rtx_REG (DImode, 0);
6473 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6474 operands[1] = gen_rtx_REG (DImode, 0);
6476 /* If this is a long branch with its delay slot unfilled, set `nullify'
6477 as it can nullify the delay slot and save a nop. */
6478 if (length == 8 && dbr_sequence_length () == 0)
6479 nullify = 1;
6481 /* If this is a short forward conditional branch which did not get
6482 its delay slot filled, the delay slot can still be nullified. */
6483 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6484 nullify = forward_branch_p (insn);
6486 /* A forward branch over a single nullified insn can be done with a
6487 comclr instruction. This avoids a single-cycle penalty due to a
6488 mis-predicted branch if we fall through (branch not taken). */
6489 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6491 switch (length)
6493 /* All short conditional branches except backwards with an unfilled
6494 delay slot. */
6495 case 4:
6496 if (useskip)
6497 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6498 else
6499 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6500 if (GET_MODE (operands[1]) == DImode)
6501 strcat (buf, "*");
6502 if (negated)
6503 strcat (buf, "%B3");
6504 else
6505 strcat (buf, "%S3");
6506 if (useskip)
6507 strcat (buf, " %2,%r1,%%r0");
6508 else if (nullify)
6510 if (branch_needs_nop_p (insn))
6511 strcat (buf, ",n %2,%r1,%0%#");
6512 else
6513 strcat (buf, ",n %2,%r1,%0");
6515 else
6516 strcat (buf, " %2,%r1,%0");
6517 break;
6519 /* All long conditionals. Note a short backward branch with an
6520 unfilled delay slot is treated just like a long backward branch
6521 with an unfilled delay slot. */
6522 case 8:
6523 /* Handle weird backwards branch with a filled delay slot
6524 which is nullified. */
6525 if (dbr_sequence_length () != 0
6526 && ! forward_branch_p (insn)
6527 && nullify)
6529 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6530 if (GET_MODE (operands[1]) == DImode)
6531 strcat (buf, "*");
6532 if (negated)
6533 strcat (buf, "%S3");
6534 else
6535 strcat (buf, "%B3");
6536 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6538 /* Handle short backwards branch with an unfilled delay slot.
6539 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6540 taken and untaken branches. */
6541 else if (dbr_sequence_length () == 0
6542 && ! forward_branch_p (insn)
6543 && INSN_ADDRESSES_SET_P ()
6544 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6545 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6547 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6548 if (GET_MODE (operands[1]) == DImode)
6549 strcat (buf, "*");
6550 if (negated)
6551 strcat (buf, "%B3 %2,%r1,%0%#");
6552 else
6553 strcat (buf, "%S3 %2,%r1,%0%#");
6555 else
6557 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6558 if (GET_MODE (operands[1]) == DImode)
6559 strcat (buf, "*");
6560 if (negated)
6561 strcat (buf, "%S3");
6562 else
6563 strcat (buf, "%B3");
6564 if (nullify)
6565 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6566 else
6567 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6569 break;
6571 default:
6572 /* The reversed conditional branch must branch over one additional
6573 instruction if the delay slot is filled and needs to be extracted
6574 by pa_output_lbranch. If the delay slot is empty or this is a
6575 nullified forward branch, the instruction after the reversed
6576 condition branch must be nullified. */
6577 if (dbr_sequence_length () == 0
6578 || (nullify && forward_branch_p (insn)))
6580 nullify = 1;
6581 xdelay = 0;
6582 operands[4] = GEN_INT (length);
6584 else
6586 xdelay = 1;
6587 operands[4] = GEN_INT (length + 4);
6590 /* Create a reversed conditional branch which branches around
6591 the following insns. */
6592 if (GET_MODE (operands[1]) != DImode)
6594 if (nullify)
6596 if (negated)
6597 strcpy (buf,
6598 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6599 else
6600 strcpy (buf,
6601 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6603 else
6605 if (negated)
6606 strcpy (buf,
6607 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6608 else
6609 strcpy (buf,
6610 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6613 else
6615 if (nullify)
6617 if (negated)
6618 strcpy (buf,
6619 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6620 else
6621 strcpy (buf,
6622 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6624 else
6626 if (negated)
6627 strcpy (buf,
6628 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6629 else
6630 strcpy (buf,
6631 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6635 output_asm_insn (buf, operands);
6636 return pa_output_lbranch (operands[0], insn, xdelay);
6638 return buf;
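/* A concrete instance (hypothetical operands): for a 4-byte forward
   branch with a nullified, unfilled delay slot, the case 4 template
   above expands to something like

       cmpb,<,n %r25,%r26,L$3

   (comb,<,n with the pre-2.0 mnemonics).  */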
6641 /* This routine handles output of long unconditional branches that
6642 exceed the maximum range of a simple branch instruction. Since
6643 we don't have a register available for the branch, we save register
6644 %r1 in the frame marker, load the branch destination DEST into %r1,
6645 execute the branch, and restore %r1 in the delay slot of the branch.
6647 Since long branches may have an insn in the delay slot and the
6648 delay slot is used to restore %r1, we in general need to extract
6649 this insn and execute it before the branch. However, to facilitate
6650 use of this function by conditional branches, we also provide an
6651 option to not extract the delay insn so that it will be emitted
6652 after the long branch. So, if there is an insn in the delay slot,
6653 it is extracted if XDELAY is nonzero.
6655 The lengths of the various long-branch sequences are 20, 16 and 24
6656 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6658 const char *
6659 pa_output_lbranch (rtx dest, rtx insn, int xdelay)
6661 rtx xoperands[2];
6663 xoperands[0] = dest;
6665 /* First, free up the delay slot. */
6666 if (xdelay && dbr_sequence_length () != 0)
6668 /* We can't handle a jump in the delay slot. */
6669 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6671 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6672 optimize, 0, NULL);
6674 /* Now delete the delay insn. */
6675 SET_INSN_DELETED (NEXT_INSN (insn));
6678 /* Output an insn to save %r1. The runtime documentation doesn't
6679 specify whether the "Clean Up" slot in the caller's frame can
6680 be clobbered by the callee. It isn't copied by HP's builtin
6681 alloca, so this suggests that it can be clobbered if necessary.
6682 The "Static Link" location is copied by HP builtin alloca, so
6683 we avoid using it. Using the cleanup slot might be a problem
6684 if we have to interoperate with languages that pass cleanup
6685 information. However, it should be possible to handle these
6686 situations with GCC's asm feature.
6688 The "Current RP" slot is reserved for the called procedure, so
6689 we try to use it when we don't have a frame of our own. It's
6690 rather unlikely that we won't have a frame when we need to emit
6691 a very long branch.
6693 Really the way to go long term is a register scavenger; go to
6694 the target of the jump and find a register which we can use
6695 as a scratch to hold the value in %r1. Then, we wouldn't have
6696 to free up the delay slot or clobber a slot that may be needed
6697 for other purposes. */
6698 if (TARGET_64BIT)
6700 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6701 /* Use the return pointer slot in the frame marker. */
6702 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6703 else
6704 /* Use the slot at -40 in the frame marker since HP builtin
6705 alloca doesn't copy it. */
6706 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6708 else
6710 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6711 /* Use the return pointer slot in the frame marker. */
6712 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6713 else
6714 /* Use the "Clean Up" slot in the frame marker. In GCC,
6715 the only other use of this location is for copying a
6716 floating point double argument from a floating-point
6717 register to two general registers. The copy is done
6718 as an "atomic" operation when outputting a call, so it
6719 won't interfere with our using the location here. */
6720 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6723 if (TARGET_PORTABLE_RUNTIME)
6725 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6726 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6727 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6729 else if (flag_pic)
6731 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6732 if (TARGET_SOM || !TARGET_GAS)
6734 xoperands[1] = gen_label_rtx ();
6735 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6736 targetm.asm_out.internal_label (asm_out_file, "L",
6737 CODE_LABEL_NUMBER (xoperands[1]));
6738 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6740 else
6742 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6743 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6745 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6747 else
6748 /* Now output a very long branch to the original target. */
6749 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6751 /* Now restore the value of %r1 in the delay slot. */
6752 if (TARGET_64BIT)
6754 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6755 return "ldd -16(%%r30),%%r1";
6756 else
6757 return "ldd -40(%%r30),%%r1";
6759 else
6761 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6762 return "ldw -20(%%r30),%%r1";
6763 else
6764 return "ldw -12(%%r30),%%r1";
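/* Putting the pieces together, the non-PIC 32-bit case is the 16-byte
   sequence mentioned above (dest is a stand-in for the target):

       stw %r1,-20(%r30)          ; save %r1 (no-frame case)
       ldil L'dest,%r1
       be R'dest(%sr4,%r1)
       ldw -20(%r30),%r1          ; restore %r1 in the delay slot  */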
6768 /* This routine handles all the branch-on-bit conditional branch sequences we
6769 might need to generate. It handles nullification of delay slots,
6770 varying length branches, negated branches and all combinations of the
6771 above. It returns the appropriate output template to emit the branch. */
6773 const char *
6774 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6776 static char buf[100];
6777 bool useskip;
6778 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6779 int length = get_attr_length (insn);
6780 int xdelay;
6782 /* A conditional branch to the following instruction (e.g. the delay slot) is
6783 asking for a disaster. I do not think this can happen as this pattern
6784 is only used when optimizing; jump optimization should eliminate the
6785 jump. But be prepared just in case. */
6787 if (branch_to_delay_slot_p (insn))
6788 return "nop";
6790 /* If this is a long branch with its delay slot unfilled, set `nullify'
6791 as it can nullify the delay slot and save a nop. */
6792 if (length == 8 && dbr_sequence_length () == 0)
6793 nullify = 1;
6795 /* If this is a short forward conditional branch which did not get
6796 its delay slot filled, the delay slot can still be nullified. */
6797 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6798 nullify = forward_branch_p (insn);
6800 /* A forward branch over a single nullified insn can be done with an
6801 extrs instruction. This avoids a single-cycle penalty due to a
6802 mis-predicted branch if we fall through (branch not taken). */
6803 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6805 switch (length)
6808 /* All short conditional branches except backwards with an unfilled
6809 delay slot. */
6810 case 4:
6811 if (useskip)
6812 strcpy (buf, "{extrs,|extrw,s,}");
6813 else
6814 strcpy (buf, "bb,");
6815 if (useskip && GET_MODE (operands[0]) == DImode)
6816 strcpy (buf, "extrd,s,*");
6817 else if (GET_MODE (operands[0]) == DImode)
6818 strcpy (buf, "bb,*");
6819 if ((which == 0 && negated)
6820 || (which == 1 && ! negated))
6821 strcat (buf, ">=");
6822 else
6823 strcat (buf, "<");
6824 if (useskip)
6825 strcat (buf, " %0,%1,1,%%r0");
6826 else if (nullify && negated)
6828 if (branch_needs_nop_p (insn))
6829 strcat (buf, ",n %0,%1,%3%#");
6830 else
6831 strcat (buf, ",n %0,%1,%3");
6833 else if (nullify && ! negated)
6835 if (branch_needs_nop_p (insn))
6836 strcat (buf, ",n %0,%1,%2%#");
6837 else
6838 strcat (buf, ",n %0,%1,%2");
6840 else if (! nullify && negated)
6841 strcat (buf, " %0,%1,%3");
6842 else if (! nullify && ! negated)
6843 strcat (buf, " %0,%1,%2");
6844 break;
6846 /* All long conditionals. Note a short backward branch with an
6847 unfilled delay slot is treated just like a long backward branch
6848 with an unfilled delay slot. */
6849 case 8:
6850 /* Handle weird backwards branch with a filled delay slot
6851 which is nullified. */
6852 if (dbr_sequence_length () != 0
6853 && ! forward_branch_p (insn)
6854 && nullify)
6856 strcpy (buf, "bb,");
6857 if (GET_MODE (operands[0]) == DImode)
6858 strcat (buf, "*");
6859 if ((which == 0 && negated)
6860 || (which == 1 && ! negated))
6861 strcat (buf, "<");
6862 else
6863 strcat (buf, ">=");
6864 if (negated)
6865 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6866 else
6867 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6869 /* Handle short backwards branch with an unfilled delay slot.
6870 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6871 taken and untaken branches. */
6872 else if (dbr_sequence_length () == 0
6873 && ! forward_branch_p (insn)
6874 && INSN_ADDRESSES_SET_P ()
6875 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6876 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6878 strcpy (buf, "bb,");
6879 if (GET_MODE (operands[0]) == DImode)
6880 strcat (buf, "*");
6881 if ((which == 0 && negated)
6882 || (which == 1 && ! negated))
6883 strcat (buf, ">=");
6884 else
6885 strcat (buf, "<");
6886 if (negated)
6887 strcat (buf, " %0,%1,%3%#");
6888 else
6889 strcat (buf, " %0,%1,%2%#");
6891 else
6893 if (GET_MODE (operands[0]) == DImode)
6894 strcpy (buf, "extrd,s,*");
6895 else
6896 strcpy (buf, "{extrs,|extrw,s,}");
6897 if ((which == 0 && negated)
6898 || (which == 1 && ! negated))
6899 strcat (buf, "<");
6900 else
6901 strcat (buf, ">=");
6902 if (nullify && negated)
6903 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6904 else if (nullify && ! negated)
6905 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6906 else if (negated)
6907 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6908 else
6909 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6911 break;
6913 default:
6914 /* The reversed conditional branch must branch over one additional
6915 instruction if the delay slot is filled and needs to be extracted
6916 by pa_output_lbranch. If the delay slot is empty or this is a
6917 nullified forward branch, the instruction after the reversed
6918 condition branch must be nullified. */
6919 if (dbr_sequence_length () == 0
6920 || (nullify && forward_branch_p (insn)))
6922 nullify = 1;
6923 xdelay = 0;
6924 operands[4] = GEN_INT (length);
6926 else
6928 xdelay = 1;
6929 operands[4] = GEN_INT (length + 4);
6932 if (GET_MODE (operands[0]) == DImode)
6933 strcpy (buf, "bb,*");
6934 else
6935 strcpy (buf, "bb,");
6936 if ((which == 0 && negated)
6937 || (which == 1 && !negated))
6938 strcat (buf, "<");
6939 else
6940 strcat (buf, ">=");
6941 if (nullify)
6942 strcat (buf, ",n %0,%1,.+%4");
6943 else
6944 strcat (buf, " %0,%1,.+%4");
6945 output_asm_insn (buf, operands);
6946 return pa_output_lbranch (negated ? operands[3] : operands[2],
6947 insn, xdelay);
6949 return buf;
6952 /* This routine handles all the branch-on-variable-bit conditional branch
6953 sequences we might need to generate. It handles nullification of delay
6954 slots, varying length branches, negated branches and all combinations
6955 of the above. It returns the appropriate output template to emit the
6956 branch. */
6958 const char *
6959 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
6960 int which)
6962 static char buf[100];
6963 bool useskip;
6964 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6965 int length = get_attr_length (insn);
6966 int xdelay;
6968 /* A conditional branch to the following instruction (e.g. the delay slot) is
6969 asking for a disaster. I do not think this can happen as this pattern
6970 is only used when optimizing; jump optimization should eliminate the
6971 jump. But be prepared just in case. */
6973 if (branch_to_delay_slot_p (insn))
6974 return "nop";
6976 /* If this is a long branch with its delay slot unfilled, set `nullify'
6977 as it can nullify the delay slot and save a nop. */
6978 if (length == 8 && dbr_sequence_length () == 0)
6979 nullify = 1;
6981 /* If this is a short forward conditional branch which did not get
6982 its delay slot filled, the delay slot can still be nullified. */
6983 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6984 nullify = forward_branch_p (insn);
6986 /* A forward branch over a single nullified insn can be done with an
6987 extrs instruction. This avoids a single-cycle penalty due to a
6988 mis-predicted branch if we fall through (branch not taken). */
6989 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6991 switch (length)
6994 /* All short conditional branches except backwards with an unfilled
6995 delay slot. */
6996 case 4:
6997 if (useskip)
6998 strcpy (buf, "{vextrs,|extrw,s,}");
6999 else
7000 strcpy (buf, "{bvb,|bb,}");
7001 if (useskip && GET_MODE (operands[0]) == DImode)
7002 strcpy (buf, "extrd,s,*");
7003 else if (GET_MODE (operands[0]) == DImode)
7004 strcpy (buf, "bb,*");
7005 if ((which == 0 && negated)
7006 || (which == 1 && ! negated))
7007 strcat (buf, ">=");
7008 else
7009 strcat (buf, "<");
7010 if (useskip)
7011 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7012 else if (nullify && negated)
7014 if (branch_needs_nop_p (insn))
7015 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7016 else
7017 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7019 else if (nullify && ! negated)
7021 if (branch_needs_nop_p (insn))
7022 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7023 else
7024 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7026 else if (! nullify && negated)
7027 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7028 else if (! nullify && ! negated)
7029 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7030 break;
7032 /* All long conditionals. Note a short backward branch with an
7033 unfilled delay slot is treated just like a long backward branch
7034 with an unfilled delay slot. */
7035 case 8:
7036 /* Handle weird backwards branch with a filled delay slot
7037 which is nullified. */
7038 if (dbr_sequence_length () != 0
7039 && ! forward_branch_p (insn)
7040 && nullify)
7042 strcpy (buf, "{bvb,|bb,}");
7043 if (GET_MODE (operands[0]) == DImode)
7044 strcat (buf, "*");
7045 if ((which == 0 && negated)
7046 || (which == 1 && ! negated))
7047 strcat (buf, "<");
7048 else
7049 strcat (buf, ">=");
7050 if (negated)
7051 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7052 else
7053 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7055 /* Handle short backwards branch with an unfilled delay slot.
7056 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7057 taken and untaken branches. */
7058 else if (dbr_sequence_length () == 0
7059 && ! forward_branch_p (insn)
7060 && INSN_ADDRESSES_SET_P ()
7061 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7062 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7064 strcpy (buf, "{bvb,|bb,}");
7065 if (GET_MODE (operands[0]) == DImode)
7066 strcat (buf, "*");
7067 if ((which == 0 && negated)
7068 || (which == 1 && ! negated))
7069 strcat (buf, ">=");
7070 else
7071 strcat (buf, "<");
7072 if (negated)
7073 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7074 else
7075 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7077 else
7079 strcpy (buf, "{vextrs,|extrw,s,}");
7080 if (GET_MODE (operands[0]) == DImode)
7081 strcpy (buf, "extrd,s,*");
7082 if ((which == 0 && negated)
7083 || (which == 1 && ! negated))
7084 strcat (buf, "<");
7085 else
7086 strcat (buf, ">=");
7087 if (nullify && negated)
7088 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7089 else if (nullify && ! negated)
7090 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7091 else if (negated)
7092 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7093 else
7094 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7096 break;
7098 default:
7099 /* The reversed conditional branch must branch over one additional
7100 instruction if the delay slot is filled and needs to be extracted
7101 by pa_output_lbranch. If the delay slot is empty or this is a
7102 nullified forward branch, the instruction after the reversed
7103 condition branch must be nullified. */
7104 if (dbr_sequence_length () == 0
7105 || (nullify && forward_branch_p (insn)))
7107 nullify = 1;
7108 xdelay = 0;
7109 operands[4] = GEN_INT (length);
7111 else
7113 xdelay = 1;
7114 operands[4] = GEN_INT (length + 4);
7117 if (GET_MODE (operands[0]) == DImode)
7118 strcpy (buf, "bb,*");
7119 else
7120 strcpy (buf, "{bvb,|bb,}");
7121 if ((which == 0 && negated)
7122 || (which == 1 && !negated))
7123 strcat (buf, "<");
7124 else
7125 strcat (buf, ">=");
7126 if (nullify)
7127 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7128 else
7129 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7130 output_asm_insn (buf, operands);
7131 return pa_output_lbranch (negated ? operands[3] : operands[2],
7132 insn, xdelay);
7134 return buf;
7137 /* Return the output template for emitting a dbra type insn.
7139 Note it may perform some output operations on its own before
7140 returning the final output string. */
7141 const char *
7142 pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
7144 int length = get_attr_length (insn);
7146 /* A conditional branch to the following instruction (e.g. the delay slot) is
7147 asking for a disaster. Be prepared! */
7149 if (branch_to_delay_slot_p (insn))
7151 if (which_alternative == 0)
7152 return "ldo %1(%0),%0";
7153 else if (which_alternative == 1)
7155 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7156 output_asm_insn ("ldw -16(%%r30),%4", operands);
7157 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7158 return "{fldws|fldw} -16(%%r30),%0";
7160 else
7162 output_asm_insn ("ldw %0,%4", operands);
7163 return "ldo %1(%4),%4\n\tstw %4,%0";
7167 if (which_alternative == 0)
7169 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7170 int xdelay;
7172 /* If this is a long branch with its delay slot unfilled, set `nullify'
7173 as it can nullify the delay slot and save a nop. */
7174 if (length == 8 && dbr_sequence_length () == 0)
7175 nullify = 1;
7177 /* If this is a short forward conditional branch which did not get
7178 its delay slot filled, the delay slot can still be nullified. */
7179 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7180 nullify = forward_branch_p (insn);
7182 switch (length)
7184 case 4:
7185 if (nullify)
7187 if (branch_needs_nop_p (insn))
7188 return "addib,%C2,n %1,%0,%3%#";
7189 else
7190 return "addib,%C2,n %1,%0,%3";
7192 else
7193 return "addib,%C2 %1,%0,%3";
7195 case 8:
7196 /* Handle weird backwards branch with a filled delay slot
7197 which is nullified. */
7198 if (dbr_sequence_length () != 0
7199 && ! forward_branch_p (insn)
7200 && nullify)
7201 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7202 /* Handle short backwards branch with an unfilled delay slot.
7203 Using an addb;nop rather than addi;bl saves 1 cycle for both
7204 taken and untaken branches. */
7205 else if (dbr_sequence_length () == 0
7206 && ! forward_branch_p (insn)
7207 && INSN_ADDRESSES_SET_P ()
7208 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7209 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7210 return "addib,%C2 %1,%0,%3%#";
7212 /* Handle normal cases. */
7213 if (nullify)
7214 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7215 else
7216 return "addi,%N2 %1,%0,%0\n\tb %3";
7218 default:
7219 /* The reversed conditional branch must branch over one additional
7220 instruction if the delay slot is filled and needs to be extracted
7221 by pa_output_lbranch. If the delay slot is empty or this is a
7222 nullified forward branch, the instruction after the reversed
7223 condition branch must be nullified. */
7224 if (dbr_sequence_length () == 0
7225 || (nullify && forward_branch_p (insn)))
7227 nullify = 1;
7228 xdelay = 0;
7229 operands[4] = GEN_INT (length);
7231 else
7233 xdelay = 1;
7234 operands[4] = GEN_INT (length + 4);
7237 if (nullify)
7238 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7239 else
7240 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7242 return pa_output_lbranch (operands[3], insn, xdelay);
7246 /* Deal with gross reload from FP register case. */
7247 else if (which_alternative == 1)
7249 /* Move loop counter from FP register to MEM then into a GR,
7250 increment the GR, store the GR into MEM, and finally reload
7251 the FP register from MEM from within the branch's delay slot. */
7252 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7253 operands);
7254 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7255 if (length == 24)
7256 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7257 else if (length == 28)
7258 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7259 else
7261 operands[5] = GEN_INT (length - 16);
7262 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7263 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7264 return pa_output_lbranch (operands[3], insn, 0);
7267 /* Deal with gross reload from memory case. */
7268 else
7270 /* Reload loop counter from memory, the store back to memory
7271 happens in the branch's delay slot. */
7272 output_asm_insn ("ldw %0,%4", operands);
7273 if (length == 12)
7274 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7275 else if (length == 16)
7276 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7277 else
7279 operands[5] = GEN_INT (length - 4);
7280 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7281 return pa_output_lbranch (operands[3], insn, 0);
7286 /* Return the output template for emitting a movb type insn.
7288 Note it may perform some output operations on its own before
7289 returning the final output string. */
7290 const char *
7291 pa_output_movb (rtx *operands, rtx insn, int which_alternative,
7292 int reverse_comparison)
7294 int length = get_attr_length (insn);
7296 /* A conditional branch to the following instruction (e.g. the delay slot) is
7297 asking for a disaster. Be prepared! */
7299 if (branch_to_delay_slot_p (insn))
7301 if (which_alternative == 0)
7302 return "copy %1,%0";
7303 else if (which_alternative == 1)
7305 output_asm_insn ("stw %1,-16(%%r30)", operands);
7306 return "{fldws|fldw} -16(%%r30),%0";
7308 else if (which_alternative == 2)
7309 return "stw %1,%0";
7310 else
7311 return "mtsar %r1";
7314 /* Support the second variant. */
7315 if (reverse_comparison)
7316 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7318 if (which_alternative == 0)
7320 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7321 int xdelay;
7323 /* If this is a long branch with its delay slot unfilled, set `nullify'
7324 as it can nullify the delay slot and save a nop. */
7325 if (length == 8 && dbr_sequence_length () == 0)
7326 nullify = 1;
7328 /* If this is a short forward conditional branch which did not get
7329 its delay slot filled, the delay slot can still be nullified. */
7330 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7331 nullify = forward_branch_p (insn);
7333 switch (length)
7335 case 4:
7336 if (nullify)
7338 if (branch_needs_nop_p (insn))
7339 return "movb,%C2,n %1,%0,%3%#";
7340 else
7341 return "movb,%C2,n %1,%0,%3";
7343 else
7344 return "movb,%C2 %1,%0,%3";
7346 case 8:
7347 /* Handle weird backwards branch with a filled delay slot
7348 which is nullified. */
7349 if (dbr_sequence_length () != 0
7350 && ! forward_branch_p (insn)
7351 && nullify)
7352 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7354 /* Handle short backwards branch with an unfilled delay slot.
7355 Using a movb;nop rather than or;bl saves 1 cycle for both
7356 taken and untaken branches. */
7357 else if (dbr_sequence_length () == 0
7358 && ! forward_branch_p (insn)
7359 && INSN_ADDRESSES_SET_P ()
7360 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7361 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7362 return "movb,%C2 %1,%0,%3%#";
7363 /* Handle normal cases. */
7364 if (nullify)
7365 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7366 else
7367 return "or,%N2 %1,%%r0,%0\n\tb %3";
7369 default:
7370 /* The reversed conditional branch must branch over one additional
7371 instruction if the delay slot is filled and needs to be extracted
7372 by pa_output_lbranch. If the delay slot is empty or this is a
7373 nullified forward branch, the instruction after the reversed
7374 condition branch must be nullified. */
7375 if (dbr_sequence_length () == 0
7376 || (nullify && forward_branch_p (insn)))
7378 nullify = 1;
7379 xdelay = 0;
7380 operands[4] = GEN_INT (length);
7382 else
7384 xdelay = 1;
7385 operands[4] = GEN_INT (length + 4);
7388 if (nullify)
7389 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7390 else
7391 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7393 return pa_output_lbranch (operands[3], insn, xdelay);
7396 /* Deal with gross reload for FP destination register case. */
7397 else if (which_alternative == 1)
7399 /* Move source register to MEM, perform the branch test, then
7400 finally load the FP register from MEM from within the branch's
7401 delay slot. */
7402 output_asm_insn ("stw %1,-16(%%r30)", operands);
7403 if (length == 12)
7404 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7405 else if (length == 16)
7406 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7407 else
7409 operands[4] = GEN_INT (length - 4);
7410 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7411 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7412 return pa_output_lbranch (operands[3], insn, 0);
7415 /* Deal with gross reload from memory case. */
7416 else if (which_alternative == 2)
7418 /* Reload loop counter from memory, the store back to memory
7419 happens in the branch's delay slot. */
7420 if (length == 8)
7421 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7422 else if (length == 12)
7423 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7424 else
7426 operands[4] = GEN_INT (length);
7427 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7428 operands);
7429 return pa_output_lbranch (operands[3], insn, 0);
7432 /* Handle SAR as a destination. */
7433 else
7435 if (length == 8)
7436 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7437 else if (length == 12)
7438 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7439 else
7441 operands[4] = GEN_INT (length);
7442 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7443 operands);
7444 return pa_output_lbranch (operands[3], insn, 0);
7449 /* Copy any FP arguments in INSN into integer registers. */
7450 static void
7451 copy_fp_args (rtx insn)
7453 rtx link;
7454 rtx xoperands[2];
7456 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7458 int arg_mode, regno;
7459 rtx use = XEXP (link, 0);
7461 if (! (GET_CODE (use) == USE
7462 && GET_CODE (XEXP (use, 0)) == REG
7463 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7464 continue;
7466 arg_mode = GET_MODE (XEXP (use, 0));
7467 regno = REGNO (XEXP (use, 0));
7469 /* Is it a floating point register? */
7470 if (regno >= 32 && regno <= 39)
7472 /* Copy the FP register into an integer register via memory. */
7473 if (arg_mode == SFmode)
7475 xoperands[0] = XEXP (use, 0);
7476 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7477 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7478 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7480 else
7482 xoperands[0] = XEXP (use, 0);
7483 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7484 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7485 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7486 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7492 /* Compute length of the FP argument copy sequence for INSN. */
7493 static int
7494 length_fp_args (rtx insn)
7496 int length = 0;
7497 rtx link;
7499 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7501 int arg_mode, regno;
7502 rtx use = XEXP (link, 0);
7504 if (! (GET_CODE (use) == USE
7505 && GET_CODE (XEXP (use, 0)) == REG
7506 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7507 continue;
7509 arg_mode = GET_MODE (XEXP (use, 0));
7510 regno = REGNO (XEXP (use, 0));
7512 /* Is it a floating point register? */
7513 if (regno >= 32 && regno <= 39)
7515 if (arg_mode == SFmode)
7516 length += 8;
7517 else
7518 length += 12;
7522 return length;
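/* These lengths match copy_fp_args above: an SFmode copy is fstw + ldw
   (two 4-byte insns, 8 bytes); a DFmode copy is fstd + two ldws
   (12 bytes).  */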
7525 /* Return the attribute length for the millicode call instruction INSN.
7526 The length must match the code generated by pa_output_millicode_call.
7527 We include the delay slot in the returned length as it is better to
7528 overestimate the length than to underestimate it. */
7530 int
7531 pa_attr_length_millicode_call (rtx insn)
7533 unsigned long distance = -1;
7534 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7536 if (INSN_ADDRESSES_SET_P ())
7538 distance = (total + insn_current_reference_address (insn));
7539 if (distance < total)
7540 distance = -1;
7543 if (TARGET_64BIT)
7545 if (!TARGET_LONG_CALLS && distance < 7600000)
7546 return 8;
7548 return 20;
7550 else if (TARGET_PORTABLE_RUNTIME)
7551 return 24;
7552 else
7554 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7555 return 8;
7557 if (!flag_pic)
7558 return 12;
7560 return 24;
7564 /* INSN is a function call. It may have an unconditional jump
7565 in its delay slot.
7567 CALL_DEST is the routine we are calling. */
7569 const char *
7570 pa_output_millicode_call (rtx insn, rtx call_dest)
7572 int attr_length = get_attr_length (insn);
7573 int seq_length = dbr_sequence_length ();
7574 int distance;
7575 rtx seq_insn;
7576 rtx xoperands[3];
7578 xoperands[0] = call_dest;
7579 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7581 /* Handle the common case where we are sure that the branch will
7582 reach the beginning of the $CODE$ subspace. The within reach
7583 form of the $$sh_func_adrs call has a length of 28. Because it
7584 has an attribute type of sh_func_adrs, it never has a nonzero
7585 sequence length (i.e., the delay slot is never filled). */
7586 if (!TARGET_LONG_CALLS
7587 && (attr_length == 8
7588 || (attr_length == 28
7589 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7591 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7593 else
7595 if (TARGET_64BIT)
7597 /* It might seem that one insn could be saved by accessing
7598 the millicode function using the linkage table. However,
7599 this doesn't work in shared libraries and other dynamically
7600 loaded objects. Using a pc-relative sequence also avoids
7601 problems related to the implicit use of the gp register. */
7602 output_asm_insn ("b,l .+8,%%r1", xoperands);
7604 if (TARGET_GAS)
7606 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7607 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7609 else
7611 xoperands[1] = gen_label_rtx ();
7612 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7613 targetm.asm_out.internal_label (asm_out_file, "L",
7614 CODE_LABEL_NUMBER (xoperands[1]));
7615 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7618 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7620 else if (TARGET_PORTABLE_RUNTIME)
7622 /* Pure portable runtime doesn't allow be/ble; we also don't
7623 have PIC support in the assembler/linker, so this sequence
7624 is needed. */
7626 /* Get the address of our target into %r1. */
7627 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7628 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7630 /* Get our return address into %r31. */
7631 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7632 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7634 /* Jump to our target address in %r1. */
7635 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7637 else if (!flag_pic)
7639 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7640 if (TARGET_PA_20)
7641 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7642 else
7643 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7645 else
7647 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7648 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7650 if (TARGET_SOM || !TARGET_GAS)
7652 /* The HP assembler can generate relocations for the
7653 difference of two symbols. GAS can do this for a
7654 millicode symbol but not an arbitrary external
7655 symbol when generating SOM output. */
7656 xoperands[1] = gen_label_rtx ();
7657 targetm.asm_out.internal_label (asm_out_file, "L",
7658 CODE_LABEL_NUMBER (xoperands[1]));
7659 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7660 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7662 else
7664 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7665 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7666 xoperands);
7669 /* Jump to our target address in %r1. */
7670 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7674 if (seq_length == 0)
7675 output_asm_insn ("nop", xoperands);
7677 /* We are done if there isn't a jump in the delay slot. */
7678 if (seq_length == 0 || ! JUMP_P (NEXT_INSN (insn)))
7679 return "";
7681 /* This call has an unconditional jump in its delay slot. */
7682 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7684 /* See if the return address can be adjusted. Use the containing
7685 sequence insn's address. */
7686 if (INSN_ADDRESSES_SET_P ())
7688 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7689 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7690 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7692 if (VAL_14_BITS_P (distance))
7694 xoperands[1] = gen_label_rtx ();
7695 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7696 targetm.asm_out.internal_label (asm_out_file, "L",
7697 CODE_LABEL_NUMBER (xoperands[1]));
7699 else
7700 /* ??? This branch may not reach its target. */
7701 output_asm_insn ("nop\n\tb,n %0", xoperands);
7703 else
7704 /* ??? This branch may not reach its target. */
7705 output_asm_insn ("nop\n\tb,n %0", xoperands);
7707 /* Delete the jump. */
7708 SET_INSN_DELETED (NEXT_INSN (insn));
7710 return "";
7713 /* Return the attribute length of the call instruction INSN. The SIBCALL
7714 flag indicates whether INSN is a regular call or a sibling call. The
7715 length returned must be longer than the code actually generated by
7716 pa_output_call. Since branch shortening is done before delay branch
7717 sequencing, there is no way to determine whether or not the delay
7718 slot will be filled during branch shortening. Even when the delay
7719 slot is filled, we may have to add a nop if the delay slot contains
7720 a branch that can't reach its target. Thus, we always have to include
7721 the delay slot in the length estimate. This used to be done in
7722 pa_adjust_insn_length but we do it here now as some sequences always
7723 fill the delay slot and we can save four bytes in the estimate for
7724 these sequences. */
7727 pa_attr_length_call (rtx insn, int sibcall)
7729 int local_call;
7730 rtx call, call_dest;
7731 tree call_decl;
7732 int length = 0;
7733 rtx pat = PATTERN (insn);
7734 unsigned long distance = -1;
7736 gcc_assert (CALL_P (insn));
7738 if (INSN_ADDRESSES_SET_P ())
7740 unsigned long total;
7742 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7743 distance = (total + insn_current_reference_address (insn));
7744 if (distance < total)
7745 distance = -1;
7748 gcc_assert (GET_CODE (pat) == PARALLEL);
7750 /* Get the call rtx. */
7751 call = XVECEXP (pat, 0, 0);
7752 if (GET_CODE (call) == SET)
7753 call = SET_SRC (call);
7755 gcc_assert (GET_CODE (call) == CALL);
7757 /* Determine if this is a local call. */
7758 call_dest = XEXP (XEXP (call, 0), 0);
7759 call_decl = SYMBOL_REF_DECL (call_dest);
7760 local_call = call_decl && targetm.binds_local_p (call_decl);
7762 /* pc-relative branch. */
7763 if (!TARGET_LONG_CALLS
7764 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7765 || distance < MAX_PCREL17F_OFFSET))
7766 length += 8;
7768 /* 64-bit plabel sequence. */
7769 else if (TARGET_64BIT && !local_call)
7770 length += sibcall ? 28 : 24;
7772 /* non-pic long absolute branch sequence. */
7773 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7774 length += 12;
7776 /* long pc-relative branch sequence. */
7777 else if (TARGET_LONG_PIC_SDIFF_CALL
7778 || (TARGET_GAS && !TARGET_SOM
7779 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7781 length += 20;
7783 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7784 length += 8;
7787 /* 32-bit plabel sequence. */
7788 else
7790 length += 32;
7792 if (TARGET_SOM)
7793 length += length_fp_args (insn);
7795 if (flag_pic)
7796 length += 4;
7798 if (!TARGET_PA_20)
7800 if (!sibcall)
7801 length += 8;
7803 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7804 length += 8;
7808 return length;
7811 /* INSN is a function call. It may have an unconditional jump
7812 in its delay slot.
7814 CALL_DEST is the routine we are calling. */
7816 const char *
7817 pa_output_call (rtx insn, rtx call_dest, int sibcall)
7819 int delay_insn_deleted = 0;
7820 int delay_slot_filled = 0;
7821 int seq_length = dbr_sequence_length ();
7822 tree call_decl = SYMBOL_REF_DECL (call_dest);
7823 int local_call = call_decl && targetm.binds_local_p (call_decl);
7824 rtx xoperands[2];
7826 xoperands[0] = call_dest;
7828 /* Handle the common case where we're sure that the branch will reach
7829 the beginning of the "$CODE$" subspace. This is the beginning of
7830 the current function if we are in a named section. */
7831 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7833 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7834 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7836 else
7838 if (TARGET_64BIT && !local_call)
7840 /* ??? As far as I can tell, the HP linker doesn't support the
7841 long pc-relative sequence described in the 64-bit runtime
7842 architecture. So, we use a slightly longer indirect call. */
7843 xoperands[0] = pa_get_deferred_plabel (call_dest);
7844 xoperands[1] = gen_label_rtx ();
7846 /* If this isn't a sibcall, we put the load of %r27 into the
7847 delay slot. We can't do this in a sibcall as we don't
7848 have a second call-clobbered scratch register available. */
7849 if (seq_length != 0
7850 && ! JUMP_P (NEXT_INSN (insn))
7851 && !sibcall)
7853 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7854 optimize, 0, NULL);
7856 /* Now delete the delay insn. */
7857 SET_INSN_DELETED (NEXT_INSN (insn));
7858 delay_insn_deleted = 1;
7861 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7862 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7863 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7865 if (sibcall)
7867 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7868 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7869 output_asm_insn ("bve (%%r1)", xoperands);
7871 else
7873 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7874 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7875 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7876 delay_slot_filled = 1;
7879 else
7881 int indirect_call = 0;
7883 /* Emit a long call. There are several different sequences
7884 of increasing length and complexity. In most cases,
7885 they don't allow an instruction in the delay slot. */
7886 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7887 && !TARGET_LONG_PIC_SDIFF_CALL
7888 && !(TARGET_GAS && !TARGET_SOM
7889 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7890 && !TARGET_64BIT)
7891 indirect_call = 1;
7893 if (seq_length != 0
7894 && ! JUMP_P (NEXT_INSN (insn))
7895 && !sibcall
7896 && (!TARGET_PA_20
7897 || indirect_call
7898 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7900 /* A non-jump insn in the delay slot. By definition we can
7901 emit this insn before the call (and in fact before argument
7902 relocating). */
7903 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7904 NULL);
7906 /* Now delete the delay insn. */
7907 SET_INSN_DELETED (NEXT_INSN (insn));
7908 delay_insn_deleted = 1;
7911 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7913 /* This is the best sequence for making long calls in
7914 non-pic code. Unfortunately, GNU ld doesn't provide
7915 the stub needed for external calls, and GAS's support
7916 for this with the SOM linker is buggy. It is safe
7917 to use this for local calls. */
7918 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7919 if (sibcall)
7920 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7921 else
7923 if (TARGET_PA_20)
7924 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7925 xoperands);
7926 else
7927 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7929 output_asm_insn ("copy %%r31,%%r2", xoperands);
7930 delay_slot_filled = 1;
7933 else
7935 if (TARGET_LONG_PIC_SDIFF_CALL)
7937 /* The HP assembler and linker can handle relocations
7938 for the difference of two symbols. The HP assembler
7939 recognizes the sequence as a pc-relative call and
7940 the linker provides stubs when needed. */
7941 xoperands[1] = gen_label_rtx ();
7942 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7943 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7944 targetm.asm_out.internal_label (asm_out_file, "L",
7945 CODE_LABEL_NUMBER (xoperands[1]));
7946 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7948 else if (TARGET_GAS && !TARGET_SOM
7949 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7951 /* GAS currently can't generate the relocations that
7952 are needed for the SOM linker under HP-UX using this
7953 sequence. The GNU linker doesn't generate the stubs
7954 that are needed for external calls on TARGET_ELF32
7955 with this sequence. For now, we have to use a
7956 longer plabel sequence when using GAS. */
7957 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7958 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7959 xoperands);
7960 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7961 xoperands);
7963 else
7965 /* Emit a long plabel-based call sequence. This is
7966 essentially an inline implementation of $$dyncall.
7967 We don't actually try to call $$dyncall as this is
7968 as difficult as calling the function itself. */
7969 xoperands[0] = pa_get_deferred_plabel (call_dest);
7970 xoperands[1] = gen_label_rtx ();
7972 /* Since the call is indirect, FP arguments in registers
7973 need to be copied to the general registers. Then, the
7974 argument relocation stub will copy them back. */
7975 if (TARGET_SOM)
7976 copy_fp_args (insn);
7978 if (flag_pic)
7980 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7981 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7982 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7984 else
7986 output_asm_insn ("addil LR'%0-$global$,%%r27",
7987 xoperands);
7988 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7989 xoperands);
7992 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7993 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7994 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7995 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7997 if (!sibcall && !TARGET_PA_20)
7999 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8000 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8001 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8002 else
8003 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8007 if (TARGET_PA_20)
8009 if (sibcall)
8010 output_asm_insn ("bve (%%r1)", xoperands);
8011 else
8013 if (indirect_call)
8015 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8016 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8017 delay_slot_filled = 1;
8019 else
8020 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8023 else
8025 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8026 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8027 xoperands);
8029 if (sibcall)
8031 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8032 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8033 else
8034 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8036 else
8038 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8039 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8040 else
8041 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8043 if (indirect_call)
8044 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8045 else
8046 output_asm_insn ("copy %%r31,%%r2", xoperands);
8047 delay_slot_filled = 1;
8054 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8055 output_asm_insn ("nop", xoperands);
8057 /* We are done if there isn't a jump in the delay slot. */
8058 if (seq_length == 0
8059 || delay_insn_deleted
8060 || ! JUMP_P (NEXT_INSN (insn)))
8061 return "";
8063 /* A sibcall should never have a branch in the delay slot. */
8064 gcc_assert (!sibcall);
8066 /* This call has an unconditional jump in its delay slot. */
8067 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8069 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8071 /* See if the return address can be adjusted. Use the containing
8072 sequence insn's address. This would break the regular call/return
8073 relationship assumed by the table-based EH unwinder, so only do that
8074 if the call is not possibly throwing. */
8075 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
8076 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8077 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8079 if (VAL_14_BITS_P (distance)
8080 && !(can_throw_internal (insn) || can_throw_external (insn)))
8082 xoperands[1] = gen_label_rtx ();
8083 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8084 targetm.asm_out.internal_label (asm_out_file, "L",
8085 CODE_LABEL_NUMBER (xoperands[1]));
8087 else
8088 output_asm_insn ("nop\n\tb,n %0", xoperands);
8090 else
8091 output_asm_insn ("b,n %0", xoperands);
8093 /* Delete the jump. */
8094 SET_INSN_DELETED (NEXT_INSN (insn));
8096 return "";
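/* Editorial sketch, not part of the original file: the plabel test
   emitted above ("bb,>=,n %%r1,30,.+16" followed by "depi 0,31,2,%%r1"
   and the two ldw's) corresponds roughly to the following C logic for
   a 32-bit function pointer.  The struct layout (entry word at offset
   0, linkage pointer at offset 4) is inferred from the loads in the
   sequence; all names are hypothetical.  */

struct plabel_sketch
{
  void *entry;		/* word 0: actual code address */
  void *linkage;	/* word 4: linkage table pointer for the callee */
};

static void *
resolve_plabel_sketch (unsigned long fptr, void **linkage_out)
{
  /* PA numbers bits from the most significant end, so "bit 30" of a
     32-bit word is the bit with value 2.  */
  if (fptr & 2)
    {
      const struct plabel_sketch *p
	= (const struct plabel_sketch *) (fptr & ~3UL); /* depi 0,31,2 */

      *linkage_out = p->linkage;	/* ldw 4(%sr0,%r1),%r19 */
      return p->entry;			/* ldw 0(%sr0,%r1),%r1 */
    }

  /* Plain code address; call it directly.  */
  return (void *) fptr;
}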
8099 /* Return the attribute length of the indirect call instruction INSN.
8100 The length must match the code generated by pa_output_indirect_call.
8101 The returned length includes the delay slot. Currently, the delay
8102 slot of an indirect call sequence is not exposed and it is used by
8103 the sequence itself. */
8106 pa_attr_length_indirect_call (rtx insn)
8108 unsigned long distance = -1;
8109 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8111 if (INSN_ADDRESSES_SET_P ())
8113 distance = (total + insn_current_reference_address (insn));
8114 if (distance < total)
8115 distance = -1;
8118 if (TARGET_64BIT)
8119 return 12;
8121 if (TARGET_FAST_INDIRECT_CALLS
8122 || (!TARGET_LONG_CALLS
8123 && !TARGET_PORTABLE_RUNTIME
8124 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8125 || distance < MAX_PCREL17F_OFFSET)))
8126 return 8;
8128 if (flag_pic)
8129 return 20;
8131 if (TARGET_PORTABLE_RUNTIME)
8132 return 16;
8134 /* Out of reach, can use ble. */
8135 return 12;
8138 const char *
8139 pa_output_indirect_call (rtx insn, rtx call_dest)
8141 rtx xoperands[1];
8143 if (TARGET_64BIT)
8145 xoperands[0] = call_dest;
8146 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8147 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8148 return "";
8151 /* First the special case for kernels, level 0 systems, etc. */
8152 if (TARGET_FAST_INDIRECT_CALLS)
8153 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8155 /* Now the normal case -- we can reach $$dyncall directly or
8156 we're sure that we can get there via a long-branch stub.
8158 No need to check target flags as the length uniquely identifies
8159 the remaining cases. */
8160 if (pa_attr_length_indirect_call (insn) == 8)
8162 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8163 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8164 variant of the B,L instruction can't be used on the SOM target. */
8165 if (TARGET_PA_20 && !TARGET_SOM)
8166 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8167 else
8168 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8171 /* Long millicode call, but we are not generating PIC or portable runtime
8172 code. */
8173 if (pa_attr_length_indirect_call (insn) == 12)
8174 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8176 /* Long millicode call for portable runtime. */
8177 if (pa_attr_length_indirect_call (insn) == 16)
8178 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8180 /* We need a long PIC call to $$dyncall. */
8181 xoperands[0] = NULL_RTX;
8182 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8183 if (TARGET_SOM || !TARGET_GAS)
8185 xoperands[0] = gen_label_rtx ();
8186 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8187 targetm.asm_out.internal_label (asm_out_file, "L",
8188 CODE_LABEL_NUMBER (xoperands[0]));
8189 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8191 else
8193 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8194 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8195 xoperands);
8197 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8198 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8199 return "";
8202 /* In HPUX 8.0's shared library scheme, special relocations are needed
8203 for function labels if they might be passed to a function
8204 in a shared library (because shared libraries don't live in code
8205 space), and special magic is needed to construct their address. */
8207 void
8208 pa_encode_label (rtx sym)
8210 const char *str = XSTR (sym, 0);
8211 int len = strlen (str) + 1;
8212 char *newstr, *p;
8214 p = newstr = XALLOCAVEC (char, len + 1);
8215 *p++ = '@';
8216 strcpy (p, str);
8218 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8221 static void
8222 pa_encode_section_info (tree decl, rtx rtl, int first)
8224 int old_referenced = 0;
8226 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8227 old_referenced
8228 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8230 default_encode_section_info (decl, rtl, first);
8232 if (first && TEXT_SPACE_P (decl))
8234 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8235 if (TREE_CODE (decl) == FUNCTION_DECL)
8236 pa_encode_label (XEXP (rtl, 0));
8238 else if (old_referenced)
8239 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8242 /* This is sort of the inverse of pa_encode_section_info. */
8244 static const char *
8245 pa_strip_name_encoding (const char *str)
8247 str += (*str == '@');
8248 str += (*str == '*');
8249 return str;
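/* Editorial sketch, not part of the original file: pa_encode_label
   prepends '@' and pa_strip_name_encoding drops a leading '@' (and a
   leading '*', the generic user-label prefix).  A self-contained
   round trip over plain C strings, with hypothetical names:  */

#include <stdio.h>

static const char *
strip_encoding_sketch (const char *str)
{
  str += (*str == '@');		/* function label marker */
  str += (*str == '*');		/* user label prefix */
  return str;
}

int
main (void)
{
  char encoded[64];

  /* Encode the way pa_encode_label does: prefix with '@'.  */
  snprintf (encoded, sizeof encoded, "@%s", "foo");

  /* Prints "@foo -> foo".  */
  printf ("%s -> %s\n", encoded, strip_encoding_sketch (encoded));
  return 0;
}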
8252 /* Returns 1 if OP is a function label involved in a simple addition
8253 with a constant. Used to keep certain patterns from matching
8254 during instruction combination. */
8256 pa_is_function_label_plus_const (rtx op)
8258 /* Strip off any CONST. */
8259 if (GET_CODE (op) == CONST)
8260 op = XEXP (op, 0);
8262 return (GET_CODE (op) == PLUS
8263 && function_label_operand (XEXP (op, 0), VOIDmode)
8264 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8267 /* Output assembly code for a thunk to FUNCTION. */
8269 static void
8270 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8271 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8272 tree function)
8274 static unsigned int current_thunk_number;
8275 int val_14 = VAL_14_BITS_P (delta);
8276 unsigned int old_last_address = last_address, nbytes = 0;
8277 char label[16];
8278 rtx xoperands[4];
8280 xoperands[0] = XEXP (DECL_RTL (function), 0);
8281 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8282 xoperands[2] = GEN_INT (delta);
8284 final_start_function (emit_barrier (), file, 1);
8286 /* Output the thunk. We know that the function is in the same
8287 translation unit (i.e., the same space) as the thunk, and that
8288 thunks are output after their method. Thus, we don't need an
8289 external branch to reach the function. With SOM and GAS,
8290 functions and thunks are effectively in different sections.
8291 Thus, we can always use an IA-relative branch and the linker
8292 will add a long branch stub if necessary.
8294 However, we have to be careful when generating PIC code on the
8295 SOM port to ensure that the sequence does not transfer to an
8296 import stub for the target function as this could clobber the
8297 return value saved at SP-24. This would also apply to the
8298 32-bit linux port if the multi-space model is implemented. */
8299 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8300 && !(flag_pic && TREE_PUBLIC (function))
8301 && (TARGET_GAS || last_address < 262132))
8302 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8303 && ((targetm_common.have_named_sections
8304 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8305 /* The GNU 64-bit linker has rather poor stub management.
8306 So, we use a long branch from thunks that aren't in
8307 the same section as the target function. */
8308 && ((!TARGET_64BIT
8309 && (DECL_SECTION_NAME (thunk_fndecl)
8310 != DECL_SECTION_NAME (function)))
8311 || ((DECL_SECTION_NAME (thunk_fndecl)
8312 == DECL_SECTION_NAME (function))
8313 && last_address < 262132)))
8314 /* In this case, we need to be able to reach the start of
8315 the stub table even though the function is likely closer
8316 and can be jumped to directly. */
8317 || (targetm_common.have_named_sections
8318 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8319 && DECL_SECTION_NAME (function) == NULL
8320 && total_code_bytes < MAX_PCREL17F_OFFSET)
8321 /* Likewise. */
8322 || (!targetm_common.have_named_sections
8323 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8325 if (!val_14)
8326 output_asm_insn ("addil L'%2,%%r26", xoperands);
8328 output_asm_insn ("b %0", xoperands);
8330 if (val_14)
8332 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8333 nbytes += 8;
8335 else
8337 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8338 nbytes += 12;
8341 else if (TARGET_64BIT)
8343 /* We only have one call-clobbered scratch register, so we can't
8344 make use of the delay slot if delta doesn't fit in 14 bits. */
8345 if (!val_14)
8347 output_asm_insn ("addil L'%2,%%r26", xoperands);
8348 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8351 output_asm_insn ("b,l .+8,%%r1", xoperands);
8353 if (TARGET_GAS)
8355 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8356 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8358 else
8360 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8361 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8362 output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
8364 if (val_14)
8366 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8367 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8368 nbytes += 20;
8370 else
8372 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8373 nbytes += 24;
8376 else if (TARGET_PORTABLE_RUNTIME)
8378 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8379 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8381 if (!val_14)
8382 output_asm_insn ("addil L'%2,%%r26", xoperands);
8384 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8386 if (val_14)
8388 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8389 nbytes += 16;
8391 else
8393 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8394 nbytes += 20;
8397 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8399 /* The function is accessible from outside this module. The only
8400 way to avoid an import stub between the thunk and function is to
8401 call the function directly with an indirect sequence similar to
8402 that used by $$dyncall. This is possible because $$dyncall acts
8403 as the import stub in an indirect call. */
8404 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8405 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8406 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8407 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8408 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8409 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8410 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8411 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8412 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8414 if (!val_14)
8416 output_asm_insn ("addil L'%2,%%r26", xoperands);
8417 nbytes += 4;
8420 if (TARGET_PA_20)
8422 output_asm_insn ("bve (%%r22)", xoperands);
8423 nbytes += 36;
8425 else if (TARGET_NO_SPACE_REGS)
8427 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8428 nbytes += 36;
8430 else
8432 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8433 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8434 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8435 nbytes += 44;
8438 if (val_14)
8439 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8440 else
8441 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8443 else if (flag_pic)
8445 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8447 if (TARGET_SOM || !TARGET_GAS)
8449 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8450 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8452 else
8454 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8455 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8458 if (!val_14)
8459 output_asm_insn ("addil L'%2,%%r26", xoperands);
8461 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8463 if (val_14)
8465 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8466 nbytes += 20;
8468 else
8470 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8471 nbytes += 24;
8474 else
8476 if (!val_14)
8477 output_asm_insn ("addil L'%2,%%r26", xoperands);
8479 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8480 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8482 if (val_14)
8484 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8485 nbytes += 12;
8487 else
8489 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8490 nbytes += 16;
8494 final_end_function ();
8496 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8498 switch_to_section (data_section);
8499 output_asm_insn (".align 4", xoperands);
8500 ASM_OUTPUT_LABEL (file, label);
8501 output_asm_insn (".word P'%0", xoperands);
8504 current_thunk_number++;
8505 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8506 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8507 last_address += nbytes;
8508 if (old_last_address > last_address)
8509 last_address = UINT_MAX;
8510 update_total_code_bytes (nbytes);
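/* Editorial sketch, not part of the original file: every branch
   variant above implements the same contract -- adjust the incoming
   'this' pointer in %r26 (the first argument register) by DELTA and
   transfer control to FUNCTION.  In C terms, with hypothetical names
   and an arbitrary example delta:  */

#define THUNK_DELTA_SKETCH 8	/* stands in for DELTA */

static int
target_method_sketch (void *self)
{
  return (int) (long) self;	/* placeholder body */
}

static int
thunk_sketch (void *self)
{
  /* "ldo delta(%r26),%r26" (or addil/ldo when DELTA doesn't fit in
     14 bits) followed by a branch to the target.  */
  return target_method_sketch ((char *) self + THUNK_DELTA_SKETCH);
}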
8513 /* Only direct calls to static functions are allowed to be sibling (tail)
8514 call optimized.
8516 This restriction is necessary because some linker-generated stubs
8517 store return pointers into rp' in some cases, which might clobber a
8518 live value already in rp'.
8520 In a sibcall the current function and the target function share stack
8521 space. Thus if the path to the current function and the path to the
8522 target function save a value in rp', they save the value into the
8523 same stack slot, which has undesirable consequences.
8525 Because of the deferred-binding nature of shared libraries, any function
8526 with external scope could be in a different load module and thus require
8527 rp' to be saved when calling that function. So sibcall optimizations
8528 can only be safe for static functions.
8530 Note that GCC never needs return value relocations, so we don't have to
8531 worry about static calls with return value relocations (which require
8532 saving rp').
8534 It is safe to perform a sibcall optimization when the target function
8535 will never return. */
8536 static bool
8537 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8539 if (TARGET_PORTABLE_RUNTIME)
8540 return false;
8542 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8543 single subspace mode and the call is not indirect. As far as I know,
8544 there is no operating system support for the multiple subspace mode.
8545 It might be possible to support indirect calls if we didn't use
8546 $$dyncall (see the indirect sequence generated in pa_output_call). */
8547 if (TARGET_ELF32)
8548 return (decl != NULL_TREE);
8550 /* Sibcalls are not ok because the arg pointer register is not a fixed
8551 register. This prevents the sibcall optimization from occurring. In
8552 addition, there are problems with stub placement using GNU ld. This
8553 is because a normal sibcall branch uses a 17-bit relocation while
8554 a regular call branch uses a 22-bit relocation. As a result, more
8555 care needs to be taken in the placement of long-branch stubs. */
8556 if (TARGET_64BIT)
8557 return false;
8559 /* Sibcalls are only ok within a translation unit. */
8560 return (decl && !TREE_PUBLIC (decl));
8563 /* ??? Addition is not commutative on the PA due to the weird implicit
8564 space register selection rules for memory addresses. Therefore, we
8565 don't consider a + b == b + a, as this might be inside a MEM. */
8566 static bool
8567 pa_commutative_p (const_rtx x, int outer_code)
8569 return (COMMUTATIVE_P (x)
8570 && (TARGET_NO_SPACE_REGS
8571 || (outer_code != UNKNOWN && outer_code != MEM)
8572 || GET_CODE (x) != PLUS));
8575 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8576 use in fmpyadd instructions. */
8578 pa_fmpyaddoperands (rtx *operands)
8580 enum machine_mode mode = GET_MODE (operands[0]);
8582 /* Must be a floating point mode. */
8583 if (mode != SFmode && mode != DFmode)
8584 return 0;
8586 /* All modes must be the same. */
8587 if (! (mode == GET_MODE (operands[1])
8588 && mode == GET_MODE (operands[2])
8589 && mode == GET_MODE (operands[3])
8590 && mode == GET_MODE (operands[4])
8591 && mode == GET_MODE (operands[5])))
8592 return 0;
8594 /* All operands must be registers. */
8595 if (! (GET_CODE (operands[1]) == REG
8596 && GET_CODE (operands[2]) == REG
8597 && GET_CODE (operands[3]) == REG
8598 && GET_CODE (operands[4]) == REG
8599 && GET_CODE (operands[5]) == REG))
8600 return 0;
8602 /* Only 2 real operands to the addition. One of the input operands must
8603 be the same as the output operand. */
8604 if (! rtx_equal_p (operands[3], operands[4])
8605 && ! rtx_equal_p (operands[3], operands[5]))
8606 return 0;
8608 /* Inout operand of add cannot conflict with any operands from multiply. */
8609 if (rtx_equal_p (operands[3], operands[0])
8610 || rtx_equal_p (operands[3], operands[1])
8611 || rtx_equal_p (operands[3], operands[2]))
8612 return 0;
8614 /* multiply cannot feed into addition operands. */
8615 if (rtx_equal_p (operands[4], operands[0])
8616 || rtx_equal_p (operands[5], operands[0]))
8617 return 0;
8619 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8620 if (mode == SFmode
8621 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8622 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8623 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8624 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8625 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8626 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8627 return 0;
8629 /* Passed. Operands are suitable for fmpyadd. */
8630 return 1;
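/* Editorial sketch, not part of the original file: stripped of the
   mode and register-class tests, the checks above are distinctness
   constraints on six register numbers (m0 = m1 * m2 for the multiply,
   a3 = a4 + a5 for the add).  A standalone replica with plain ints:  */

static int
fmpyadd_shape_ok_sketch (int m0, int m1, int m2, int a3, int a4, int a5)
{
  /* The add has only two real operands: one input must equal the
     output.  */
  if (a3 != a4 && a3 != a5)
    return 0;

  /* The add's in/out register must not appear in the multiply.  */
  if (a3 == m0 || a3 == m1 || a3 == m2)
    return 0;

  /* The multiply result must not feed the add's inputs.  */
  if (a4 == m0 || a5 == m0)
    return 0;

  return 1;
}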
8633 #if !defined(USE_COLLECT2)
8634 static void
8635 pa_asm_out_constructor (rtx symbol, int priority)
8637 if (!function_label_operand (symbol, VOIDmode))
8638 pa_encode_label (symbol);
8640 #ifdef CTORS_SECTION_ASM_OP
8641 default_ctor_section_asm_out_constructor (symbol, priority);
8642 #else
8643 # ifdef TARGET_ASM_NAMED_SECTION
8644 default_named_section_asm_out_constructor (symbol, priority);
8645 # else
8646 default_stabs_asm_out_constructor (symbol, priority);
8647 # endif
8648 #endif
8651 static void
8652 pa_asm_out_destructor (rtx symbol, int priority)
8654 if (!function_label_operand (symbol, VOIDmode))
8655 pa_encode_label (symbol);
8657 #ifdef DTORS_SECTION_ASM_OP
8658 default_dtor_section_asm_out_destructor (symbol, priority);
8659 #else
8660 # ifdef TARGET_ASM_NAMED_SECTION
8661 default_named_section_asm_out_destructor (symbol, priority);
8662 # else
8663 default_stabs_asm_out_destructor (symbol, priority);
8664 # endif
8665 #endif
8667 #endif
8669 /* This function places uninitialized global data in the bss section.
8670 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8671 function on the SOM port to prevent uninitialized global data from
8672 being placed in the data section. */
8674 void
8675 pa_asm_output_aligned_bss (FILE *stream,
8676 const char *name,
8677 unsigned HOST_WIDE_INT size,
8678 unsigned int align)
8680 switch_to_section (bss_section);
8681 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8683 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8684 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8685 #endif
8687 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8688 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8689 #endif
8691 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8692 ASM_OUTPUT_LABEL (stream, name);
8693 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8696 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8697 that doesn't allow the alignment of global common storage to be directly
8698 specified. The SOM linker aligns common storage based on the rounded
8699 value of the NUM_BYTES parameter in the .comm directive. It's not
8700 possible to use the .align directive as it doesn't affect the alignment
8701 of the label associated with a .comm directive. */
8703 void
8704 pa_asm_output_aligned_common (FILE *stream,
8705 const char *name,
8706 unsigned HOST_WIDE_INT size,
8707 unsigned int align)
8709 unsigned int max_common_align;
8711 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8712 if (align > max_common_align)
8714 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8715 "for global common data. Using %u",
8716 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8717 align = max_common_align;
8720 switch_to_section (bss_section);
8722 assemble_name (stream, name);
8723 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8724 MAX (size, align / BITS_PER_UNIT));
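/* Editorial sketch, not part of the original file: the alignment is
   smuggled through the size operand.  For example, a 100-byte object
   "buf" with a 64-bit (8-byte) alignment request emits

	buf	.comm 100

   since MAX (100, 64 / 8) == 100, while a 4-byte object with the same
   request emits ".comm 8" so that the SOM linker's size-based rounding
   yields an 8-byte boundary.  The arithmetic, as a standalone helper:  */

static unsigned long
comm_size_operand_sketch (unsigned long size, unsigned int align_bits)
{
  unsigned long align_bytes = align_bits / 8;	/* BITS_PER_UNIT == 8 */

  return size > align_bytes ? size : align_bytes;
}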
8727 /* We can't use .comm for local common storage as the SOM linker effectively
8728 treats the symbol as universal and uses the same storage for local symbols
8729 with the same name in different object files. The .block directive
8730 reserves an uninitialized block of storage. However, it's not common
8731 storage. Fortunately, GCC never requests common storage with the same
8732 name in any given translation unit. */
8734 void
8735 pa_asm_output_aligned_local (FILE *stream,
8736 const char *name,
8737 unsigned HOST_WIDE_INT size,
8738 unsigned int align)
8740 switch_to_section (bss_section);
8741 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8743 #ifdef LOCAL_ASM_OP
8744 fprintf (stream, "%s", LOCAL_ASM_OP);
8745 assemble_name (stream, name);
8746 fprintf (stream, "\n");
8747 #endif
8749 ASM_OUTPUT_LABEL (stream, name);
8750 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8753 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8754 use in fmpysub instructions. */
8756 pa_fmpysuboperands (rtx *operands)
8758 enum machine_mode mode = GET_MODE (operands[0]);
8760 /* Must be a floating point mode. */
8761 if (mode != SFmode && mode != DFmode)
8762 return 0;
8764 /* All modes must be the same. */
8765 if (! (mode == GET_MODE (operands[1])
8766 && mode == GET_MODE (operands[2])
8767 && mode == GET_MODE (operands[3])
8768 && mode == GET_MODE (operands[4])
8769 && mode == GET_MODE (operands[5])))
8770 return 0;
8772 /* All operands must be registers. */
8773 if (! (GET_CODE (operands[1]) == REG
8774 && GET_CODE (operands[2]) == REG
8775 && GET_CODE (operands[3]) == REG
8776 && GET_CODE (operands[4]) == REG
8777 && GET_CODE (operands[5]) == REG))
8778 return 0;
8780 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8781 operation, so operands[4] must be the same as operands[3]. */
8782 if (! rtx_equal_p (operands[3], operands[4]))
8783 return 0;
8785 /* multiply cannot feed into subtraction. */
8786 if (rtx_equal_p (operands[5], operands[0]))
8787 return 0;
8789 /* Inout operand of sub cannot conflict with any operands from multiply. */
8790 if (rtx_equal_p (operands[3], operands[0])
8791 || rtx_equal_p (operands[3], operands[1])
8792 || rtx_equal_p (operands[3], operands[2]))
8793 return 0;
8795 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8796 if (mode == SFmode
8797 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8798 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8799 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8800 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8801 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8802 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8803 return 0;
8805 /* Passed. Operands are suitable for fmpysub. */
8806 return 1;
8809 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8810 constants for shadd instructions. */
8812 pa_shadd_constant_p (int val)
8814 if (val == 2 || val == 4 || val == 8)
8815 return 1;
8816 else
8817 return 0;
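/* Editorial sketch, not part of the original file: the constants 2, 4
   and 8 accepted above are the scale factors of the sh1add/sh2add/
   sh3add instructions, which compute (x << k) + y for k = 1, 2, 3.
   Array indexing is the typical consumer; with 8-byte elements the
   address arithmetic for p[i] below can become a single
   "sh3add %r_i,%r_p,%r_t" (register names hypothetical):  */

static double
indexed_load_sketch (const double *p, long i)
{
  return p[i];	/* address = p + i * 8, an sh3add candidate */
}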
8820 /* Return TRUE if INSN branches forward. */
8822 static bool
8823 forward_branch_p (rtx insn)
8825 rtx lab = JUMP_LABEL (insn);
8827 /* The INSN must have a jump label. */
8828 gcc_assert (lab != NULL_RTX);
8830 if (INSN_ADDRESSES_SET_P ())
8831 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8833 while (insn)
8835 if (insn == lab)
8836 return true;
8837 else
8838 insn = NEXT_INSN (insn);
8841 return false;
8844 /* Return 1 if INSN is in the delay slot of a call instruction. */
8846 pa_jump_in_call_delay (rtx insn)
8849 if (! JUMP_P (insn))
8850 return 0;
8852 if (PREV_INSN (insn)
8853 && PREV_INSN (PREV_INSN (insn))
8854 && NONJUMP_INSN_P (next_real_insn (PREV_INSN (PREV_INSN (insn)))))
8856 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8858 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8859 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8862 else
8863 return 0;
8866 /* Output an unconditional move and branch insn. */
8868 const char *
8869 pa_output_parallel_movb (rtx *operands, rtx insn)
8871 int length = get_attr_length (insn);
8873 /* These are the cases in which we win. */
8874 if (length == 4)
8875 return "mov%I1b,tr %1,%0,%2";
8877 /* None of the following cases win, but they don't lose either. */
8878 if (length == 8)
8880 if (dbr_sequence_length () == 0)
8882 /* Nothing in the delay slot, fake it by putting the combined
8883 insn (the copy or add) in the delay slot of a bl. */
8884 if (GET_CODE (operands[1]) == CONST_INT)
8885 return "b %2\n\tldi %1,%0";
8886 else
8887 return "b %2\n\tcopy %1,%0";
8889 else
8891 /* Something in the delay slot, but we've got a long branch. */
8892 if (GET_CODE (operands[1]) == CONST_INT)
8893 return "ldi %1,%0\n\tb %2";
8894 else
8895 return "copy %1,%0\n\tb %2";
8899 if (GET_CODE (operands[1]) == CONST_INT)
8900 output_asm_insn ("ldi %1,%0", operands);
8901 else
8902 output_asm_insn ("copy %1,%0", operands);
8903 return pa_output_lbranch (operands[2], insn, 1);
8906 /* Output an unconditional add and branch insn. */
8908 const char *
8909 pa_output_parallel_addb (rtx *operands, rtx insn)
8911 int length = get_attr_length (insn);
8913 /* To make life easy we want operand0 to be the shared input/output
8914 operand and operand1 to be the readonly operand. */
8915 if (operands[0] == operands[1])
8916 operands[1] = operands[2];
8918 /* These are the cases in which we win. */
8919 if (length == 4)
8920 return "add%I1b,tr %1,%0,%3";
8922 /* None of the following cases win, but they don't lose either. */
8923 if (length == 8)
8925 if (dbr_sequence_length () == 0)
8926 /* Nothing in the delay slot, fake it by putting the combined
8927 insn (the copy or add) in the delay slot of a bl. */
8928 return "b %3\n\tadd%I1 %1,%0,%0";
8929 else
8930 /* Something in the delay slot, but we've got a long branch. */
8931 return "add%I1 %1,%0,%0\n\tb %3";
8934 output_asm_insn ("add%I1 %1,%0,%0", operands);
8935 return pa_output_lbranch (operands[3], insn, 1);
8938 /* Return nonzero if INSN (a jump insn) immediately follows a call
8939 to a named function. This is used to avoid filling the delay slot
8940 of the jump since it can usually be eliminated by modifying RP in
8941 the delay slot of the call. */
8944 pa_following_call (rtx insn)
8946 if (! TARGET_JUMP_IN_DELAY)
8947 return 0;
8949 /* Find the previous real insn, skipping NOTEs. */
8950 insn = PREV_INSN (insn);
8951 while (insn && NOTE_P (insn))
8952 insn = PREV_INSN (insn);
8954 /* Check for CALL_INSNs and millicode calls. */
8955 if (insn
8956 && ((CALL_P (insn)
8957 && get_attr_type (insn) != TYPE_DYNCALL)
8958 || (NONJUMP_INSN_P (insn)
8959 && GET_CODE (PATTERN (insn)) != SEQUENCE
8960 && GET_CODE (PATTERN (insn)) != USE
8961 && GET_CODE (PATTERN (insn)) != CLOBBER
8962 && get_attr_type (insn) == TYPE_MILLI)))
8963 return 1;
8965 return 0;
8968 /* We use this hook to perform a PA specific optimization which is difficult
8969 to do in earlier passes. */
8971 static void
8972 pa_reorg (void)
8974 remove_useless_addtr_insns (1);
8976 if (pa_cpu < PROCESSOR_8000)
8977 pa_combine_instructions ();
8980 /* The PA has a number of odd instructions which can perform multiple
8981 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8982 it may be profitable to combine two instructions into one instruction
8983 with two outputs. It's not profitable on PA2.0 machines because the
8984 two outputs would take two slots in the reorder buffers.
8986 This routine finds instructions which can be combined and combines
8987 them. We only support some of the potential combinations, and we
8988 only try common ways to find suitable instructions.
8990 * addb can add two registers or a register and a small integer
8991 and jump to a nearby (+-8k) location. Normally the jump to the
8992 nearby location is conditional on the result of the add, but by
8993 using the "true" condition we can make the jump unconditional.
8994 Thus addb can perform two independent operations in one insn.
8996 * movb is similar to addb in that it can perform a reg->reg
8997 or small immediate->reg copy and jump to a nearby (+-8k) location.
8999 * fmpyadd and fmpysub can perform a FP multiply and either an
9000 FP add or FP sub if the operands of the multiply and add/sub are
9001 independent (there are other minor restrictions). Note both
9002 the fmpy and fadd/fsub can in theory move to better spots according
9003 to data dependencies, but for now we require the fmpy stay at a
9004 fixed location.
9006 * Many of the memory operations can perform pre & post updates
9007 of index registers. GCC's pre/post increment/decrement addressing
9008 is far too simple to take advantage of all the possibilities. This
9009 pass may not be suitable since those insns may not be independent.
9011 * comclr can compare two ints or an int and a register, nullify
9012 the following instruction and zero some other register. This
9013 is more difficult to use as it's harder to find an insn which
9014 will generate a comclr than finding something like an unconditional
9015 branch. (conditional moves & long branches create comclr insns).
9017 * Most arithmetic operations can conditionally skip the next
9018 instruction. They can be viewed as "perform this operation
9019 and conditionally jump to this nearby location" (where nearby
9020 is a few insns away). These are difficult to use due to the
9021 branch length restrictions. */
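/* Editorial sketch, not part of the original file: a concrete shape of
   the addb combination described above.  Before the pass, an add and
   an unconditional branch are separate insns:

	addi 1,%r4,%r4
	b L$loop

   After combining (see pa_output_parallel_addb below, which emits
   "add%I1b,tr"), the pair becomes one insn using the always-true
   completer:

	addib,tr 1,%r4,L$loop

   The pass itself works on RTL PARALLELs and lets recog and the
   operand constraints decide whether a candidate pair is valid.  */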
9023 static void
9024 pa_combine_instructions (void)
9026 rtx anchor, new_rtx;
9028 /* This can get expensive since the basic algorithm is on the
9029 order of O(n^2) (or worse). Only do it for -O2 or higher
9030 levels of optimization. */
9031 if (optimize < 2)
9032 return;
9034 /* Walk down the list of insns looking for "anchor" insns which
9035 may be combined with "floating" insns. As the name implies,
9036 "anchor" instructions don't move, while "floating" insns may
9037 move around. */
9038 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9039 new_rtx = make_insn_raw (new_rtx);
9041 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9043 enum attr_pa_combine_type anchor_attr;
9044 enum attr_pa_combine_type floater_attr;
9046 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9047 Also ignore any special USE insns. */
9048 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9049 || GET_CODE (PATTERN (anchor)) == USE
9050 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9051 continue;
9053 anchor_attr = get_attr_pa_combine_type (anchor);
9054 /* See if anchor is an insn suitable for combination. */
9055 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9056 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9057 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9058 && ! forward_branch_p (anchor)))
9060 rtx floater;
9062 for (floater = PREV_INSN (anchor);
9063 floater;
9064 floater = PREV_INSN (floater))
9066 if (NOTE_P (floater)
9067 || (NONJUMP_INSN_P (floater)
9068 && (GET_CODE (PATTERN (floater)) == USE
9069 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9070 continue;
9072 /* Anything except a regular INSN will stop our search. */
9073 if (! NONJUMP_INSN_P (floater))
9075 floater = NULL_RTX;
9076 break;
9079 /* See if FLOATER is suitable for combination with the
9080 anchor. */
9081 floater_attr = get_attr_pa_combine_type (floater);
9082 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9083 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9084 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9085 && floater_attr == PA_COMBINE_TYPE_FMPY))
9087 /* If ANCHOR and FLOATER can be combined, then we're
9088 done with this pass. */
9089 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9090 SET_DEST (PATTERN (floater)),
9091 XEXP (SET_SRC (PATTERN (floater)), 0),
9092 XEXP (SET_SRC (PATTERN (floater)), 1)))
9093 break;
9096 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9097 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9099 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9101 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9102 SET_DEST (PATTERN (floater)),
9103 XEXP (SET_SRC (PATTERN (floater)), 0),
9104 XEXP (SET_SRC (PATTERN (floater)), 1)))
9105 break;
9107 else
9109 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9110 SET_DEST (PATTERN (floater)),
9111 SET_SRC (PATTERN (floater)),
9112 SET_SRC (PATTERN (floater))))
9113 break;
9118 /* If we didn't find anything on the backwards scan try forwards. */
9119 if (!floater
9120 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9121 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9123 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9125 if (NOTE_P (floater)
9126 || (NONJUMP_INSN_P (floater)
9127 && (GET_CODE (PATTERN (floater)) == USE
9128 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9130 continue;
9132 /* Anything except a regular INSN will stop our search. */
9133 if (! NONJUMP_INSN_P (floater))
9135 floater = NULL_RTX;
9136 break;
9139 /* See if FLOATER is suitable for combination with the
9140 anchor. */
9141 floater_attr = get_attr_pa_combine_type (floater);
9142 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9143 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9144 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9145 && floater_attr == PA_COMBINE_TYPE_FMPY))
9147 /* If ANCHOR and FLOATER can be combined, then we're
9148 done with this pass. */
9149 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9150 SET_DEST (PATTERN (floater)),
9151 XEXP (SET_SRC (PATTERN (floater)),
9152 0),
9153 XEXP (SET_SRC (PATTERN (floater)),
9154 1)))
9155 break;
9160 /* FLOATER will be nonzero if we found a suitable floating
9161 insn for combination with ANCHOR. */
9162 if (floater
9163 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9164 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9166 /* Emit the new instruction and delete the old anchor. */
9167 emit_insn_before (gen_rtx_PARALLEL
9168 (VOIDmode,
9169 gen_rtvec (2, PATTERN (anchor),
9170 PATTERN (floater))),
9171 anchor);
9173 SET_INSN_DELETED (anchor);
9175 /* Emit a special USE insn for FLOATER, then delete
9176 the floating insn. */
9177 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9178 delete_insn (floater);
9180 continue;
9182 else if (floater
9183 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9185 rtx temp;
9186 /* Emit the new_jump instruction and delete the old anchor. */
9187 temp
9188 = emit_jump_insn_before (gen_rtx_PARALLEL
9189 (VOIDmode,
9190 gen_rtvec (2, PATTERN (anchor),
9191 PATTERN (floater))),
9192 anchor);
9194 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9195 SET_INSN_DELETED (anchor);
9197 /* Emit a special USE insn for FLOATER, then delete
9198 the floating insn. */
9199 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9200 delete_insn (floater);
9201 continue;
9207 static int
9208 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9209 rtx src1, rtx src2)
9211 int insn_code_number;
9212 rtx start, end;
9214 /* Create a PARALLEL with the patterns of ANCHOR and
9215 FLOATER, try to recognize it, then test constraints
9216 for the resulting pattern.
9218 If the pattern doesn't match or the constraints
9219 aren't met keep searching for a suitable floater
9220 insn. */
9221 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9222 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9223 INSN_CODE (new_rtx) = -1;
9224 insn_code_number = recog_memoized (new_rtx);
9225 if (insn_code_number < 0
9226 || (extract_insn (new_rtx), ! constrain_operands (1)))
9227 return 0;
9229 if (reversed)
9231 start = anchor;
9232 end = floater;
9234 else
9236 start = floater;
9237 end = anchor;
9240 /* There's up to three operands to consider. One
9241 output and two inputs.
9243 The output must not be used between FLOATER & ANCHOR
9244 exclusive. The inputs must not be set between
9245 FLOATER and ANCHOR exclusive. */
9247 if (reg_used_between_p (dest, start, end))
9248 return 0;
9250 if (reg_set_between_p (src1, start, end))
9251 return 0;
9253 if (reg_set_between_p (src2, start, end))
9254 return 0;
9256 /* If we get here, then everything is good. */
9257 return 1;
9260 /* Return nonzero if references for INSN are delayed.
9262 Millicode insns are actually function calls with some special
9263 constraints on arguments and register usage.
9265 Millicode calls always expect their arguments in the integer argument
9266 registers, and always return their result in %r29 (ret1). They
9267 are expected to clobber their arguments, %r1, %r29, and the return
9268 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9270 This function tells reorg that the references to arguments and
9271 millicode calls do not appear to happen until after the millicode call.
9272 This allows reorg to put insns which set the argument registers into the
9273 delay slot of the millicode call -- thus they act more like traditional
9274 CALL_INSNs.
9276 Note we cannot consider side effects of the insn to be delayed because
9277 the branch and link insn will clobber the return pointer. If we happened
9278 to use the return pointer in the delay slot of the call, then we lose.
9280 get_attr_type will try to recognize the given insn, so make sure to
9281 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9282 in particular. */
9284 pa_insn_refs_are_delayed (rtx insn)
9286 return ((NONJUMP_INSN_P (insn)
9287 && GET_CODE (PATTERN (insn)) != SEQUENCE
9288 && GET_CODE (PATTERN (insn)) != USE
9289 && GET_CODE (PATTERN (insn)) != CLOBBER
9290 && get_attr_type (insn) == TYPE_MILLI));
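/* Editorial sketch, not part of the original file: the effect of the
   predicate above, using the 32-bit integer-multiply millicode routine
   as an example.  Arguments go in %r26/%r25 and the result returns in
   %r29, so reorg may legally move an argument setup insn into the
   call's delay slot:

	ldi 42,%r26
	bl $$mulI,%r31
	ldi 7,%r25		; argument set up in the delay slot
	copy %r29,%r4		; 294, the product

   Treating the argument references as delayed is what makes the
   "ldi 7,%r25" placement valid.  */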
9293 /* Promote the return value, but not the arguments. */
9295 static enum machine_mode
9296 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9297 enum machine_mode mode,
9298 int *punsignedp ATTRIBUTE_UNUSED,
9299 const_tree fntype ATTRIBUTE_UNUSED,
9300 int for_return)
9302 if (for_return == 0)
9303 return mode;
9304 return promote_mode (type, mode, punsignedp);
9307 /* On the HP-PA the value is found in register(s) 28(-29), unless
9308 the mode is SF or DF. Then the value is returned in fr4 (32).
9310 This must perform the same promotions as PROMOTE_MODE, else promoting
9311 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9313 Small structures must be returned in a PARALLEL on PA64 in order
9314 to match the HP Compiler ABI. */
9316 static rtx
9317 pa_function_value (const_tree valtype,
9318 const_tree func ATTRIBUTE_UNUSED,
9319 bool outgoing ATTRIBUTE_UNUSED)
9321 enum machine_mode valmode;
9323 if (AGGREGATE_TYPE_P (valtype)
9324 || TREE_CODE (valtype) == COMPLEX_TYPE
9325 || TREE_CODE (valtype) == VECTOR_TYPE)
9327 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9329 /* Handle aggregates that fit exactly in a word or double word. */
9330 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9331 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9333 if (TARGET_64BIT)
9335 /* Aggregates with a size less than or equal to 128 bits are
9336 returned in GR 28(-29). They are left justified. The pad
9337 bits are undefined. Larger aggregates are returned in
9338 memory. */
9339 rtx loc[2];
9340 int i, offset = 0;
9341 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9343 for (i = 0; i < ub; i++)
9345 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9346 gen_rtx_REG (DImode, 28 + i),
9347 GEN_INT (offset));
9348 offset += 8;
9351 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9353 else if (valsize > UNITS_PER_WORD)
9355 /* Aggregates 5 to 8 bytes in size are returned in general
9356 registers r28-r29 in the same manner as other
9357 non-floating-point objects. The data is right-justified and
9358 zero-extended to 64 bits. This is opposite to the normal
9359 justification used on big endian targets and requires
9360 special treatment. */
9361 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9362 gen_rtx_REG (DImode, 28), const0_rtx);
9363 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9367 if ((INTEGRAL_TYPE_P (valtype)
9368 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9369 || POINTER_TYPE_P (valtype))
9370 valmode = word_mode;
9371 else
9372 valmode = TYPE_MODE (valtype);
9374 if (TREE_CODE (valtype) == REAL_TYPE
9375 && !AGGREGATE_TYPE_P (valtype)
9376 && TYPE_MODE (valtype) != TFmode
9377 && !TARGET_SOFT_FLOAT)
9378 return gen_rtx_REG (valmode, 32);
9380 return gen_rtx_REG (valmode, 28);
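/* Editorial sketch, not part of the original file: concrete cases of
   the selection above for the 32-bit hard-float port:

     int f (void);	  value in %r28
     double g (void);	  value in %fr4 (register 32)
     long long h (void);  value in the %r28-%r29 pair

   and on TARGET_64BIT a small (<= 16 byte) aggregate comes back
   left-justified in the GR 28/GR 29 PARALLEL built above.  */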
9383 /* Implement the TARGET_LIBCALL_VALUE hook. */
9385 static rtx
9386 pa_libcall_value (enum machine_mode mode,
9387 const_rtx fun ATTRIBUTE_UNUSED)
9389 if (! TARGET_SOFT_FLOAT
9390 && (mode == SFmode || mode == DFmode))
9391 return gen_rtx_REG (mode, 32);
9392 else
9393 return gen_rtx_REG (mode, 28);
9396 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9398 static bool
9399 pa_function_value_regno_p (const unsigned int regno)
9401 if (regno == 28
9402 || (! TARGET_SOFT_FLOAT && regno == 32))
9403 return true;
9405 return false;
9408 /* Update the data in CUM to advance over an argument
9409 of mode MODE and data type TYPE.
9410 (TYPE is null for libcalls where that information may not be available.) */
9412 static void
9413 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9414 const_tree type, bool named ATTRIBUTE_UNUSED)
9416 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9417 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9419 cum->nargs_prototype--;
9420 cum->words += (arg_size
9421 + ((cum->words & 01)
9422 && type != NULL_TREE
9423 && arg_size > 1));
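/* Editorial sketch, not part of the original file: the advance above
   inserts one padding word when a multiword typed argument would start
   at an odd word offset.  A standalone replica of the arithmetic:  */

static int
advance_words_sketch (int words, int arg_size, int has_type)
{
  /* Pad multiword typed arguments to an even word boundary.  */
  return words + arg_size + ((words & 1) && has_type && arg_size > 1);
}

/* Example: after one int argument (words == 1), a typed DImode
   argument (arg_size == 2) advances by 3 -- one padding word plus two
   data words -- leaving words == 4.  */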
9426 /* Return the location of a parameter that is passed in a register or NULL
9427 if the parameter has any component that is passed in memory.
9429 This is new code and will be pushed into the net sources after
9430 further testing.
9432 ??? We might want to restructure this so that it looks more like other
9433 ports. */
9434 static rtx
9435 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9436 const_tree type, bool named ATTRIBUTE_UNUSED)
9438 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9439 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9440 int alignment = 0;
9441 int arg_size;
9442 int fpr_reg_base;
9443 int gpr_reg_base;
9444 rtx retval;
9446 if (mode == VOIDmode)
9447 return NULL_RTX;
9449 arg_size = FUNCTION_ARG_SIZE (mode, type);
9451 /* If this arg would be passed partially or totally on the stack, then
9452 this routine should return zero. pa_arg_partial_bytes will
9453 handle arguments which are split between regs and stack slots if
9454 the ABI mandates split arguments. */
9455 if (!TARGET_64BIT)
9457 /* The 32-bit ABI does not split arguments. */
9458 if (cum->words + arg_size > max_arg_words)
9459 return NULL_RTX;
9461 else
9463 if (arg_size > 1)
9464 alignment = cum->words & 1;
9465 if (cum->words + alignment >= max_arg_words)
9466 return NULL_RTX;
9469 /* The 32bit ABIs and the 64bit ABIs are rather different,
9470 particularly in their handling of FP registers. We might
9471 be able to cleverly share code between them, but I'm not
9472 going to bother in the hope that splitting them up results
9473 in code that is more easily understood. */
9475 if (TARGET_64BIT)
9477 /* Advance the base registers to their current locations.
9479 Remember, gprs grow towards smaller register numbers while
9480 fprs grow to higher register numbers. Also remember that
9481 although FP regs are 32-bit addressable, we pretend that
9482 the registers are 64-bits wide. */
9483 gpr_reg_base = 26 - cum->words;
9484 fpr_reg_base = 32 + cum->words;
9486 /* Arguments wider than one word and small aggregates need special
9487 treatment. */
9488 if (arg_size > 1
9489 || mode == BLKmode
9490 || (type && (AGGREGATE_TYPE_P (type)
9491 || TREE_CODE (type) == COMPLEX_TYPE
9492 || TREE_CODE (type) == VECTOR_TYPE)))
9494 /* Double-extended precision (80-bit), quad-precision (128-bit)
9495 and aggregates including complex numbers are aligned on
9496 128-bit boundaries. The first eight 64-bit argument slots
9497 are associated one-to-one, with general registers r26
9498 through r19, and also with floating-point registers fr4
9499 through fr11. Arguments larger than one word are always
9500 passed in general registers.
9502 Using a PARALLEL with a word mode register results in left
9503 justified data on a big-endian target. */
9505 rtx loc[8];
9506 int i, offset = 0, ub = arg_size;
9508 /* Align the base register. */
9509 gpr_reg_base -= alignment;
9511 ub = MIN (ub, max_arg_words - cum->words - alignment);
9512 for (i = 0; i < ub; i++)
9514 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9515 gen_rtx_REG (DImode, gpr_reg_base),
9516 GEN_INT (offset));
9517 gpr_reg_base -= 1;
9518 offset += 8;
9521 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9524 else
9526 /* If the argument is larger than a word, then we know precisely
9527 which registers we must use. */
9528 if (arg_size > 1)
9530 if (cum->words)
9532 gpr_reg_base = 23;
9533 fpr_reg_base = 38;
9535 else
9537 gpr_reg_base = 25;
9538 fpr_reg_base = 34;
9541 /* Structures 5 to 8 bytes in size are passed in the general
9542 registers in the same manner as other non floating-point
9543 objects. The data is right-justified and zero-extended
9544 to 64 bits. This is opposite to the normal justification
9545 used on big endian targets and requires special treatment.
9546 We now define BLOCK_REG_PADDING to pad these objects.
9547 Aggregates, complex and vector types are passed in the same
9548 manner as structures. */
9549 if (mode == BLKmode
9550 || (type && (AGGREGATE_TYPE_P (type)
9551 || TREE_CODE (type) == COMPLEX_TYPE
9552 || TREE_CODE (type) == VECTOR_TYPE)))
9554 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9555 gen_rtx_REG (DImode, gpr_reg_base),
9556 const0_rtx);
9557 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9560 else
9562 /* We have a single word (32 bits). A simple computation
9563 will get us the register #s we need. */
9564 gpr_reg_base = 26 - cum->words;
9565 fpr_reg_base = 32 + 2 * cum->words;
9569 /* Determine if the argument needs to be passed in both general and
9570 floating point registers. */
9571 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9572 /* If we are doing soft-float with portable runtime, then there
9573 is no need to worry about FP regs. */
9574 && !TARGET_SOFT_FLOAT
9575 /* The parameter must be some kind of scalar float, else we just
9576 pass it in integer registers. */
9577 && GET_MODE_CLASS (mode) == MODE_FLOAT
9578 /* The target function must not have a prototype. */
9579 && cum->nargs_prototype <= 0
9580 /* libcalls do not need to pass items in both FP and general
9581 registers. */
9582 && type != NULL_TREE
9583 /* All this hair applies to "outgoing" args only. This includes
9584 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9585 && !cum->incoming)
9586 /* Also pass outgoing floating arguments in both registers in indirect
9587 calls with the 32 bit ABI and the HP assembler since there is no
9588 way to the specify argument locations in static functions. */
9589 || (!TARGET_64BIT
9590 && !TARGET_GAS
9591 && !cum->incoming
9592 && cum->indirect
9593 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9595 retval
9596 = gen_rtx_PARALLEL
9597 (mode,
9598 gen_rtvec (2,
9599 gen_rtx_EXPR_LIST (VOIDmode,
9600 gen_rtx_REG (mode, fpr_reg_base),
9601 const0_rtx),
9602 gen_rtx_EXPR_LIST (VOIDmode,
9603 gen_rtx_REG (mode, gpr_reg_base),
9604 const0_rtx)));
9606 else
9608 /* See if we should pass this parameter in a general register. */
9609 if (TARGET_SOFT_FLOAT
9610 /* Indirect calls in the normal 32bit ABI require all arguments
9611 to be passed in general registers. */
9612 || (!TARGET_PORTABLE_RUNTIME
9613 && !TARGET_64BIT
9614 && !TARGET_ELF32
9615 && cum->indirect)
9616 /* If the parameter is not a scalar floating-point parameter,
9617 then it belongs in GPRs. */
9618 || GET_MODE_CLASS (mode) != MODE_FLOAT
9619 /* Structure with single SFmode field belongs in GPR. */
9620 || (type && AGGREGATE_TYPE_P (type)))
9621 retval = gen_rtx_REG (mode, gpr_reg_base);
9622 else
9623 retval = gen_rtx_REG (mode, fpr_reg_base);
9625 return retval;
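
/* Editorial example of the 64-bit slot mapping above (not in the
   original sources): with cum->words == 3 and a 16-byte aggregate
   (arg_size == 2), alignment is 1, so slot 3 is skipped as padding
   and the PARALLEL maps slot 4 to r22 (26 - 4) at offset 0 and
   slot 5 to r21 at offset 8.  */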

/* Arguments larger than one word are double word aligned.  */

static unsigned int
pa_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  bool singleword = (type
		     ? (integer_zerop (TYPE_SIZE (type))
			|| !TREE_CONSTANT (TYPE_SIZE (type))
			|| int_size_in_bytes (type) <= UNITS_PER_WORD)
		     : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);

  return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
}
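
/* Editorial example (not in the original sources): any type of at
   most UNITS_PER_WORD bytes gets PARM_BOUNDARY, while a double on the
   32-bit runtime (8 bytes) gets MAX_PARM_BOUNDARY.  Zero-sized and
   variable-sized types also get PARM_BOUNDARY, since their TYPE_SIZE
   is zero or not constant.  */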

/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
		      tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
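
/* Editorial example (not in the original sources): on the 64-bit
   runtime, a 24-byte aggregate (three words) arriving with
   cum->words == 6 neither fits fully in the eight argument slots nor
   starts on the stack, so (8 - 6) * UNITS_PER_WORD == 16 bytes are
   passed in registers and the final 8 bytes go on the stack.  */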

/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
	{
	  /* We only want to emit a .nsubspa directive once at the
	     start of the function.  */
	  cfun->machine->in_nsubspa = 1;

	  /* Create a new subspace for the text.  This provides
	     better stub placement and one-only functions.  */
	  if (cfun->decl
	      && DECL_ONE_ONLY (cfun->decl)
	      && !DECL_WEAK (cfun->decl))
	    {
	      output_section_asm_op ("\t.SPACE $TEXT$\n"
				     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
				     "ACCESS=44,SORT=24,COMDAT");
	      return;
	    }
	}
      else
	{
	  /* There isn't a current function or the body of the current
	     function has been completed.  So, we are changing to the
	     text section to output debugging information.  Thus, we
	     need to forget that we are in the text section so that
	     varasm.c will call us when text_section is selected again.  */
	  gcc_assert (!cfun || !cfun->machine
		      || cfun->machine->in_nsubspa == 2);
	  in_section = NULL;
	}
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}

/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}

/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
			   "\t.SPACE $TEXT$\n"
			   "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
			   "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
			   som_output_comdat_data_section_asm_op,
			   "\t.SPACE $PRIVATE$\n"
			   "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
			   "ACCESS=31,SORT=24,COMDAT");

  if (flag_tm)
    som_tm_clone_table_section
      = get_unnamed_section (0, output_section_asm_op,
			     "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}

/* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION.  */

static section *
pa_som_tm_clone_table_section (void)
{
  return som_tm_clone_table_section;
}

/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc cannot
   be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
		   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
	  || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
	  && DECL_ONE_ONLY (exp)
	  && !DECL_WEAK (exp))
	return som_one_only_readonly_data_section;
      else
	return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
	   && TREE_CODE (exp) == VAR_DECL
	   && DECL_ONE_ONLY (exp)
	   && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}

/* Implement TARGET_ASM_RELOC_RW_MASK.  */

static int
pa_reloc_rw_mask (void)
{
  /* We force (const (plus (symbol) (const_int))) to memory when the
     const_int doesn't fit in a 14-bit integer.  The SOM linker can't
     handle this construct in read-only memory and we want to avoid
     this for ELF.  So, we always force an RTX needing relocation to
     the data section.  */
  return 3;
}
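
/* Editorial note (not in the original sources): the value returned
   above is a mask of relocation kinds that must live in writable
   sections; bit 0 is believed to cover relocations to local symbols
   and bit 1 relocations to global symbols, so returning 3 sends
   anything needing a relocation to the data section, matching the
   comment above.  */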

static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
	  || int_size_in_bytes (type) <= 0);
}
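
/* Editorial example (not in the original sources): a 12-byte
   structure is returned in memory on the 32-bit runtime (8-byte
   limit) but in registers on the 64-bit runtime (16-byte limit);
   an empty structure, whose size is zero, is returned in memory on
   both.  */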

/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct GTY(()) extern_symbol
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */

/* Vector of extern_symbol structures.  */
static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;

#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  gcc_assert (file == asm_out_file);
  extern_symbol p = {decl, name};
  vec_safe_push (extern_symbols, p);
}

/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
	  && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
	ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  vec_free (extern_symbols);
}
#endif

/* Return true if a change from mode FROM to mode TO for a register
   in register class RCLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
			     enum reg_class rclass)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}

/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}

/* Length in units of the trampoline instruction code.  */

#define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))


/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
   and then branches to the specified routine.

   This code template is copied from text segment to stack location
   and then patched with pa_trampoline_init to contain valid values,
   and then entered as a subroutine.

   It is best to keep this as small as possible to avoid having to
   flush multiple lines in the cache.  */

static void
pa_asm_trampoline_template (FILE *f)
{
  if (!TARGET_64BIT)
    {
      fputs ("\tldw	36(%r22),%r21\n", f);
      fputs ("\tbb,>=,n	%r21,30,.+16\n", f);
      if (ASSEMBLER_DIALECT == 0)
	fputs ("\tdepi	0,31,2,%r21\n", f);
      else
	fputs ("\tdepwi	0,31,2,%r21\n", f);
      fputs ("\tldw	4(%r21),%r19\n", f);
      fputs ("\tldw	0(%r21),%r21\n", f);
      if (TARGET_PA_20)
	{
	  fputs ("\tbve	(%r21)\n", f);
	  fputs ("\tldw	40(%r22),%r29\n", f);
	  fputs ("\t.word	0\n", f);
	  fputs ("\t.word	0\n", f);
	}
      else
	{
	  fputs ("\tldsid	(%r21),%r1\n", f);
	  fputs ("\tmtsp	%r1,%sr0\n", f);
	  fputs ("\tbe	0(%sr0,%r21)\n", f);
	  fputs ("\tldw	40(%r22),%r29\n", f);
	}
      fputs ("\t.word	0\n", f);
      fputs ("\t.word	0\n", f);
      fputs ("\t.word	0\n", f);
      fputs ("\t.word	0\n", f);
    }
  else
    {
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\t.dword 0\n", f);
      fputs ("\tmfia	%r31\n", f);
      fputs ("\tldd	24(%r31),%r1\n", f);
      fputs ("\tldd	24(%r1),%r27\n", f);
      fputs ("\tldd	16(%r1),%r1\n", f);
      fputs ("\tbve	(%r1)\n", f);
      fputs ("\tldd	32(%r31),%r31\n", f);
      fputs ("\t.dword 0  ; fptr\n", f);
      fputs ("\t.dword 0  ; static link\n", f);
    }
}
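
/* Editorial layout note for the 32-bit template above (not in the
   original sources): the code part occupies bytes 0-35 (the PA 2.0
   variant pads with two .word slots to keep the layout fixed), so
   the four trailing .word slots fall at offsets 36, 40, 44 and 48.
   These are the variable parts patched by pa_trampoline_init: the
   function address at 36 (loaded by "ldw 36(%r22),%r21") and the
   static chain at 40 (loaded by "ldw 40(%r22),%r29").  */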

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Move the function address to the trampoline template at offset 36.
   Move the static chain value to trampoline template at offset 40.
   Move the trampoline address to trampoline template at offset 44.
   Move r19 to trampoline template at offset 48.  The latter two
   words create a plabel for the indirect call to the trampoline.

   A similar sequence is used for the 64-bit port but the plabel is
   at the beginning of the trampoline.

   Finally, the cache entries for the trampoline code are flushed.
   This is necessary to ensure that the trampoline instruction sequence
   is written to memory prior to any attempts at prefetching the code
   sequence.  */

static void
pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx start_addr = gen_reg_rtx (Pmode);
  rtx end_addr = gen_reg_rtx (Pmode);
  rtx line_length = gen_reg_rtx (Pmode);
  rtx r_tramp, tmp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
  r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));

  if (!TARGET_64BIT)
    {
      tmp = adjust_address (m_tramp, Pmode, 36);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 40);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 44);
      emit_move_insn (tmp, r_tramp);
      tmp = adjust_address (m_tramp, Pmode, 48);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      emit_insn (gen_andsi3 (start_addr, r_tramp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_andsi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }
  else
    {
      tmp = adjust_address (m_tramp, Pmode, 56);
      emit_move_insn (tmp, fnaddr);
      tmp = adjust_address (m_tramp, Pmode, 64);
      emit_move_insn (tmp, chain_value);

      /* Create a fat pointer for the trampoline.  */
      tmp = adjust_address (m_tramp, Pmode, 16);
      emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
							    r_tramp, 32)));
      tmp = adjust_address (m_tramp, Pmode, 24);
      emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));

      /* fdc and fic only use registers for the address to flush,
	 they do not accept integer displacements.  We align the
	 start and end addresses to the beginning of their respective
	 cache lines to minimize the number of lines flushed.  */
      tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
      emit_insn (gen_anddi3 (start_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
					     TRAMPOLINE_CODE_SIZE - 1));
      emit_insn (gen_anddi3 (end_addr, tmp,
			     GEN_INT (-MIN_CACHELINE_SIZE)));
      emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
      emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
      emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
				    gen_reg_rtx (Pmode),
				    gen_reg_rtx (Pmode)));
    }

#ifdef HAVE_ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
		     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}

/* Perform any machine-specific adjustment in the address of the trampoline.
   ADDR contains the address that was passed to pa_trampoline_init.
   Adjust the trampoline address to point to the plabel at offset 44;
   the extra 2 sets the plabel bit so that an indirect call through
   the returned pointer is recognized as a call through a plabel.  */

static rtx
pa_trampoline_adjust_address (rtx addr)
{
  if (!TARGET_64BIT)
    addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
  return addr;
}

static rtx
pa_delegitimize_address (rtx orig_x)
{
  rtx x = delegitimize_mem_from_attrs (orig_x);

  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 1)) == UNSPEC
      && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
    return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
  return x;
}

static rtx
pa_internal_arg_pointer (void)
{
  /* The argument pointer and the hard frame pointer are the same in
     the 32-bit runtime, so we don't need a copy.  */
  if (TARGET_64BIT)
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}

/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.  */

static bool
pa_can_eliminate (const int from, const int to)
{
  /* The argument pointer cannot be eliminated in the 64-bit runtime.  */
  if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
    return false;

  return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : true);
}

/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
pa_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
      && to == STACK_POINTER_REGNUM)
    offset = -pa_compute_frame_size (get_frame_size (), 0);
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}

static void
pa_conditional_register_usage (void)
{
  int i;

  if (!TARGET_64BIT && !TARGET_PA_11)
    {
      for (i = 56; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
      for (i = 33; i < 56; i += 2)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
    {
      for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}

/* Target hook for c_mode_for_suffix.  */

static enum machine_mode
pa_c_mode_for_suffix (char suffix)
{
  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      if (suffix == 'q')
	return TFmode;
    }

  return VOIDmode;
}

/* Target hook for function_section.  */

static section *
pa_function_section (tree decl, enum node_frequency freq,
		     bool startup, bool exit)
{
  /* Put functions in text section if target doesn't have named sections.  */
  if (!targetm_common.have_named_sections)
    return text_section;

  /* Force nested functions into the same section as the containing
     function.  */
  if (decl
      && DECL_SECTION_NAME (decl) == NULL_TREE
      && DECL_CONTEXT (decl) != NULL_TREE
      && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
      && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL_TREE)
    return function_section (DECL_CONTEXT (decl));

  /* Otherwise, use the default function section.  */
  return default_function_section (decl, freq, startup, exit);
}

/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   In 64-bit mode, we reject CONST_DOUBLES.  We also reject CONST_INTS
   that need more than three instructions to load prior to reload.  This
   limit is somewhat arbitrary.  It takes three instructions to load a
   CONST_INT from memory but two are memory accesses.  It may be better
   to increase the allowed range for CONST_INTS.  We may also be able
   to handle CONST_DOUBLES.  */

static bool
pa_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
    return false;

  if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
    return false;

  /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
     legitimate constants.  The other variants can't be handled by
     the move patterns after reload starts.  */
  if (pa_tls_referenced_p (x))
    return false;

  if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (TARGET_64BIT
      && HOST_BITS_PER_WIDE_INT > 32
      && GET_CODE (x) == CONST_INT
      && !reload_in_progress
      && !reload_completed
      && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
      && !pa_cint_ok_for_move (INTVAL (x)))
    return false;

  if (function_label_operand (x, mode))
    return false;

  return true;
}

/* Implement TARGET_SECTION_TYPE_FLAGS.  */

static unsigned int
pa_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags;

  flags = default_section_type_flags (decl, name, reloc);

  /* Function labels are placed in the constant pool.  This can
     cause a section conflict if decls are put in ".data.rel.ro"
     or ".data.rel.ro.local" using the __attribute__ construct.  */
  if (strcmp (name, ".data.rel.ro") == 0
      || strcmp (name, ".data.rel.ro.local") == 0)
    flags |= SECTION_WRITE | SECTION_RELRO;

  return flags;
}

/* pa_legitimate_address_p recognizes an RTL expression that is a
   valid memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
   REG+REG, and REG+(REG*SCALE).  The indexed address forms are only
   available with floating point loads and stores, and integer loads.
   We get better code by allowing indexed addresses in the initial
   RTL generation.

   The acceptance of indexed addresses as legitimate implies that we
   must provide patterns for doing indexed integer stores, or the move
   expanders must force the address of an indexed store to a register.
   We have adopted the latter approach.

   Another function of pa_legitimate_address_p is to ensure that
   the base register is a valid pointer for indexed instructions.
   On targets that have non-equivalent space registers, we have to
   know at the time of assembler output which register in a REG+REG
   pair is the base register.  The REG_POINTER flag is sometimes lost
   in reload and the following passes, so it can't be relied on during
   code generation.  Thus, we either have to canonicalize the order
   of the registers in REG+REG indexed addresses, or treat REG+REG
   addresses separately and provide patterns for both permutations.

   The latter approach requires several hundred additional lines of
   code in pa.md.  The downside to canonicalizing is that a PLUS
   in the wrong order can't combine to form a scaled indexed memory
   operand.  As we won't need to canonicalize the operands if the
   REG_POINTER lossage can be fixed, it seems better to canonicalize.

   We initially break out scaled indexed addresses in canonical order
   in pa_emit_move_sequence.  LEGITIMIZE_ADDRESS also canonicalizes
   scaled indexed addresses during RTL generation.  However, fold_rtx
   has its own opinion on how the operands of a PLUS should be ordered.
   If one of the operands is equivalent to a constant, it will make
   that operand the second operand.  As the base register is likely to
   be equivalent to a SYMBOL_REF, we have made it the second operand.

   pa_legitimate_address_p accepts REG+REG as legitimate when the
   operands are in the order INDEX+BASE on targets with non-equivalent
   space registers, and in any order on targets with equivalent space
   registers.  It accepts both MULT+BASE and BASE+MULT for scaled indexing.

   We treat a SYMBOL_REF as legitimate if it is part of the current
   function's constant-pool, because such addresses can actually be
   output as REG+SMALLINT.  */

static bool
pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  if ((REG_P (x)
       && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
		  : REG_OK_FOR_BASE_P (x)))
      || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
	   || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
	  && REG_P (XEXP (x, 0))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
		     : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
    return true;

  if (GET_CODE (x) == PLUS)
    {
      rtx base, index;

      /* For REG+REG, the base register should be in XEXP (x, 1),
	 so check it first.  */
      if (REG_P (XEXP (x, 1))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
		     : REG_OK_FOR_BASE_P (XEXP (x, 1))))
	base = XEXP (x, 1), index = XEXP (x, 0);
      else if (REG_P (XEXP (x, 0))
	       && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
			  : REG_OK_FOR_BASE_P (XEXP (x, 0))))
	base = XEXP (x, 0), index = XEXP (x, 1);
      else
	return false;

      if (GET_CODE (index) == CONST_INT)
	{
	  if (INT_5_BITS (index))
	    return true;

	  /* When INT14_OK_STRICT is false, a secondary reload is needed
	     to adjust the displacement of SImode and DImode floating point
	     instructions but this may fail when the register also needs
	     reloading.  So, we return false when STRICT is true.  We
	     also reject long displacements for float mode addresses since
	     the majority of accesses will use floating point instructions
	     that don't support 14-bit offsets.  */
	  if (!INT14_OK_STRICT
	      && (strict || !(reload_in_progress || reload_completed))
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  return base14_operand (index, mode);
	}

      if (!TARGET_DISABLE_INDEXING
	  /* Only accept the "canonical" INDEX+BASE operand order
	     on targets with non-equivalent space registers.  */
	  && (TARGET_NO_SPACE_REGS
	      ? REG_P (index)
	      : (base == XEXP (x, 1) && REG_P (index)
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (base))
		     || REG_POINTER (base))
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (index))
		     || !REG_POINTER (index))))
	  && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
		     : REG_OK_FOR_INDEX_P (index))
	  && borx_reg_operand (base, Pmode)
	  && borx_reg_operand (index, Pmode))
	return true;

      if (!TARGET_DISABLE_INDEXING
	  && GET_CODE (index) == MULT
	  && MODE_OK_FOR_SCALED_INDEXING_P (mode)
	  && REG_P (XEXP (index, 0))
	  && GET_MODE (XEXP (index, 0)) == Pmode
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
		     : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
	  && GET_CODE (XEXP (index, 1)) == CONST_INT
	  && INTVAL (XEXP (index, 1))
	     == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
	  && borx_reg_operand (base, Pmode))
	return true;

      return false;
    }

  if (GET_CODE (x) == LO_SUM)
    {
      rtx y = XEXP (x, 0);

      if (GET_CODE (y) == SUBREG)
	y = SUBREG_REG (y);

      if (REG_P (y)
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
		     : REG_OK_FOR_BASE_P (y)))
	{
	  /* Needed for -fPIC */
	  if (mode == Pmode
	      && GET_CODE (XEXP (x, 1)) == UNSPEC)
	    return true;

	  if (!INT14_OK_STRICT
	      && (strict || !(reload_in_progress || reload_completed))
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  if (CONSTANT_P (XEXP (x, 1)))
	    return true;
	}
      return false;
    }

  if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
    return true;

  return false;
}
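
/* Editorial examples for the predicate above (not in the original
   sources): (plus (reg) (const_int 12)) is accepted for any mode via
   INT_5_BITS; (plus (mult (reg) (const_int 4)) (reg)) is accepted for
   an SImode access when scaled indexing is enabled for that mode; and
   (lo_sum (reg) (symbol_ref)) is accepted through the LO_SUM path
   since a SYMBOL_REF satisfies CONSTANT_P.  */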

/* Look for machine dependent ways to make the invalid address AD a
   valid address.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= (mask + 1) / 2
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This makes reload inheritance and reload_cse work better since Z
   can be reused.

   There may be more opportunities to improve code with this hook.  */

rtx
pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
			      int opnum, int type,
			      int ind_levels ATTRIBUTE_UNUSED)
{
  long offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
				      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;

  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP ((new_rtx), 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~mask) + mask + 1;
      else
	newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
	  && (GET_MODE_CLASS (mode) == MODE_FLOAT
	      || (TARGET_64BIT && (mode) == DImode)))
	newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
	{
	  temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
			       GEN_INT (newoffset));
	  ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
	  push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	  return ad;
	}
    }

  return NULL_RTX;
}
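
/* Editorial example (not in the original sources): for an SFmode
   access with mask 0x1f and AD == (plus (reg X) (const_int 0x37)),
   offset & mask == 0x17 >= 0x10, so we round up: newoffset == 0x40
   and AD is rewritten as (plus (plus (reg X) (const_int 0x40))
   (const_int -9)).  The inner PLUS is pushed as a reload, so nearby
   accesses off the same rounded base can inherit it.  */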

/* Output address vector.  */

void
pa_output_addr_vec (rtx lab, rtx body)
{
  int idx, vlen = XVECLEN (body, 0);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
	(asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}

/* Output address difference vector.  */

void
pa_output_addr_diff_vec (rtx lab, rtx body)
{
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

  targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
  if (TARGET_GAS)
    fputs ("\t.begin_brtab\n", asm_out_file);
  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
	(asm_out_file,
	 body,
	 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
	 CODE_LABEL_NUMBER (base));
    }
  if (TARGET_GAS)
    fputs ("\t.end_brtab\n", asm_out_file);
}

#include "gt-pa.h"