Merge with gcc-4_3-branch up to revision 175516.
gcc/config/pa/pa.c
/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
					   enum machine_mode,
					   secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
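/* Editor's illustration (not part of GCC): a minimal stand-alone model
   of the parsing loop in fix_range above, using purely numeric register
   names so that atoi can stand in for decode_reg_name.  Guarded out so
   it does not affect compilation.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  char str[] = "60-87,4-5";	/* e.g. from -mfixed-range=60-87,4-5 */
  char *range;

  for (range = strtok (str, ","); range; range = strtok (NULL, ","))
    {
      char *dash = strchr (range, '-');
      if (!dash)
	return 1;		/* each range must have form REG1-REG2 */
      *dash = '\0';
      printf ("fixing registers %d..%d\n", atoi (range), atoi (dash + 1));
    }
  return 0;
}
#endif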
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}
void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform ("-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX_11
  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi can be used.  */
  return (VAL_14_BITS_P (ival)
	  || ldil_cint_p (ival)
	  || zdepi_cint_p (ival));
}

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff the operand OP can be used as the destination operand of
   an integer store.  This also implies the operand could be used as
   the source operand of an integer load.  Symbolic, lo_sum and indexed
   memory operands are not allowed.  We accept reloading pseudos and
   other memory operands.  */
int
integer_store_memory_operand (rtx op, enum machine_mode mode)
{
  return ((reload_in_progress
	   && REG_P (op)
	   && REGNO (op) >= FIRST_PSEUDO_REGISTER
	   && reg_renumber [REGNO (op)] < 0)
	  || (GET_CODE (op) == MEM
	      && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
	      && !symbolic_memory_operand (op, VOIDmode)
	      && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
	      && !IS_INDEX_ADDR_P (XEXP (op, 0))));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
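/* Editor's sketch (not part of GCC): a stand-alone check equivalent to
   ldil_cint_p above, assuming a 64-bit long plays the role of
   HOST_WIDE_INT.  ldil supplies only the left 21 bits of a 32-bit
   value, so the low 11 bits must be zero and bit 31 must match every
   higher bit.  The -1L << 31 idiom mirrors the predicate above.  */
#if 0
#include <assert.h>

static int
ldil_ok (long ival)
{
  long x = ival & ((-1L << 31) | 0x7ff);
  return x == 0 || x == (-1L << 31);
}

int
main (void)
{
  assert (ldil_ok (0x12345800L));	/* low 11 bits clear, sign stable */
  assert (!ldil_ok (0x12345c01L));	/* low 11 bits nonzero */
  assert (ldil_ok (-1L << 31));		/* 0xffffffff80000000: sign stable */
  assert (!ldil_ok (0x80000000L));	/* sign changes when widened */
  return 0;
}
#endif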
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
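/* Editor's trace (not part of GCC) of the bit trick above: x & -x
   isolates the least significant set bit of X; adding it to x >> 4
   propagates a carry through the ones of a sign-extended 5-bit field,
   and masking below the field's low bit drops the residue, so T ends
   up a power of two exactly when zdepi can build X.  */
#if 0
#include <assert.h>

static int
zdepi_ok (unsigned long x)
{
  unsigned long lsb_mask = x & -x;
  unsigned long t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  return (t & (t - 1)) == 0;
}

int
main (void)
{
  assert (zdepi_ok (0x1f0));	/* 11111 << 4: im5 = -1, len 5, pos 4 */
  assert (zdepi_ok (0xfff0));	/* im5 = -1 sign-extended to a 12-bit field */
  assert (!zdepi_ok (0x2f0));	/* 101111: six significant bits, rejected */
  return 0;
}
#endif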
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
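/* Editor's illustration (not part of GCC): both predicates above reduce
   to "is this one contiguous run of ones?", applied to ~mask for the
   AND case and to mask itself for the OR case.  */
#if 0
#include <assert.h>

static int
contiguous_run_p (unsigned long m)
{
  m += m & -m;			/* carry collapses a single run of ones */
  return (m & (m - 1)) == 0;	/* zero or a lone bit remains */
}

int
main (void)
{
  unsigned long and_mask = ~0xff0UL;	/* the 1..10..01..1 pattern */

  assert (contiguous_run_p (0x00ff));	/* ior_mask_p: 0...01...1 */
  assert (contiguous_run_p (0x0ff0));	/* ior_mask_p: 0..01..10..0 */
  assert (!contiguous_run_p (0x0f0f));	/* two runs: rejected */
  assert (contiguous_run_p (~and_mask));/* and_mask_p complements first */
  return 0;
}
#endif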
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */
rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL_OPERAND, orig,
						REG_NOTES (insn));
	  LABEL_NUSES (orig)++;
	}
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, mode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= (mask + 1) / 2
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8 (this allows more shadd insns and shifted
   indexed addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
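/* Editor's worked example (illustration only) of the rounding above:
   for a MODE_INT reference to X + 0x7103, mask is 0x3fff, and
   (0x7103 & 0x3fff) = 0x3103 >= 0x2000, so we round up:
   Y = (0x7103 & ~0x3fff) + 0x3fff + 1 = 0x8000, Z = X + 0x8000, and the
   residual displacement 0x7103 - 0x8000 = -0xefd fits in the 14-bit
   signed field of ldo.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int offset = 0x7103, mask = 0x3fff;
  int y = ((offset & mask) >= (mask + 1) / 2
	   ? (offset & ~mask) + mask + 1	/* round up */
	   : (offset & ~mask));			/* round down */

  printf ("Y = %#x, residual = %d\n", y, offset - y);	/* 0x8000, -3837 */
  return 0;
}
#endif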
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const,
	     then divide it and add the quotient to (reg).  This allows more
	     scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* For the HPPA, REG and REG+CONST addresses are cost 1,
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
							     0),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress)
		  && flag_pic)
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
	    }
	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
1944 tmp = legitimize_tls_address (tmp);
1945 if (addend)
1947 tmp = gen_rtx_PLUS (mode, tmp, addend);
1948 tmp = force_operand (tmp, operands[0]);
1950 operands[1] = tmp;
1952 else if (GET_CODE (operand1) != CONST_INT
1953 || !cint_ok_for_move (INTVAL (operand1)))
1955 rtx insn, temp;
1956 rtx op1 = operand1;
1957 HOST_WIDE_INT value = 0;
1958 HOST_WIDE_INT insv = 0;
1959 int insert = 0;
1961 if (GET_CODE (operand1) == CONST_INT)
1962 value = INTVAL (operand1);
1964 if (TARGET_64BIT
1965 && GET_CODE (operand1) == CONST_INT
1966 && HOST_BITS_PER_WIDE_INT > 32
1967 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1969 HOST_WIDE_INT nval;
1971 /* Extract the low order 32 bits of the value and sign extend.
1972 If the new value is the same as the original value, we can
1973 use the original value as-is. If the new value is
1974 different, we use it and insert the most significant 32 bits
1975 of the original value into the final result. */
1976 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1977 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
1978 if (value != nval)
1980 #if HOST_BITS_PER_WIDE_INT > 32
1981 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1982 #endif
1983 insert = 1;
1984 value = nval;
1985 operand1 = GEN_INT (nval);
1989 if (reload_in_progress || reload_completed)
1990 temp = scratch_reg ? scratch_reg : operand0;
1991 else
1992 temp = gen_reg_rtx (mode);
1994 /* We don't directly split DImode constants on 32-bit targets
1995 because PLUS uses an 11-bit immediate and the insn sequence
1996 generated is not as efficient as the one using HIGH/LO_SUM. */
1997 if (GET_CODE (operand1) == CONST_INT
1998 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
1999 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2000 && !insert)
2002 /* Directly break constant into high and low parts. This
2003 provides better optimization opportunities because various
2004 passes recognize constants split with PLUS but not LO_SUM.
2005 We use a 14-bit signed low part except when the addition
2006 of 0x4000 to the high part might change the sign of the
2007 high part. */
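/* As a rough illustration: for value == 0x1234abcd, low starts as
   0x2bcd and high as 0x12348000; low >= 0x2000, so high becomes
   0x1234c000 and low becomes 0x1234abcd - 0x1234c000 == -5171,
   which still fits in 14 bits. The move then expands to something
   like ldil L'0x1234c000,%reg followed by ldo -5171(%reg),%reg. */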
2008 HOST_WIDE_INT low = value & 0x3fff;
2009 HOST_WIDE_INT high = value & ~ 0x3fff;
2011 if (low >= 0x2000)
2013 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2014 high += 0x2000;
2015 else
2016 high += 0x4000;
2019 low = value - high;
2021 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2022 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2024 else
2026 emit_insn (gen_rtx_SET (VOIDmode, temp,
2027 gen_rtx_HIGH (mode, operand1)));
2028 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2031 insn = emit_move_insn (operands[0], operands[1]);
2033 /* Now insert the most significant 32 bits of the value
2034 into the register. When we don't have a second register
2035 available, it could take up to nine instructions to load
2036 a 64-bit integer constant. Prior to reload, we force
2037 constants that would take more than three instructions
2038 to load to the constant pool. During and after reload,
2039 we have to handle all possible values. */
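/* Schematically, the depdi loop below peels INSV into sign-extended
   5-bit groups, widening a group for as long as the following bits
   match its sign bit. An INSV of -1 therefore collapses into a
   single depdi of one 32-bit field, while a bit pattern that
   alternates every few positions can need several depdi insns. */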
2040 if (insert)
2042 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2043 register and the value to be inserted is outside the
2044 range that can be loaded with three depdi instructions. */
2045 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2047 operand1 = GEN_INT (insv);
2049 emit_insn (gen_rtx_SET (VOIDmode, temp,
2050 gen_rtx_HIGH (mode, operand1)));
2051 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2052 emit_insn (gen_insv (operand0, GEN_INT (32),
2053 const0_rtx, temp));
2055 else
2057 int len = 5, pos = 27;
2059 /* Insert the bits using the depdi instruction. */
2060 while (pos >= 0)
2062 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2063 HOST_WIDE_INT sign = v5 < 0;
2065 /* Left extend the insertion. */
2066 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2067 while (pos > 0 && (insv & 1) == sign)
2069 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2070 len += 1;
2071 pos -= 1;
2074 emit_insn (gen_insv (operand0, GEN_INT (len),
2075 GEN_INT (pos), GEN_INT (v5)));
2077 len = pos > 0 && pos < 5 ? pos : 5;
2078 pos -= len;
2083 set_unique_reg_note (insn, REG_EQUAL, op1);
2085 return 1;
2088 /* Now have insn-emit do whatever it normally does. */
2089 return 0;
2092 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2093 it will need a link/runtime reloc). */
2095 int
2096 reloc_needed (tree exp)
2098 int reloc = 0;
2100 switch (TREE_CODE (exp))
2102 case ADDR_EXPR:
2103 return 1;
2105 case POINTER_PLUS_EXPR:
2106 case PLUS_EXPR:
2107 case MINUS_EXPR:
2108 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2109 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2110 break;
2112 case NOP_EXPR:
2113 case CONVERT_EXPR:
2114 case NON_LVALUE_EXPR:
2115 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2116 break;
2118 case CONSTRUCTOR:
2120 tree value;
2121 unsigned HOST_WIDE_INT ix;
2123 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2124 if (value)
2125 reloc |= reloc_needed (value);
2127 break;
2129 case ERROR_MARK:
2130 break;
2132 default:
2133 break;
2135 return reloc;
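/* For instance, a static initializer such as &x or &x + 4 reaches
   this function as an ADDR_EXPR (possibly under a POINTER_PLUS_EXPR
   or PLUS_EXPR) and yields 1, while a plain integer constant or an
   arithmetic expression over constants yields 0. */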
2138 /* Does operand (which is a symbolic_operand) live in text space?
2139 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2140 will be true. */
2142 int
2143 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2145 if (GET_CODE (operand) == CONST)
2146 operand = XEXP (XEXP (operand, 0), 0);
2147 if (flag_pic)
2149 if (GET_CODE (operand) == SYMBOL_REF)
2150 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2152 else
2154 if (GET_CODE (operand) == SYMBOL_REF)
2155 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2157 return 1;
2161 /* Return the best assembler insn template
2162 for moving operands[1] into operands[0] as a fullword. */
2163 const char *
2164 singlemove_string (rtx *operands)
2166 HOST_WIDE_INT intval;
2168 if (GET_CODE (operands[0]) == MEM)
2169 return "stw %r1,%0";
2170 if (GET_CODE (operands[1]) == MEM)
2171 return "ldw %1,%0";
2172 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2174 long i;
2175 REAL_VALUE_TYPE d;
2177 gcc_assert (GET_MODE (operands[1]) == SFmode);
2179 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2180 bit pattern. */
2181 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2182 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2184 operands[1] = GEN_INT (i);
2185 /* Fall through to CONST_INT case. */
2187 if (GET_CODE (operands[1]) == CONST_INT)
2189 intval = INTVAL (operands[1]);
2191 if (VAL_14_BITS_P (intval))
2192 return "ldi %1,%0";
2193 else if ((intval & 0x7ff) == 0)
2194 return "ldil L'%1,%0";
2195 else if (zdepi_cint_p (intval))
2196 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2197 else
2198 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2200 return "copy %1,%0";
2204 /* Compute position (in OP[1]) and width (in OP[2])
2205 useful for copying IMM to a register using the zdepi
2206 instructions. Store the immediate value to insert in OP[0]. */
2207 static void
2208 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2210 int lsb, len;
2212 /* Find the least significant set bit in IMM. */
2213 for (lsb = 0; lsb < 32; lsb++)
2215 if ((imm & 1) != 0)
2216 break;
2217 imm >>= 1;
2220 /* Choose variants based on *sign* of the 5-bit field. */
2221 if ((imm & 0x10) == 0)
2222 len = (lsb <= 28) ? 4 : 32 - lsb;
2223 else
2225 /* Find the width of the bitstring in IMM. */
2226 for (len = 5; len < 32 - lsb; len++)
2228 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2229 break;
2232 /* Sign extend IMM as a 5-bit value. */
2233 imm = (imm & 0xf) - 0x10;
2236 op[0] = imm;
2237 op[1] = 31 - lsb;
2238 op[2] = len;
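/* Worked example: for IMM == 0x00ff0000 the least significant set
   bit is 16, the run of ones is 8 bits wide, and bit 4 of the
   shifted value is set, so we produce op[0] == -1, op[1] == 15
   (31 - lsb) and op[2] == 8: deposit eight one bits into the field
   ending at PA bit position 15. */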
2241 /* Compute position (in OP[1]) and width (in OP[2])
2242 useful for copying IMM to a register using the depdi,z
2243 instructions. Store the immediate value to insert in OP[0]. */
2244 void
2245 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2247 int lsb, len, maxlen;
2249 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2251 /* Find the least significant set bit in IMM. */
2252 for (lsb = 0; lsb < maxlen; lsb++)
2254 if ((imm & 1) != 0)
2255 break;
2256 imm >>= 1;
2259 /* Choose variants based on *sign* of the 5-bit field. */
2260 if ((imm & 0x10) == 0)
2261 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2262 else
2264 /* Find the width of the bitstring in IMM. */
2265 for (len = 5; len < maxlen - lsb; len++)
2267 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2268 break;
2271 /* Extend length if host is narrow and IMM is negative. */
2272 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2273 len += 32;
2275 /* Sign extend IMM as a 5-bit value. */
2276 imm = (imm & 0xf) - 0x10;
2279 op[0] = imm;
2280 op[1] = 63 - lsb;
2281 op[2] = len;
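/* The same example in 64-bit form: IMM == 0x00ff0000 again gives
   op[0] == -1 and op[2] == 8, but op[1] becomes 47 (63 - lsb) to
   match the 64-bit position numbering used by depdi. */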
2284 /* Output assembler code to perform a doubleword move insn
2285 with operands OPERANDS. */
2287 const char *
2288 output_move_double (rtx *operands)
2290 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2291 rtx latehalf[2];
2292 rtx addreg0 = 0, addreg1 = 0;
2294 /* First classify both operands. */
2296 if (REG_P (operands[0]))
2297 optype0 = REGOP;
2298 else if (offsettable_memref_p (operands[0]))
2299 optype0 = OFFSOP;
2300 else if (GET_CODE (operands[0]) == MEM)
2301 optype0 = MEMOP;
2302 else
2303 optype0 = RNDOP;
2305 if (REG_P (operands[1]))
2306 optype1 = REGOP;
2307 else if (CONSTANT_P (operands[1]))
2308 optype1 = CNSTOP;
2309 else if (offsettable_memref_p (operands[1]))
2310 optype1 = OFFSOP;
2311 else if (GET_CODE (operands[1]) == MEM)
2312 optype1 = MEMOP;
2313 else
2314 optype1 = RNDOP;
2316 /* Check for the cases that the operand constraints are not
2317 supposed to allow to happen. */
2318 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2320 /* Handle copies between general and floating registers. */
2322 if (optype0 == REGOP && optype1 == REGOP
2323 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2325 if (FP_REG_P (operands[0]))
2327 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2328 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2329 return "{fldds|fldd} -16(%%sp),%0";
2331 else
2333 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2334 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2335 return "{ldws|ldw} -12(%%sp),%R0";
2339 /* Handle auto decrementing and incrementing loads and stores
2340 specifically, since the structure of the function doesn't work
2341 for them without major modification. Do it better when we teach
2342 this port about the general inc/dec addressing of PA.
2343 (This was written by tege. Chide him if it doesn't work.) */
2345 if (optype0 == MEMOP)
2347 /* We have to output the address syntax ourselves, since print_operand
2348 doesn't deal with the addresses we want to use. Fix this later. */
2350 rtx addr = XEXP (operands[0], 0);
2351 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2353 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2355 operands[0] = XEXP (addr, 0);
2356 gcc_assert (GET_CODE (operands[1]) == REG
2357 && GET_CODE (operands[0]) == REG);
2359 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2361 /* No overlap between high target register and address
2362 register. (We do this in a non-obvious way to
2363 save a register file writeback) */
2364 if (GET_CODE (addr) == POST_INC)
2365 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2366 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2368 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2370 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2372 operands[0] = XEXP (addr, 0);
2373 gcc_assert (GET_CODE (operands[1]) == REG
2374 && GET_CODE (operands[0]) == REG);
2376 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2377 /* No overlap between high target register and address
2378 register. (We do this in a non-obvious way to save a
2379 register file writeback) */
2380 if (GET_CODE (addr) == PRE_INC)
2381 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2382 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2385 if (optype1 == MEMOP)
2387 /* We have to output the address syntax ourselves, since print_operand
2388 doesn't deal with the addresses we want to use. Fix this later. */
2390 rtx addr = XEXP (operands[1], 0);
2391 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2393 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2395 operands[1] = XEXP (addr, 0);
2396 gcc_assert (GET_CODE (operands[0]) == REG
2397 && GET_CODE (operands[1]) == REG);
2399 if (!reg_overlap_mentioned_p (high_reg, addr))
2401 /* No overlap between high target register and address
2402 register. (We do this in a non-obvious way to
2403 save a register file writeback) */
2404 if (GET_CODE (addr) == POST_INC)
2405 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2406 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2408 else
2410 /* This is an undefined situation. We should load into the
2411 address register *and* update that register. Probably
2412 we don't need to handle this at all. */
2413 if (GET_CODE (addr) == POST_INC)
2414 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2415 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2418 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2420 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2422 operands[1] = XEXP (addr, 0);
2423 gcc_assert (GET_CODE (operands[0]) == REG
2424 && GET_CODE (operands[1]) == REG);
2426 if (!reg_overlap_mentioned_p (high_reg, addr))
2428 /* No overlap between high target register and address
2429 register. (We do this in a non-obvious way to
2430 save a register file writeback) */
2431 if (GET_CODE (addr) == PRE_INC)
2432 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2433 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2435 else
2437 /* This is an undefined situation. We should load into the
2438 address register *and* update that register. Probably
2439 we don't need to handle this at all. */
2440 if (GET_CODE (addr) == PRE_INC)
2441 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2442 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2445 else if (GET_CODE (addr) == PLUS
2446 && GET_CODE (XEXP (addr, 0)) == MULT)
2448 rtx xoperands[4];
2449 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2451 if (!reg_overlap_mentioned_p (high_reg, addr))
2453 xoperands[0] = high_reg;
2454 xoperands[1] = XEXP (addr, 1);
2455 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2456 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2457 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2458 xoperands);
2459 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2461 else
2463 xoperands[0] = high_reg;
2464 xoperands[1] = XEXP (addr, 1);
2465 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2466 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2467 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2468 xoperands);
2469 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2474 /* If an operand is an unoffsettable memory ref, find a register
2475 we can increment temporarily to make it refer to the second word. */
2477 if (optype0 == MEMOP)
2478 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2480 if (optype1 == MEMOP)
2481 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2483 /* Ok, we can do one word at a time.
2484 Normally we do the low-numbered word first.
2486 In either case, set up in LATEHALF the operands to use
2487 for the high-numbered word and in some cases alter the
2488 operands in OPERANDS to be suitable for the low-numbered word. */
2490 if (optype0 == REGOP)
2491 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2492 else if (optype0 == OFFSOP)
2493 latehalf[0] = adjust_address (operands[0], SImode, 4);
2494 else
2495 latehalf[0] = operands[0];
2497 if (optype1 == REGOP)
2498 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2499 else if (optype1 == OFFSOP)
2500 latehalf[1] = adjust_address (operands[1], SImode, 4);
2501 else if (optype1 == CNSTOP)
2502 split_double (operands[1], &operands[1], &latehalf[1]);
2503 else
2504 latehalf[1] = operands[1];
2506 /* If the first move would clobber the source of the second one,
2507 do them in the other order.
2509 This can happen in two cases:
2511 mem -> register where the first half of the destination register
2512 is the same register used in the memory's address. Reload
2513 can create such insns.
2515 mem in this case will be either register indirect or register
2516 indirect plus a valid offset.
2518 register -> register move where REGNO(dst) == REGNO(src + 1)
2519 someone (Tim/Tege?) claimed this can happen for parameter loads.
2521 Handle mem -> register case first. */
2522 if (optype0 == REGOP
2523 && (optype1 == MEMOP || optype1 == OFFSOP)
2524 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2525 operands[1], 0))
2527 /* Do the late half first. */
2528 if (addreg1)
2529 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2530 output_asm_insn (singlemove_string (latehalf), latehalf);
2532 /* Then clobber. */
2533 if (addreg1)
2534 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2535 return singlemove_string (operands);
2538 /* Now handle register -> register case. */
2539 if (optype0 == REGOP && optype1 == REGOP
2540 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2542 output_asm_insn (singlemove_string (latehalf), latehalf);
2543 return singlemove_string (operands);
2546 /* Normal case: do the two words, low-numbered first. */
2548 output_asm_insn (singlemove_string (operands), operands);
2550 /* Make any unoffsettable addresses point at high-numbered word. */
2551 if (addreg0)
2552 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2553 if (addreg1)
2554 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2556 /* Do that word. */
2557 output_asm_insn (singlemove_string (latehalf), latehalf);
2559 /* Undo the adds we just did. */
2560 if (addreg0)
2561 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2562 if (addreg1)
2563 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2565 return "";
2568 const char *
2569 output_fp_move_double (rtx *operands)
2571 if (FP_REG_P (operands[0]))
2573 if (FP_REG_P (operands[1])
2574 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2575 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2576 else
2577 output_asm_insn ("fldd%F1 %1,%0", operands);
2579 else if (FP_REG_P (operands[1]))
2581 output_asm_insn ("fstd%F0 %1,%0", operands);
2583 else
2585 rtx xoperands[2];
2587 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2589 /* This is a pain. You have to be prepared to deal with an
2590 arbitrary address here including pre/post increment/decrement.
2592 So avoid this in the MD. */
2593 gcc_assert (GET_CODE (operands[0]) == REG);
2595 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2596 xoperands[0] = operands[0];
2597 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2599 return "";
2602 /* Return a REG that occurs in ADDR with coefficient 1.
2603 ADDR can be effectively incremented by incrementing REG. */
2605 static rtx
2606 find_addr_reg (rtx addr)
2608 while (GET_CODE (addr) == PLUS)
2610 if (GET_CODE (XEXP (addr, 0)) == REG)
2611 addr = XEXP (addr, 0);
2612 else if (GET_CODE (XEXP (addr, 1)) == REG)
2613 addr = XEXP (addr, 1);
2614 else if (CONSTANT_P (XEXP (addr, 0)))
2615 addr = XEXP (addr, 1);
2616 else if (CONSTANT_P (XEXP (addr, 1)))
2617 addr = XEXP (addr, 0);
2618 else
2619 gcc_unreachable ();
2621 gcc_assert (GET_CODE (addr) == REG);
2622 return addr;
2625 /* Emit code to perform a block move.
2627 OPERANDS[0] is the destination pointer as a REG, clobbered.
2628 OPERANDS[1] is the source pointer as a REG, clobbered.
2629 OPERANDS[2] is a register for temporary storage.
2630 OPERANDS[3] is a register for temporary storage.
2631 OPERANDS[4] is the size as a CONST_INT
2632 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2633 OPERANDS[6] is another temporary register. */
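/* As a sketch of the align == 4 path: a 16-byte copy preloads the
   counter with 8 via ldi, then the ldw,ma/ldw,ma/stw,ma/addib,>=/
   stw,ma loop runs twice (counter 8 -> 0 -> -8), moving eight bytes
   per iteration and leaving no residual. */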
2635 const char *
2636 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2638 int align = INTVAL (operands[5]);
2639 unsigned long n_bytes = INTVAL (operands[4]);
2641 /* We can't move more than a word at a time because the PA
2642 has no integer move insns wider than a word. (Could use fp mem ops?) */
2643 if (align > (TARGET_64BIT ? 8 : 4))
2644 align = (TARGET_64BIT ? 8 : 4);
2646 /* Note that we know each loop below will execute at least twice
2647 (else we would have open-coded the copy). */
2648 switch (align)
2650 case 8:
2651 /* Pre-adjust the loop counter. */
2652 operands[4] = GEN_INT (n_bytes - 16);
2653 output_asm_insn ("ldi %4,%2", operands);
2655 /* Copying loop. */
2656 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2657 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2658 output_asm_insn ("std,ma %3,8(%0)", operands);
2659 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2660 output_asm_insn ("std,ma %6,8(%0)", operands);
2662 /* Handle the residual. There could be up to 15 bytes of
2663 residual to copy! */
2664 if (n_bytes % 16 != 0)
2666 operands[4] = GEN_INT (n_bytes % 8);
2667 if (n_bytes % 16 >= 8)
2668 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2669 if (n_bytes % 8 != 0)
2670 output_asm_insn ("ldd 0(%1),%6", operands);
2671 if (n_bytes % 16 >= 8)
2672 output_asm_insn ("std,ma %3,8(%0)", operands);
2673 if (n_bytes % 8 != 0)
2674 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2676 return "";
2678 case 4:
2679 /* Pre-adjust the loop counter. */
2680 operands[4] = GEN_INT (n_bytes - 8);
2681 output_asm_insn ("ldi %4,%2", operands);
2683 /* Copying loop. */
2684 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2685 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2686 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2687 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2688 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2690 /* Handle the residual. There could be up to 7 bytes of
2691 residual to copy! */
2692 if (n_bytes % 8 != 0)
2694 operands[4] = GEN_INT (n_bytes % 4);
2695 if (n_bytes % 8 >= 4)
2696 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2697 if (n_bytes % 4 != 0)
2698 output_asm_insn ("ldw 0(%1),%6", operands);
2699 if (n_bytes % 8 >= 4)
2700 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2701 if (n_bytes % 4 != 0)
2702 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2704 return "";
2706 case 2:
2707 /* Pre-adjust the loop counter. */
2708 operands[4] = GEN_INT (n_bytes - 4);
2709 output_asm_insn ("ldi %4,%2", operands);
2711 /* Copying loop. */
2712 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2713 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2714 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2715 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2716 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2718 /* Handle the residual. */
2719 if (n_bytes % 4 != 0)
2721 if (n_bytes % 4 >= 2)
2722 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2723 if (n_bytes % 2 != 0)
2724 output_asm_insn ("ldb 0(%1),%6", operands);
2725 if (n_bytes % 4 >= 2)
2726 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2727 if (n_bytes % 2 != 0)
2728 output_asm_insn ("stb %6,0(%0)", operands);
2730 return "";
2732 case 1:
2733 /* Pre-adjust the loop counter. */
2734 operands[4] = GEN_INT (n_bytes - 2);
2735 output_asm_insn ("ldi %4,%2", operands);
2737 /* Copying loop. */
2738 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2739 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2740 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2741 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2742 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2744 /* Handle the residual. */
2745 if (n_bytes % 2 != 0)
2747 output_asm_insn ("ldb 0(%1),%3", operands);
2748 output_asm_insn ("stb %3,0(%0)", operands);
2750 return "";
2752 default:
2753 gcc_unreachable ();
2757 /* Count the number of insns necessary to handle this block move.
2759 Basic structure is the same as output_block_move, except that we
2760 count insns rather than emit them. */
2762 static int
2763 compute_movmem_length (rtx insn)
2765 rtx pat = PATTERN (insn);
2766 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2767 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2768 unsigned int n_insns = 0;
2770 /* We can't move more than a word at a time because the PA
2771 has no integer move insns wider than a word. (Could use fp mem ops?) */
2772 if (align > (TARGET_64BIT ? 8 : 4))
2773 align = (TARGET_64BIT ? 8 : 4);
2775 /* The basic copying loop. */
2776 n_insns = 6;
2778 /* Residuals. */
2779 if (n_bytes % (2 * align) != 0)
2781 if ((n_bytes % (2 * align)) >= align)
2782 n_insns += 2;
2784 if ((n_bytes % align) != 0)
2785 n_insns += 2;
2788 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2789 return n_insns * 4;
2792 /* Emit code to perform a block clear.
2794 OPERANDS[0] is the destination pointer as a REG, clobbered.
2795 OPERANDS[1] is a register for temporary storage.
2796 OPERANDS[2] is the size as a CONST_INT
2797 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2799 const char *
2800 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2802 int align = INTVAL (operands[3]);
2803 unsigned long n_bytes = INTVAL (operands[2]);
2805 /* We can't clear more than a word at a time because the PA
2806 has no integer move insns wider than a word. */
2807 if (align > (TARGET_64BIT ? 8 : 4))
2808 align = (TARGET_64BIT ? 8 : 4);
2810 /* Note that we know each loop below will execute at least twice
2811 (else we would have open-coded the clear). */
2812 switch (align)
2814 case 8:
2815 /* Pre-adjust the loop counter. */
2816 operands[2] = GEN_INT (n_bytes - 16);
2817 output_asm_insn ("ldi %2,%1", operands);
2819 /* Loop. */
2820 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2821 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2822 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2824 /* Handle the residual. There could be up to 15 bytes of
2825 residual to clear! */
2826 if (n_bytes % 16 != 0)
2828 operands[2] = GEN_INT (n_bytes % 8);
2829 if (n_bytes % 16 >= 8)
2830 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2831 if (n_bytes % 8 != 0)
2832 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2834 return "";
2836 case 4:
2837 /* Pre-adjust the loop counter. */
2838 operands[2] = GEN_INT (n_bytes - 8);
2839 output_asm_insn ("ldi %2,%1", operands);
2841 /* Loop. */
2842 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2843 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2844 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2846 /* Handle the residual. There could be up to 7 bytes of
2847 residual to clear! */
2848 if (n_bytes % 8 != 0)
2850 operands[2] = GEN_INT (n_bytes % 4);
2851 if (n_bytes % 8 >= 4)
2852 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2853 if (n_bytes % 4 != 0)
2854 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2856 return "";
2858 case 2:
2859 /* Pre-adjust the loop counter. */
2860 operands[2] = GEN_INT (n_bytes - 4);
2861 output_asm_insn ("ldi %2,%1", operands);
2863 /* Loop. */
2864 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2865 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2866 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2868 /* Handle the residual. */
2869 if (n_bytes % 4 != 0)
2871 if (n_bytes % 4 >= 2)
2872 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2873 if (n_bytes % 2 != 0)
2874 output_asm_insn ("stb %%r0,0(%0)", operands);
2876 return "";
2878 case 1:
2879 /* Pre-adjust the loop counter. */
2880 operands[2] = GEN_INT (n_bytes - 2);
2881 output_asm_insn ("ldi %2,%1", operands);
2883 /* Loop. */
2884 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2885 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2886 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2888 /* Handle the residual. */
2889 if (n_bytes % 2 != 0)
2890 output_asm_insn ("stb %%r0,0(%0)", operands);
2892 return "";
2894 default:
2895 gcc_unreachable ();
2899 /* Count the number of insns necessary to handle this block clear.
2901 Basic structure is the same as output_block_clear, except that we
2902 count insns rather than emit them. */
2904 static int
2905 compute_clrmem_length (rtx insn)
2907 rtx pat = PATTERN (insn);
2908 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2909 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2910 unsigned int n_insns = 0;
2912 /* We can't clear more than a word at a time because the PA
2913 has no longer integer move insns. */
2914 if (align > (TARGET_64BIT ? 8 : 4))
2915 align = (TARGET_64BIT ? 8 : 4);
2917 /* The basic loop. */
2918 n_insns = 4;
2920 /* Residuals. */
2921 if (n_bytes % (2 * align) != 0)
2923 if ((n_bytes % (2 * align)) >= align)
2924 n_insns++;
2926 if ((n_bytes % align) != 0)
2927 n_insns++;
2930 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2931 return n_insns * 4;
2935 const char *
2936 output_and (rtx *operands)
2938 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2940 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2941 int ls0, ls1, ms0, p, len;
2943 for (ls0 = 0; ls0 < 32; ls0++)
2944 if ((mask & (1 << ls0)) == 0)
2945 break;
2947 for (ls1 = ls0; ls1 < 32; ls1++)
2948 if ((mask & (1 << ls1)) != 0)
2949 break;
2951 for (ms0 = ls1; ms0 < 32; ms0++)
2952 if ((mask & (1 << ms0)) == 0)
2953 break;
2955 gcc_assert (ms0 == 32);
2957 if (ls1 == 32)
2959 len = ls0;
2961 gcc_assert (len);
2963 operands[2] = GEN_INT (len);
2964 return "{extru|extrw,u} %1,31,%2,%0";
2966 else
2968 /* We could use this `depi' for the case above as well, but `depi'
2969 requires one more register file access than an `extru'. */
2971 p = 31 - ls0;
2972 len = ls1 - ls0;
2974 operands[2] = GEN_INT (p);
2975 operands[3] = GEN_INT (len);
2976 return "{depi|depwi} 0,%2,%3,%0";
2979 else
2980 return "and %1,%2,%0";
2983 /* Return a string to perform a bitwise-and of operands[1] with
2984 operands[2], storing the result in operands[0]. */
2985 const char *
2986 output_64bit_and (rtx *operands)
2988 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2990 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2991 int ls0, ls1, ms0, p, len;
2993 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2994 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2995 break;
2997 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2998 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2999 break;
3001 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3002 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3003 break;
3005 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3007 if (ls1 == HOST_BITS_PER_WIDE_INT)
3009 len = ls0;
3011 gcc_assert (len);
3013 operands[2] = GEN_INT (len);
3014 return "extrd,u %1,63,%2,%0";
3016 else
3018 /* We could use this `depdi' for the case above as well, but `depdi'
3019 requires one more register file access than an `extrd,u'. */
3021 p = 63 - ls0;
3022 len = ls1 - ls0;
3024 operands[2] = GEN_INT (p);
3025 operands[3] = GEN_INT (len);
3026 return "depdi 0,%2,%3,%0";
3029 else
3030 return "and %1,%2,%0";
3033 const char *
3034 output_ior (rtx *operands)
3036 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3037 int bs0, bs1, p, len;
3039 if (INTVAL (operands[2]) == 0)
3040 return "copy %1,%0";
3042 for (bs0 = 0; bs0 < 32; bs0++)
3043 if ((mask & (1 << bs0)) != 0)
3044 break;
3046 for (bs1 = bs0; bs1 < 32; bs1++)
3047 if ((mask & (1 << bs1)) == 0)
3048 break;
3050 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3052 p = 31 - bs0;
3053 len = bs1 - bs0;
3055 operands[2] = GEN_INT (p);
3056 operands[3] = GEN_INT (len);
3057 return "{depi|depwi} -1,%2,%3,%0";
3060 /* Return a string to perform a bitwise inclusive-or of operands[1]
3061 with operands[2], storing the result in operands[0]. */
3062 const char *
3063 output_64bit_ior (rtx *operands)
3065 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3066 int bs0, bs1, p, len;
3068 if (INTVAL (operands[2]) == 0)
3069 return "copy %1,%0";
3071 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3072 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3073 break;
3075 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3076 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3077 break;
3079 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3080 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3082 p = 63 - bs0;
3083 len = bs1 - bs0;
3085 operands[2] = GEN_INT (p);
3086 operands[3] = GEN_INT (len);
3087 return "depdi -1,%2,%3,%0";
3090 /* Target hook for assembling integer objects. This code handles
3091 aligned SI and DI integers specially since function references
3092 must be preceded by P%. */
3094 static bool
3095 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3097 if (size == UNITS_PER_WORD
3098 && aligned_p
3099 && function_label_operand (x, VOIDmode))
3101 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3102 output_addr_const (asm_out_file, x);
3103 fputc ('\n', asm_out_file);
3104 return true;
3106 return default_assemble_integer (x, size, aligned_p);
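/* For instance, a word-sized static initializer holding the address
   of a function foo would be emitted as "\t.word\tP%foo" (.dword
   when SIZE is 8) so the linker builds a plabel for it; everything
   else falls through to the default handler. */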
3109 /* Output an ascii string. */
3110 void
3111 output_ascii (FILE *file, const char *p, int size)
3113 int i;
3114 int chars_output;
3115 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3117 /* The HP assembler can only take strings of 256 characters at one
3118 time. This is a limitation on input line length, *not* the
3119 length of the string. Sigh. Even worse, it seems that the
3120 restriction is in number of input characters (see \xnn &
3121 \whatever). So we have to do this very carefully. */
3123 fputs ("\t.STRING \"", file);
3125 chars_output = 0;
3126 for (i = 0; i < size; i += 4)
3128 int co = 0;
3129 int io = 0;
3130 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3132 register unsigned int c = (unsigned char) p[i + io];
3134 if (c == '\"' || c == '\\')
3135 partial_output[co++] = '\\';
3136 if (c >= ' ' && c < 0177)
3137 partial_output[co++] = c;
3138 else
3140 unsigned int hexd;
3141 partial_output[co++] = '\\';
3142 partial_output[co++] = 'x';
3143 hexd = c / 16 - 0 + '0';
3144 if (hexd > '9')
3145 hexd -= '9' - 'a' + 1;
3146 partial_output[co++] = hexd;
3147 hexd = c % 16 - 0 + '0';
3148 if (hexd > '9')
3149 hexd -= '9' - 'a' + 1;
3150 partial_output[co++] = hexd;
3153 if (chars_output + co > 243)
3155 fputs ("\"\n\t.STRING \"", file);
3156 chars_output = 0;
3158 fwrite (partial_output, 1, (size_t) co, file);
3159 chars_output += co;
3160 co = 0;
3162 fputs ("\"\n", file);
3165 /* Try to rewrite floating point comparisons & branches to avoid
3166 useless add,tr insns.
3168 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3169 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3170 first attempt to remove useless add,tr insns. It is zero
3171 for the second pass as reorg sometimes leaves bogus REG_DEAD
3172 notes lying around.
3174 When CHECK_NOTES is zero we can only eliminate add,tr insns
3175 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3176 instructions. */
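/* Schematically: given an fcmp that sets %fcc (register zero)
   followed by a jump of the form (if_then_else (ne %fcc 0) (pc)
   (label)), the code below swaps the jump's arms and replaces the
   comparison code with its reverse_condition_maybe_unordered, so
   the branch no longer needs an add,tr to invert its sense. */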
3177 static void
3178 remove_useless_addtr_insns (int check_notes)
3180 rtx insn;
3181 static int pass = 0;
3183 /* This is fairly cheap, so always run it when optimizing. */
3184 if (optimize > 0)
3186 int fcmp_count = 0;
3187 int fbranch_count = 0;
3189 /* Walk all the insns in this function looking for fcmp & fbranch
3190 instructions. Keep track of how many of each we find. */
3191 for (insn = get_insns (); insn; insn = next_insn (insn))
3193 rtx tmp;
3195 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3196 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3197 continue;
3199 tmp = PATTERN (insn);
3201 /* It must be a set. */
3202 if (GET_CODE (tmp) != SET)
3203 continue;
3205 /* If the destination is CCFP, then we've found an fcmp insn. */
3206 tmp = SET_DEST (tmp);
3207 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3209 fcmp_count++;
3210 continue;
3213 tmp = PATTERN (insn);
3214 /* If this is an fbranch instruction, bump the fbranch counter. */
3215 if (GET_CODE (tmp) == SET
3216 && SET_DEST (tmp) == pc_rtx
3217 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3218 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3219 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3220 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3222 fbranch_count++;
3223 continue;
3228 /* Find all floating point compare + branch insns. If possible,
3229 reverse the comparison & the branch to avoid add,tr insns. */
3230 for (insn = get_insns (); insn; insn = next_insn (insn))
3232 rtx tmp, next;
3234 /* Ignore anything that isn't an INSN. */
3235 if (GET_CODE (insn) != INSN)
3236 continue;
3238 tmp = PATTERN (insn);
3240 /* It must be a set. */
3241 if (GET_CODE (tmp) != SET)
3242 continue;
3244 /* The destination must be CCFP, which is register zero. */
3245 tmp = SET_DEST (tmp);
3246 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3247 continue;
3249 /* INSN should be a set of CCFP.
3251 See if the result of this insn is used in a reversed FP
3252 conditional branch. If so, reverse our condition and
3253 the branch. Doing so avoids useless add,tr insns. */
3254 next = next_insn (insn);
3255 while (next)
3257 /* Jumps, calls and labels stop our search. */
3258 if (GET_CODE (next) == JUMP_INSN
3259 || GET_CODE (next) == CALL_INSN
3260 || GET_CODE (next) == CODE_LABEL)
3261 break;
3263 /* As does another fcmp insn. */
3264 if (GET_CODE (next) == INSN
3265 && GET_CODE (PATTERN (next)) == SET
3266 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3267 && REGNO (SET_DEST (PATTERN (next))) == 0)
3268 break;
3270 next = next_insn (next);
3273 /* Is NEXT a branch insn? */
3274 if (next
3275 && GET_CODE (next) == JUMP_INSN)
3277 rtx pattern = PATTERN (next);
3279 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3280 and CCFP dies, then reverse our conditional and the branch
3281 to avoid the add,tr. */
3282 if (GET_CODE (pattern) == SET
3283 && SET_DEST (pattern) == pc_rtx
3284 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3285 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3286 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3287 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3288 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3289 && (fcmp_count == fbranch_count
3290 || (check_notes
3291 && find_regno_note (next, REG_DEAD, 0))))
3293 /* Reverse the branch. */
3294 tmp = XEXP (SET_SRC (pattern), 1);
3295 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3296 XEXP (SET_SRC (pattern), 2) = tmp;
3297 INSN_CODE (next) = -1;
3299 /* Reverse our condition. */
3300 tmp = PATTERN (insn);
3301 PUT_CODE (XEXP (tmp, 1),
3302 (reverse_condition_maybe_unordered
3303 (GET_CODE (XEXP (tmp, 1)))));
3309 pass = !pass;
3313 /* You may have trouble believing this, but this is the 32 bit HP-PA
3314 stack layout. Wow.
3316 Offset Contents
3318 Variable arguments (optional; any number may be allocated)
3320 SP-(4*(N+9)) arg word N
3322 SP-56 arg word 5
3323 SP-52 arg word 4
3325 Fixed arguments (must be allocated; may remain unused)
3327 SP-48 arg word 3
3328 SP-44 arg word 2
3329 SP-40 arg word 1
3330 SP-36 arg word 0
3332 Frame Marker
3334 SP-32 External Data Pointer (DP)
3335 SP-28 External sr4
3336 SP-24 External/stub RP (RP')
3337 SP-20 Current RP
3338 SP-16 Static Link
3339 SP-12 Clean up
3340 SP-8 Calling Stub RP (RP'')
3341 SP-4 Previous SP
3343 Top of Frame
3345 SP-0 Stack Pointer (points to next available address)
3349 /* This function saves registers as follows. Registers marked with ' are
3350 this function's registers (as opposed to the previous function's).
3351 If a frame_pointer isn't needed, r4 is saved as a general register;
3352 the space for the frame pointer is still allocated, though, to keep
3353 things simple.
3356 Top of Frame
3358 SP (FP') Previous FP
3359 SP + 4 Alignment filler (sigh)
3360 SP + 8 Space for locals reserved here.
3364 SP + n All call saved registers used.
3368 SP + o All call saved fp registers used.
3372 SP + p (SP') points to next available address.
3376 /* Global variables set by output_function_prologue(). */
3377 /* Size of frame. Need to know this to emit return insns from
3378 leaf procedures. */
3379 static HOST_WIDE_INT actual_fsize, local_fsize;
3380 static int save_fregs;
3382 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3383 Handle case where DISP > 8k by using the add_high_const patterns.
3385 Note in the DISP > 8k case, we will leave the high part of the address
3386 in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
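/* As a rough sketch of the generic large-displacement case, a DISP
   of 0x12345 comes out as addil L'0x12345,%base (leaving the high
   part in %r1) followed by stw %src,R'0x12345(%r1). */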
3388 static void
3389 store_reg (int reg, HOST_WIDE_INT disp, int base)
3391 rtx insn, dest, src, basereg;
3393 src = gen_rtx_REG (word_mode, reg);
3394 basereg = gen_rtx_REG (Pmode, base);
3395 if (VAL_14_BITS_P (disp))
3397 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3398 insn = emit_move_insn (dest, src);
3400 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3402 rtx delta = GEN_INT (disp);
3403 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3405 emit_move_insn (tmpreg, delta);
3406 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3407 if (DO_FRAME_NOTES)
3409 REG_NOTES (insn)
3410 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3411 gen_rtx_SET (VOIDmode, tmpreg,
3412 gen_rtx_PLUS (Pmode, basereg, delta)),
3413 REG_NOTES (insn));
3414 RTX_FRAME_RELATED_P (insn) = 1;
3416 dest = gen_rtx_MEM (word_mode, tmpreg);
3417 insn = emit_move_insn (dest, src);
3419 else
3421 rtx delta = GEN_INT (disp);
3422 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3423 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3425 emit_move_insn (tmpreg, high);
3426 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3427 insn = emit_move_insn (dest, src);
3428 if (DO_FRAME_NOTES)
3430 REG_NOTES (insn)
3431 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3432 gen_rtx_SET (VOIDmode,
3433 gen_rtx_MEM (word_mode,
3434 gen_rtx_PLUS (word_mode, basereg,
3435 delta)),
3436 src),
3437 REG_NOTES (insn));
3441 if (DO_FRAME_NOTES)
3442 RTX_FRAME_RELATED_P (insn) = 1;
3445 /* Emit RTL to store REG at the memory location specified by BASE and then
3446 add MOD to BASE. MOD must be <= 8k. */
3448 static void
3449 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3451 rtx insn, basereg, srcreg, delta;
3453 gcc_assert (VAL_14_BITS_P (mod));
3455 basereg = gen_rtx_REG (Pmode, base);
3456 srcreg = gen_rtx_REG (word_mode, reg);
3457 delta = GEN_INT (mod);
3459 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3460 if (DO_FRAME_NOTES)
3462 RTX_FRAME_RELATED_P (insn) = 1;
3464 /* RTX_FRAME_RELATED_P must be set on each frame related set
3465 in a parallel with more than one element. */
3466 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3467 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3471 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3472 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3473 whether to add a frame note or not.
3475 In the DISP > 8k case, we leave the high part of the address in %r1.
3476 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3478 static void
3479 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3481 rtx insn;
3483 if (VAL_14_BITS_P (disp))
3485 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3486 plus_constant (gen_rtx_REG (Pmode, base), disp));
3488 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3490 rtx basereg = gen_rtx_REG (Pmode, base);
3491 rtx delta = GEN_INT (disp);
3492 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3494 emit_move_insn (tmpreg, delta);
3495 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3496 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3497 if (DO_FRAME_NOTES)
3498 REG_NOTES (insn)
3499 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3500 gen_rtx_SET (VOIDmode, tmpreg,
3501 gen_rtx_PLUS (Pmode, basereg, delta)),
3502 REG_NOTES (insn));
3504 else
3506 rtx basereg = gen_rtx_REG (Pmode, base);
3507 rtx delta = GEN_INT (disp);
3508 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3510 emit_move_insn (tmpreg,
3511 gen_rtx_PLUS (Pmode, basereg,
3512 gen_rtx_HIGH (Pmode, delta)));
3513 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3514 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3517 if (DO_FRAME_NOTES && note)
3518 RTX_FRAME_RELATED_P (insn) = 1;
3521 HOST_WIDE_INT
3522 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3524 int freg_saved = 0;
3525 int i, j;
3527 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3528 be consistent with the rounding and size calculation done here.
3529 Change them at the same time. */
3531 /* We do our own stack alignment. First, round the size of the
3532 stack locals up to a word boundary. */
3533 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3535 /* Space for previous frame pointer + filler. If any frame is
3536 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3537 waste some space here for the sake of HP compatibility. The
3538 first slot is only used when the frame pointer is needed. */
3539 if (size || frame_pointer_needed)
3540 size += STARTING_FRAME_OFFSET;
3542 /* If the current function calls __builtin_eh_return, then we need
3543 to allocate stack space for registers that will hold data for
3544 the exception handler. */
3545 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3547 unsigned int i;
3549 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3550 continue;
3551 size += i * UNITS_PER_WORD;
3554 /* Account for space used by the callee general register saves. */
3555 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3556 if (df_regs_ever_live_p (i))
3557 size += UNITS_PER_WORD;
3559 /* Account for space used by the callee floating point register saves. */
3560 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3561 if (df_regs_ever_live_p (i)
3562 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3564 freg_saved = 1;
3566 /* We always save both halves of the FP register, so always
3567 increment the frame size by 8 bytes. */
3568 size += 8;
3571 /* If any of the floating registers are saved, account for the
3572 alignment needed for the floating point register save block. */
3573 if (freg_saved)
3575 size = (size + 7) & ~7;
3576 if (fregs_live)
3577 *fregs_live = 1;
3580 /* The various ABIs include space for the outgoing parameters in the
3581 size of the current function's stack frame. We don't need to align
3582 for the outgoing arguments as their alignment is set by the final
3583 rounding for the frame as a whole. */
3584 size += current_function_outgoing_args_size;
3586 /* Allocate space for the fixed frame marker. This space must be
3587 allocated for any function that makes calls or allocates
3588 stack space. */
3589 if (!current_function_is_leaf || size)
3590 size += TARGET_64BIT ? 48 : 32;
3592 /* Finally, round to the preferred stack boundary. */
3593 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3594 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
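/* A worked 32-bit example, assuming the usual 64-byte preferred
   stack boundary and the 8-byte frame-pointer-plus-filler offset
   shown in the layout above: 40 bytes of locals stay 40 after word
   rounding, STARTING_FRAME_OFFSET adds 8, two callee general
   register saves add 8, 16 bytes of outgoing arguments bring the
   total to 72, the 32-byte frame marker makes it 104, and the final
   rounding returns 128. */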
3597 /* Generate the assembly code for function entry. FILE is a stdio
3598 stream to output the code to. SIZE is an int: how many units of
3599 temporary storage to allocate.
3601 Refer to the array `regs_ever_live' to determine which registers to
3602 save; `regs_ever_live[I]' is nonzero if register number I is ever
3603 used in the function. This function is responsible for knowing
3604 which registers should not be saved even if used. */
3606 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3607 of memory. If any fpu reg is used in the function, we allocate
3608 such a block here, at the bottom of the frame, just in case it's needed.
3610 If this function is a leaf procedure, then we may choose not
3611 to do a "save" insn. The decision about whether or not
3612 to do this is made in regclass.c. */
3614 static void
3615 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3617 /* The function's label and associated .PROC must never be
3618 separated and must be output *after* any profiling declarations
3619 to avoid changing spaces/subspaces within a procedure. */
3620 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3621 fputs ("\t.PROC\n", file);
3623 /* hppa_expand_prologue does the dirty work now. We just need
3624 to output the assembler directives which denote the start
3625 of a function. */
3626 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3627 if (current_function_is_leaf)
3628 fputs (",NO_CALLS", file);
3629 else
3630 fputs (",CALLS", file);
3631 if (rp_saved)
3632 fputs (",SAVE_RP", file);
3634 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3635 at the beginning of the frame and that it is used as the frame
3636 pointer for the frame. We do this because our current frame
3637 layout doesn't conform to that specified in the HP runtime
3638 documentation and we need a way to indicate to programs such as
3639 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3640 isn't used by HP compilers but is supported by the assembler.
3641 However, SAVE_SP is supposed to indicate that the previous stack
3642 pointer has been saved in the frame marker. */
3643 if (frame_pointer_needed)
3644 fputs (",SAVE_SP", file);
3646 /* Pass on information about the number of callee register saves
3647 performed in the prologue.
3649 The compiler is supposed to pass the highest register number
3650 saved, the assembler then has to adjust that number before
3651 entering it into the unwind descriptor (to account for any
3652 caller saved registers with lower register numbers than the
3653 first callee saved register). */
3654 if (gr_saved)
3655 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3657 if (fr_saved)
3658 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3660 fputs ("\n\t.ENTRY\n", file);
3662 remove_useless_addtr_insns (0);
3665 void
3666 hppa_expand_prologue (void)
3668 int merge_sp_adjust_with_store = 0;
3669 HOST_WIDE_INT size = get_frame_size ();
3670 HOST_WIDE_INT offset;
3671 int i;
3672 rtx insn, tmpreg;
3674 gr_saved = 0;
3675 fr_saved = 0;
3676 save_fregs = 0;
3678 /* Compute total size for frame pointer, filler, locals and rounding to
3679 the next word boundary. Similar code appears in compute_frame_size
3680 and must be changed in tandem with this code. */
3681 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3682 if (local_fsize || frame_pointer_needed)
3683 local_fsize += STARTING_FRAME_OFFSET;
3685 actual_fsize = compute_frame_size (size, &save_fregs);
3687 /* Compute a few things we will use often. */
3688 tmpreg = gen_rtx_REG (word_mode, 1);
3690 /* Save RP first. The calling conventions manual states RP will
3691 always be stored into the caller's frame at sp - 20 or sp - 16
3692 depending on which ABI is in use. */
3693 if (df_regs_ever_live_p (2) || current_function_calls_eh_return)
3695 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3696 rp_saved = true;
3698 else
3699 rp_saved = false;
3701 /* Allocate the local frame and set up the frame pointer if needed. */
3702 if (actual_fsize != 0)
3704 if (frame_pointer_needed)
3706 /* Copy the old frame pointer temporarily into %r1. Set up the
3707 new stack pointer, then store away the saved old frame pointer
3708 into the stack at sp and at the same time update the stack
3709 pointer by actual_fsize bytes. Two versions, first
3710 handles small (<8k) frames. The second handles large (>=8k)
3711 frames. */
3712 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3713 if (DO_FRAME_NOTES)
3714 RTX_FRAME_RELATED_P (insn) = 1;
3716 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3717 if (DO_FRAME_NOTES)
3718 RTX_FRAME_RELATED_P (insn) = 1;
3720 if (VAL_14_BITS_P (actual_fsize))
3721 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3722 else
3724 /* It is incorrect to store the saved frame pointer at *sp,
3725 then increment sp (writes beyond the current stack boundary).
3727 So instead use stwm to store at *sp and post-increment the
3728 stack pointer as an atomic operation. Then increment sp to
3729 finish allocating the new frame. */
3730 HOST_WIDE_INT adjust1 = 8192 - 64;
3731 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3733 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3734 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3735 adjust2, 1);
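/* Concretely, the small-frame case above is the classic sequence
   copy %r3,%r1 / copy %sp,%r3 / stwm %r1,size(%sp); the large case
   stops the stwm at 8128 bytes and finishes the allocation with a
   separate stack pointer adjustment. */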
3738 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3739 we need to store the previous stack pointer (frame pointer)
3740 into the frame marker on targets that use the HP unwind
3741 library. This allows the HP unwind library to be used to
3742 unwind GCC frames. However, we are not fully compatible
3743 with the HP library because our frame layout differs from
3744 that specified in the HP runtime specification.
3746 We don't want a frame note on this instruction as the frame
3747 marker moves during dynamic stack allocation.
3749 This instruction also serves as a blockage to prevent
3750 register spills from being scheduled before the stack
3751 pointer is raised. This is necessary as we store
3752 registers using the frame pointer as a base register,
3753 and the frame pointer is set before sp is raised. */
3754 if (TARGET_HPUX_UNWIND_LIBRARY)
3756 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3757 GEN_INT (TARGET_64BIT ? -8 : -4));
3759 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3760 frame_pointer_rtx);
3762 else
3763 emit_insn (gen_blockage ());
3765 /* no frame pointer needed. */
3766 else
3768 /* In some cases we can perform the first callee register save
3769 and allocate the stack frame at the same time. If so, just
3770 make a note of it and defer allocating the frame until saving
3771 the callee registers. */
3772 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3773 merge_sp_adjust_with_store = 1;
3774 /* Cannot optimize. Adjust the stack frame by actual_fsize
3775 bytes. */
3776 else
3777 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3778 actual_fsize, 1);
3782 /* Normal register save.
3784 Do not save the frame pointer in the frame_pointer_needed case. It
3785 was done earlier. */
3786 if (frame_pointer_needed)
3788 offset = local_fsize;
3790 /* Saving the EH return data registers in the frame is the simplest
3791 way to get the frame unwind information emitted. We put them
3792 just before the general registers. */
3793 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3795 unsigned int i, regno;
3797 for (i = 0; ; ++i)
3799 regno = EH_RETURN_DATA_REGNO (i);
3800 if (regno == INVALID_REGNUM)
3801 break;
3803 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3804 offset += UNITS_PER_WORD;
3808 for (i = 18; i >= 4; i--)
3809 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3811 store_reg (i, offset, FRAME_POINTER_REGNUM);
3812 offset += UNITS_PER_WORD;
3813 gr_saved++;
3815 /* Account for %r3 which is saved in a special place. */
3816 gr_saved++;
3818 /* No frame pointer needed. */
3819 else
3821 offset = local_fsize - actual_fsize;
3823 /* Saving the EH return data registers in the frame is the simplest
3824 way to get the frame unwind information emitted. */
3825 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3827 unsigned int i, regno;
3829 for (i = 0; ; ++i)
3831 regno = EH_RETURN_DATA_REGNO (i);
3832 if (regno == INVALID_REGNUM)
3833 break;
3835 /* If merge_sp_adjust_with_store is nonzero, then we can
3836 optimize the first save. */
3837 if (merge_sp_adjust_with_store)
3839 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3840 merge_sp_adjust_with_store = 0;
3842 else
3843 store_reg (regno, offset, STACK_POINTER_REGNUM);
3844 offset += UNITS_PER_WORD;
3848 for (i = 18; i >= 3; i--)
3849 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3851 /* If merge_sp_adjust_with_store is nonzero, then we can
3852 optimize the first GR save. */
3853 if (merge_sp_adjust_with_store)
3855 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3856 merge_sp_adjust_with_store = 0;
3858 else
3859 store_reg (i, offset, STACK_POINTER_REGNUM);
3860 offset += UNITS_PER_WORD;
3861 gr_saved++;
3864 /* If we wanted to merge the SP adjustment with a GR save, but we never
3865 did any GR saves, then just emit the adjustment here. */
3866 if (merge_sp_adjust_with_store)
3867 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3868 actual_fsize, 1);
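/* For illustration with assumed sizes: if actual_fsize == 128,
   local_fsize == 0 and %r18 is the only callee save, the merged form
   is roughly

     stwm %r18,128(%r30)

   in place of the separate adjustment and save

     ldo 128(%r30),%r30
     stw %r18,-128(%r30)  */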
3871 /* The hppa calling conventions say that %r19, the pic offset
3872 register, is saved at sp - 32 (in this function's frame)
3873 when generating PIC code. FIXME: What is the correct thing
3874 to do for functions which make no calls and allocate no
3875 frame? Do we need to allocate a frame, or can we just omit
3876 the save? For now we'll just omit the save.
3878 We don't want a note on this insn as the frame marker can
3879 move if there is a dynamic stack allocation. */
3880 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3882 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3884 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3888 /* Align pointer properly (doubleword boundary). */
3889 offset = (offset + 7) & ~7;
3891 /* Floating point register store. */
3892 if (save_fregs)
3894 rtx base;
3896 /* First get the frame or stack pointer to the start of the FP register
3897 save area. */
3898 if (frame_pointer_needed)
3900 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3901 base = frame_pointer_rtx;
3903 else
3905 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3906 base = stack_pointer_rtx;
3909 /* Now actually save the FP registers. */
3910 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3912 if (df_regs_ever_live_p (i)
3913 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3915 rtx addr, insn, reg;
3916 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3917 reg = gen_rtx_REG (DFmode, i);
3918 insn = emit_move_insn (addr, reg);
3919 if (DO_FRAME_NOTES)
3921 RTX_FRAME_RELATED_P (insn) = 1;
3922 if (TARGET_64BIT)
3924 rtx mem = gen_rtx_MEM (DFmode,
3925 plus_constant (base, offset));
3926 REG_NOTES (insn)
3927 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3928 gen_rtx_SET (VOIDmode, mem, reg),
3929 REG_NOTES (insn));
3931 else
3933 rtx meml = gen_rtx_MEM (SFmode,
3934 plus_constant (base, offset));
3935 rtx memr = gen_rtx_MEM (SFmode,
3936 plus_constant (base, offset + 4));
3937 rtx regl = gen_rtx_REG (SFmode, i);
3938 rtx regr = gen_rtx_REG (SFmode, i + 1);
3939 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3940 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3941 rtvec vec;
3943 RTX_FRAME_RELATED_P (setl) = 1;
3944 RTX_FRAME_RELATED_P (setr) = 1;
3945 vec = gen_rtvec (2, setl, setr);
3946 REG_NOTES (insn)
3947 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3948 gen_rtx_SEQUENCE (VOIDmode, vec),
3949 REG_NOTES (insn));
3952 offset += GET_MODE_SIZE (DFmode);
3953 fr_saved++;
3959 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3960 Handle case where DISP > 8k by using the add_high_const patterns. */
3962 static void
3963 load_reg (int reg, HOST_WIDE_INT disp, int base)
3965 rtx dest = gen_rtx_REG (word_mode, reg);
3966 rtx basereg = gen_rtx_REG (Pmode, base);
3967 rtx src;
3969 if (VAL_14_BITS_P (disp))
3970 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3971 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3973 rtx delta = GEN_INT (disp);
3974 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3976 emit_move_insn (tmpreg, delta);
3977 if (TARGET_DISABLE_INDEXING)
3979 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3980 src = gen_rtx_MEM (word_mode, tmpreg);
3982 else
3983 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3985 else
3987 rtx delta = GEN_INT (disp);
3988 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3989 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3991 emit_move_insn (tmpreg, high);
3992 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3995 emit_move_insn (dest, src);
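/* For illustration, the common cases above expand roughly as

     ldw disp(base),dest       ; disp fits in 14 bits

   or, for a larger displacement,

     addil L'disp,base         ; %r1 = base + left portion of disp
     ldw R'disp(%r1),dest      ; add right portion and load

   (32-bit notation; the 64-bit wide-displacement case first loads
   the full offset into %r1). */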
3998 /* Update the total code bytes output to the text section. */
4000 static void
4001 update_total_code_bytes (unsigned int nbytes)
4003 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4004 && !IN_NAMED_SECTION_P (cfun->decl))
4006 unsigned int old_total = total_code_bytes;
4008 total_code_bytes += nbytes;
4010 /* Be prepared to handle overflows. */
4011 if (old_total > total_code_bytes)
4012 total_code_bytes = UINT_MAX;
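/* For example, if total_code_bytes were 0xfffffff0 and nbytes 0x100,
   the addition would wrap below old_total, so the count saturates at
   UINT_MAX instead of appearing to shrink. */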
4016 /* This function generates the assembly code for function exit.
4017 Args are as for output_function_prologue ().
4019 The function epilogue should not depend on the current stack
4020 pointer! It should use the frame pointer only. This is mandatory
4021 because of alloca; we also take advantage of it to omit stack
4022 adjustments before returning. */
4024 static void
4025 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4027 rtx insn = get_last_insn ();
4029 last_address = 0;
4031 /* hppa_expand_epilogue does the dirty work now. We just need
4032 to output the assembler directives which denote the end
4033 of a function.
4035 To make debuggers happy, emit a nop if the epilogue was completely
4036 eliminated due to a volatile call as the last insn in the
4037 current function. That way the return address (in %r2) will
4038 always point to a valid instruction in the current function. */
4040 /* Get the last real insn. */
4041 if (GET_CODE (insn) == NOTE)
4042 insn = prev_real_insn (insn);
4044 /* If it is a sequence, then look inside. */
4045 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4046 insn = XVECEXP (PATTERN (insn), 0, 0);
4048 /* If insn is a CALL_INSN, then it must be a call to a volatile
4049 function (otherwise there would be epilogue insns). */
4050 if (insn && GET_CODE (insn) == CALL_INSN)
4052 fputs ("\tnop\n", file);
4053 last_address += 4;
4056 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4058 if (TARGET_SOM && TARGET_GAS)
4060 /* We're done with this subspace except possibly for some additional
4061 debug information. Forget that we are in this subspace to ensure
4062 that the next function is output in its own subspace. */
4063 in_section = NULL;
4064 cfun->machine->in_nsubspa = 2;
4067 if (INSN_ADDRESSES_SET_P ())
4069 insn = get_last_nonnote_insn ();
4070 last_address += INSN_ADDRESSES (INSN_UID (insn));
4071 if (INSN_P (insn))
4072 last_address += insn_default_length (insn);
4073 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4074 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4076 else
4077 last_address = UINT_MAX;
4079 /* Finally, update the total number of code bytes output so far. */
4080 update_total_code_bytes (last_address);
4083 void
4084 hppa_expand_epilogue (void)
4086 rtx tmpreg;
4087 HOST_WIDE_INT offset;
4088 HOST_WIDE_INT ret_off = 0;
4089 int i;
4090 int merge_sp_adjust_with_load = 0;
4092 /* We will use this often. */
4093 tmpreg = gen_rtx_REG (word_mode, 1);
4095 /* Try to restore RP early to avoid load/use interlocks when
4096 RP gets used in the return (bv) instruction. This appears to still
4097 be necessary even when we schedule the prologue and epilogue. */
4098 if (rp_saved)
4100 ret_off = TARGET_64BIT ? -16 : -20;
4101 if (frame_pointer_needed)
4103 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4104 ret_off = 0;
4106 else
4108 /* No frame pointer, and stack is smaller than 8k. */
4109 if (VAL_14_BITS_P (ret_off - actual_fsize))
4111 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4112 ret_off = 0;
4117 /* General register restores. */
4118 if (frame_pointer_needed)
4120 offset = local_fsize;
4122 /* If the current function calls __builtin_eh_return, then we need
4123 to restore the saved EH data registers. */
4124 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4126 unsigned int i, regno;
4128 for (i = 0; ; ++i)
4130 regno = EH_RETURN_DATA_REGNO (i);
4131 if (regno == INVALID_REGNUM)
4132 break;
4134 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4135 offset += UNITS_PER_WORD;
4139 for (i = 18; i >= 4; i--)
4140 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4142 load_reg (i, offset, FRAME_POINTER_REGNUM);
4143 offset += UNITS_PER_WORD;
4146 else
4148 offset = local_fsize - actual_fsize;
4150 /* If the current function calls __builtin_eh_return, then we need
4151 to restore the saved EH data registers. */
4152 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4154 unsigned int i, regno;
4156 for (i = 0; ; ++i)
4158 regno = EH_RETURN_DATA_REGNO (i);
4159 if (regno == INVALID_REGNUM)
4160 break;
4162 /* Only for the first load.
4163 merge_sp_adjust_with_load holds the register load
4164 with which we will merge the sp adjustment. */
4165 if (merge_sp_adjust_with_load == 0
4166 && local_fsize == 0
4167 && VAL_14_BITS_P (-actual_fsize))
4168 merge_sp_adjust_with_load = regno;
4169 else
4170 load_reg (regno, offset, STACK_POINTER_REGNUM);
4171 offset += UNITS_PER_WORD;
4175 for (i = 18; i >= 3; i--)
4177 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4179 /* Only for the first load.
4180 merge_sp_adjust_with_load holds the register load
4181 with which we will merge the sp adjustment. */
4182 if (merge_sp_adjust_with_load == 0
4183 && local_fsize == 0
4184 && VAL_14_BITS_P (-actual_fsize))
4185 merge_sp_adjust_with_load = i;
4186 else
4187 load_reg (i, offset, STACK_POINTER_REGNUM);
4188 offset += UNITS_PER_WORD;
4193 /* Align pointer properly (doubleword boundary). */
4194 offset = (offset + 7) & ~7;
4196 /* FP register restores. */
4197 if (save_fregs)
4199 /* Adjust the register to index off of. */
4200 if (frame_pointer_needed)
4201 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4202 else
4203 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4205 /* Actually do the restores now. */
4206 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4207 if (df_regs_ever_live_p (i)
4208 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4210 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4211 rtx dest = gen_rtx_REG (DFmode, i);
4212 emit_move_insn (dest, src);
4216 /* Emit a blockage insn here to keep these insns from being moved to
4217 an earlier spot in the epilogue, or into the main instruction stream.
4219 This is necessary as we must not cut the stack back before all the
4220 restores are finished. */
4221 emit_insn (gen_blockage ());
4223 /* Reset stack pointer (and possibly frame pointer). The stack
4224 pointer is initially set to fp + 64 to avoid a race condition. */
4225 if (frame_pointer_needed)
4227 rtx delta = GEN_INT (-64);
4229 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4230 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4232 /* If we were deferring a callee register restore, do it now. */
4233 else if (merge_sp_adjust_with_load)
4235 rtx delta = GEN_INT (-actual_fsize);
4236 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4238 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4240 else if (actual_fsize != 0)
4241 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4242 - actual_fsize, 0);
4244 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4245 frame greater than 8k), do so now. */
4246 if (ret_off != 0)
4247 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4249 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4251 rtx sa = EH_RETURN_STACKADJ_RTX;
4253 emit_insn (gen_blockage ());
4254 emit_insn (TARGET_64BIT
4255 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4256 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4260 rtx
4261 hppa_pic_save_rtx (void)
4263 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4266 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4267 #define NO_DEFERRED_PROFILE_COUNTERS 0
4268 #endif
4271 /* Vector of funcdef numbers. */
4272 static VEC(int,heap) *funcdef_nos;
4274 /* Output deferred profile counters. */
4275 static void
4276 output_deferred_profile_counters (void)
4278 unsigned int i;
4279 int align, n;
4281 if (VEC_empty (int, funcdef_nos))
4282 return;
4284 switch_to_section (data_section);
4285 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4286 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4288 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4290 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4291 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4294 VEC_free (int, heap, funcdef_nos);
4297 void
4298 hppa_profile_hook (int label_no)
4300 /* We use SImode for the address of the function in both 32 and
4301 64-bit code to avoid having to provide DImode versions of the
4302 lcla2 and load_offset_label_address insn patterns. */
4303 rtx reg = gen_reg_rtx (SImode);
4304 rtx label_rtx = gen_label_rtx ();
4305 rtx begin_label_rtx, call_insn;
4306 char begin_label_name[16];
4308 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4309 label_no);
4310 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4312 if (TARGET_64BIT)
4313 emit_move_insn (arg_pointer_rtx,
4314 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4315 GEN_INT (64)));
4317 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4319 /* The address of the function is loaded into %r25 with an instruction-
4320 relative sequence that avoids the use of relocations. The sequence
4321 is split so that the load_offset_label_address instruction can
4322 occupy the delay slot of the call to _mcount. */
4323 if (TARGET_PA_20)
4324 emit_insn (gen_lcla2 (reg, label_rtx));
4325 else
4326 emit_insn (gen_lcla1 (reg, label_rtx));
4328 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4329 reg, begin_label_rtx, label_rtx));
4331 #if !NO_DEFERRED_PROFILE_COUNTERS
4333 rtx count_label_rtx, addr, r24;
4334 char count_label_name[16];
4336 VEC_safe_push (int, heap, funcdef_nos, label_no);
4337 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4338 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4340 addr = force_reg (Pmode, count_label_rtx);
4341 r24 = gen_rtx_REG (Pmode, 24);
4342 emit_move_insn (r24, addr);
4344 call_insn =
4345 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4346 gen_rtx_SYMBOL_REF (Pmode,
4347 "_mcount")),
4348 GEN_INT (TARGET_64BIT ? 24 : 12)));
4350 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4352 #else
4354 call_insn =
4355 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4356 gen_rtx_SYMBOL_REF (Pmode,
4357 "_mcount")),
4358 GEN_INT (TARGET_64BIT ? 16 : 8)));
4360 #endif
4362 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4363 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4365 /* Indicate the _mcount call cannot throw, nor will it execute a
4366 non-local goto. */
4367 REG_NOTES (call_insn)
4368 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
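/* To summarize the register state set up above (32-bit case, with
   deferred counters enabled, the default): %r26 holds a copy of the
   caller's return pointer from %r2, %r25 the address of the current
   function, and %r24 the address of this function's profile counter;
   all three are marked as used by the call to _mcount. */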
4371 /* Fetch the return address for the frame COUNT steps up from
4372 the current frame, after the prologue. FRAMEADDR is the
4373 frame pointer of the COUNT frame.
4375 We want to ignore any export stub remnants here. To handle this,
4376 we examine the code at the return address, and if it is an export
4377 stub, we return a memory rtx for the stub return address stored
4378 at frame-24.
4380 The value returned is used in two different ways:
4382 1. To find a function's caller.
4384 2. To change the return address for a function.
4386 This function handles most instances of case 1; however, it will
4387 fail if there are two levels of stubs to execute on the return
4388 path. The only way I believe that can happen is if the return value
4389 needs a parameter relocation, which never happens for C code.
4391 This function handles most instances of case 2; however, it will
4392 fail if we did not originally have stub code on the return path
4393 but will need stub code on the new return path. This can happen if
4394 the caller & callee are both in the main program, but the new
4395 return location is in a shared library. */
4397 rtx
4398 return_addr_rtx (int count, rtx frameaddr)
4400 rtx label;
4401 rtx rp;
4402 rtx saved_rp;
4403 rtx ins;
4405 if (count != 0)
4406 return NULL_RTX;
4408 rp = get_hard_reg_initial_val (Pmode, 2);
4410 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4411 return rp;
4413 saved_rp = gen_reg_rtx (Pmode);
4414 emit_move_insn (saved_rp, rp);
4416 /* Get pointer to the instruction stream. We have to mask out the
4417 privilege level from the two low order bits of the return address
4418 pointer here so that ins will point to the start of the first
4419 instruction that would have been executed if we returned. */
4420 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4421 label = gen_label_rtx ();
4423 /* Check the instruction stream at the normal return address for the
4424 export stub:
4426 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4427 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4428 0x00011820 | stub+16: mtsp r1,sr0
4429 0xe0400002 | stub+20: be,n 0(sr0,rp)
4431 If it is an export stub, then our return address is really in
4432 -24[frameaddr]. */
4434 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4435 NULL_RTX, SImode, 1);
4436 emit_jump_insn (gen_bne (label));
4438 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4439 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4440 emit_jump_insn (gen_bne (label));
4442 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4443 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4444 emit_jump_insn (gen_bne (label));
4446 /* 0xe0400002 must be specified as -532676606 so that it won't be
4447 rejected as an invalid immediate operand on 64-bit hosts. */
4448 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4449 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
4451 /* If there is no export stub then just use the value saved from
4452 the return pointer register. */
4454 emit_jump_insn (gen_bne (label));
4456 /* Here we know that our return address points to an export
4457 stub. We don't want to return the address of the export stub,
4458 but rather the return address of the export stub. That return
4459 address is stored at -24[frameaddr]. */
4461 emit_move_insn (saved_rp,
4462 gen_rtx_MEM (Pmode,
4463 memory_address (Pmode,
4464 plus_constant (frameaddr,
4465 -24))));
4467 emit_label (label);
4468 return saved_rp;
4471 void
4472 emit_bcond_fp (enum rtx_code code, rtx operand0)
4474 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4475 gen_rtx_IF_THEN_ELSE (VOIDmode,
4476 gen_rtx_fmt_ee (code,
4477 VOIDmode,
4478 gen_rtx_REG (CCFPmode, 0),
4479 const0_rtx),
4480 gen_rtx_LABEL_REF (VOIDmode, operand0),
4481 pc_rtx)));
4485 rtx
4486 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4488 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4489 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4492 /* Adjust the cost of a scheduling dependency. Return the new cost of
4493 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4495 static int
4496 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4498 enum attr_type attr_type;
4500 /* Don't adjust costs for a pa8000 chip; also, do not adjust any
4501 true dependencies, as they are described with bypasses now. */
4502 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4503 return cost;
4505 if (! recog_memoized (insn))
4506 return 0;
4508 attr_type = get_attr_type (insn);
4510 switch (REG_NOTE_KIND (link))
4512 case REG_DEP_ANTI:
4513 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4514 cycles later. */
4516 if (attr_type == TYPE_FPLOAD)
4518 rtx pat = PATTERN (insn);
4519 rtx dep_pat = PATTERN (dep_insn);
4520 if (GET_CODE (pat) == PARALLEL)
4522 /* This happens for the fldXs,mb patterns. */
4523 pat = XVECEXP (pat, 0, 0);
4525 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4526 /* If this happens, we have to extend this to schedule
4527 optimally. Return 0 for now. */
4528 return 0;
4530 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4532 if (! recog_memoized (dep_insn))
4533 return 0;
4534 switch (get_attr_type (dep_insn))
4536 case TYPE_FPALU:
4537 case TYPE_FPMULSGL:
4538 case TYPE_FPMULDBL:
4539 case TYPE_FPDIVSGL:
4540 case TYPE_FPDIVDBL:
4541 case TYPE_FPSQRTSGL:
4542 case TYPE_FPSQRTDBL:
4543 /* A fpload can't be issued until one cycle before a
4544 preceding arithmetic operation has finished if
4545 the target of the fpload is any of the sources
4546 (or destination) of the arithmetic operation. */
4547 return insn_default_latency (dep_insn) - 1;
4549 default:
4550 return 0;
4554 else if (attr_type == TYPE_FPALU)
4556 rtx pat = PATTERN (insn);
4557 rtx dep_pat = PATTERN (dep_insn);
4558 if (GET_CODE (pat) == PARALLEL)
4560 /* This happens for the fldXs,mb patterns. */
4561 pat = XVECEXP (pat, 0, 0);
4563 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4564 /* If this happens, we have to extend this to schedule
4565 optimally. Return 0 for now. */
4566 return 0;
4568 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4570 if (! recog_memoized (dep_insn))
4571 return 0;
4572 switch (get_attr_type (dep_insn))
4574 case TYPE_FPDIVSGL:
4575 case TYPE_FPDIVDBL:
4576 case TYPE_FPSQRTSGL:
4577 case TYPE_FPSQRTDBL:
4578 /* An ALU flop can't be issued until two cycles before a
4579 preceding divide or sqrt operation has finished if
4580 the target of the ALU flop is any of the sources
4581 (or destination) of the divide or sqrt operation. */
4582 return insn_default_latency (dep_insn) - 2;
4584 default:
4585 return 0;
4590 /* For other anti dependencies, the cost is 0. */
4591 return 0;
4593 case REG_DEP_OUTPUT:
4594 /* Output dependency; DEP_INSN writes a register that INSN writes some
4595 cycles later. */
4596 if (attr_type == TYPE_FPLOAD)
4598 rtx pat = PATTERN (insn);
4599 rtx dep_pat = PATTERN (dep_insn);
4600 if (GET_CODE (pat) == PARALLEL)
4602 /* This happens for the fldXs,mb patterns. */
4603 pat = XVECEXP (pat, 0, 0);
4605 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4606 /* If this happens, we have to extend this to schedule
4607 optimally. Return 0 for now. */
4608 return 0;
4610 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4612 if (! recog_memoized (dep_insn))
4613 return 0;
4614 switch (get_attr_type (dep_insn))
4616 case TYPE_FPALU:
4617 case TYPE_FPMULSGL:
4618 case TYPE_FPMULDBL:
4619 case TYPE_FPDIVSGL:
4620 case TYPE_FPDIVDBL:
4621 case TYPE_FPSQRTSGL:
4622 case TYPE_FPSQRTDBL:
4623 /* A fpload can't be issued until one cycle before a
4624 preceding arithmetic operation has finished if
4625 the target of the fpload is the destination of the
4626 arithmetic operation.
4628 Exception: For PA7100LC, PA7200 and PA7300, the cost
4629 is 3 cycles, unless they bundle together. We also
4630 pay the penalty if the second insn is a fpload. */
4631 return insn_default_latency (dep_insn) - 1;
4633 default:
4634 return 0;
4638 else if (attr_type == TYPE_FPALU)
4640 rtx pat = PATTERN (insn);
4641 rtx dep_pat = PATTERN (dep_insn);
4642 if (GET_CODE (pat) == PARALLEL)
4644 /* This happens for the fldXs,mb patterns. */
4645 pat = XVECEXP (pat, 0, 0);
4647 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4648 /* If this happens, we have to extend this to schedule
4649 optimally. Return 0 for now. */
4650 return 0;
4652 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4654 if (! recog_memoized (dep_insn))
4655 return 0;
4656 switch (get_attr_type (dep_insn))
4658 case TYPE_FPDIVSGL:
4659 case TYPE_FPDIVDBL:
4660 case TYPE_FPSQRTSGL:
4661 case TYPE_FPSQRTDBL:
4662 /* An ALU flop can't be issued until two cycles before a
4663 preceding divide or sqrt operation has finished if
4664 the target of the ALU flop is also the target of
4665 the divide or sqrt operation. */
4666 return insn_default_latency (dep_insn) - 2;
4668 default:
4669 return 0;
4674 /* For other output dependencies, the cost is 0. */
4675 return 0;
4677 default:
4678 gcc_unreachable ();
4682 /* Adjust scheduling priorities. We use this to try to keep addil
4683 and the next use of %r1 close together. */
4684 static int
4685 pa_adjust_priority (rtx insn, int priority)
4687 rtx set = single_set (insn);
4688 rtx src, dest;
4689 if (set)
4691 src = SET_SRC (set);
4692 dest = SET_DEST (set);
4693 if (GET_CODE (src) == LO_SUM
4694 && symbolic_operand (XEXP (src, 1), VOIDmode)
4695 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4696 priority >>= 3;
4698 else if (GET_CODE (src) == MEM
4699 && GET_CODE (XEXP (src, 0)) == LO_SUM
4700 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4701 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4702 priority >>= 1;
4704 else if (GET_CODE (dest) == MEM
4705 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4706 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4707 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4708 priority >>= 3;
4710 return priority;
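/* For illustration (RTL sketch, names assumed): a store such as

     (set (mem (lo_sum (reg %r1) (symbol_ref "x"))) (reg %r26))

   depends on the addil that set %r1; dropping its priority
   (e.g. 64 >> 3 == 8) makes the scheduler less likely to separate
   it from that addil. */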
4713 /* The 700 can only issue a single insn at a time.
4714 The 7XXX processors can issue two insns at a time.
4715 The 8000 can issue 4 insns at a time. */
4716 static int
4717 pa_issue_rate (void)
4719 switch (pa_cpu)
4721 case PROCESSOR_700: return 1;
4722 case PROCESSOR_7100: return 2;
4723 case PROCESSOR_7100LC: return 2;
4724 case PROCESSOR_7200: return 2;
4725 case PROCESSOR_7300: return 2;
4726 case PROCESSOR_8000: return 4;
4728 default:
4729 gcc_unreachable ();
4735 /* Return any length adjustment needed by INSN which already has its length
4736 computed as LENGTH. Return zero if no adjustment is necessary.
4738 For the PA: function calls, millicode calls, and backwards short
4739 conditional branches with unfilled delay slots need an adjustment by +1
4740 (to account for the NOP which will be inserted into the instruction stream).
4742 Also compute the length of an inline block move here as it is too
4743 complicated to express as a length attribute in pa.md. */
4744 int
4745 pa_adjust_insn_length (rtx insn, int length)
4747 rtx pat = PATTERN (insn);
4749 /* Jumps inside switch tables which have unfilled delay slots need
4750 adjustment. */
4751 if (GET_CODE (insn) == JUMP_INSN
4752 && GET_CODE (pat) == PARALLEL
4753 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4754 return 4;
4755 /* Millicode insn with an unfilled delay slot. */
4756 else if (GET_CODE (insn) == INSN
4757 && GET_CODE (pat) != SEQUENCE
4758 && GET_CODE (pat) != USE
4759 && GET_CODE (pat) != CLOBBER
4760 && get_attr_type (insn) == TYPE_MILLI)
4761 return 4;
4762 /* Block move pattern. */
4763 else if (GET_CODE (insn) == INSN
4764 && GET_CODE (pat) == PARALLEL
4765 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4766 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4767 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4768 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4769 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4770 return compute_movmem_length (insn) - 4;
4771 /* Block clear pattern. */
4772 else if (GET_CODE (insn) == INSN
4773 && GET_CODE (pat) == PARALLEL
4774 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4775 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4776 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4777 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4778 return compute_clrmem_length (insn) - 4;
4779 /* Conditional branch with an unfilled delay slot. */
4780 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4782 /* Adjust a short backwards conditional with an unfilled delay slot. */
4783 if (GET_CODE (pat) == SET
4784 && length == 4
4785 && ! forward_branch_p (insn))
4786 return 4;
4787 else if (GET_CODE (pat) == PARALLEL
4788 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4789 && length == 4)
4790 return 4;
4791 /* Adjust dbra insn with short backwards conditional branch with
4792 unfilled delay slot -- only for the case where the counter is in a
4793 general register. */
4794 else if (GET_CODE (pat) == PARALLEL
4795 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4796 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4797 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4798 && length == 4
4799 && ! forward_branch_p (insn))
4800 return 4;
4801 else
4802 return 0;
4804 return 0;
4807 /* Print operand X (an rtx) in assembler syntax to file FILE.
4808 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4809 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4811 void
4812 print_operand (FILE *file, rtx x, int code)
4814 switch (code)
4816 case '#':
4817 /* Output a 'nop' if there's nothing for the delay slot. */
4818 if (dbr_sequence_length () == 0)
4819 fputs ("\n\tnop", file);
4820 return;
4821 case '*':
4822 /* Output a nullification completer if there's nothing for the
4823 delay slot or nullification is requested. */
4824 if (dbr_sequence_length () == 0 ||
4825 (final_sequence &&
4826 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4827 fputs (",n", file);
4828 return;
4829 case 'R':
4830 /* Print out the second register name of a register pair.
4831 I.e., R (6) => 7. */
4832 fputs (reg_names[REGNO (x) + 1], file);
4833 return;
4834 case 'r':
4835 /* A register or zero. */
4836 if (x == const0_rtx
4837 || (x == CONST0_RTX (DFmode))
4838 || (x == CONST0_RTX (SFmode)))
4840 fputs ("%r0", file);
4841 return;
4843 else
4844 break;
4845 case 'f':
4846 /* A register or zero (floating point). */
4847 if (x == const0_rtx
4848 || (x == CONST0_RTX (DFmode))
4849 || (x == CONST0_RTX (SFmode)))
4851 fputs ("%fr0", file);
4852 return;
4854 else
4855 break;
4856 case 'A':
4858 rtx xoperands[2];
4860 xoperands[0] = XEXP (XEXP (x, 0), 0);
4861 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4862 output_global_address (file, xoperands[1], 0);
4863 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4864 return;
4867 case 'C': /* Plain (C)ondition */
4868 case 'X':
4869 switch (GET_CODE (x))
4871 case EQ:
4872 fputs ("=", file); break;
4873 case NE:
4874 fputs ("<>", file); break;
4875 case GT:
4876 fputs (">", file); break;
4877 case GE:
4878 fputs (">=", file); break;
4879 case GEU:
4880 fputs (">>=", file); break;
4881 case GTU:
4882 fputs (">>", file); break;
4883 case LT:
4884 fputs ("<", file); break;
4885 case LE:
4886 fputs ("<=", file); break;
4887 case LEU:
4888 fputs ("<<=", file); break;
4889 case LTU:
4890 fputs ("<<", file); break;
4891 default:
4892 gcc_unreachable ();
4894 return;
4895 case 'N': /* Condition, (N)egated */
4896 switch (GET_CODE (x))
4898 case EQ:
4899 fputs ("<>", file); break;
4900 case NE:
4901 fputs ("=", file); break;
4902 case GT:
4903 fputs ("<=", file); break;
4904 case GE:
4905 fputs ("<", file); break;
4906 case GEU:
4907 fputs ("<<", file); break;
4908 case GTU:
4909 fputs ("<<=", file); break;
4910 case LT:
4911 fputs (">=", file); break;
4912 case LE:
4913 fputs (">", file); break;
4914 case LEU:
4915 fputs (">>", file); break;
4916 case LTU:
4917 fputs (">>=", file); break;
4918 default:
4919 gcc_unreachable ();
4921 return;
4922 /* For floating point comparisons. Note that the output
4923 predicates are the complement of the desired condition. The
4924 conditions for GT, GE, LT, LE and LTGT cause an invalid
4925 operation exception if the result is unordered and this
4926 exception is enabled in the floating-point status register. */
4927 case 'Y':
4928 switch (GET_CODE (x))
4930 case EQ:
4931 fputs ("!=", file); break;
4932 case NE:
4933 fputs ("=", file); break;
4934 case GT:
4935 fputs ("!>", file); break;
4936 case GE:
4937 fputs ("!>=", file); break;
4938 case LT:
4939 fputs ("!<", file); break;
4940 case LE:
4941 fputs ("!<=", file); break;
4942 case LTGT:
4943 fputs ("!<>", file); break;
4944 case UNLE:
4945 fputs ("!?<=", file); break;
4946 case UNLT:
4947 fputs ("!?<", file); break;
4948 case UNGE:
4949 fputs ("!?>=", file); break;
4950 case UNGT:
4951 fputs ("!?>", file); break;
4952 case UNEQ:
4953 fputs ("!?=", file); break;
4954 case UNORDERED:
4955 fputs ("!?", file); break;
4956 case ORDERED:
4957 fputs ("?", file); break;
4958 default:
4959 gcc_unreachable ();
4961 return;
4962 case 'S': /* Condition, operands are (S)wapped. */
4963 switch (GET_CODE (x))
4965 case EQ:
4966 fputs ("=", file); break;
4967 case NE:
4968 fputs ("<>", file); break;
4969 case GT:
4970 fputs ("<", file); break;
4971 case GE:
4972 fputs ("<=", file); break;
4973 case GEU:
4974 fputs ("<<=", file); break;
4975 case GTU:
4976 fputs ("<<", file); break;
4977 case LT:
4978 fputs (">", file); break;
4979 case LE:
4980 fputs (">=", file); break;
4981 case LEU:
4982 fputs (">>=", file); break;
4983 case LTU:
4984 fputs (">>", file); break;
4985 default:
4986 gcc_unreachable ();
4988 return;
4989 case 'B': /* Condition, (B)oth swapped and negate. */
4990 switch (GET_CODE (x))
4992 case EQ:
4993 fputs ("<>", file); break;
4994 case NE:
4995 fputs ("=", file); break;
4996 case GT:
4997 fputs (">=", file); break;
4998 case GE:
4999 fputs (">", file); break;
5000 case GEU:
5001 fputs (">>", file); break;
5002 case GTU:
5003 fputs (">>=", file); break;
5004 case LT:
5005 fputs ("<=", file); break;
5006 case LE:
5007 fputs ("<", file); break;
5008 case LEU:
5009 fputs ("<<", file); break;
5010 case LTU:
5011 fputs ("<<=", file); break;
5012 default:
5013 gcc_unreachable ();
5015 return;
5016 case 'k':
5017 gcc_assert (GET_CODE (x) == CONST_INT);
5018 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5019 return;
5020 case 'Q':
5021 gcc_assert (GET_CODE (x) == CONST_INT);
5022 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5023 return;
5024 case 'L':
5025 gcc_assert (GET_CODE (x) == CONST_INT);
5026 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5027 return;
5028 case 'O':
5029 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5030 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5031 return;
5032 case 'p':
5033 gcc_assert (GET_CODE (x) == CONST_INT);
5034 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5035 return;
5036 case 'P':
5037 gcc_assert (GET_CODE (x) == CONST_INT);
5038 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5039 return;
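/* Worked examples for the bit-field letters above, taking
   INTVAL (x) == 3: 'k' prints -4 (the bitwise complement), 'Q'
   prints 61, 'L' prints 29, 'p' prints 60 and 'P' prints 28. */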
5040 case 'I':
5041 if (GET_CODE (x) == CONST_INT)
5042 fputs ("i", file);
5043 return;
5044 case 'M':
5045 case 'F':
5046 switch (GET_CODE (XEXP (x, 0)))
5048 case PRE_DEC:
5049 case PRE_INC:
5050 if (ASSEMBLER_DIALECT == 0)
5051 fputs ("s,mb", file);
5052 else
5053 fputs (",mb", file);
5054 break;
5055 case POST_DEC:
5056 case POST_INC:
5057 if (ASSEMBLER_DIALECT == 0)
5058 fputs ("s,ma", file);
5059 else
5060 fputs (",ma", file);
5061 break;
5062 case PLUS:
5063 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5064 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5066 if (ASSEMBLER_DIALECT == 0)
5067 fputs ("x", file);
5069 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5070 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5072 if (ASSEMBLER_DIALECT == 0)
5073 fputs ("x,s", file);
5074 else
5075 fputs (",s", file);
5077 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5078 fputs ("s", file);
5079 break;
5080 default:
5081 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5082 fputs ("s", file);
5083 break;
5085 return;
5086 case 'G':
5087 output_global_address (file, x, 0);
5088 return;
5089 case 'H':
5090 output_global_address (file, x, 1);
5091 return;
5092 case 0: /* Don't do anything special */
5093 break;
5094 case 'Z':
5096 unsigned op[3];
5097 compute_zdepwi_operands (INTVAL (x), op);
5098 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5099 return;
5101 case 'z':
5103 unsigned op[3];
5104 compute_zdepdi_operands (INTVAL (x), op);
5105 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5106 return;
5108 case 'c':
5109 /* We can get here from a .vtable_inherit due to our
5110 CONSTANT_ADDRESS_P rejecting perfectly good constant
5111 addresses. */
5112 break;
5113 default:
5114 gcc_unreachable ();
5116 if (GET_CODE (x) == REG)
5118 fputs (reg_names [REGNO (x)], file);
5119 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5121 fputs ("R", file);
5122 return;
5124 if (FP_REG_P (x)
5125 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5126 && (REGNO (x) & 1) == 0)
5127 fputs ("L", file);
5129 else if (GET_CODE (x) == MEM)
5131 int size = GET_MODE_SIZE (GET_MODE (x));
5132 rtx base = NULL_RTX;
5133 switch (GET_CODE (XEXP (x, 0)))
5135 case PRE_DEC:
5136 case POST_DEC:
5137 base = XEXP (XEXP (x, 0), 0);
5138 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5139 break;
5140 case PRE_INC:
5141 case POST_INC:
5142 base = XEXP (XEXP (x, 0), 0);
5143 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5144 break;
5145 case PLUS:
5146 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5147 fprintf (file, "%s(%s)",
5148 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5149 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5150 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5151 fprintf (file, "%s(%s)",
5152 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5153 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5154 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5155 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5157 /* Because the REG_POINTER flag can get lost during reload,
5158 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5159 index and base registers in the combined move patterns. */
5160 rtx base = XEXP (XEXP (x, 0), 1);
5161 rtx index = XEXP (XEXP (x, 0), 0);
5163 fprintf (file, "%s(%s)",
5164 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5166 else
5167 output_address (XEXP (x, 0));
5168 break;
5169 default:
5170 output_address (XEXP (x, 0));
5171 break;
5174 else
5175 output_addr_const (file, x);
5178 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5180 void
5181 output_global_address (FILE *file, rtx x, int round_constant)
5184 /* Imagine (high (const (plus ...))). */
5185 if (GET_CODE (x) == HIGH)
5186 x = XEXP (x, 0);
5188 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5189 output_addr_const (file, x);
5190 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5192 output_addr_const (file, x);
5193 fputs ("-$global$", file);
5195 else if (GET_CODE (x) == CONST)
5197 const char *sep = "";
5198 int offset = 0; /* assembler wants -$global$ at end */
5199 rtx base = NULL_RTX;
5201 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5203 case SYMBOL_REF:
5204 base = XEXP (XEXP (x, 0), 0);
5205 output_addr_const (file, base);
5206 break;
5207 case CONST_INT:
5208 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5209 break;
5210 default:
5211 gcc_unreachable ();
5214 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5216 case SYMBOL_REF:
5217 base = XEXP (XEXP (x, 0), 1);
5218 output_addr_const (file, base);
5219 break;
5220 case CONST_INT:
5221 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5222 break;
5223 default:
5224 gcc_unreachable ();
5227 /* How bogus. The compiler is apparently responsible for
5228 rounding the constant if it uses an LR field selector.
5230 The linker and/or assembler seem like a better place since
5231 they have to do this kind of thing already.
5233 If we fail to do this, HP's optimizing linker may eliminate
5234 an addil, but not update the ldw/stw/ldo instruction that
5235 uses the result of the addil. */
5236 if (round_constant)
5237 offset = ((offset + 0x1000) & ~0x1fff);
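/* For example, an offset of 0x2345 becomes
   (0x2345 + 0x1000) & ~0x1fff == 0x2000, i.e. it is rounded to the
   nearest multiple of 8k, matching the rounding the LR field
   selector applies to the left part of the constant. */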
5239 switch (GET_CODE (XEXP (x, 0)))
5241 case PLUS:
5242 if (offset < 0)
5244 offset = -offset;
5245 sep = "-";
5247 else
5248 sep = "+";
5249 break;
5251 case MINUS:
5252 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5253 sep = "-";
5254 break;
5256 default:
5257 gcc_unreachable ();
5260 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5261 fputs ("-$global$", file);
5262 if (offset)
5263 fprintf (file, "%s%d", sep, offset);
5265 else
5266 output_addr_const (file, x);
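/* For illustration (non-PIC, writable data assumed): given the
   address (const (plus (symbol_ref "foo") (const_int 4))), the code
   above emits "foo-$global$+4". */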
5269 /* Output boilerplate text to appear at the beginning of the file.
5270 There are several possible versions. */
5271 #define aputs(x) fputs(x, asm_out_file)
5272 static inline void
5273 pa_file_start_level (void)
5275 if (TARGET_64BIT)
5276 aputs ("\t.LEVEL 2.0w\n");
5277 else if (TARGET_PA_20)
5278 aputs ("\t.LEVEL 2.0\n");
5279 else if (TARGET_PA_11)
5280 aputs ("\t.LEVEL 1.1\n");
5281 else
5282 aputs ("\t.LEVEL 1.0\n");
5285 static inline void
5286 pa_file_start_space (int sortspace)
5288 aputs ("\t.SPACE $PRIVATE$");
5289 if (sortspace)
5290 aputs (",SORT=16");
5291 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5292 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5293 "\n\t.SPACE $TEXT$");
5294 if (sortspace)
5295 aputs (",SORT=8");
5296 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5297 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5300 static inline void
5301 pa_file_start_file (int want_version)
5303 if (write_symbols != NO_DEBUG)
5305 output_file_directive (asm_out_file, main_input_filename);
5306 if (want_version)
5307 aputs ("\t.version\t\"01.01\"\n");
5311 static inline void
5312 pa_file_start_mcount (const char *aswhat)
5314 if (profile_flag)
5315 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5318 static void
5319 pa_elf_file_start (void)
5321 pa_file_start_level ();
5322 pa_file_start_mcount ("ENTRY");
5323 pa_file_start_file (0);
5326 static void
5327 pa_som_file_start (void)
5329 pa_file_start_level ();
5330 pa_file_start_space (0);
5331 aputs ("\t.IMPORT $global$,DATA\n"
5332 "\t.IMPORT $$dyncall,MILLICODE\n");
5333 pa_file_start_mcount ("CODE");
5334 pa_file_start_file (0);
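/* For illustration, on a 32-bit PA 1.1 SOM target (no profiling or
   debug info assumed) the calls above emit a file header roughly like

     .LEVEL 1.1
     .SPACE $PRIVATE$
     .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
     .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
     .SPACE $TEXT$
     .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
     .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
     .IMPORT $global$,DATA
     .IMPORT $$dyncall,MILLICODE  */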
5337 static void
5338 pa_linux_file_start (void)
5340 pa_file_start_file (1);
5341 pa_file_start_level ();
5342 pa_file_start_mcount ("CODE");
5345 static void
5346 pa_hpux64_gas_file_start (void)
5348 pa_file_start_level ();
5349 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5350 if (profile_flag)
5351 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5352 #endif
5353 pa_file_start_file (1);
5356 static void
5357 pa_hpux64_hpas_file_start (void)
5359 pa_file_start_level ();
5360 pa_file_start_space (1);
5361 pa_file_start_mcount ("CODE");
5362 pa_file_start_file (0);
5364 #undef aputs
5366 /* Search the deferred plabel list for SYMBOL and return its internal
5367 label. If an entry for SYMBOL is not found, a new entry is created. */
5369 static rtx
5370 get_deferred_plabel (rtx symbol)
5372 const char *fname = XSTR (symbol, 0);
5373 size_t i;
5375 /* See if we have already put this function on the list of deferred
5376 plabels. This list is generally small, so a linear search is not
5377 too ugly. If it proves too slow, replace it with something faster. */
5378 for (i = 0; i < n_deferred_plabels; i++)
5379 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5380 break;
5382 /* If the deferred plabel list is empty, or this entry was not found
5383 on the list, create a new entry on the list. */
5384 if (deferred_plabels == NULL || i == n_deferred_plabels)
5386 tree id;
5388 if (deferred_plabels == 0)
5389 deferred_plabels = (struct deferred_plabel *)
5390 ggc_alloc (sizeof (struct deferred_plabel));
5391 else
5392 deferred_plabels = (struct deferred_plabel *)
5393 ggc_realloc (deferred_plabels,
5394 ((n_deferred_plabels + 1)
5395 * sizeof (struct deferred_plabel)));
5397 i = n_deferred_plabels++;
5398 deferred_plabels[i].internal_label = gen_label_rtx ();
5399 deferred_plabels[i].symbol = symbol;
5401 /* Gross. We have just implicitly taken the address of this
5402 function. Mark it in the same manner as assemble_name. */
5403 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5404 if (id)
5405 mark_referenced (id);
5408 return deferred_plabels[i].internal_label;
5411 static void
5412 output_deferred_plabels (void)
5414 size_t i;
5416 /* If we have some deferred plabels, then we need to switch into the
5417 data or readonly data section, and align it to a 4 or 8 byte boundary
5418 before outputting the deferred plabels. */
5419 if (n_deferred_plabels)
5421 switch_to_section (flag_pic ? data_section : readonly_data_section);
5422 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5425 /* Now output the deferred plabels. */
5426 for (i = 0; i < n_deferred_plabels; i++)
5428 targetm.asm_out.internal_label (asm_out_file, "L",
5429 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5430 assemble_integer (deferred_plabels[i].symbol,
5431 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5435 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5436 /* Initialize optabs to point to HPUX long double emulation routines. */
5437 static void
5438 pa_hpux_init_libfuncs (void)
5440 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5441 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5442 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5443 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5444 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5445 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5446 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5447 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5448 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5450 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5451 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5452 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5453 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5454 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5455 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5456 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5458 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5459 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5460 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5461 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5463 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5464 ? "__U_Qfcnvfxt_quad_to_sgl"
5465 : "_U_Qfcnvfxt_quad_to_sgl");
5466 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5467 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5468 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5470 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5471 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5472 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5473 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5475 #endif
5477 /* HP's millicode routines mean something special to the assembler.
5478 Keep track of which ones we have used. */
5480 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5481 static void import_milli (enum millicodes);
5482 static char imported[(int) end1000];
5483 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5484 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5485 #define MILLI_START 10
5487 static void
5488 import_milli (enum millicodes code)
5490 char str[sizeof (import_string)];
5492 if (!imported[(int) code])
5494 imported[(int) code] = 1;
5495 strcpy (str, import_string);
5496 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5497 output_asm_insn (str, 0);
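/* For example, import_milli (mulI) patches the template into

     .IMPORT $$mulI,MILLICODE

   the first time a multiply millicode call is output; the imported[]
   flags suppress the directive on later uses. */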
5501 /* The register constraints have put the operands and return value in
5502 the proper registers. */
5504 const char *
5505 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5507 import_milli (mulI);
5508 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5511 /* Emit the rtl for doing a division by a constant. */
5513 /* Do magic division millicodes exist for this value? */
5514 const int magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5516 /* We'll use an array to keep track of the magic millicodes and
5517 whether or not we've used them already. [n][0] is signed, [n][1] is
5518 unsigned. */
5520 static int div_milli[16][2];
5522 int
5523 emit_hpdiv_const (rtx *operands, int unsignedp)
5525 if (GET_CODE (operands[2]) == CONST_INT
5526 && INTVAL (operands[2]) > 0
5527 && INTVAL (operands[2]) < 16
5528 && magic_milli[INTVAL (operands[2])])
5530 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5532 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5533 emit
5534 (gen_rtx_PARALLEL
5535 (VOIDmode,
5536 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5537 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5538 SImode,
5539 gen_rtx_REG (SImode, 26),
5540 operands[2])),
5541 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5542 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5543 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5544 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5545 gen_rtx_CLOBBER (VOIDmode, ret))));
5546 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5547 return 1;
5549 return 0;
5552 const char *
5553 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5555 int divisor;
5557 /* If the divisor is a constant, try to use one of the special
5558 opcodes. */
5559 if (GET_CODE (operands[0]) == CONST_INT)
5561 static char buf[100];
5562 divisor = INTVAL (operands[0]);
5563 if (!div_milli[divisor][unsignedp])
5565 div_milli[divisor][unsignedp] = 1;
5566 if (unsignedp)
5567 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5568 else
5569 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5571 if (unsignedp)
5573 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5574 INTVAL (operands[0]));
5575 return output_millicode_call (insn,
5576 gen_rtx_SYMBOL_REF (SImode, buf));
5578 else
5580 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5581 INTVAL (operands[0]));
5582 return output_millicode_call (insn,
5583 gen_rtx_SYMBOL_REF (SImode, buf));
5586 /* Divisor isn't a special constant. */
5587 else
5589 if (unsignedp)
5591 import_milli (divU);
5592 return output_millicode_call (insn,
5593 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5595 else
5597 import_milli (divI);
5598 return output_millicode_call (insn,
5599 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
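/* For example, division by the constant 7 (magic_milli[7] is set)
   outputs ".IMPORT $$divI_7,MILLICODE" once and then a millicode
   call to $$divI_7, while a register divisor falls back to the
   generic $$divI or $$divU routine. */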
5604 /* Output a $$rem millicode to do mod. */
5606 const char *
5607 output_mod_insn (int unsignedp, rtx insn)
5609 if (unsignedp)
5611 import_milli (remU);
5612 return output_millicode_call (insn,
5613 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5615 else
5617 import_milli (remI);
5618 return output_millicode_call (insn,
5619 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5623 void
5624 output_arg_descriptor (rtx call_insn)
5626 const char *arg_regs[4];
5627 enum machine_mode arg_mode;
5628 rtx link;
5629 int i, output_flag = 0;
5630 int regno;
5632 /* We neither need nor want argument location descriptors for the
5633 64-bit runtime environment or the ELF32 environment. */
5634 if (TARGET_64BIT || TARGET_ELF32)
5635 return;
5637 for (i = 0; i < 4; i++)
5638 arg_regs[i] = 0;
5640 /* Specify explicitly that no argument relocations should take place
5641 if using the portable runtime calling conventions. */
5642 if (TARGET_PORTABLE_RUNTIME)
5644 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5645 asm_out_file);
5646 return;
5649 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5650 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5651 link; link = XEXP (link, 1))
5653 rtx use = XEXP (link, 0);
5655 if (! (GET_CODE (use) == USE
5656 && GET_CODE (XEXP (use, 0)) == REG
5657 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5658 continue;
5660 arg_mode = GET_MODE (XEXP (use, 0));
5661 regno = REGNO (XEXP (use, 0));
5662 if (regno >= 23 && regno <= 26)
5664 arg_regs[26 - regno] = "GR";
5665 if (arg_mode == DImode)
5666 arg_regs[25 - regno] = "GR";
5668 else if (regno >= 32 && regno <= 39)
5670 if (arg_mode == SFmode)
5671 arg_regs[(regno - 32) / 2] = "FR";
5672 else
5674 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5675 arg_regs[(regno - 34) / 2] = "FR";
5676 arg_regs[(regno - 34) / 2 + 1] = "FU";
5677 #else
5678 arg_regs[(regno - 34) / 2] = "FU";
5679 arg_regs[(regno - 34) / 2 + 1] = "FR";
5680 #endif
5684 fputs ("\t.CALL ", asm_out_file);
5685 for (i = 0; i < 4; i++)
5687 if (arg_regs[i])
5689 if (output_flag++)
5690 fputc (',', asm_out_file);
5691 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5694 fputc ('\n', asm_out_file);
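/* For illustration (argument registers assumed): a 32-bit call
   passing two ints (in %r26 and %r25) and a double (in %fr7) would
   be annotated roughly as

     .CALL ARGW0=GR,ARGW1=GR,ARGW2=FR,ARGW3=FU

   with HP_FP_ARG_DESCRIPTOR_REVERSED undefined. */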
5697 static enum reg_class
5698 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5699 enum machine_mode mode, secondary_reload_info *sri)
5701 int regno;
5703 /* Handle the easy stuff first. */
5704 if (class == R1_REGS)
5705 return NO_REGS;
5707 if (REG_P (x))
5709 regno = REGNO (x);
5710 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5711 return NO_REGS;
5713 else
5714 regno = -1;
5716 /* If we have something like (mem (mem (...))), we can safely assume the
5717 inner MEM will end up in a general register after reloading, so there's
5718 no need for a secondary reload. */
5719 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5720 return NO_REGS;
5722 /* Trying to load a constant into a FP register during PIC code
5723 generation requires %r1 as a scratch register. */
5724 if (flag_pic
5725 && (mode == SImode || mode == DImode)
5726 && FP_REG_CLASS_P (class)
5727 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5729 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5730 : CODE_FOR_reload_indi_r1);
5731 return NO_REGS;
5734 /* Secondary reloads of symbolic operands require %r1 as a scratch
5735 register when we're generating PIC code or when the operand isn't
5736 readonly. */
5737 if (symbolic_expression_p (x))
5739 if (GET_CODE (x) == HIGH)
5740 x = XEXP (x, 0);
5742 if (flag_pic || !read_only_operand (x, VOIDmode))
5744 gcc_assert (mode == SImode || mode == DImode);
5745 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5746 : CODE_FOR_reload_indi_r1);
5747 return NO_REGS;
5751 /* Profiling showed the PA port spends about 1.3% of its compilation
5752 time in true_regnum from calls inside pa_secondary_reload_class. */
5753 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5754 regno = true_regnum (x);
5756 /* In order to allow 14-bit displacements in integer loads and stores,
5757 we need to prevent reload from generating out of range integer mode
5758 loads and stores to the floating point registers. Previously, we
5759 used to call for a secondary reload and have emit_move_sequence()
5760 fix the instruction sequence. However, reload occasionally wouldn't
5761 generate the reload and we would end up with an invalid REG+D memory
5762 address. So, now we use an intermediate general register for most
5763 memory loads and stores. */
5764 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5765 && GET_MODE_CLASS (mode) == MODE_INT
5766 && FP_REG_CLASS_P (class))
5768 /* Reload passes (mem:SI (reg/f:DI 30 %r30) when it wants to check
5769 the secondary reload needed for a pseudo. It never passes a
5770 REG+D address. */
5771 if (GET_CODE (x) == MEM)
5773 x = XEXP (x, 0);
5775 /* We don't need an intermediate for indexed and LO_SUM DLT
5776 memory addresses. When INT14_OK_STRICT is true, it might
5777 appear that we could directly allow register indirect
5778 memory addresses. However, this doesn't work because we
5779 don't support SUBREGs in floating-point register copies
5780 and reload doesn't tell us when it's going to use a SUBREG. */
5781 if (IS_INDEX_ADDR_P (x)
5782 || IS_LO_SUM_DLT_ADDR_P (x))
5783 return NO_REGS;
5785 /* Otherwise, we need an intermediate general register. */
5786 return GENERAL_REGS;
5789 /* Request a secondary reload with a general scratch register
5790 for everything else. ??? Could symbolic operands be handled
5791 directly when generating non-pic PA 2.0 code? */
5792 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5793 return NO_REGS;
5796 /* We need a secondary register (GPR) for copies between the SAR
5797 and anything other than a general register. */
5798 if (class == SHIFT_REGS && (regno <= 0 || regno >= 32))
5800 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5801 return NO_REGS;
5804 /* A SAR<->FP register copy requires a secondary register (GPR) as
5805 well as secondary memory. */
5806 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5807 && (REGNO_REG_CLASS (regno) == SHIFT_REGS
5808 && FP_REG_CLASS_P (class)))
5809 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5811 return NO_REGS;
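/* A sketch of how the cases above combine (illustrative, not from the
   original source): under -fpic, loading (const_int 5) in SImode into an
   FP register class takes the constant branch and requests
   CODE_FOR_reload_insi_r1; an integer-mode load from memory into an FP
   register falls into the MODE_INT test and returns GENERAL_REGS, so the
   value is staged through an intermediate general register; most other
   cases just ask for a generic scratch via reload_in_optab or
   reload_out_optab.  */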
5814 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
5815 is only marked as live on entry by df-scan when it is a fixed
5816 register. It isn't a fixed register in the 64-bit runtime,
5817 so we need to mark it here. */
5819 static void
5820 pa_extra_live_on_entry (bitmap regs)
5822 if (TARGET_64BIT)
5823 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
5826 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
5827 to prevent it from being deleted. */
5829 rtx
5830 pa_eh_return_handler_rtx (void)
5832 rtx tmp;
5834 tmp = gen_rtx_PLUS (word_mode, frame_pointer_rtx,
5835 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
5836 tmp = gen_rtx_MEM (word_mode, tmp);
5837 tmp->volatil = 1;
5838 return tmp;
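/* Illustrative expansion of the code above: for the 32-bit runtime the
   handler slot is, in effect,

	(mem:SI (plus:SI (reg/f:SI %fp) (const_int -20)))

   with the volatile bit set; the 64-bit runtime uses DImode and
   offset -16.  */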
5841 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5842 by invisible reference. As a GCC extension, we also pass anything
5843 with a zero or variable size by reference.
5845 The 64-bit runtime does not describe passing any types by invisible
5846 reference. The internals of GCC can't currently handle passing
5847 empty structures, and zero or variable length arrays when they are
5848 not passed entirely on the stack or by reference. Thus, as a GCC
5849 extension, we pass these types by reference. The HP compiler doesn't
5850 support these types, so hopefully there shouldn't be any compatibility
5851 issues. This may have to be revisited when HP releases a C99 compiler
5852 or updates the ABI. */
5854 static bool
5855 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5856 enum machine_mode mode, const_tree type,
5857 bool named ATTRIBUTE_UNUSED)
5859 HOST_WIDE_INT size;
5861 if (type)
5862 size = int_size_in_bytes (type);
5863 else
5864 size = GET_MODE_SIZE (mode);
5866 if (TARGET_64BIT)
5867 return size <= 0;
5868 else
5869 return size <= 0 || size > 8;
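/* Worked examples of the rules above (illustrative): in the 32-bit
   runtime, a 12-byte struct (size > 8) and a zero-sized struct are both
   passed by reference, while an 8-byte double is passed by value; in the
   64-bit runtime, only the zero and variable size cases (size <= 0) go
   by reference.  */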
5872 enum direction
5873 function_arg_padding (enum machine_mode mode, const_tree type)
5875 if (mode == BLKmode
5876 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5878 /* Return none if justification is not required. */
5879 if (type
5880 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5881 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5882 return none;
5884 /* The directions set here are ignored when a BLKmode argument larger
5885 than a word is placed in a register. Different code is used for
5886 the stack and registers. This makes it difficult to have a
5887 consistent data representation for both the stack and registers.
5888 For both runtimes, the justification and padding for arguments on
5889 the stack and in registers should be identical. */
5890 if (TARGET_64BIT)
5891 /* The 64-bit runtime specifies left justification for aggregates. */
5892 return upward;
5893 else
5894 /* The 32-bit runtime architecture specifies right justification.
5895 When the argument is passed on the stack, the argument is padded
5896 with garbage on the left. The HP compiler pads with zeros. */
5897 return downward;
5900 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5901 return downward;
5902 else
5903 return none;
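/* Illustrative cases for the padding rules above: a 3-byte BLKmode
   aggregate in the 32-bit runtime is padded downward, occupying the
   rightmost bytes of its slot with garbage on the left; the 64-bit
   runtime pads the same aggregate upward (left justified).  A scalar
   narrower than PARM_BOUNDARY, such as an HImode short, is likewise
   padded downward.  */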
5907 /* Do what is necessary for `va_start'. We look at the current function
5908 to determine if stdargs or varargs is used and fill in an initial
5909 va_list. A pointer to this constructor is returned. */
5911 static rtx
5912 hppa_builtin_saveregs (void)
5914 rtx offset, dest;
5915 tree fntype = TREE_TYPE (current_function_decl);
5916 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5917 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5918 != void_type_node)))
5919 ? UNITS_PER_WORD : 0);
5921 if (argadj)
5922 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5923 else
5924 offset = current_function_arg_offset_rtx;
5926 if (TARGET_64BIT)
5928 int i, off;
5930 /* Adjust for varargs/stdarg differences. */
5931 if (argadj)
5932 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5933 else
5934 offset = current_function_arg_offset_rtx;
5936 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5937 from the incoming arg pointer and growing to larger addresses. */
5938 for (i = 26, off = -64; i >= 19; i--, off += 8)
5939 emit_move_insn (gen_rtx_MEM (word_mode,
5940 plus_constant (arg_pointer_rtx, off)),
5941 gen_rtx_REG (word_mode, i));
5943 /* The incoming args pointer points just beyond the flushback area;
5944 normally this is not a serious concern. However, when we are doing
5945 varargs/stdargs we want to make the arg pointer point to the start
5946 of the incoming argument area. */
5947 emit_move_insn (virtual_incoming_args_rtx,
5948 plus_constant (arg_pointer_rtx, -64));
5950 /* Now return a pointer to the first anonymous argument. */
5951 return copy_to_reg (expand_binop (Pmode, add_optab,
5952 virtual_incoming_args_rtx,
5953 offset, 0, 0, OPTAB_LIB_WIDEN));
5956 /* Store general registers on the stack. */
5957 dest = gen_rtx_MEM (BLKmode,
5958 plus_constant (current_function_internal_arg_pointer,
5959 -16));
5960 set_mem_alias_set (dest, get_varargs_alias_set ());
5961 set_mem_align (dest, BITS_PER_WORD);
5962 move_block_from_reg (23, dest, 4);
5964 /* move_block_from_reg will emit code to store the argument registers
5965 individually as scalar stores.
5967 However, other insns may later load from the same addresses for
5968 a structure load (passing a struct to a varargs routine).
5970 The alias code assumes that such aliasing can never happen, so we
5971 have to keep memory referencing insns from moving up beyond the
5972 last argument register store. So we emit a blockage insn here. */
5973 emit_insn (gen_blockage ());
5975 return copy_to_reg (expand_binop (Pmode, add_optab,
5976 current_function_internal_arg_pointer,
5977 offset, 0, 0, OPTAB_LIB_WIDEN));
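/* Sketch of the 64-bit save area written by the loop above
   (for (i = 26, off = -64; ...)), with AP the incoming arg pointer:

	AP-64: %r26    AP-56: %r25    AP-48: %r24    AP-40: %r23
	AP-32: %r22    AP-24: %r21    AP-16: %r20    AP-8:  %r19

   In the 32-bit case, move_block_from_reg instead stores %r23..%r26 in
   the 16 bytes below the internal arg pointer.  (Illustrative summary,
   not part of the original source.)  */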
5980 static void
5981 hppa_va_start (tree valist, rtx nextarg)
5983 nextarg = expand_builtin_saveregs ();
5984 std_expand_builtin_va_start (valist, nextarg);
5987 static tree
5988 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5990 if (TARGET_64BIT)
5992 /* Args grow upward. We can use the generic routines. */
5993 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5995 else /* !TARGET_64BIT */
5997 tree ptr = build_pointer_type (type);
5998 tree valist_type;
5999 tree t, u;
6000 unsigned int size, ofs;
6001 bool indirect;
6003 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6004 if (indirect)
6006 type = ptr;
6007 ptr = build_pointer_type (type);
6009 size = int_size_in_bytes (type);
6010 valist_type = TREE_TYPE (valist);
6012 /* Args grow down. Not handled by generic routines. */
6014 u = fold_convert (sizetype, size_in_bytes (type));
6015 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6016 t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);
6018 /* Copied from va-pa.h, but we probably don't need to align to
6019 word size, since we generate and preserve that invariant. */
6020 u = size_int (size > 4 ? -8 : -4);
6021 t = fold_convert (sizetype, t);
6022 t = build2 (BIT_AND_EXPR, sizetype, t, u);
6023 t = fold_convert (valist_type, t);
6025 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6027 ofs = (8 - size) % 4;
6028 if (ofs != 0)
6030 u = size_int (ofs);
6031 t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
6034 t = fold_convert (ptr, t);
6035 t = build_va_arg_indirect_ref (t);
6037 if (indirect)
6038 t = build_va_arg_indirect_ref (t);
6040 return t;
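/* Worked example of the 32-bit arithmetic above (illustrative): for a
   2-byte short, size = 2, so the new valist is (valist - 2) & -4 and
   ofs = (8 - 2) % 4 = 2, fetching the value at valist + 2, i.e. right
   justified in its word; for an 8-byte double, the new valist is
   (valist - 8) & -8 and ofs = 0, so the value is fetched at the new
   valist directly.  */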
6044 /* True if MODE is valid for the target. By "valid", we mean able to
6045 be manipulated in non-trivial ways. In particular, this means all
6046 the arithmetic is supported.
6048 Currently, TImode is not valid as the HP 64-bit runtime documentation
6049 doesn't specify the alignment and calling conventions for this type.
6050 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6051 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6053 static bool
6054 pa_scalar_mode_supported_p (enum machine_mode mode)
6056 int precision = GET_MODE_PRECISION (mode);
6058 switch (GET_MODE_CLASS (mode))
6060 case MODE_PARTIAL_INT:
6061 case MODE_INT:
6062 if (precision == CHAR_TYPE_SIZE)
6063 return true;
6064 if (precision == SHORT_TYPE_SIZE)
6065 return true;
6066 if (precision == INT_TYPE_SIZE)
6067 return true;
6068 if (precision == LONG_TYPE_SIZE)
6069 return true;
6070 if (precision == LONG_LONG_TYPE_SIZE)
6071 return true;
6072 return false;
6074 case MODE_FLOAT:
6075 if (precision == FLOAT_TYPE_SIZE)
6076 return true;
6077 if (precision == DOUBLE_TYPE_SIZE)
6078 return true;
6079 if (precision == LONG_DOUBLE_TYPE_SIZE)
6080 return true;
6081 return false;
6083 case MODE_DECIMAL_FLOAT:
6084 return false;
6086 default:
6087 gcc_unreachable ();
6091 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6092 it branches into the delay slot. Otherwise, return FALSE. */
6094 static bool
6095 branch_to_delay_slot_p (rtx insn)
6097 rtx jump_insn;
6099 if (dbr_sequence_length ())
6100 return FALSE;
6102 jump_insn = next_active_insn (JUMP_LABEL (insn));
6103 while (insn)
6105 insn = next_active_insn (insn);
6106 if (jump_insn == insn)
6107 return TRUE;
6109 /* We can't rely on the length of asms. So, we return FALSE when
6110 the branch is followed by an asm. */
6111 if (!insn
6112 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6113 || asm_noperands (PATTERN (insn)) >= 0
6114 || get_attr_length (insn) > 0)
6115 break;
6118 return FALSE;
6121 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6123 This occurs when INSN has an unfilled delay slot and is followed
6124 by an asm. Disaster can occur if the asm is empty and the jump
6125 branches into the delay slot. So, we add a nop in the delay slot
6126 when this occurs. */
6128 static bool
6129 branch_needs_nop_p (rtx insn)
6131 rtx jump_insn;
6133 if (dbr_sequence_length ())
6134 return FALSE;
6136 jump_insn = next_active_insn (JUMP_LABEL (insn));
6137 while (insn)
6139 insn = next_active_insn (insn);
6140 if (!insn || jump_insn == insn)
6141 return TRUE;
6143 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6144 || asm_noperands (PATTERN (insn)) >= 0)
6145 && get_attr_length (insn) > 0)
6146 break;
6149 return FALSE;
6152 /* Return TRUE if INSN, a forward jump insn, can use nullification
6153 to skip the following instruction. This avoids an extra cycle due
6154 to a mis-predicted branch when we fall through. */
6156 static bool
6157 use_skip_p (rtx insn)
6159 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6161 while (insn)
6163 insn = next_active_insn (insn);
6165 /* We can't rely on the length of asms, so we can't skip asms. */
6166 if (!insn
6167 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6168 || asm_noperands (PATTERN (insn)) >= 0)
6169 break;
6170 if (get_attr_length (insn) == 4
6171 && jump_insn == next_active_insn (insn))
6172 return TRUE;
6173 if (get_attr_length (insn) > 0)
6174 break;
6177 return FALSE;
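/* Sketch of the payoff (illustrative; the register numbers and label
   are hypothetical): a nullifying forward branch over a single 4-byte
   insn,

	cmpb,cond,n %r26,%r25,L$1
	add %r3,%r4,%r5
   L$1:

   can be replaced by "cmpclr,cond %r26,%r25,%r0", which nullifies the
   add when the condition holds, so the fall-through path pays no branch
   penalty.  */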
6180 /* This routine handles all the normal conditional branch sequences we
6181 might need to generate. It handles compare immediate vs compare
6182 register, nullification of delay slots, varying length branches,
6183 negated branches, and all combinations of the above. It returns the
6184 output appropriate to emit the branch corresponding to all given
6185 parameters. */
6187 const char *
6188 output_cbranch (rtx *operands, int negated, rtx insn)
6190 static char buf[100];
6191 bool useskip;
6192 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6193 int length = get_attr_length (insn);
6194 int xdelay;
6196 /* A conditional branch to the following instruction (e.g. the delay slot)
6197 is asking for a disaster. This can happen when not optimizing and
6198 when jump optimization fails.
6200 While it is usually safe to emit nothing, this can fail if the
6201 preceding instruction is a nullified branch with an empty delay
6202 slot and the same branch target as this branch. We could check
6203 for this but jump optimization should eliminate nop jumps. It
6204 is always safe to emit a nop. */
6205 if (branch_to_delay_slot_p (insn))
6206 return "nop";
6208 /* The doubleword form of the cmpib instruction doesn't have the LEU
6209 and GTU conditions while the cmpb instruction does. Since we accept
6210 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6211 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6212 operands[2] = gen_rtx_REG (DImode, 0);
6213 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6214 operands[1] = gen_rtx_REG (DImode, 0);
6216 /* If this is a long branch with its delay slot unfilled, set `nullify'
6217 as it can nullify the delay slot and save a nop. */
6218 if (length == 8 && dbr_sequence_length () == 0)
6219 nullify = 1;
6221 /* If this is a short forward conditional branch which did not get
6222 its delay slot filled, the delay slot can still be nullified. */
6223 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6224 nullify = forward_branch_p (insn);
6226 /* A forward branch over a single nullified insn can be done with a
6227 comclr instruction. This avoids a single cycle penalty due to a
6228 mis-predicted branch if we fall through (branch not taken). */
6229 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6231 switch (length)
6233 /* All short conditional branches except backwards with an unfilled
6234 delay slot. */
6235 case 4:
6236 if (useskip)
6237 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6238 else
6239 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6240 if (GET_MODE (operands[1]) == DImode)
6241 strcat (buf, "*");
6242 if (negated)
6243 strcat (buf, "%B3");
6244 else
6245 strcat (buf, "%S3");
6246 if (useskip)
6247 strcat (buf, " %2,%r1,%%r0");
6248 else if (nullify)
6250 if (branch_needs_nop_p (insn))
6251 strcat (buf, ",n %2,%r1,%0%#");
6252 else
6253 strcat (buf, ",n %2,%r1,%0");
6255 else
6256 strcat (buf, " %2,%r1,%0");
6257 break;
6259 /* All long conditionals. Note a short backward branch with an
6260 unfilled delay slot is treated just like a long backward branch
6261 with an unfilled delay slot. */
6262 case 8:
6263 /* Handle weird backwards branch with a filled delay slot
6264 which is nullified. */
6265 if (dbr_sequence_length () != 0
6266 && ! forward_branch_p (insn)
6267 && nullify)
6269 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6270 if (GET_MODE (operands[1]) == DImode)
6271 strcat (buf, "*");
6272 if (negated)
6273 strcat (buf, "%S3");
6274 else
6275 strcat (buf, "%B3");
6276 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6278 /* Handle short backwards branch with an unfilled delay slot.
6279 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6280 taken and untaken branches. */
6281 else if (dbr_sequence_length () == 0
6282 && ! forward_branch_p (insn)
6283 && INSN_ADDRESSES_SET_P ()
6284 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6285 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6287 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6288 if (GET_MODE (operands[1]) == DImode)
6289 strcat (buf, "*");
6290 if (negated)
6291 strcat (buf, "%B3 %2,%r1,%0%#");
6292 else
6293 strcat (buf, "%S3 %2,%r1,%0%#");
6295 else
6297 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6298 if (GET_MODE (operands[1]) == DImode)
6299 strcat (buf, "*");
6300 if (negated)
6301 strcat (buf, "%S3");
6302 else
6303 strcat (buf, "%B3");
6304 if (nullify)
6305 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6306 else
6307 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6309 break;
6311 default:
6312 /* The reversed conditional branch must branch over one additional
6313 instruction if the delay slot is filled and needs to be extracted
6314 by output_lbranch. If the delay slot is empty or this is a
6315 nullified forward branch, the instruction after the reversed
6316 condition branch must be nullified. */
6317 if (dbr_sequence_length () == 0
6318 || (nullify && forward_branch_p (insn)))
6320 nullify = 1;
6321 xdelay = 0;
6322 operands[4] = GEN_INT (length);
6324 else
6326 xdelay = 1;
6327 operands[4] = GEN_INT (length + 4);
6330 /* Create a reversed conditional branch which branches around
6331 the following insns. */
6332 if (GET_MODE (operands[1]) != DImode)
6334 if (nullify)
6336 if (negated)
6337 strcpy (buf,
6338 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6339 else
6340 strcpy (buf,
6341 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6343 else
6345 if (negated)
6346 strcpy (buf,
6347 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6348 else
6349 strcpy (buf,
6350 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6353 else
6355 if (nullify)
6357 if (negated)
6358 strcpy (buf,
6359 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6360 else
6361 strcpy (buf,
6362 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6364 else
6366 if (negated)
6367 strcpy (buf,
6368 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6369 else
6370 strcpy (buf,
6371 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6375 output_asm_insn (buf, operands);
6376 return output_lbranch (operands[0], insn, xdelay);
6378 return buf;
6381 /* This routine handles output of long unconditional branches that
6382 exceed the maximum range of a simple branch instruction. Since
6383 we don't have a register available for the branch, we save register
6384 %r1 in the frame marker, load the branch destination DEST into %r1,
6385 execute the branch, and restore %r1 in the delay slot of the branch.
6387 Since long branches may have an insn in the delay slot and the
6388 delay slot is used to restore %r1, we in general need to extract
6389 this insn and execute it before the branch. However, to facilitate
6390 use of this function by conditional branches, we also provide an
6391 option to not extract the delay insn so that it will be emitted
6392 after the long branch. So, if there is an insn in the delay slot,
6393 it is extracted if XDELAY is nonzero.
6395 The lengths of the various long-branch sequences are 20, 16 and 24
6396 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6398 const char *
6399 output_lbranch (rtx dest, rtx insn, int xdelay)
6401 rtx xoperands[2];
6403 xoperands[0] = dest;
6405 /* First, free up the delay slot. */
6406 if (xdelay && dbr_sequence_length () != 0)
6408 /* We can't handle a jump in the delay slot. */
6409 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6411 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6412 optimize, 0, NULL);
6414 /* Now delete the delay insn. */
6415 SET_INSN_DELETED (NEXT_INSN (insn));
6418 /* Output an insn to save %r1. The runtime documentation doesn't
6419 specify whether the "Clean Up" slot in the caller's frame can
6420 be clobbered by the callee. It isn't copied by HP's builtin
6421 alloca, so this suggests that it can be clobbered if necessary.
6422 The "Static Link" location is copied by HP builtin alloca, so
6423 we avoid using it. Using the cleanup slot might be a problem
6424 if we have to interoperate with languages that pass cleanup
6425 information. However, it should be possible to handle these
6426 situations with GCC's asm feature.
6428 The "Current RP" slot is reserved for the called procedure, so
6429 we try to use it when we don't have a frame of our own. It's
6430 rather unlikely that we won't have a frame when we need to emit
6431 a very long branch.
6433 Really the way to go long term is a register scavenger; go to
6434 the target of the jump and find a register which we can use
6435 as a scratch to hold the value in %r1. Then, we wouldn't have
6436 to free up the delay slot or clobber a slot that may be needed
6437 for other purposes. */
6438 if (TARGET_64BIT)
6440 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6441 /* Use the return pointer slot in the frame marker. */
6442 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6443 else
6444 /* Use the slot at -40 in the frame marker since HP builtin
6445 alloca doesn't copy it. */
6446 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6448 else
6450 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6451 /* Use the return pointer slot in the frame marker. */
6452 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6453 else
6454 /* Use the "Clean Up" slot in the frame marker. In GCC,
6455 the only other use of this location is for copying a
6456 floating point double argument from a floating-point
6457 register to two general registers. The copy is done
6458 as an "atomic" operation when outputting a call, so it
6459 won't interfere with our using the location here. */
6460 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6463 if (TARGET_PORTABLE_RUNTIME)
6465 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6466 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6467 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6469 else if (flag_pic)
6471 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6472 if (TARGET_SOM || !TARGET_GAS)
6474 xoperands[1] = gen_label_rtx ();
6475 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6476 targetm.asm_out.internal_label (asm_out_file, "L",
6477 CODE_LABEL_NUMBER (xoperands[1]));
6478 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6480 else
6482 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6483 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6485 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6487 else
6488 /* Now output a very long branch to the original target. */
6489 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6491 /* Now restore the value of %r1 in the delay slot. */
6492 if (TARGET_64BIT)
6494 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6495 return "ldd -16(%%r30),%%r1";
6496 else
6497 return "ldd -40(%%r30),%%r1";
6499 else
6501 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6502 return "ldw -20(%%r30),%%r1";
6503 else
6504 return "ldw -12(%%r30),%%r1";
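/* Illustrative example of the 16-byte non-PIC sequence assembled above
   for a function with a frame ("target" stands for the actual label):

	stw %r1,-12(%r30)	; save %r1 in the Clean Up slot
	ldil L'target,%r1
	be R'target(%sr4,%r1)	; very long branch
	ldw -12(%r30),%r1	; delay slot: restore %r1  */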
6508 /* This routine handles all the branch-on-bit conditional branch sequences we
6509 might need to generate. It handles nullification of delay slots,
6510 varying length branches, negated branches and all combinations of the
6511 above. It returns the appropriate output template to emit the branch. */
6513 const char *
6514 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6516 static char buf[100];
6517 bool useskip;
6518 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6519 int length = get_attr_length (insn);
6520 int xdelay;
6522 /* A conditional branch to the following instruction (e.g. the delay slot) is
6523 asking for a disaster. I do not think this can happen as this pattern
6524 is only used when optimizing; jump optimization should eliminate the
6525 jump. But be prepared just in case. */
6527 if (branch_to_delay_slot_p (insn))
6528 return "nop";
6530 /* If this is a long branch with its delay slot unfilled, set `nullify'
6531 as it can nullify the delay slot and save a nop. */
6532 if (length == 8 && dbr_sequence_length () == 0)
6533 nullify = 1;
6535 /* If this is a short forward conditional branch which did not get
6536 its delay slot filled, the delay slot can still be nullified. */
6537 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6538 nullify = forward_branch_p (insn);
6540 /* A forward branch over a single nullified insn can be done with an
6541 extrs instruction. This avoids a single cycle penalty due to a
6542 mis-predicted branch if we fall through (branch not taken). */
6543 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6545 switch (length)
6548 /* All short conditional branches except backwards with an unfilled
6549 delay slot. */
6550 case 4:
6551 if (useskip)
6552 strcpy (buf, "{extrs,|extrw,s,}");
6553 else
6554 strcpy (buf, "bb,");
6555 if (useskip && GET_MODE (operands[0]) == DImode)
6556 strcpy (buf, "extrd,s,*");
6557 else if (GET_MODE (operands[0]) == DImode)
6558 strcpy (buf, "bb,*");
6559 if ((which == 0 && negated)
6560 || (which == 1 && ! negated))
6561 strcat (buf, ">=");
6562 else
6563 strcat (buf, "<");
6564 if (useskip)
6565 strcat (buf, " %0,%1,1,%%r0");
6566 else if (nullify && negated)
6568 if (branch_needs_nop_p (insn))
6569 strcat (buf, ",n %0,%1,%3%#");
6570 else
6571 strcat (buf, ",n %0,%1,%3");
6573 else if (nullify && ! negated)
6575 if (branch_needs_nop_p (insn))
6576 strcat (buf, ",n %0,%1,%2%#");
6577 else
6578 strcat (buf, ",n %0,%1,%2");
6580 else if (! nullify && negated)
6581 strcat (buf, " %0,%1,%3");
6582 else if (! nullify && ! negated)
6583 strcat (buf, " %0,%1,%2");
6584 break;
6586 /* All long conditionals. Note a short backward branch with an
6587 unfilled delay slot is treated just like a long backward branch
6588 with an unfilled delay slot. */
6589 case 8:
6590 /* Handle weird backwards branch with a filled delay slot
6591 which is nullified. */
6592 if (dbr_sequence_length () != 0
6593 && ! forward_branch_p (insn)
6594 && nullify)
6596 strcpy (buf, "bb,");
6597 if (GET_MODE (operands[0]) == DImode)
6598 strcat (buf, "*");
6599 if ((which == 0 && negated)
6600 || (which == 1 && ! negated))
6601 strcat (buf, "<");
6602 else
6603 strcat (buf, ">=");
6604 if (negated)
6605 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6606 else
6607 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6609 /* Handle short backwards branch with an unfilled delay slot.
6610 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6611 taken and untaken branches. */
6612 else if (dbr_sequence_length () == 0
6613 && ! forward_branch_p (insn)
6614 && INSN_ADDRESSES_SET_P ()
6615 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6616 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6618 strcpy (buf, "bb,");
6619 if (GET_MODE (operands[0]) == DImode)
6620 strcat (buf, "*");
6621 if ((which == 0 && negated)
6622 || (which == 1 && ! negated))
6623 strcat (buf, ">=");
6624 else
6625 strcat (buf, "<");
6626 if (negated)
6627 strcat (buf, " %0,%1,%3%#");
6628 else
6629 strcat (buf, " %0,%1,%2%#");
6631 else
6633 if (GET_MODE (operands[0]) == DImode)
6634 strcpy (buf, "extrd,s,*");
6635 else
6636 strcpy (buf, "{extrs,|extrw,s,}");
6637 if ((which == 0 && negated)
6638 || (which == 1 && ! negated))
6639 strcat (buf, "<");
6640 else
6641 strcat (buf, ">=");
6642 if (nullify && negated)
6643 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6644 else if (nullify && ! negated)
6645 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6646 else if (negated)
6647 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6648 else
6649 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6651 break;
6653 default:
6654 /* The reversed conditional branch must branch over one additional
6655 instruction if the delay slot is filled and needs to be extracted
6656 by output_lbranch. If the delay slot is empty or this is a
6657 nullified forward branch, the instruction after the reversed
6658 condition branch must be nullified. */
6659 if (dbr_sequence_length () == 0
6660 || (nullify && forward_branch_p (insn)))
6662 nullify = 1;
6663 xdelay = 0;
6664 operands[4] = GEN_INT (length);
6666 else
6668 xdelay = 1;
6669 operands[4] = GEN_INT (length + 4);
6672 if (GET_MODE (operands[0]) == DImode)
6673 strcpy (buf, "bb,*");
6674 else
6675 strcpy (buf, "bb,");
6676 if ((which == 0 && negated)
6677 || (which == 1 && !negated))
6678 strcat (buf, "<");
6679 else
6680 strcat (buf, ">=");
6681 if (nullify)
6682 strcat (buf, ",n %0,%1,.+%4");
6683 else
6684 strcat (buf, " %0,%1,.+%4");
6685 output_asm_insn (buf, operands);
6686 return output_lbranch (negated ? operands[3] : operands[2],
6687 insn, xdelay);
6689 return buf;
6692 /* This routine handles all the branch-on-variable-bit conditional branch
6693 sequences we might need to generate. It handles nullification of delay
6694 slots, varying length branches, negated branches and all combinations
6695 of the above. It returns the appropriate output template to emit the
6696 branch. */
6698 const char *
6699 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6701 static char buf[100];
6702 bool useskip;
6703 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6704 int length = get_attr_length (insn);
6705 int xdelay;
6707 /* A conditional branch to the following instruction (e.g. the delay slot) is
6708 asking for a disaster. I do not think this can happen as this pattern
6709 is only used when optimizing; jump optimization should eliminate the
6710 jump. But be prepared just in case. */
6712 if (branch_to_delay_slot_p (insn))
6713 return "nop";
6715 /* If this is a long branch with its delay slot unfilled, set `nullify'
6716 as it can nullify the delay slot and save a nop. */
6717 if (length == 8 && dbr_sequence_length () == 0)
6718 nullify = 1;
6720 /* If this is a short forward conditional branch which did not get
6721 its delay slot filled, the delay slot can still be nullified. */
6722 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6723 nullify = forward_branch_p (insn);
6725 /* A forward branch over a single nullified insn can be done with an
6726 extrs instruction. This avoids a single cycle penalty due to a
6727 mis-predicted branch if we fall through (branch not taken). */
6728 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6730 switch (length)
6733 /* All short conditional branches except backwards with an unfilled
6734 delay slot. */
6735 case 4:
6736 if (useskip)
6737 strcpy (buf, "{vextrs,|extrw,s,}");
6738 else
6739 strcpy (buf, "{bvb,|bb,}");
6740 if (useskip && GET_MODE (operands[0]) == DImode)
6741 strcpy (buf, "extrd,s,*");
6742 else if (GET_MODE (operands[0]) == DImode)
6743 strcpy (buf, "bb,*");
6744 if ((which == 0 && negated)
6745 || (which == 1 && ! negated))
6746 strcat (buf, ">=");
6747 else
6748 strcat (buf, "<");
6749 if (useskip)
6750 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6751 else if (nullify && negated)
6753 if (branch_needs_nop_p (insn))
6754 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6755 else
6756 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6758 else if (nullify && ! negated)
6760 if (branch_needs_nop_p (insn))
6761 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
6762 else
6763 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6765 else if (! nullify && negated)
6766 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
6767 else if (! nullify && ! negated)
6768 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6769 break;
6771 /* All long conditionals. Note a short backward branch with an
6772 unfilled delay slot is treated just like a long backward branch
6773 with an unfilled delay slot. */
6774 case 8:
6775 /* Handle weird backwards branch with a filled delay slot
6776 which is nullified. */
6777 if (dbr_sequence_length () != 0
6778 && ! forward_branch_p (insn)
6779 && nullify)
6781 strcpy (buf, "{bvb,|bb,}");
6782 if (GET_MODE (operands[0]) == DImode)
6783 strcat (buf, "*");
6784 if ((which == 0 && negated)
6785 || (which == 1 && ! negated))
6786 strcat (buf, "<");
6787 else
6788 strcat (buf, ">=");
6789 if (negated)
6790 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6791 else
6792 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6794 /* Handle short backwards branch with an unfilled delay slot.
6795 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6796 taken and untaken branches. */
6797 else if (dbr_sequence_length () == 0
6798 && ! forward_branch_p (insn)
6799 && INSN_ADDRESSES_SET_P ()
6800 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6801 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6803 strcpy (buf, "{bvb,|bb,}");
6804 if (GET_MODE (operands[0]) == DImode)
6805 strcat (buf, "*");
6806 if ((which == 0 && negated)
6807 || (which == 1 && ! negated))
6808 strcat (buf, ">=");
6809 else
6810 strcat (buf, "<");
6811 if (negated)
6812 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6813 else
6814 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6816 else
6818 strcpy (buf, "{vextrs,|extrw,s,}");
6819 if (GET_MODE (operands[0]) == DImode)
6820 strcpy (buf, "extrd,s,*");
6821 if ((which == 0 && negated)
6822 || (which == 1 && ! negated))
6823 strcat (buf, "<");
6824 else
6825 strcat (buf, ">=");
6826 if (nullify && negated)
6827 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6828 else if (nullify && ! negated)
6829 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6830 else if (negated)
6831 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6832 else
6833 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6835 break;
6837 default:
6838 /* The reversed conditional branch must branch over one additional
6839 instruction if the delay slot is filled and needs to be extracted
6840 by output_lbranch. If the delay slot is empty or this is a
6841 nullified forward branch, the instruction after the reversed
6842 condition branch must be nullified. */
6843 if (dbr_sequence_length () == 0
6844 || (nullify && forward_branch_p (insn)))
6846 nullify = 1;
6847 xdelay = 0;
6848 operands[4] = GEN_INT (length);
6850 else
6852 xdelay = 1;
6853 operands[4] = GEN_INT (length + 4);
6856 if (GET_MODE (operands[0]) == DImode)
6857 strcpy (buf, "bb,*");
6858 else
6859 strcpy (buf, "{bvb,|bb,}");
6860 if ((which == 0 && negated)
6861 || (which == 1 && !negated))
6862 strcat (buf, "<");
6863 else
6864 strcat (buf, ">=");
6865 if (nullify)
6866 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6867 else
6868 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6869 output_asm_insn (buf, operands);
6870 return output_lbranch (negated ? operands[3] : operands[2],
6871 insn, xdelay);
6873 return buf;
6876 /* Return the output template for emitting a dbra type insn.
6878 Note it may perform some output operations on its own before
6879 returning the final output string. */
6880 const char *
6881 output_dbra (rtx *operands, rtx insn, int which_alternative)
6883 int length = get_attr_length (insn);
6885 /* A conditional branch to the following instruction (e.g. the delay slot) is
6886 asking for a disaster. Be prepared! */
6888 if (branch_to_delay_slot_p (insn))
6890 if (which_alternative == 0)
6891 return "ldo %1(%0),%0";
6892 else if (which_alternative == 1)
6894 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6895 output_asm_insn ("ldw -16(%%r30),%4", operands);
6896 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6897 return "{fldws|fldw} -16(%%r30),%0";
6899 else
6901 output_asm_insn ("ldw %0,%4", operands);
6902 return "ldo %1(%4),%4\n\tstw %4,%0";
6906 if (which_alternative == 0)
6908 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6909 int xdelay;
6911 /* If this is a long branch with its delay slot unfilled, set `nullify'
6912 as it can nullify the delay slot and save a nop. */
6913 if (length == 8 && dbr_sequence_length () == 0)
6914 nullify = 1;
6916 /* If this is a short forward conditional branch which did not get
6917 its delay slot filled, the delay slot can still be nullified. */
6918 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6919 nullify = forward_branch_p (insn);
6921 switch (length)
6923 case 4:
6924 if (nullify)
6926 if (branch_needs_nop_p (insn))
6927 return "addib,%C2,n %1,%0,%3%#";
6928 else
6929 return "addib,%C2,n %1,%0,%3";
6931 else
6932 return "addib,%C2 %1,%0,%3";
6934 case 8:
6935 /* Handle weird backwards branch with a filled delay slot
6936 which is nullified. */
6937 if (dbr_sequence_length () != 0
6938 && ! forward_branch_p (insn)
6939 && nullify)
6940 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6941 /* Handle short backwards branch with an unfilled delay slot.
6942 Using an addb;nop rather than addi;bl saves 1 cycle for both
6943 taken and untaken branches. */
6944 else if (dbr_sequence_length () == 0
6945 && ! forward_branch_p (insn)
6946 && INSN_ADDRESSES_SET_P ()
6947 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6948 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6949 return "addib,%C2 %1,%0,%3%#";
6951 /* Handle normal cases. */
6952 if (nullify)
6953 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6954 else
6955 return "addi,%N2 %1,%0,%0\n\tb %3";
6957 default:
6958 /* The reversed conditional branch must branch over one additional
6959 instruction if the delay slot is filled and needs to be extracted
6960 by output_lbranch. If the delay slot is empty or this is a
6961 nullified forward branch, the instruction after the reversed
6962 condition branch must be nullified. */
6963 if (dbr_sequence_length () == 0
6964 || (nullify && forward_branch_p (insn)))
6966 nullify = 1;
6967 xdelay = 0;
6968 operands[4] = GEN_INT (length);
6970 else
6972 xdelay = 1;
6973 operands[4] = GEN_INT (length + 4);
6976 if (nullify)
6977 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6978 else
6979 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6981 return output_lbranch (operands[3], insn, xdelay);
6985 /* Deal with gross reload from FP register case. */
6986 else if (which_alternative == 1)
6988 /* Move loop counter from FP register to MEM then into a GR,
6989 increment the GR, store the GR into MEM, and finally reload
6990 the FP register from MEM from within the branch's delay slot. */
6991 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6992 operands);
6993 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6994 if (length == 24)
6995 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6996 else if (length == 28)
6997 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6998 else
7000 operands[5] = GEN_INT (length - 16);
7001 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7002 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7003 return output_lbranch (operands[3], insn, 0);
7006 /* Deal with gross reload from memory case. */
7007 else
7009 /* Reload loop counter from memory, the store back to memory
7010 happens in the branch's delay slot. */
7011 output_asm_insn ("ldw %0,%4", operands);
7012 if (length == 12)
7013 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7014 else if (length == 16)
7015 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7016 else
7018 operands[5] = GEN_INT (length - 4);
7019 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7020 return output_lbranch (operands[3], insn, 0);
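/* Illustrative output for the common first alternative at length 4
   (register number and label are hypothetical): a single
   decrement-compare-and-branch such as

	addib,<> -1,%r3,L$loop

   where the condition comes from operand 2 via %C2.  */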
7025 /* Return the output template for emitting a movb type insn.
7027 Note it may perform some output operations on its own before
7028 returning the final output string. */
7029 const char *
7030 output_movb (rtx *operands, rtx insn, int which_alternative,
7031 int reverse_comparison)
7033 int length = get_attr_length (insn);
7035 /* A conditional branch to the following instruction (e.g. the delay slot) is
7036 asking for a disaster. Be prepared! */
7038 if (branch_to_delay_slot_p (insn))
7040 if (which_alternative == 0)
7041 return "copy %1,%0";
7042 else if (which_alternative == 1)
7044 output_asm_insn ("stw %1,-16(%%r30)", operands);
7045 return "{fldws|fldw} -16(%%r30),%0";
7047 else if (which_alternative == 2)
7048 return "stw %1,%0";
7049 else
7050 return "mtsar %r1";
7053 /* Support the second variant. */
7054 if (reverse_comparison)
7055 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7057 if (which_alternative == 0)
7059 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7060 int xdelay;
7062 /* If this is a long branch with its delay slot unfilled, set `nullify'
7063 as it can nullify the delay slot and save a nop. */
7064 if (length == 8 && dbr_sequence_length () == 0)
7065 nullify = 1;
7067 /* If this is a short forward conditional branch which did not get
7068 its delay slot filled, the delay slot can still be nullified. */
7069 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7070 nullify = forward_branch_p (insn);
7072 switch (length)
7074 case 4:
7075 if (nullify)
7077 if (branch_needs_nop_p (insn))
7078 return "movb,%C2,n %1,%0,%3%#";
7079 else
7080 return "movb,%C2,n %1,%0,%3";
7082 else
7083 return "movb,%C2 %1,%0,%3";
7085 case 8:
7086 /* Handle weird backwards branch with a filled delay slot
7087 which is nullified. */
7088 if (dbr_sequence_length () != 0
7089 && ! forward_branch_p (insn)
7090 && nullify)
7091 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7093 /* Handle short backwards branch with an unfilled delay slot.
7094 Using a movb;nop rather than or;bl saves 1 cycle for both
7095 taken and untaken branches. */
7096 else if (dbr_sequence_length () == 0
7097 && ! forward_branch_p (insn)
7098 && INSN_ADDRESSES_SET_P ()
7099 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7100 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7101 return "movb,%C2 %1,%0,%3%#";
7102 /* Handle normal cases. */
7103 if (nullify)
7104 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7105 else
7106 return "or,%N2 %1,%%r0,%0\n\tb %3";
7108 default:
7109 /* The reversed conditional branch must branch over one additional
7110 instruction if the delay slot is filled and needs to be extracted
7111 by output_lbranch. If the delay slot is empty or this is a
7112 nullified forward branch, the instruction after the reversed
7113 condition branch must be nullified. */
7114 if (dbr_sequence_length () == 0
7115 || (nullify && forward_branch_p (insn)))
7117 nullify = 1;
7118 xdelay = 0;
7119 operands[4] = GEN_INT (length);
7121 else
7123 xdelay = 1;
7124 operands[4] = GEN_INT (length + 4);
7127 if (nullify)
7128 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7129 else
7130 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7132 return output_lbranch (operands[3], insn, xdelay);
7135 /* Deal with gross reload for FP destination register case. */
7136 else if (which_alternative == 1)
7138 /* Move source register to MEM, perform the branch test, then
7139 finally load the FP register from MEM from within the branch's
7140 delay slot. */
7141 output_asm_insn ("stw %1,-16(%%r30)", operands);
7142 if (length == 12)
7143 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7144 else if (length == 16)
7145 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7146 else
7148 operands[4] = GEN_INT (length - 4);
7149 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7150 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7151 return output_lbranch (operands[3], insn, 0);
7154 /* Deal with gross reload from memory case. */
7155 else if (which_alternative == 2)
7157 /* Reload loop counter from memory, the store back to memory
7158 happens in the branch's delay slot. */
7159 if (length == 8)
7160 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7161 else if (length == 12)
7162 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7163 else
7165 operands[4] = GEN_INT (length);
7166 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7167 operands);
7168 return output_lbranch (operands[3], insn, 0);
7171 /* Handle SAR as a destination. */
7172 else
7174 if (length == 8)
7175 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7176 else if (length == 12)
7177 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7178 else
7180 operands[4] = GEN_INT (length);
7181 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7182 operands);
7183 return output_lbranch (operands[3], insn, 0);
7188 /* Copy any FP arguments in INSN into integer registers. */
7189 static void
7190 copy_fp_args (rtx insn)
7192 rtx link;
7193 rtx xoperands[2];
7195 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7197 int arg_mode, regno;
7198 rtx use = XEXP (link, 0);
7200 if (! (GET_CODE (use) == USE
7201 && GET_CODE (XEXP (use, 0)) == REG
7202 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7203 continue;
7205 arg_mode = GET_MODE (XEXP (use, 0));
7206 regno = REGNO (XEXP (use, 0));
7208 /* Is it a floating point register? */
7209 if (regno >= 32 && regno <= 39)
7211 /* Copy the FP register into an integer register via memory. */
7212 if (arg_mode == SFmode)
7214 xoperands[0] = XEXP (use, 0);
7215 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7216 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7217 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7219 else
7221 xoperands[0] = XEXP (use, 0);
7222 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7223 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7224 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7225 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
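/* Illustrative sequence for a DFmode argument (register numbers here are
   hypothetical; the code derives them from the USE's REGNO): the double
   is spilled to the frame with fstd and its two words reloaded into the
   DImode general register pair, e.g.

	fstd %fr5,-16(%sr0,%r30)
	ldw -12(%sr0,%r30),%r26
	ldw -16(%sr0,%r30),%r25  */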
7231 /* Compute length of the FP argument copy sequence for INSN. */
7232 static int
7233 length_fp_args (rtx insn)
7235 int length = 0;
7236 rtx link;
7238 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7240 int arg_mode, regno;
7241 rtx use = XEXP (link, 0);
7243 if (! (GET_CODE (use) == USE
7244 && GET_CODE (XEXP (use, 0)) == REG
7245 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7246 continue;
7248 arg_mode = GET_MODE (XEXP (use, 0));
7249 regno = REGNO (XEXP (use, 0));
7251 /* Is it a floating point register? */
7252 if (regno >= 32 && regno <= 39)
7254 if (arg_mode == SFmode)
7255 length += 8;
7256 else
7257 length += 12;
7261 return length;
7264 /* Return the attribute length for the millicode call instruction INSN.
7265 The length must match the code generated by output_millicode_call.
7266 We include the delay slot in the returned length as it is better to
7267 overestimate the length than to underestimate it. */
7269 int
7270 attr_length_millicode_call (rtx insn)
7272 unsigned long distance = -1;
7273 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7275 if (INSN_ADDRESSES_SET_P ())
7277 distance = (total + insn_current_reference_address (insn));
7278 if (distance < total)
7279 distance = -1;
7282 if (TARGET_64BIT)
7284 if (!TARGET_LONG_CALLS && distance < 7600000)
7285 return 8;
7287 return 20;
7289 else if (TARGET_PORTABLE_RUNTIME)
7290 return 24;
7291 else
7293 if (!TARGET_LONG_CALLS && distance < 240000)
7294 return 8;
7296 if (TARGET_LONG_ABS_CALL && !flag_pic)
7297 return 12;
7299 return 24;
7303 /* INSN is a function call. It may have an unconditional jump
7304 in its delay slot.
7306 CALL_DEST is the routine we are calling. */
7308 const char *
7309 output_millicode_call (rtx insn, rtx call_dest)
7311 int attr_length = get_attr_length (insn);
7312 int seq_length = dbr_sequence_length ();
7313 int distance;
7314 rtx seq_insn;
7315 rtx xoperands[3];
7317 xoperands[0] = call_dest;
7318 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7320 /* Handle the common case where we are sure that the branch will
7321 reach the beginning of the $CODE$ subspace. The within-reach
7322 form of the $$sh_func_adrs call has a length of 28. Because
7323 it has an attribute type of multi, it never has a nonzero
7324 sequence length. The length of the $$sh_func_adrs call is the
7325 same as that of certain out-of-reach PIC calls to other routines. */
7326 if (!TARGET_LONG_CALLS
7327 && ((seq_length == 0
7328 && (attr_length == 12
7329 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7330 || (seq_length != 0 && attr_length == 8)))
7332 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7334 else
7336 if (TARGET_64BIT)
7338 /* It might seem that one insn could be saved by accessing
7339 the millicode function using the linkage table. However,
7340 this doesn't work in shared libraries and other dynamically
7341 loaded objects. Using a pc-relative sequence also avoids
7342 problems related to the implicit use of the gp register. */
7343 output_asm_insn ("b,l .+8,%%r1", xoperands);
7345 if (TARGET_GAS)
7347 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7348 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7350 else
7352 xoperands[1] = gen_label_rtx ();
7353 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7354 targetm.asm_out.internal_label (asm_out_file, "L",
7355 CODE_LABEL_NUMBER (xoperands[1]));
7356 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7359 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7361 else if (TARGET_PORTABLE_RUNTIME)
7363 /* Pure portable runtime doesn't allow be/ble; we also don't
7364 have PIC support in the assembler/linker, so this sequence
7365 is needed. */
7367 /* Get the address of our target into %r1. */
7368 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7369 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7371 /* Get our return address into %r31. */
7372 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7373 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7375 /* Jump to our target address in %r1. */
7376 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7378 else if (!flag_pic)
7380 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7381 if (TARGET_PA_20)
7382 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7383 else
7384 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7386 else
7388 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7389 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7391 if (TARGET_SOM || !TARGET_GAS)
7393 /* The HP assembler can generate relocations for the
7394 difference of two symbols. GAS can do this for a
7395 millicode symbol but not an arbitrary external
7396 symbol when generating SOM output. */
7397 xoperands[1] = gen_label_rtx ();
7398 targetm.asm_out.internal_label (asm_out_file, "L",
7399 CODE_LABEL_NUMBER (xoperands[1]));
7400 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7401 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7403 else
7405 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7406 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7407 xoperands);
7410 /* Jump to our target address in %r1. */
7411 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7415 if (seq_length == 0)
7416 output_asm_insn ("nop", xoperands);
7418 /* We are done if there isn't a jump in the delay slot. */
7419 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7420 return "";
7422 /* This call has an unconditional jump in its delay slot. */
7423 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7425 /* See if the return address can be adjusted. Use the containing
7426 sequence insn's address. */
7427 if (INSN_ADDRESSES_SET_P ())
7429 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7430 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7431 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7433 if (VAL_14_BITS_P (distance))
7435 xoperands[1] = gen_label_rtx ();
7436 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7437 targetm.asm_out.internal_label (asm_out_file, "L",
7438 CODE_LABEL_NUMBER (xoperands[1]));
7440 else
7441 /* ??? This branch may not reach its target. */
7442 output_asm_insn ("nop\n\tb,n %0", xoperands);
7444 else
7445 /* ??? This branch may not reach its target. */
7446 output_asm_insn ("nop\n\tb,n %0", xoperands);
7448 /* Delete the jump. */
7449 SET_INSN_DELETED (NEXT_INSN (insn));
7451 return "";
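/* Illustrative sequences from the code above ($$mulI is one of the
   standard millicode routines): the short in-reach form is simply

	bl $$mulI,%r31
	nop

   while the non-PIC long form on PA 2.0 is

	ldil L'$$mulI,%r1
	be,l R'$$mulI(%sr4,%r1),%sr0,%r31  */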
7454 /* Return the attribute length of the call instruction INSN. The SIBCALL
7455 flag indicates whether INSN is a regular call or a sibling call. The
7456 length returned must be longer than the code actually generated by
7457 output_call. Since branch shortening is done before delay branch
7458 sequencing, there is no way to determine whether or not the delay
7459 slot will be filled during branch shortening. Even when the delay
7460 slot is filled, we may have to add a nop if the delay slot contains
7461 a branch that can't reach its target. Thus, we always have to include
7462 the delay slot in the length estimate. This used to be done in
7463 pa_adjust_insn_length but we do it here now as some sequences always
7464 fill the delay slot and we can save four bytes in the estimate for
7465 these sequences. */
7467 int
7468 attr_length_call (rtx insn, int sibcall)
7470 int local_call;
7471 rtx call_dest;
7472 tree call_decl;
7473 int length = 0;
7474 rtx pat = PATTERN (insn);
7475 unsigned long distance = -1;
7477 if (INSN_ADDRESSES_SET_P ())
7479 unsigned long total;
7481 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7482 distance = (total + insn_current_reference_address (insn));
7483 if (distance < total)
7484 distance = -1;
7487 /* Determine if this is a local call. */
7488 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7489 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7490 else
7491 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7493 call_decl = SYMBOL_REF_DECL (call_dest);
7494 local_call = call_decl && targetm.binds_local_p (call_decl);
7496 /* pc-relative branch. */
7497 if (!TARGET_LONG_CALLS
7498 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7499 || distance < 240000))
7500 length += 8;
7502 /* 64-bit plabel sequence. */
7503 else if (TARGET_64BIT && !local_call)
7504 length += sibcall ? 28 : 24;
7506 /* non-pic long absolute branch sequence. */
7507 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7508 length += 12;
7510 /* long pc-relative branch sequence. */
7511 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7512 || (TARGET_64BIT && !TARGET_GAS)
7513 || (TARGET_GAS && !TARGET_SOM
7514 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7516 length += 20;
7518 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7519 length += 8;
7522 /* 32-bit plabel sequence. */
7523 else
7525 length += 32;
7527 if (TARGET_SOM)
7528 length += length_fp_args (insn);
7530 if (flag_pic)
7531 length += 4;
7533 if (!TARGET_PA_20)
7535 if (!sibcall)
7536 length += 8;
7538 if (!TARGET_NO_SPACE_REGS)
7539 length += 8;
7543 return length;
7546 /* INSN is a function call. It may have an unconditional jump
7547 in its delay slot.
7549 CALL_DEST is the routine we are calling. */
7551 const char *
7552 output_call (rtx insn, rtx call_dest, int sibcall)
7554 int delay_insn_deleted = 0;
7555 int delay_slot_filled = 0;
7556 int seq_length = dbr_sequence_length ();
7557 tree call_decl = SYMBOL_REF_DECL (call_dest);
7558 int local_call = call_decl && targetm.binds_local_p (call_decl);
7559 rtx xoperands[2];
7561 xoperands[0] = call_dest;
7563 /* Handle the common case where we're sure that the branch will reach
7564 the beginning of the "$CODE$" subspace. This is the beginning of
7565 the current function if we are in a named section. */
7566 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7568 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7569 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7571 else
7573 if (TARGET_64BIT && !local_call)
7575 /* ??? As far as I can tell, the HP linker doesn't support the
7576 long pc-relative sequence described in the 64-bit runtime
7577 architecture. So, we use a slightly longer indirect call. */
7578 xoperands[0] = get_deferred_plabel (call_dest);
7579 xoperands[1] = gen_label_rtx ();
7581 /* If this isn't a sibcall, we put the load of %r27 into the
7582 delay slot. We can't do this in a sibcall as we don't
7583 have a second call-clobbered scratch register available. */
7584 if (seq_length != 0
7585 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7586 && !sibcall)
7588 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7589 optimize, 0, NULL);
7591 /* Now delete the delay insn. */
7592 SET_INSN_DELETED (NEXT_INSN (insn));
7593 delay_insn_deleted = 1;
7596 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7597 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7598 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7600 if (sibcall)
7602 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7603 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7604 output_asm_insn ("bve (%%r1)", xoperands);
7606 else
7608 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7609 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7610 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7611 delay_slot_filled = 1;
7614 else
7616 int indirect_call = 0;
7618 /* Emit a long call. There are several different sequences
7619 of increasing length and complexity. In most cases,
7620 they don't allow an instruction in the delay slot. */
7621 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7622 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7623 && !(TARGET_GAS && !TARGET_SOM
7624 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7625 && !TARGET_64BIT)
7626 indirect_call = 1;
7628 if (seq_length != 0
7629 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7630 && !sibcall
7631 && (!TARGET_PA_20
7632 || indirect_call
7633 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7635 /* A non-jump insn in the delay slot. By definition we can
7636 emit this insn before the call (and in fact before argument
7637 relocation). */
7638 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7639 NULL);
7641 /* Now delete the delay insn. */
7642 SET_INSN_DELETED (NEXT_INSN (insn));
7643 delay_insn_deleted = 1;
7646 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7648 /* This is the best sequence for making long calls in
7649 non-pic code. Unfortunately, GNU ld doesn't provide
7650 the stub needed for external calls, and GAS's support
7651 for this with the SOM linker is buggy. It is safe
7652 to use this for local calls. */
7653 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7654 if (sibcall)
7655 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7656 else
7658 if (TARGET_PA_20)
7659 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7660 xoperands);
7661 else
7662 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7664 output_asm_insn ("copy %%r31,%%r2", xoperands);
7665 delay_slot_filled = 1;
7668 else
7670 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7671 || (TARGET_64BIT && !TARGET_GAS))
7673 /* The HP assembler and linker can handle relocations
7674 for the difference of two symbols. GAS and the HP
7675 linker can't do this when one of the symbols is
7676 external. */
7677 xoperands[1] = gen_label_rtx ();
7678 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7679 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7680 targetm.asm_out.internal_label (asm_out_file, "L",
7681 CODE_LABEL_NUMBER (xoperands[1]));
7682 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7684 else if (TARGET_GAS && !TARGET_SOM
7685 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7687 /* GAS currently can't generate the relocations that
7688 are needed for the SOM linker under HP-UX using this
7689 sequence. The GNU linker doesn't generate the stubs
7690 that are needed for external calls on TARGET_ELF32
7691 with this sequence. For now, we have to use a
7692 longer plabel sequence when using GAS. */
7693 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7694 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7695 xoperands);
7696 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7697 xoperands);
7699 else
7701 /* Emit a long plabel-based call sequence. This is
7702 essentially an inline implementation of $$dyncall.
7703 We don't actually try to call $$dyncall as this is
7704 as difficult as calling the function itself. */
7705 xoperands[0] = get_deferred_plabel (call_dest);
7706 xoperands[1] = gen_label_rtx ();
7708 /* Since the call is indirect, FP arguments in registers
7709 need to be copied to the general registers. Then, the
7710 argument relocation stub will copy them back. */
7711 if (TARGET_SOM)
7712 copy_fp_args (insn);
7714 if (flag_pic)
7716 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7717 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7718 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7720 else
7722 output_asm_insn ("addil LR'%0-$global$,%%r27",
7723 xoperands);
7724 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7725 xoperands);
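/* The word just loaded into %r1 is either the function's address or a
   pointer to its PLT entry with a flag in bit 30.  The bb,>=,n below
   skips the next three instructions when the flag is clear; otherwise
   the flag bits are masked off and the target address and linkage
   table pointer are fetched from the entry.  This is our reading of
   the $$dyncall convention that the sequence inlines.  */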
7728 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7729 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7730 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7731 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7733 if (!sibcall && !TARGET_PA_20)
7735 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7736 if (TARGET_NO_SPACE_REGS)
7737 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7738 else
7739 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7743 if (TARGET_PA_20)
7745 if (sibcall)
7746 output_asm_insn ("bve (%%r1)", xoperands);
7747 else
7749 if (indirect_call)
7751 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7752 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7753 delay_slot_filled = 1;
7755 else
7756 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7759 else
7761 if (!TARGET_NO_SPACE_REGS)
7762 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7763 xoperands);
7765 if (sibcall)
7767 if (TARGET_NO_SPACE_REGS)
7768 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7769 else
7770 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7772 else
7774 if (TARGET_NO_SPACE_REGS)
7775 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7776 else
7777 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7779 if (indirect_call)
7780 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7781 else
7782 output_asm_insn ("copy %%r31,%%r2", xoperands);
7783 delay_slot_filled = 1;
7790 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7791 output_asm_insn ("nop", xoperands);
7793 /* We are done if there isn't a jump in the delay slot. */
7794 if (seq_length == 0
7795 || delay_insn_deleted
7796 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7797 return "";
7799 /* A sibcall should never have a branch in the delay slot. */
7800 gcc_assert (!sibcall);
7802 /* This call has an unconditional jump in its delay slot. */
7803 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7805 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7807 /* See if the return address can be adjusted. Use the containing
7808 sequence insn's address. */
7809 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7810 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7811 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7813 if (VAL_14_BITS_P (distance))
7815 xoperands[1] = gen_label_rtx ();
7816 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7817 targetm.asm_out.internal_label (asm_out_file, "L",
7818 CODE_LABEL_NUMBER (xoperands[1]));
7820 else
7821 output_asm_insn ("nop\n\tb,n %0", xoperands);
7823 else
7824 output_asm_insn ("b,n %0", xoperands);
7826 /* Delete the jump. */
7827 SET_INSN_DELETED (NEXT_INSN (insn));
7829 return "";
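/* As an illustration, the 64-bit non-sibcall path above emits the
   following sketch, where %0 stands for the deferred plabel:

        addil LT'%0,%r27
        ldd RT'%0(%r1),%r1
        ldd 0(%r1),%r1
        ldd 16(%r1),%r2
        bve,l (%r2),%r2
        ldd 24(%r1),%r27        ; delay slot reloads the DLT pointer

   Six instructions, matching the 24 bytes attr_length_call allows.  */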
7832 /* Return the attribute length of the indirect call instruction INSN.
7833 The length must match the code generated by output_indirect_call.
7834 The returned length includes the delay slot. Currently, the delay
7835 slot of an indirect call sequence is not exposed and it is used by
7836 the sequence itself. */
7839 attr_length_indirect_call (rtx insn)
7841 unsigned long distance = -1;
7842 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7844 if (INSN_ADDRESSES_SET_P ())
7846 distance = (total + insn_current_reference_address (insn));
7847 if (distance < total)
7848 distance = -1;
7851 if (TARGET_64BIT)
7852 return 12;
7854 if (TARGET_FAST_INDIRECT_CALLS
7855 || (!TARGET_PORTABLE_RUNTIME
7856 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7857 || distance < 240000)))
7858 return 8;
7860 if (flag_pic)
7861 return 24;
7863 if (TARGET_PORTABLE_RUNTIME)
7864 return 20;
7866 /* Out of reach, can use ble. */
7867 return 12;
7870 const char *
7871 output_indirect_call (rtx insn, rtx call_dest)
7873 rtx xoperands[1];
7875 if (TARGET_64BIT)
7877 xoperands[0] = call_dest;
7878 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7879 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7880 return "";
7883 /* First the special case for kernels, level 0 systems, etc. */
7884 if (TARGET_FAST_INDIRECT_CALLS)
7885 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7887 /* Now the normal case -- we can reach $$dyncall directly or
7888 we're sure that we can get there via a long-branch stub.
7890 No need to check target flags as the length uniquely identifies
7891 the remaining cases. */
7892 if (attr_length_indirect_call (insn) == 8)
7894 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7895 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7896 variant of the B,L instruction can't be used on the SOM target. */
7897 if (TARGET_PA_20 && !TARGET_SOM)
7898 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7899 else
7900 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7903 /* Long millicode call, but we are not generating PIC or portable runtime
7904 code. */
7905 if (attr_length_indirect_call (insn) == 12)
7906 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7908 /* Long millicode call for portable runtime. */
7909 if (attr_length_indirect_call (insn) == 20)
7910 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7912 /* We need a long PIC call to $$dyncall. */
7913 xoperands[0] = NULL_RTX;
7914 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7915 if (TARGET_SOM || !TARGET_GAS)
7917 xoperands[0] = gen_label_rtx ();
7918 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7919 targetm.asm_out.internal_label (asm_out_file, "L",
7920 CODE_LABEL_NUMBER (xoperands[0]));
7921 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7923 else
7925 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7926 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7927 xoperands);
7929 output_asm_insn ("blr %%r0,%%r2", xoperands);
7930 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7931 return "";
7934 /* Return the total length of the save and restore instructions needed for
7935 the data linkage table pointer (i.e., the PIC register) across the call
7936 instruction INSN. No-return calls do not require a save and restore.
7937 In addition, we may be able to avoid the save and restore for calls
7938 within the same translation unit. */
7941 attr_length_save_restore_dltp (rtx insn)
7943 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7944 return 0;
7946 return 8;
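/* The 8 bytes are one store and one load of the PIC register %r19
   around the call, i.e. something like "stw %r19,-32(%sp)" before and
   "ldw -32(%sp),%r19" after.  The exact stack slot is the frame marker
   slot reserved by the 32-bit calling convention; this is only a
   sketch of the convention, not the literal output.  */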
7949 /* In HPUX 8.0's shared library scheme, special relocations are needed
7950 for function labels if they might be passed to a function
7951 in a shared library (because shared libraries don't live in code
7952 space), and special magic is needed to construct their address. */
7954 void
7955 hppa_encode_label (rtx sym)
7957 const char *str = XSTR (sym, 0);
7958 int len = strlen (str) + 1;
7959 char *newstr, *p;
7961 p = newstr = alloca (len + 1);
7962 *p++ = '@';
7963 strcpy (p, str);
7965 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
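/* For example, encoding the symbol "foo" rewrites its assembler name
   to "@foo"; pa_strip_name_encoding below recovers "foo", and
   FUNCTION_NAME_P recognizes function labels by the '@' prefix.  */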
7968 static void
7969 pa_encode_section_info (tree decl, rtx rtl, int first)
7971 int old_referenced = 0;
7973 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
7974 old_referenced
7975 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
7977 default_encode_section_info (decl, rtl, first);
7979 if (first && TEXT_SPACE_P (decl))
7981 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7982 if (TREE_CODE (decl) == FUNCTION_DECL)
7983 hppa_encode_label (XEXP (rtl, 0));
7985 else if (old_referenced)
7986 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
7989 /* This is sort of inverse to pa_encode_section_info. */
7991 static const char *
7992 pa_strip_name_encoding (const char *str)
7994 str += (*str == '@');
7995 str += (*str == '*');
7996 return str;
8000 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8002 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
8005 /* Returns 1 if OP is a function label involved in a simple addition
8006 with a constant. Used to keep certain patterns from matching
8007 during instruction combination. */
8009 is_function_label_plus_const (rtx op)
8011 /* Strip off any CONST. */
8012 if (GET_CODE (op) == CONST)
8013 op = XEXP (op, 0);
8015 return (GET_CODE (op) == PLUS
8016 && function_label_operand (XEXP (op, 0), Pmode)
8017 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8020 /* Output assembly code for a thunk to FUNCTION. */
8022 static void
8023 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8024 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8025 tree function)
8027 static unsigned int current_thunk_number;
8028 int val_14 = VAL_14_BITS_P (delta);
8029 unsigned int old_last_address = last_address, nbytes = 0;
8030 char label[16];
8031 rtx xoperands[4];
8033 xoperands[0] = XEXP (DECL_RTL (function), 0);
8034 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8035 xoperands[2] = GEN_INT (delta);
8037 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
8038 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
8040 /* Output the thunk. We know that the function is in the same
8041 translation unit (i.e., the same space) as the thunk, and that
8042 thunks are output after their method. Thus, we don't need an
8043 external branch to reach the function. With SOM and GAS,
8044 functions and thunks are effectively in different sections.
8045 Thus, we can always use an IA-relative branch and the linker
8046 will add a long branch stub if necessary.
8048 However, we have to be careful when generating PIC code on the
8049 SOM port to ensure that the sequence does not transfer to an
8050 import stub for the target function as this could clobber the
8051 return value saved at SP-24. This would also apply to the
8052 32-bit linux port if the multi-space model is implemented. */
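/* In the easiest case below, when DELTA fits in 14 bits and the branch
   is known to reach, the entire thunk body is just (a sketch of the
   first arm that follows):

        b       function
        ldo     delta(%r26),%r26    ; adjust the this pointer in the delay slot

   8 bytes, as the nbytes accounting records.  */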
8053 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8054 && !(flag_pic && TREE_PUBLIC (function))
8055 && (TARGET_GAS || last_address < 262132))
8056 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8057 && ((targetm.have_named_sections
8058 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8059 /* The GNU 64-bit linker has rather poor stub management.
8060 So, we use a long branch from thunks that aren't in
8061 the same section as the target function. */
8062 && ((!TARGET_64BIT
8063 && (DECL_SECTION_NAME (thunk_fndecl)
8064 != DECL_SECTION_NAME (function)))
8065 || ((DECL_SECTION_NAME (thunk_fndecl)
8066 == DECL_SECTION_NAME (function))
8067 && last_address < 262132)))
8068 || (targetm.have_named_sections
8069 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8070 && DECL_SECTION_NAME (function) == NULL
8071 && last_address < 262132)
8072 || (!targetm.have_named_sections && last_address < 262132))))
8074 if (!val_14)
8075 output_asm_insn ("addil L'%2,%%r26", xoperands);
8077 output_asm_insn ("b %0", xoperands);
8079 if (val_14)
8081 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8082 nbytes += 8;
8084 else
8086 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8087 nbytes += 12;
8090 else if (TARGET_64BIT)
8092 /* We only have one call-clobbered scratch register, so we can't
8093 make use of the delay slot if delta doesn't fit in 14 bits. */
8094 if (!val_14)
8096 output_asm_insn ("addil L'%2,%%r26", xoperands);
8097 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8100 output_asm_insn ("b,l .+8,%%r1", xoperands);
8102 if (TARGET_GAS)
8104 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8105 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8107 else
8109 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8110 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8113 if (val_14)
8115 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8116 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8117 nbytes += 20;
8119 else
8121 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8122 nbytes += 24;
8125 else if (TARGET_PORTABLE_RUNTIME)
8127 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8128 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8130 if (!val_14)
8131 output_asm_insn ("addil L'%2,%%r26", xoperands);
8133 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8135 if (val_14)
8137 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8138 nbytes += 16;
8140 else
8142 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8143 nbytes += 20;
8146 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8148 /* The function is accessible from outside this module. The only
8149 way to avoid an import stub between the thunk and function is to
8150 call the function directly with an indirect sequence similar to
8151 that used by $$dyncall. This is possible because $$dyncall acts
8152 as the import stub in an indirect call. */
8153 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8154 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8155 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8156 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8157 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8158 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8159 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8160 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8161 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8163 if (!val_14)
8165 output_asm_insn ("addil L'%2,%%r26", xoperands);
8166 nbytes += 4;
8169 if (TARGET_PA_20)
8171 output_asm_insn ("bve (%%r22)", xoperands);
8172 nbytes += 36;
8174 else if (TARGET_NO_SPACE_REGS)
8176 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8177 nbytes += 36;
8179 else
8181 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8182 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8183 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8184 nbytes += 44;
8187 if (val_14)
8188 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8189 else
8190 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8192 else if (flag_pic)
8194 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8196 if (TARGET_SOM || !TARGET_GAS)
8198 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8199 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8201 else
8203 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8204 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8207 if (!val_14)
8208 output_asm_insn ("addil L'%2,%%r26", xoperands);
8210 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8212 if (val_14)
8214 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8215 nbytes += 20;
8217 else
8219 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8220 nbytes += 24;
8223 else
8225 if (!val_14)
8226 output_asm_insn ("addil L'%2,%%r26", xoperands);
8228 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8229 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8231 if (val_14)
8233 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8234 nbytes += 12;
8236 else
8238 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8239 nbytes += 16;
8243 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8245 if (TARGET_SOM && TARGET_GAS)
8247 /* We're done with this subspace except possibly for some additional
8248 debug information. Forget that we are in this subspace to ensure
8249 that the next function is output in its own subspace. */
8250 in_section = NULL;
8251 cfun->machine->in_nsubspa = 2;
8254 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8256 switch_to_section (data_section);
8257 output_asm_insn (".align 4", xoperands);
8258 ASM_OUTPUT_LABEL (file, label);
8259 output_asm_insn (".word P'%0", xoperands);
8262 current_thunk_number++;
8263 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8264 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8265 last_address += nbytes;
8266 if (old_last_address > last_address)
8267 last_address = UINT_MAX;
8268 update_total_code_bytes (nbytes);
8271 /* Only direct calls to static functions are allowed to be sibling (tail)
8272 call optimized.
8274 This restriction is necessary because some linker generated stubs will
8275 store return pointers into rp' in some cases which might clobber a
8276 live value already in rp'.
8278 In a sibcall the current function and the target function share stack
8279 space. Thus if the path to the current function and the path to the
8280 target function save a value in rp', they save the value into the
8281 same stack slot, which has undesirable consequences.
8283 Because of the deferred binding nature of shared libraries, any function
8284 with external scope could be in a different load module and thus require
8285 rp' to be saved when calling that function. So sibcall optimizations
8286 can only be safe for static functions.
8288 Note that GCC never needs return value relocations, so we don't have to
8289 worry about static calls with return value relocations (which require
8290 saving rp').
8292 It is safe to perform a sibcall optimization when the target function
8293 will never return. */
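/* An illustrative example (hypothetical functions):

       static int add1 (int x) { return x + 1; }
       extern int fpub (int);

       int call_local (int x) { return add1 (x); }    -- may be a sibcall
       int call_pub (int x) { return fpub (x); }      -- never a sibcall

   Only the tail call to add1 passes the final check below, since it is
   the only target with !TREE_PUBLIC; on TARGET_ELF32 any direct call
   qualifies.  */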
8294 static bool
8295 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8297 if (TARGET_PORTABLE_RUNTIME)
8298 return false;
8300 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8301 single subspace mode and the call is not indirect. As far as I know,
8302 there is no operating system support for the multiple subspace mode.
8303 It might be possible to support indirect calls if we didn't use
8304 $$dyncall (see the indirect sequence generated in output_call). */
8305 if (TARGET_ELF32)
8306 return (decl != NULL_TREE);
8308 /* Sibcalls are not ok because the arg pointer register is not a fixed
8309 register. This prevents the sibcall optimization from occurring. In
8310 addition, there are problems with stub placement using GNU ld. This
8311 is because a normal sibcall branch uses a 17-bit relocation while
8312 a regular call branch uses a 22-bit relocation. As a result, more
8313 care needs to be taken in the placement of long-branch stubs. */
8314 if (TARGET_64BIT)
8315 return false;
8317 /* Sibcalls are only ok within a translation unit. */
8318 return (decl && !TREE_PUBLIC (decl));
8321 /* ??? Addition is not commutative on the PA due to the weird implicit
8322 space register selection rules for memory addresses. Therefore, we
8323 don't consider a + b == b + a, as this might be inside a MEM. */
8324 static bool
8325 pa_commutative_p (const_rtx x, int outer_code)
8327 return (COMMUTATIVE_P (x)
8328 && (TARGET_NO_SPACE_REGS
8329 || (outer_code != UNKNOWN && outer_code != MEM)
8330 || GET_CODE (x) != PLUS));
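/* E.g. (mem:SI (plus:SI (reg A) (reg B))): with space registers active,
   the space identifier is in effect selected from the base operand, so
   rewriting the address as (plus:SI (reg B) (reg A)) inside a MEM could
   reference a different space.  This is a sketch of the rule above, not
   an additional constraint.  */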
8333 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8334 use in fmpyadd instructions. */
8336 fmpyaddoperands (rtx *operands)
8338 enum machine_mode mode = GET_MODE (operands[0]);
8340 /* Must be a floating point mode. */
8341 if (mode != SFmode && mode != DFmode)
8342 return 0;
8344 /* All modes must be the same. */
8345 if (! (mode == GET_MODE (operands[1])
8346 && mode == GET_MODE (operands[2])
8347 && mode == GET_MODE (operands[3])
8348 && mode == GET_MODE (operands[4])
8349 && mode == GET_MODE (operands[5])))
8350 return 0;
8352 /* All operands must be registers. */
8353 if (! (GET_CODE (operands[1]) == REG
8354 && GET_CODE (operands[2]) == REG
8355 && GET_CODE (operands[3]) == REG
8356 && GET_CODE (operands[4]) == REG
8357 && GET_CODE (operands[5]) == REG))
8358 return 0;
8360 /* Only 2 real operands to the addition. One of the input operands must
8361 be the same as the output operand. */
8362 if (! rtx_equal_p (operands[3], operands[4])
8363 && ! rtx_equal_p (operands[3], operands[5]))
8364 return 0;
8366 /* Inout operand of add cannot conflict with any operands from multiply. */
8367 if (rtx_equal_p (operands[3], operands[0])
8368 || rtx_equal_p (operands[3], operands[1])
8369 || rtx_equal_p (operands[3], operands[2]))
8370 return 0;
8372 /* multiply cannot feed into addition operands. */
8373 if (rtx_equal_p (operands[4], operands[0])
8374 || rtx_equal_p (operands[5], operands[0]))
8375 return 0;
8377 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8378 if (mode == SFmode
8379 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8380 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8381 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8382 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8383 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8384 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8385 return 0;
8387 /* Passed. Operands are suitable for fmpyadd. */
8388 return 1;
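/* In the operand numbering checked above, the insn computes
   operands[0] = operands[1] * operands[2] in parallel with
   operands[3] = operands[4] + operands[5], where operands[3] also names
   one of the addends.  For example (DFmode registers chosen purely for
   illustration):  fr22 = fr24 * fr26  with  fr28 = fr28 + fr30.  */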
8391 #if !defined(USE_COLLECT2)
8392 static void
8393 pa_asm_out_constructor (rtx symbol, int priority)
8395 if (!function_label_operand (symbol, VOIDmode))
8396 hppa_encode_label (symbol);
8398 #ifdef CTORS_SECTION_ASM_OP
8399 default_ctor_section_asm_out_constructor (symbol, priority);
8400 #else
8401 # ifdef TARGET_ASM_NAMED_SECTION
8402 default_named_section_asm_out_constructor (symbol, priority);
8403 # else
8404 default_stabs_asm_out_constructor (symbol, priority);
8405 # endif
8406 #endif
8409 static void
8410 pa_asm_out_destructor (rtx symbol, int priority)
8412 if (!function_label_operand (symbol, VOIDmode))
8413 hppa_encode_label (symbol);
8415 #ifdef DTORS_SECTION_ASM_OP
8416 default_dtor_section_asm_out_destructor (symbol, priority);
8417 #else
8418 # ifdef TARGET_ASM_NAMED_SECTION
8419 default_named_section_asm_out_destructor (symbol, priority);
8420 # else
8421 default_stabs_asm_out_destructor (symbol, priority);
8422 # endif
8423 #endif
8425 #endif
8427 /* This function places uninitialized global data in the bss section.
8428 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8429 function on the SOM port to prevent uninitialized global data from
8430 being placed in the data section. */
8432 void
8433 pa_asm_output_aligned_bss (FILE *stream,
8434 const char *name,
8435 unsigned HOST_WIDE_INT size,
8436 unsigned int align)
8438 switch_to_section (bss_section);
8439 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8441 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8442 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8443 #endif
8445 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8446 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8447 #endif
8449 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8450 ASM_OUTPUT_LABEL (stream, name);
8451 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8454 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8455 that doesn't allow the alignment of global common storage to be directly
8456 specified. The SOM linker aligns common storage based on the rounded
8457 value of the NUM_BYTES parameter in the .comm directive. It's not
8458 possible to use the .align directive as it doesn't affect the alignment
8459 of the label associated with a .comm directive. */
8461 void
8462 pa_asm_output_aligned_common (FILE *stream,
8463 const char *name,
8464 unsigned HOST_WIDE_INT size,
8465 unsigned int align)
8467 unsigned int max_common_align;
8469 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8470 if (align > max_common_align)
8472 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8473 "for global common data. Using %u",
8474 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8475 align = max_common_align;
8478 switch_to_section (bss_section);
8480 assemble_name (stream, name);
8481 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8482 MAX (size, align / BITS_PER_UNIT));
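/* For example, a 2-byte object with ALIGN == 64 (bits) emits
   "foo  .comm 8"; rounding NUM_BYTES up to 8 is what buys the 8-byte
   alignment from the SOM linker.  */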
8485 /* We can't use .comm for local common storage as the SOM linker effectively
8486 treats the symbol as universal and uses the same storage for local symbols
8487 with the same name in different object files. The .block directive
8488 reserves an uninitialized block of storage. However, it's not common
8489 storage. Fortunately, GCC never requests common storage with the same
8490 name in any given translation unit. */
8492 void
8493 pa_asm_output_aligned_local (FILE *stream,
8494 const char *name,
8495 unsigned HOST_WIDE_INT size,
8496 unsigned int align)
8498 switch_to_section (bss_section);
8499 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8501 #ifdef LOCAL_ASM_OP
8502 fprintf (stream, "%s", LOCAL_ASM_OP);
8503 assemble_name (stream, name);
8504 fprintf (stream, "\n");
8505 #endif
8507 ASM_OUTPUT_LABEL (stream, name);
8508 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8511 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8512 use in fmpysub instructions. */
8514 fmpysuboperands (rtx *operands)
8516 enum machine_mode mode = GET_MODE (operands[0]);
8518 /* Must be a floating point mode. */
8519 if (mode != SFmode && mode != DFmode)
8520 return 0;
8522 /* All modes must be the same. */
8523 if (! (mode == GET_MODE (operands[1])
8524 && mode == GET_MODE (operands[2])
8525 && mode == GET_MODE (operands[3])
8526 && mode == GET_MODE (operands[4])
8527 && mode == GET_MODE (operands[5])))
8528 return 0;
8530 /* All operands must be registers. */
8531 if (! (GET_CODE (operands[1]) == REG
8532 && GET_CODE (operands[2]) == REG
8533 && GET_CODE (operands[3]) == REG
8534 && GET_CODE (operands[4]) == REG
8535 && GET_CODE (operands[5]) == REG))
8536 return 0;
8538 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8539 operation, so operands[4] must be the same as operands[3]. */
8540 if (! rtx_equal_p (operands[3], operands[4]))
8541 return 0;
8543 /* multiply cannot feed into subtraction. */
8544 if (rtx_equal_p (operands[5], operands[0]))
8545 return 0;
8547 /* Inout operand of sub cannot conflict with any operands from multiply. */
8548 if (rtx_equal_p (operands[3], operands[0])
8549 || rtx_equal_p (operands[3], operands[1])
8550 || rtx_equal_p (operands[3], operands[2]))
8551 return 0;
8553 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8554 if (mode == SFmode
8555 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8556 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8557 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8558 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8559 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8560 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8561 return 0;
8563 /* Passed. Operands are suitable for fmpysub. */
8564 return 1;
8567 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8568 constants for shadd instructions. */
8570 shadd_constant_p (int val)
8572 if (val == 2 || val == 4 || val == 8)
8573 return 1;
8574 else
8575 return 0;
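/* These values match the sh1add, sh2add and sh3add instructions, which
   shift the index left by 1, 2 or 3 bits before adding.  For example,
   "sh2add %r25,%r26,%r28" computes %r28 = (%r25 << 2) + %r26.  */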
8578 /* Return 1 if OP is valid as a base or index register in a
8579 REG+REG address. */
8582 borx_reg_operand (rtx op, enum machine_mode mode)
8584 if (GET_CODE (op) != REG)
8585 return 0;
8587 /* We must reject virtual registers as the only expressions that
8588 can be instantiated are REG and REG+CONST. */
8589 if (op == virtual_incoming_args_rtx
8590 || op == virtual_stack_vars_rtx
8591 || op == virtual_stack_dynamic_rtx
8592 || op == virtual_outgoing_args_rtx
8593 || op == virtual_cfa_rtx)
8594 return 0;
8596 /* While it's always safe to index off the frame pointer, it's not
8597 profitable to do so when the frame pointer is being eliminated. */
8598 if (!reload_completed
8599 && flag_omit_frame_pointer
8600 && !current_function_calls_alloca
8601 && op == frame_pointer_rtx)
8602 return 0;
8604 return register_operand (op, mode);
8607 /* Return 1 if this operand is anything other than a hard register. */
8610 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8612 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8615 /* Return 1 if INSN branches forward. Should be using insn_addresses
8616 to avoid walking through all the insns... */
8617 static int
8618 forward_branch_p (rtx insn)
8620 rtx label = JUMP_LABEL (insn);
8622 while (insn)
8624 if (insn == label)
8625 break;
8626 else
8627 insn = NEXT_INSN (insn);
8630 return (insn == label);
8633 /* Return 1 if OP is an equality comparison, else return 0. */
8635 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8637 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8640 /* Return 1 if INSN is in the delay slot of a call instruction. */
8642 jump_in_call_delay (rtx insn)
8645 if (GET_CODE (insn) != JUMP_INSN)
8646 return 0;
8648 if (PREV_INSN (insn)
8649 && PREV_INSN (PREV_INSN (insn))
8650 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8652 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8654 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8655 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8658 else
8659 return 0;
8662 /* Output an unconditional move and branch insn. */
8664 const char *
8665 output_parallel_movb (rtx *operands, rtx insn)
8667 int length = get_attr_length (insn);
8669 /* These are the cases in which we win. */
8670 if (length == 4)
8671 return "mov%I1b,tr %1,%0,%2";
8673 /* None of the following cases win, but they don't lose either. */
8674 if (length == 8)
8676 if (dbr_sequence_length () == 0)
8678 /* Nothing in the delay slot, fake it by putting the combined
8679 insn (the copy or add) in the delay slot of a bl. */
8680 if (GET_CODE (operands[1]) == CONST_INT)
8681 return "b %2\n\tldi %1,%0";
8682 else
8683 return "b %2\n\tcopy %1,%0";
8685 else
8687 /* Something in the delay slot, but we've got a long branch. */
8688 if (GET_CODE (operands[1]) == CONST_INT)
8689 return "ldi %1,%0\n\tb %2";
8690 else
8691 return "copy %1,%0\n\tb %2";
8695 if (GET_CODE (operands[1]) == CONST_INT)
8696 output_asm_insn ("ldi %1,%0", operands);
8697 else
8698 output_asm_insn ("copy %1,%0", operands);
8699 return output_lbranch (operands[2], insn, 1);
8702 /* Output an unconditional add and branch insn. */
8704 const char *
8705 output_parallel_addb (rtx *operands, rtx insn)
8707 int length = get_attr_length (insn);
8709 /* To make life easy we want operand0 to be the shared input/output
8710 operand and operand1 to be the readonly operand. */
8711 if (operands[0] == operands[1])
8712 operands[1] = operands[2];
8714 /* These are the cases in which we win. */
8715 if (length == 4)
8716 return "add%I1b,tr %1,%0,%3";
8718 /* None of the following cases win, but they don't lose either. */
8719 if (length == 8)
8721 if (dbr_sequence_length () == 0)
8722 /* Nothing in the delay slot, fake it by putting the combined
8723 insn (the copy or add) in the delay slot of a bl. */
8724 return "b %3\n\tadd%I1 %1,%0,%0";
8725 else
8726 /* Something in the delay slot, but we've got a long branch. */
8727 return "add%I1 %1,%0,%0\n\tb %3";
8730 output_asm_insn ("add%I1 %1,%0,%0", operands);
8731 return output_lbranch (operands[3], insn, 1);
8734 /* Return nonzero if INSN (a jump insn) immediately follows a call
8735 to a named function. This is used to avoid filling the delay slot
8736 of the jump since it can usually be eliminated by modifying RP in
8737 the delay slot of the call. */
8740 following_call (rtx insn)
8742 if (! TARGET_JUMP_IN_DELAY)
8743 return 0;
8745 /* Find the previous real insn, skipping NOTEs. */
8746 insn = PREV_INSN (insn);
8747 while (insn && GET_CODE (insn) == NOTE)
8748 insn = PREV_INSN (insn);
8750 /* Check for CALL_INSNs and millicode calls. */
8751 if (insn
8752 && ((GET_CODE (insn) == CALL_INSN
8753 && get_attr_type (insn) != TYPE_DYNCALL)
8754 || (GET_CODE (insn) == INSN
8755 && GET_CODE (PATTERN (insn)) != SEQUENCE
8756 && GET_CODE (PATTERN (insn)) != USE
8757 && GET_CODE (PATTERN (insn)) != CLOBBER
8758 && get_attr_type (insn) == TYPE_MILLI)))
8759 return 1;
8761 return 0;
8764 /* We use this hook to perform a PA specific optimization which is difficult
8765 to do in earlier passes.
8767 We want the delay slots of branches within jump tables to be filled.
8768 None of the compiler passes at the moment even has the notion that a
8769 PA jump table doesn't contain addresses, but instead contains actual
8770 instructions!
8772 Because we actually jump into the table, the addresses of each entry
8773 must stay constant in relation to the beginning of the table (which
8774 itself must stay constant relative to the instruction to jump into
8775 it). I don't believe we can guarantee earlier passes of the compiler
8776 will adhere to those rules.
8778 So, late in the compilation process we find all the jump tables, and
8779 expand them into real code -- e.g. each entry in the jump table vector
8780 will get an appropriate label followed by a jump to the final target.
8782 Reorg and the final jump pass can then optimize these branches and
8783 fill their delay slots. We end up with smaller, more efficient code.
8785 The jump instructions within the table are special; we must be able
8786 to identify them during assembly output (if the jumps don't get filled
8787 we need to emit a nop rather than nullifying the delay slot). We
8788 identify jumps in switch tables by using insns with the attribute
8789 type TYPE_BTABLE_BRANCH.
8791 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8792 insns. This serves two purposes: first, it prevents jump.c from
8793 noticing that the last N entries in the table jump to the instruction
8794 immediately after the table and then deleting those jumps. Second, those
8795 insns mark where we should emit .begin_brtab and .end_brtab directives
8796 when using GAS (allows for better link time optimizations). */
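/* Schematically, a three-entry ADDR_VEC is expanded into (label names
   hypothetical):

       L$n1:  b,n L$1
       L$n2:  b,n L$2
       L$n3:  b,n L$3

   bracketed by the begin/end brtab markers, and reorg is then free to
   fill each branch's delay slot.  */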
8798 static void
8799 pa_reorg (void)
8801 rtx insn;
8803 remove_useless_addtr_insns (1);
8805 if (pa_cpu < PROCESSOR_8000)
8806 pa_combine_instructions ();
8809 /* This is fairly cheap, so always run it if optimizing. */
8810 if (optimize > 0 && !TARGET_BIG_SWITCH)
8812 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8813 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8815 rtx pattern, tmp, location, label;
8816 unsigned int length, i;
8818 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8819 if (GET_CODE (insn) != JUMP_INSN
8820 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8821 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8822 continue;
8824 /* Emit marker for the beginning of the branch table. */
8825 emit_insn_before (gen_begin_brtab (), insn);
8827 pattern = PATTERN (insn);
8828 location = PREV_INSN (insn);
8829 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8831 for (i = 0; i < length; i++)
8833 /* Emit a label before each jump to keep jump.c from
8834 removing this code. */
8835 tmp = gen_label_rtx ();
8836 LABEL_NUSES (tmp) = 1;
8837 emit_label_after (tmp, location);
8838 location = NEXT_INSN (location);
8840 if (GET_CODE (pattern) == ADDR_VEC)
8841 label = XEXP (XVECEXP (pattern, 0, i), 0);
8842 else
8843 label = XEXP (XVECEXP (pattern, 1, i), 0);
8845 tmp = gen_short_jump (label);
8847 /* Emit the jump itself. */
8848 tmp = emit_jump_insn_after (tmp, location);
8849 JUMP_LABEL (tmp) = label;
8850 LABEL_NUSES (label)++;
8851 location = NEXT_INSN (location);
8853 /* Emit a BARRIER after the jump. */
8854 emit_barrier_after (location);
8855 location = NEXT_INSN (location);
8858 /* Emit marker for the end of the branch table. */
8859 emit_insn_before (gen_end_brtab (), location);
8860 location = NEXT_INSN (location);
8861 emit_barrier_after (location);
8863 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8864 delete_insn (insn);
8867 else
8869 /* Still need brtab marker insns. FIXME: the presence of these
8870 markers disables output of the branch table to readonly memory,
8871 and any alignment directives that might be needed. Possibly,
8872 the begin_brtab insn should be output before the label for the
8873 table. This doesn't matter at the moment since the tables are
8874 always output in the text section. */
8875 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8877 /* Find an ADDR_VEC insn. */
8878 if (GET_CODE (insn) != JUMP_INSN
8879 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8880 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8881 continue;
8883 /* Now generate markers for the beginning and end of the
8884 branch table. */
8885 emit_insn_before (gen_begin_brtab (), insn);
8886 emit_insn_after (gen_end_brtab (), insn);
8891 /* The PA has a number of odd instructions which can perform multiple
8892 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8893 it may be profitable to combine two instructions into one instruction
8894 with two outputs. It's not profitable on PA2.0 machines because the
8895 two outputs would take two slots in the reorder buffers.
8897 This routine finds instructions which can be combined and combines
8898 them. We only support some of the potential combinations, and we
8899 only try common ways to find suitable instructions.
8901 * addb can add two registers or a register and a small integer
8902 and jump to a nearby (+-8k) location. Normally the jump to the
8903 nearby location is conditional on the result of the add, but by
8904 using the "true" condition we can make the jump unconditional.
8905 Thus addb can perform two independent operations in one insn.
8907 * movb is similar to addb in that it can perform a reg->reg
8908 or small immediate->reg copy and jump to a nearby (+-8k location).
8910 * fmpyadd and fmpysub can perform a FP multiply and either an
8911 FP add or FP sub if the operands of the multiply and add/sub are
8912 independent (there are other minor restrictions). Note both
8913 the fmpy and fadd/fsub can in theory move to better spots according
8914 to data dependencies, but for now we require the fmpy stay at a
8915 fixed location.
8917 * Many of the memory operations can perform pre & post updates
8918 of index registers. GCC's pre/post increment/decrement addressing
8919 is far too simple to take advantage of all the possibilities. This
8920 pass may not be suitable since those insns may not be independent.
8922 * comclr can compare two ints or an int and a register, nullify
8923 the following instruction and zero some other register. This
8924 is more difficult to use as it's harder to find an insn which
8925 will generate a comclr than finding something like an unconditional
8926 branch. (conditional moves & long branches create comclr insns).
8928 * Most arithmetic operations can conditionally skip the next
8929 instruction. They can be viewed as "perform this operation
8930 and conditionally jump to this nearby location" (where nearby
8931 is an insn away). These are difficult to use due to the
8932 branch length restrictions. */
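/* An illustrative sketch of the addb case (label hypothetical): the
   pass below can recognize

       add  %r26,%r25,%r25
       ...
       b    L$17

   as one PARALLEL and emit the single insn

       addb,tr %r26,%r25,L$17

   using the "true" condition described above.  */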
8934 static void
8935 pa_combine_instructions (void)
8937 rtx anchor, new;
8939 /* This can get expensive since the basic algorithm is on the
8940 order of O(n^2) (or worse). Only do it for -O2 or higher
8941 levels of optimization. */
8942 if (optimize < 2)
8943 return;
8945 /* Walk down the list of insns looking for "anchor" insns which
8946 may be combined with "floating" insns. As the name implies,
8947 "anchor" instructions don't move, while "floating" insns may
8948 move around. */
8949 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8950 new = make_insn_raw (new);
8952 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8954 enum attr_pa_combine_type anchor_attr;
8955 enum attr_pa_combine_type floater_attr;
8957 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8958 Also ignore any special USE insns. */
8959 if ((GET_CODE (anchor) != INSN
8960 && GET_CODE (anchor) != JUMP_INSN
8961 && GET_CODE (anchor) != CALL_INSN)
8962 || GET_CODE (PATTERN (anchor)) == USE
8963 || GET_CODE (PATTERN (anchor)) == CLOBBER
8964 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8965 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8966 continue;
8968 anchor_attr = get_attr_pa_combine_type (anchor);
8969 /* See if anchor is an insn suitable for combination. */
8970 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8971 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8972 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8973 && ! forward_branch_p (anchor)))
8975 rtx floater;
8977 for (floater = PREV_INSN (anchor);
8978 floater;
8979 floater = PREV_INSN (floater))
8981 if (GET_CODE (floater) == NOTE
8982 || (GET_CODE (floater) == INSN
8983 && (GET_CODE (PATTERN (floater)) == USE
8984 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8985 continue;
8987 /* Anything except a regular INSN will stop our search. */
8988 if (GET_CODE (floater) != INSN
8989 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8990 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8992 floater = NULL_RTX;
8993 break;
8996 /* See if FLOATER is suitable for combination with the
8997 anchor. */
8998 floater_attr = get_attr_pa_combine_type (floater);
8999 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9000 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9001 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9002 && floater_attr == PA_COMBINE_TYPE_FMPY))
9004 /* If ANCHOR and FLOATER can be combined, then we're
9005 done with this pass. */
9006 if (pa_can_combine_p (new, anchor, floater, 0,
9007 SET_DEST (PATTERN (floater)),
9008 XEXP (SET_SRC (PATTERN (floater)), 0),
9009 XEXP (SET_SRC (PATTERN (floater)), 1)))
9010 break;
9013 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9014 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9016 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9018 if (pa_can_combine_p (new, anchor, floater, 0,
9019 SET_DEST (PATTERN (floater)),
9020 XEXP (SET_SRC (PATTERN (floater)), 0),
9021 XEXP (SET_SRC (PATTERN (floater)), 1)))
9022 break;
9024 else
9026 if (pa_can_combine_p (new, anchor, floater, 0,
9027 SET_DEST (PATTERN (floater)),
9028 SET_SRC (PATTERN (floater)),
9029 SET_SRC (PATTERN (floater))))
9030 break;
9035 /* If we didn't find anything on the backwards scan try forwards. */
9036 if (!floater
9037 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9038 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9040 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9042 if (GET_CODE (floater) == NOTE
9043 || (GET_CODE (floater) == INSN
9044 && (GET_CODE (PATTERN (floater)) == USE
9045 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9047 continue;
9049 /* Anything except a regular INSN will stop our search. */
9050 if (GET_CODE (floater) != INSN
9051 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9052 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9054 floater = NULL_RTX;
9055 break;
9058 /* See if FLOATER is suitable for combination with the
9059 anchor. */
9060 floater_attr = get_attr_pa_combine_type (floater);
9061 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9062 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9063 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9064 && floater_attr == PA_COMBINE_TYPE_FMPY))
9066 /* If ANCHOR and FLOATER can be combined, then we're
9067 done with this pass. */
9068 if (pa_can_combine_p (new, anchor, floater, 1,
9069 SET_DEST (PATTERN (floater)),
9070 XEXP (SET_SRC (PATTERN (floater)), 0),
9072 XEXP (SET_SRC (PATTERN (floater)), 1)))
9074 break;
9079 /* FLOATER will be nonzero if we found a suitable floating
9080 insn for combination with ANCHOR. */
9081 if (floater
9082 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9083 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9085 /* Emit the new instruction and delete the old anchor. */
9086 emit_insn_before (gen_rtx_PARALLEL
9087 (VOIDmode,
9088 gen_rtvec (2, PATTERN (anchor),
9089 PATTERN (floater))),
9090 anchor);
9092 SET_INSN_DELETED (anchor);
9094 /* Emit a special USE insn for FLOATER, then delete
9095 the floating insn. */
9096 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9097 delete_insn (floater);
9099 continue;
9101 else if (floater
9102 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9104 rtx temp;
9105 /* Emit the new_jump instruction and delete the old anchor. */
9106 temp
9107 = emit_jump_insn_before (gen_rtx_PARALLEL
9108 (VOIDmode,
9109 gen_rtvec (2, PATTERN (anchor),
9110 PATTERN (floater))),
9111 anchor);
9113 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9114 SET_INSN_DELETED (anchor);
9116 /* Emit a special USE insn for FLOATER, then delete
9117 the floating insn. */
9118 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9119 delete_insn (floater);
9120 continue;
9126 static int
9127 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
9128 rtx src1, rtx src2)
9130 int insn_code_number;
9131 rtx start, end;
9133 /* Create a PARALLEL with the patterns of ANCHOR and
9134 FLOATER, try to recognize it, then test constraints
9135 for the resulting pattern.
9137 If the pattern doesn't match or the constraints
9138 aren't met keep searching for a suitable floater
9139 insn. */
9140 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
9141 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
9142 INSN_CODE (new) = -1;
9143 insn_code_number = recog_memoized (new);
9144 if (insn_code_number < 0
9145 || (extract_insn (new), ! constrain_operands (1)))
9146 return 0;
9148 if (reversed)
9150 start = anchor;
9151 end = floater;
9153 else
9155 start = floater;
9156 end = anchor;
9159 /* There are up to three operands to consider. One
9160 output and two inputs.
9162 The output must not be used between FLOATER & ANCHOR
9163 exclusive. The inputs must not be set between
9164 FLOATER and ANCHOR exclusive. */
9166 if (reg_used_between_p (dest, start, end))
9167 return 0;
9169 if (reg_set_between_p (src1, start, end))
9170 return 0;
9172 if (reg_set_between_p (src2, start, end))
9173 return 0;
9175 /* If we get here, then everything is good. */
9176 return 1;
9179 /* Return nonzero if references for INSN are delayed.
9181 Millicode insns are actually function calls with some special
9182 constraints on arguments and register usage.
9184 Millicode calls always expect their arguments in the integer argument
9185 registers, and always return their result in %r29 (ret1). They
9186 are expected to clobber their arguments, %r1, %r29, and the return
9187 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9189 This function tells reorg that the references to arguments by
9190 millicode calls do not appear to happen until after the millicode call.
9191 This allows reorg to put insns which set the argument registers into the
9192 delay slot of the millicode call -- thus they act more like traditional
9193 CALL_INSNs.
9195 Note we cannot consider side effects of the insn to be delayed because
9196 the branch and link insn will clobber the return pointer. If we happened
9197 to use the return pointer in the delay slot of the call, then we lose.
9199 get_attr_type will try to recognize the given insn, so make sure to
9200 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9201 in particular. */
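/* For example, given

       ldi 7,%r25
       bl $$divI,%r31
       nop

   reorg may move the ldi into the delay slot of the millicode call,
   since the argument register only appears to be referenced after the
   call.  The millicode routine and argument register were chosen only
   for illustration.  */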
9203 insn_refs_are_delayed (rtx insn)
9205 return ((GET_CODE (insn) == INSN
9206 && GET_CODE (PATTERN (insn)) != SEQUENCE
9207 && GET_CODE (PATTERN (insn)) != USE
9208 && GET_CODE (PATTERN (insn)) != CLOBBER
9209 && get_attr_type (insn) == TYPE_MILLI));
9212 /* On the HP-PA the value is found in register(s) 28(-29), unless
9213 the mode is SF or DF. Then the value is returned in fr4 (32).
9215 This must perform the same promotions as PROMOTE_MODE, else
9216 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
9218 Small structures must be returned in a PARALLEL on PA64 in order
9219 to match the HP Compiler ABI. */
9222 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
9224 enum machine_mode valmode;
9226 if (AGGREGATE_TYPE_P (valtype)
9227 || TREE_CODE (valtype) == COMPLEX_TYPE
9228 || TREE_CODE (valtype) == VECTOR_TYPE)
9230 if (TARGET_64BIT)
9232 /* Aggregates with a size less than or equal to 128 bits are
9233 returned in GR 28(-29). They are left justified. The pad
9234 bits are undefined. Larger aggregates are returned in
9235 memory. */
9236 rtx loc[2];
9237 int i, offset = 0;
9238 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9240 for (i = 0; i < ub; i++)
9242 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9243 gen_rtx_REG (DImode, 28 + i),
9244 GEN_INT (offset));
9245 offset += 8;
9248 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9250 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9252 /* Aggregates 5 to 8 bytes in size are returned in general
9253 registers r28-r29 in the same manner as other non
9254 floating-point objects. The data is right-justified and
9255 zero-extended to 64 bits. This is opposite to the normal
9256 justification used on big endian targets and requires
9257 special treatment. */
9258 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9259 gen_rtx_REG (DImode, 28), const0_rtx);
9260 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9264 if ((INTEGRAL_TYPE_P (valtype)
9265 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9266 || POINTER_TYPE_P (valtype))
9267 valmode = word_mode;
9268 else
9269 valmode = TYPE_MODE (valtype);
9271 if (TREE_CODE (valtype) == REAL_TYPE
9272 && !AGGREGATE_TYPE_P (valtype)
9273 && TYPE_MODE (valtype) != TFmode
9274 && !TARGET_SOFT_FLOAT)
9275 return gen_rtx_REG (valmode, 32);
9277 return gen_rtx_REG (valmode, 28);
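/* For example: an int comes back in %r28; a long long on the 32-bit
   port in %r28-%r29 (a single DImode register starting at 28); a float
   or double in %fr4 (register 32) unless TARGET_SOFT_FLOAT; and a
   16-byte aggregate on PA64 as the two-element PARALLEL built above.  */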
9280 /* Return the location of a parameter that is passed in a register or NULL
9281 if the parameter has any component that is passed in memory.
9283 This is new code and will be pushed into the net sources after
9284 further testing.
9286 ??? We might want to restructure this so that it looks more like other
9287 ports. */
9289 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9290 int named ATTRIBUTE_UNUSED)
9292 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9293 int alignment = 0;
9294 int arg_size;
9295 int fpr_reg_base;
9296 int gpr_reg_base;
9297 rtx retval;
9299 if (mode == VOIDmode)
9300 return NULL_RTX;
9302 arg_size = FUNCTION_ARG_SIZE (mode, type);
9304 /* If this arg would be passed partially or totally on the stack, then
9305 this routine should return zero. pa_arg_partial_bytes will
9306 handle arguments which are split between regs and stack slots if
9307 the ABI mandates split arguments. */
9308 if (!TARGET_64BIT)
9310 /* The 32-bit ABI does not split arguments. */
9311 if (cum->words + arg_size > max_arg_words)
9312 return NULL_RTX;
9314 else
9316 if (arg_size > 1)
9317 alignment = cum->words & 1;
9318 if (cum->words + alignment >= max_arg_words)
9319 return NULL_RTX;
9322 /* The 32bit ABIs and the 64bit ABIs are rather different,
9323 particularly in their handling of FP registers. We might
9324 be able to cleverly share code between them, but I'm not
9325 going to bother in the hope that splitting them up results
9326 in code that is more easily understood. */
9328 if (TARGET_64BIT)
9330 /* Advance the base registers to their current locations.
9332 Remember, gprs grow towards smaller register numbers while
9333 fprs grow to higher register numbers. Also remember that
9334 although FP regs are 32-bit addressable, we pretend that
9335 the registers are 64-bits wide. */
9336 gpr_reg_base = 26 - cum->words;
9337 fpr_reg_base = 32 + cum->words;
9339 /* Arguments wider than one word and small aggregates need special
9340 treatment. */
9341 if (arg_size > 1
9342 || mode == BLKmode
9343 || (type && (AGGREGATE_TYPE_P (type)
9344 || TREE_CODE (type) == COMPLEX_TYPE
9345 || TREE_CODE (type) == VECTOR_TYPE)))
9347 /* Double-extended precision (80-bit), quad-precision (128-bit)
9348 and aggregates including complex numbers are aligned on
9349 128-bit boundaries. The first eight 64-bit argument slots
9350 are associated one-to-one, with general registers r26
9351 through r19, and also with floating-point registers fr4
9352 through fr11. Arguments larger than one word are always
9353 passed in general registers.
9355 Using a PARALLEL with a word mode register results in left
9356 justified data on a big-endian target. */
9358 rtx loc[8];
9359 int i, offset = 0, ub = arg_size;
9361 /* Align the base register. */
9362 gpr_reg_base -= alignment;
9364 ub = MIN (ub, max_arg_words - cum->words - alignment);
9365 for (i = 0; i < ub; i++)
9367 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9368 gen_rtx_REG (DImode, gpr_reg_base),
9369 GEN_INT (offset));
9370 gpr_reg_base -= 1;
9371 offset += 8;
9374 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
  else
    {
      /* If the argument is larger than a word, then we know precisely
         which registers we must use.  */
      if (arg_size > 1)
        {
          if (cum->words)
            {
              gpr_reg_base = 23;
              fpr_reg_base = 38;
            }
          else
            {
              gpr_reg_base = 25;
              fpr_reg_base = 34;
            }

          /* Structures 5 to 8 bytes in size are passed in the general
             registers in the same manner as other non floating-point
             objects.  The data is right-justified and zero-extended
             to 64 bits.  This is opposite to the normal justification
             used on big endian targets and requires special treatment.
             We now define BLOCK_REG_PADDING to pad these objects.
             Aggregates, complex and vector types are passed in the same
             manner as structures.  */
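
          /* For example, a 6-byte struct passed as the first argument
             (cum->words == 0) is described as
               (parallel:BLK [(expr_list (reg:DI 25) (const_int 0))])
             with the data right justified in the register pair, as
             described above.  */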
          if (mode == BLKmode
              || (type && (AGGREGATE_TYPE_P (type)
                           || TREE_CODE (type) == COMPLEX_TYPE
                           || TREE_CODE (type) == VECTOR_TYPE)))
            {
              rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (DImode, gpr_reg_base),
                                           const0_rtx);
              return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
            }
        }
      else
        {
          /* We have a single word (32 bits).  A simple computation
             will get us the register #s we need.  */
          gpr_reg_base = 26 - cum->words;
          fpr_reg_base = 32 + 2 * cum->words;
        }
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
          is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
          pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
          registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
          sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
         calls with the 32-bit ABI and the HP assembler, since there is no
         way to specify the argument locations in static functions.  */
      || (!TARGET_64BIT
          && !TARGET_GAS
          && !cum->incoming
          && cum->indirect
          && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
        = gen_rtx_PARALLEL
            (mode,
             gen_rtvec (2,
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, fpr_reg_base),
                                           const0_rtx),
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, gpr_reg_base),
                                           const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
          /* Indirect calls in the normal 32-bit ABI require all arguments
             to be passed in general registers.  */
          || (!TARGET_PORTABLE_RUNTIME
              && !TARGET_64BIT
              && !TARGET_ELF32
              && cum->indirect)
          /* If the parameter is not a scalar floating-point parameter,
             then it belongs in GPRs.  */
          || GET_MODE_CLASS (mode) != MODE_FLOAT
          /* A structure with a single SFmode field belongs in a GPR.  */
          || (type && AGGREGATE_TYPE_P (type)))
        retval = gen_rtx_REG (mode, gpr_reg_base);
      else
        retval = gen_rtx_REG (mode, fpr_reg_base);
    }

  return retval;
}
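
/* For example, a `double' passed to an unprototyped function with the
   32-bit ELF ABI (cum->words == 0, so fpr_reg_base == 34 and
   gpr_reg_base == 25) comes back from function_arg as
     (parallel:DF [(expr_list (reg:DF 34) (const_int 0))
                   (expr_list (reg:DF 25) (const_int 0))])
   so the caller loads the value into both its FP and its GP homes,
   whichever the callee actually expects.  */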

/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
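
/* For example, with TARGET_64BIT, cum->words == 6 and a three-word
   aggregate (FUNCTION_ARG_SIZE == 3): offset stays 0 since cum->words
   is even, 6 + 0 + 3 > 8 while 6 + 0 < 8, so the argument is split and
   (8 - 6 - 0) * UNITS_PER_WORD == 16 bytes are passed in registers,
   with the remaining word going to the stack.  */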

/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            {
              output_section_asm_op ("\t.SPACE $TEXT$\n"
                                     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
                                     "ACCESS=44,SORT=24,COMDAT");
              return;
            }
        }
      else
        {
          /* There isn't a current function or the body of the current
             function has been completed.  So, we are changing to the
             text section to output debugging information.  Thus, we
             need to forget that we are in the text section so that
             varasm.c will call us when text_section is selected again.  */
          gcc_assert (!cfun || !cfun->machine
                      || cfun->machine->in_nsubspa == 2);
          in_section = NULL;
        }
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}

/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}

/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols, which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}

/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc
   cannot be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
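
/* For example, `static const char msg[] = "hello";' selects
   readonly_data_section (on SOM, the $LIT$ subspace unless we are
   generating PIC code), a one-only read-only variable on SOM selects
   som_one_only_readonly_data_section, and anything whose initializer
   needs a reloc falls through to data_section.  */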

static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here; functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* The SOM ABI says that objects larger than 64 bits are returned in memory.
     The PA64 ABI says that objects larger than 128 bits are returned in
     memory.  Note, int_size_in_bytes can return -1 if the size of the object
     is variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
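
/* For example, a 12-byte structure is returned in memory with the
   32-bit ABI (12 > 8) but in registers under PA64 (12 <= 16), while a
   variable-sized object (int_size_in_bytes == -1) is always returned
   in memory.  */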

/* Structure to hold the declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct extern_symbol GTY(())
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbols.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;

#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol *p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}

/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif
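
/* Deferring the .import directives this way means a declaration such
   as `extern int foo;' that is never actually used produces no
   assembler output, while one whose SYMBOL_REF is referenced is
   imported at the end of the file via ASM_OUTPUT_EXTERNAL_REAL.  */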

/* Return true if a change from mode FROM to mode TO for a register
   in register class CLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                             enum reg_class class)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (class))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
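
/* For example, accessing an SImode value as SFmode (equal sizes) is
   fine in any class, but accessing a DImode value as SImode is
   rejected in the FP registers, and a change from SImode to DFmode is
   also rejected in the general registers of the 32-bit target because
   DFmode is wider than a word.  */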

/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}

#include "gt-pa.h"