/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "df.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
                                           enum machine_mode,
                                           secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
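
/* Usage note (illustrative, not in the original source): with
   -mfixed-range=fr4-fr31, the kernel example from the comment above,
   fix_range marks fr4 through fr31 both fixed and call-used.
   MASK_DISABLE_FPREGS is set only when every register from
   FP_REG_FIRST to FP_REG_LAST ends up fixed.  Comma-separated
   ranges such as -mfixed-range=fr4-fr15,fr20-fr31 are also
   accepted.  */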

/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
        pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
        pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
        pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
        pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
        pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
        pa_cpu = PROCESSOR_7300;
      else
        return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}

void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX_11
  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || ldil_cint_p (ival)
          || zdepi_cint_p (ival));
}
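
/* Illustrative note (not in the original source): the three tests
   correspond, in order, to the three instructions named in the
   comment above: VAL_14_BITS_P covers constants a single ldo can
   supply as a 14-bit signed immediate, while ldil_cint_p and
   zdepi_cint_p (defined below) cover the ldil and zdepi cases
   respectively.  */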

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff the operand OP can be used as the destination operand of
   an integer store.  This also implies the operand could be used as
   the source operand of an integer load.  Symbolic, lo_sum and indexed
   memory operands are not allowed.  We accept reloading pseudos and
   other memory operands.  */
int
integer_store_memory_operand (rtx op, enum machine_mode mode)
{
  return ((reload_in_progress
           && REG_P (op)
           && REGNO (op) >= FIRST_PSEUDO_REGISTER
           && reg_renumber [REGNO (op)] < 0)
          || (GET_CODE (op) == MEM
              && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
              && !symbolic_memory_operand (op, VOIDmode)
              && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
              && !IS_INDEX_ADDR_P (XEXP (op, 0))));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
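
/* Worked example (illustrative, not in the original source):
   0x12345000 is accepted: its low 11 bits are zero and bits 31 and
   up are clear, so x == 0.  By contrast, 0x80000000 is rejected on
   a 64-bit HOST_WIDE_INT: x == 0x80000000, which is neither 0 nor
   ((HOST_WIDE_INT) -1 << 31), i.e. the value would change sign when
   extended from 32 to 64 bits.  */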

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
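
/* Worked example (illustrative, not in the original source): for
   x = 0x1e0 (four 1s starting at bit 5), lsb_mask = 0x20 and
   t = (0x1e + 0x20) & ~0x1f = 0x20, a power of two, so the constant
   is accepted.  For x = 0x108 (bits 3 and 8 set, a 6-bit-wide
   field), t = (0x10 + 0x8) & ~0x7 = 0x18, not a power of two, so it
   is rejected: the field does not fit in a sign-extended 5-bit
   immediate.  */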

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
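
/* Worked example (illustrative, not in the original source): for
   mask = 0xfffffc0f (pattern 1..10..01..1), ~mask = 0x3f0; adding
   its least significant set bit gives 0x400, a power of two, so the
   mask is accepted.  A mask whose complement has two separate runs
   of 1s, such as 0xff0ff00f, still has extra bits set after the
   addition and is rejected.  */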

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
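
/* Worked example (illustrative, not in the original source):
   mask = 0x00ff0000, a single contiguous run of 1s, becomes
   0x01000000 after the addition, a power of two, so it is accepted;
   0x00ff00ff has two runs of 1s and is rejected.  */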

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL_OPERAND, orig,
                                                REG_NOTES (insn));
          LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, mode))
        {
          /* Force function label into memory.  */
          orig = XEXP (force_const_mem (mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
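
/* Worked example (illustrative, not in the original source): for a
   MODE_INT reference to X + 70000, the mask is 0x3fff; 70000 & 0x3fff
   is 4464, which is below the halfway point 0x2000, so we round
   down: Y = 65536, Z = X + 65536, and the reference becomes
   memory (Z + 4464), whose displacement fits in 14 bits.  */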

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          orig_base = XEXP (XEXP (x, 0), 1);
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          orig_base = XEXP (x, 1);
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
           && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
                                 XEXP (operand1, 0)))
          || ((GET_CODE (operand1) == SUBREG
               && GET_CODE (XEXP (operand1, 0)) == MEM
               && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                      ? SFmode : DFmode),
                                     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && ((GET_CODE (operand0) == MEM
                && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                       ? SFmode : DFmode),
                                      XEXP (operand0, 0)))
               || ((GET_CODE (operand0) == SUBREG)
                   && GET_CODE (XEXP (operand0, 0)) == MEM
                   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                          ? SFmode : DFmode),
                                         XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
                                                                       0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand0, 0),
                                                             0),
                                                       scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
                              replace_equiv_address (operand0, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM
               || GET_CODE (operand1) == CONST_INT
               || (GET_CODE (operand1) == REG
                   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
                                                                       0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand1, 0),
                                                             0),
                                                       scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             OPERAND0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg,
                          replace_equiv_address (operand1, scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
          || (GET_CODE (operand1) == CONST_INT
              && cint_ok_for_move (INTVAL (operand1)))
          || (operand1 == CONST0_RTX (mode))
          || (GET_CODE (operand1) == HIGH
              && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
          /* Only `general_operands' can come here, so MEM is ok.  */
          || GET_CODE (operand1) == MEM)
        {
          /* Various sets are created during RTL generation which don't
             have the REG_POINTER flag correctly set.  After the CSE pass,
             instruction recognition can fail if we don't consistently
             set this flag when performing register copies.  This should
             also improve the opportunities for creating insns that use
             unscaled indexing.  */
          if (REG_P (operand0) && REG_P (operand1))
            {
              if (REG_POINTER (operand1)
                  && !REG_POINTER (operand0)
                  && !HARD_REGISTER_P (operand0))
                copy_reg_pointer (operand0, operand1);
              else if (REG_POINTER (operand0)
                       && !REG_POINTER (operand1)
                       && !HARD_REGISTER_P (operand1))
                copy_reg_pointer (operand1, operand0);
            }

          /* When MEMs are broken out, the REG_POINTER flag doesn't
             get set.  In some cases, we can set the REG_POINTER flag
             from the declaration for the MEM.  */
          if (REG_P (operand0)
              && GET_CODE (operand1) == MEM
              && !REG_POINTER (operand0))
            {
              tree decl = MEM_EXPR (operand1);

              /* Set the register pointer flag and register alignment
                 if the declaration for this memory reference is a
                 pointer type.  Fortran indirect argument references
                 are ignored.  */
              if (decl
                  && !(flag_argument_noalias > 1
                       && TREE_CODE (decl) == INDIRECT_REF
                       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
                {
                  tree type;

                  /* If this is a COMPONENT_REF, use the FIELD_DECL from
                     tree operand 1.  */
                  if (TREE_CODE (decl) == COMPONENT_REF)
                    decl = TREE_OPERAND (decl, 1);

                  type = TREE_TYPE (decl);
                  type = strip_array_types (type);

                  if (POINTER_TYPE_P (type))
                    {
                      int align;

                      type = TREE_TYPE (type);
                      /* Using TYPE_ALIGN_OK is rather conservative as
                         only the ada frontend actually sets it.  */
                      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
                               : BITS_PER_UNIT);
                      mark_reg_pointer (operand0, align);
                    }
                }
            }

          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
          && !(reload_in_progress || reload_completed))
        {
          rtx temp = gen_reg_rtx (DFmode);

          emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
          emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
          return 1;
        }
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
        {
          /* Run this case quickly.  */
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
      if (! (reload_in_progress || reload_completed))
        {
          operands[0] = validize_mem (operand0);
          operands[1] = operand1 = force_reg (mode, operand1);
        }
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
          && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
        {
          ishighonly = 1;
          operand1 = XEXP (operand1, 0);
        }
      if (symbolic_operand (operand1, mode))
        {
          /* Argh.  The assembler and linker can't handle arithmetic
             involving plabels.

             So we force the plabel into memory, load operand0 from
             the memory location, then add in the constant part.  */
          if ((GET_CODE (operand1) == CONST
               && GET_CODE (XEXP (operand1, 0)) == PLUS
               && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
              || function_label_operand (operand1, mode))
            {
              rtx temp, const_part;

              /* Figure out what (if any) scratch register to use.  */
              if (reload_in_progress || reload_completed)
                {
                  scratch_reg = scratch_reg ? scratch_reg : operand0;
                  /* SCRATCH_REG will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  scratch_reg = force_mode (word_mode, scratch_reg);
                }
              else if (flag_pic)
                scratch_reg = gen_reg_rtx (Pmode);

              if (GET_CODE (operand1) == CONST)
                {
                  /* Save away the constant part of the expression.  */
                  const_part = XEXP (XEXP (operand1, 0), 1);
                  gcc_assert (GET_CODE (const_part) == CONST_INT);

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
                }
              else
                {
                  /* No constant part.  */
                  const_part = NULL_RTX;

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, operand1);
                }

              /* Get the address of the memory location.  PIC-ify it if
                 necessary.  */
              temp = XEXP (temp, 0);
              if (flag_pic)
                temp = legitimize_pic_address (temp, mode, scratch_reg);

              /* Put the address of the memory location into our destination
                 register.  */
              operands[1] = temp;
              emit_move_sequence (operands, mode, scratch_reg);

              /* Now load from the memory location into our destination
                 register.  */
              operands[1] = gen_rtx_MEM (Pmode, operands[0]);
              emit_move_sequence (operands, mode, scratch_reg);

              /* And add back in the constant part.  */
              if (const_part != NULL_RTX)
                expand_inc (operand0, const_part);

              return 1;
            }

          if (flag_pic)
            {
              rtx temp;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (Pmode);

              /* (const (plus (symbol) (const_int))) must be forced to
                 memory during/after reload if the const_int will not fit
                 in 14 bits.  */
              if (GET_CODE (operand1) == CONST
                  && GET_CODE (XEXP (operand1, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
                  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
                  && (reload_completed || reload_in_progress)
                  && flag_pic)
                {
                  rtx const_mem = force_const_mem (mode, operand1);
                  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
                                                        mode, temp);
                  operands[1] = replace_equiv_address (const_mem, operands[1]);
                  emit_move_sequence (operands, mode, temp);
                }
              else
                {
                  operands[1] = legitimize_pic_address (operand1, mode, temp);
                  if (REG_P (operand0) && REG_P (operands[1]))
                    copy_reg_pointer (operand0, operands[1]);
                  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
                }
            }
          /* On the HPPA, references to data space are supposed to use dp,
             register 27, but showing it in the RTL inhibits various cse
             and loop optimizations.  */
          else
            {
              rtx temp, set;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (mode);

              /* Loading a SYMBOL_REF into a register makes that register
                 safe to be used as the base in an indexed address.

                 Don't mark hard registers though.  That loses.  */
              if (GET_CODE (operand0) == REG
                  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (operand0, BITS_PER_UNIT);
              if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (temp, BITS_PER_UNIT);

              if (ishighonly)
                set = gen_rtx_SET (mode, operand0, temp);
              else
                set = gen_rtx_SET (VOIDmode,
                                   operand0,
                                   gen_rtx_LO_SUM (mode, temp, operand1));

              emit_insn (gen_rtx_SET (VOIDmode,
                                      temp,
                                      gen_rtx_HIGH (mode, operand1)));
              emit_insn (set);

            }
          return 1;
        }
      else if (pa_tls_referenced_p (operand1))
        {
          rtx tmp = operand1;
          rtx addend = NULL;

          if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
            {
              addend = XEXP (XEXP (tmp, 0), 1);
              tmp = XEXP (XEXP (tmp, 0), 0);
            }

          gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
          tmp = legitimize_tls_address (tmp);
          if (addend)
            {
              tmp = gen_rtx_PLUS (mode, tmp, addend);
              tmp = force_operand (tmp, operands[0]);
            }
          operands[1] = tmp;
        }
      else if (GET_CODE (operand1) != CONST_INT
               || !cint_ok_for_move (INTVAL (operand1)))
        {
          rtx insn, temp;
1949 rtx op1 = operand1;
1950 HOST_WIDE_INT value = 0;
1951 HOST_WIDE_INT insv = 0;
1952 int insert = 0;
1954 if (GET_CODE (operand1) == CONST_INT)
1955 value = INTVAL (operand1);
1957 if (TARGET_64BIT
1958 && GET_CODE (operand1) == CONST_INT
1959 && HOST_BITS_PER_WIDE_INT > 32
1960 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1962 HOST_WIDE_INT nval;
1964 /* Extract the low order 32 bits of the value and sign extend.
1965 If the new value is the same as the original value, we can
1966 use the original value as-is. If the new value is
1967 different, we use it and insert the most-significant 32 bits
1968 of the original value into the final result. */
1969 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1970 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
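/* An illustrative example of the sign-extension idiom above (not in
   the original source).  Masking with ((HOST_WIDE_INT) 2 << 31) - 1
   keeps the low 32 bits; the XOR/subtract pair then sign extends bit
   31.  Assuming a 64-bit HOST_WIDE_INT:

   value = 0x000000007fffffff -> nval = 0x000000007fffffff (unchanged)
   value = 0x00000000ffffffff -> nval = -1 (differs, so insert = 1 and
   insv = value >> 32 = 0; the high 32 bits are reinserted below). */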
1971 if (value != nval)
1973 #if HOST_BITS_PER_WIDE_INT > 32
1974 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1975 #endif
1976 insert = 1;
1977 value = nval;
1978 operand1 = GEN_INT (nval);
1982 if (reload_in_progress || reload_completed)
1983 temp = scratch_reg ? scratch_reg : operand0;
1984 else
1985 temp = gen_reg_rtx (mode);
1987 /* We don't directly split DImode constants on 32-bit targets
1988 because PLUS uses an 11-bit immediate and the insn sequence
1989 generated is not as efficient as the one using HIGH/LO_SUM. */
1990 if (GET_CODE (operand1) == CONST_INT
1991 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
1992 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1993 && !insert)
1995 /* Directly break constant into high and low parts. This
1996 provides better optimization opportunities because various
1997 passes recognize constants split with PLUS but not LO_SUM.
1998 We use a 14-bit signed low part except when the addition
1999 of 0x4000 to the high part might change the sign of the
2000 high part. */
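/* An illustrative example of the split (not in the original source).
   For value = 0x12345678: low = 0x5678 & 0x3fff = 0x1678 (< 0x2000,
   no adjustment) and high = 0x12344000, giving "ldil/ldo"-style code.
   For value = 0x12346789: low = 0x2789 >= 0x2000, so high is bumped
   to 0x12348000 and low becomes value - high = -0x1877, which still
   fits in a 14-bit signed immediate. */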
2001 HOST_WIDE_INT low = value & 0x3fff;
2002 HOST_WIDE_INT high = value & ~ 0x3fff;
2004 if (low >= 0x2000)
2006 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2007 high += 0x2000;
2008 else
2009 high += 0x4000;
2012 low = value - high;
2014 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2015 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2017 else
2019 emit_insn (gen_rtx_SET (VOIDmode, temp,
2020 gen_rtx_HIGH (mode, operand1)));
2021 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2024 insn = emit_move_insn (operands[0], operands[1]);
2026 /* Now insert the most significant 32 bits of the value
2027 into the register. When we don't have a second register
2028 available, it could take up to nine instructions to load
2029 a 64-bit integer constant. Prior to reload, we force
2030 constants that would take more than three instructions
2031 to load to the constant pool. During and after reload,
2032 we have to handle all possible values. */
2033 if (insert)
2035 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2036 register and the value to be inserted is outside the
2037 range that can be loaded with three depdi instructions. */
2038 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2040 operand1 = GEN_INT (insv);
2042 emit_insn (gen_rtx_SET (VOIDmode, temp,
2043 gen_rtx_HIGH (mode, operand1)));
2044 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2045 emit_insn (gen_insv (operand0, GEN_INT (32),
2046 const0_rtx, temp));
2048 else
2050 int len = 5, pos = 27;
2052 /* Insert the bits using the depdi instruction. */
2053 while (pos >= 0)
2055 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2056 HOST_WIDE_INT sign = v5 < 0;
2058 /* Left extend the insertion. */
2059 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2060 while (pos > 0 && (insv & 1) == sign)
2062 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2063 len += 1;
2064 pos -= 1;
2067 emit_insn (gen_insv (operand0, GEN_INT (len),
2068 GEN_INT (pos), GEN_INT (v5)));
2070 len = pos > 0 && pos < 5 ? pos : 5;
2071 pos -= len;
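/* An illustrative trace (not in the original source): the expression
   ((insv & 31) ^ 16) - 16 sign extends the low five bits of INSV,
   e.g. for insv = 0x1234 the first field value is (20 ^ 16) - 16 = -12.
   The inner while loop then widens the field by absorbing adjacent
   bits equal to the sign bit, so long runs of ones or zeros are
   covered without extra depdi insns. */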
2076 set_unique_reg_note (insn, REG_EQUAL, op1);
2078 return 1;
2081 /* Now have insn-emit do whatever it normally does. */
2082 return 0;
2085 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2086 it will need a link/runtime reloc). */
2089 reloc_needed (tree exp)
2091 int reloc = 0;
2093 switch (TREE_CODE (exp))
2095 case ADDR_EXPR:
2096 return 1;
2098 case POINTER_PLUS_EXPR:
2099 case PLUS_EXPR:
2100 case MINUS_EXPR:
2101 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2102 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2103 break;
2105 CASE_CONVERT:
2106 case NON_LVALUE_EXPR:
2107 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2108 break;
2110 case CONSTRUCTOR:
2112 tree value;
2113 unsigned HOST_WIDE_INT ix;
2115 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2116 if (value)
2117 reloc |= reloc_needed (value);
2119 break;
2121 case ERROR_MARK:
2122 break;
2124 default:
2125 break;
2127 return reloc;
2130 /* Does operand (which is a symbolic_operand) live in text space?
2131 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2132 will be true. */
2135 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2137 if (GET_CODE (operand) == CONST)
2138 operand = XEXP (XEXP (operand, 0), 0);
2139 if (flag_pic)
2141 if (GET_CODE (operand) == SYMBOL_REF)
2142 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2144 else
2146 if (GET_CODE (operand) == SYMBOL_REF)
2147 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2149 return 1;
2153 /* Return the best assembler insn template
2154 for moving operands[1] into operands[0] as a fullword. */
2155 const char *
2156 singlemove_string (rtx *operands)
2158 HOST_WIDE_INT intval;
2160 if (GET_CODE (operands[0]) == MEM)
2161 return "stw %r1,%0";
2162 if (GET_CODE (operands[1]) == MEM)
2163 return "ldw %1,%0";
2164 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2166 long i;
2167 REAL_VALUE_TYPE d;
2169 gcc_assert (GET_MODE (operands[1]) == SFmode);
2171 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2172 bit pattern. */
2173 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2174 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2176 operands[1] = GEN_INT (i);
2177 /* Fall through to CONST_INT case. */
2179 if (GET_CODE (operands[1]) == CONST_INT)
2181 intval = INTVAL (operands[1]);
2183 if (VAL_14_BITS_P (intval))
2184 return "ldi %1,%0";
2185 else if ((intval & 0x7ff) == 0)
2186 return "ldil L'%1,%0";
2187 else if (zdepi_cint_p (intval))
2188 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2189 else
2190 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2192 return "copy %1,%0";
2196 /* Compute position (in OP[1]) and width (in OP[2])
2197 useful for copying IMM to a register using the zdepi
2198 instructions. Store the immediate value to insert in OP[0]. */
2199 static void
2200 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2202 int lsb, len;
2204 /* Find the least significant set bit in IMM. */
2205 for (lsb = 0; lsb < 32; lsb++)
2207 if ((imm & 1) != 0)
2208 break;
2209 imm >>= 1;
2212 /* Choose variants based on *sign* of the 5-bit field. */
2213 if ((imm & 0x10) == 0)
2214 len = (lsb <= 28) ? 4 : 32 - lsb;
2215 else
2217 /* Find the width of the bitstring in IMM. */
2218 for (len = 5; len < 32; len++)
2220 if ((imm & (1 << len)) == 0)
2221 break;
2224 /* Sign extend IMM as a 5-bit value. */
2225 imm = (imm & 0xf) - 0x10;
2228 op[0] = imm;
2229 op[1] = 31 - lsb;
2230 op[2] = len;
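/* A worked example (not in the original source): for imm = 0x000f0000
   the least significant set bit is bit 16, the shifted value is 0xf
   with bit 4 clear, so len = 4 and the result is
   op = { 0xf, 31 - 16 = 15, 4 }: deposit the 4-bit value 15 ending at
   (left-numbered) bit position 15. */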
2233 /* Compute position (in OP[1]) and width (in OP[2])
2234 useful for copying IMM to a register using the depdi,z
2235 instructions. Store the immediate value to insert in OP[0]. */
2236 void
2237 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2239 HOST_WIDE_INT lsb, len;
2241 /* Find the least significant set bit in IMM. */
2242 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2244 if ((imm & 1) != 0)
2245 break;
2246 imm >>= 1;
2249 /* Choose variants based on *sign* of the 5-bit field. */
2250 if ((imm & 0x10) == 0)
2251 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2252 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2253 else
2255 /* Find the width of the bitstring in IMM. */
2256 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2258 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2259 break;
2262 /* Sign extend IMM as a 5-bit value. */
2263 imm = (imm & 0xf) - 0x10;
2266 op[0] = imm;
2267 op[1] = 63 - lsb;
2268 op[2] = len;
2271 /* Output assembler code to perform a doubleword move insn
2272 with operands OPERANDS. */
2274 const char *
2275 output_move_double (rtx *operands)
2277 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2278 rtx latehalf[2];
2279 rtx addreg0 = 0, addreg1 = 0;
2281 /* First classify both operands. */
2283 if (REG_P (operands[0]))
2284 optype0 = REGOP;
2285 else if (offsettable_memref_p (operands[0]))
2286 optype0 = OFFSOP;
2287 else if (GET_CODE (operands[0]) == MEM)
2288 optype0 = MEMOP;
2289 else
2290 optype0 = RNDOP;
2292 if (REG_P (operands[1]))
2293 optype1 = REGOP;
2294 else if (CONSTANT_P (operands[1]))
2295 optype1 = CNSTOP;
2296 else if (offsettable_memref_p (operands[1]))
2297 optype1 = OFFSOP;
2298 else if (GET_CODE (operands[1]) == MEM)
2299 optype1 = MEMOP;
2300 else
2301 optype1 = RNDOP;
2303 /* Check for the cases that the operand constraints are not
2304 supposed to allow. */
2305 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2307 /* Handle copies between general and floating registers. */
2309 if (optype0 == REGOP && optype1 == REGOP
2310 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2312 if (FP_REG_P (operands[0]))
2314 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2315 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2316 return "{fldds|fldd} -16(%%sp),%0";
2318 else
2320 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2321 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2322 return "{ldws|ldw} -12(%%sp),%R0";
2326 /* Handle auto decrementing and incrementing loads and stores
2327 specifically, since the structure of the function doesn't work
2328 for them without major modification. Do this better once this
2329 port is taught about the general inc/dec addressing of the PA.
2330 (This was written by tege. Chide him if it doesn't work.) */
2332 if (optype0 == MEMOP)
2334 /* We have to output the address syntax ourselves, since print_operand
2335 doesn't deal with the addresses we want to use. Fix this later. */
2337 rtx addr = XEXP (operands[0], 0);
2338 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2340 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2342 operands[0] = XEXP (addr, 0);
2343 gcc_assert (GET_CODE (operands[1]) == REG
2344 && GET_CODE (operands[0]) == REG);
2346 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2348 /* No overlap between high target register and address
2349 register. (We do this in a non-obvious way to
2350 save a register file writeback) */
2351 if (GET_CODE (addr) == POST_INC)
2352 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2353 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2355 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2357 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2359 operands[0] = XEXP (addr, 0);
2360 gcc_assert (GET_CODE (operands[1]) == REG
2361 && GET_CODE (operands[0]) == REG);
2363 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2364 /* No overlap between high target register and address
2365 register. (We do this in a non-obvious way to save a
2366 register file writeback) */
2367 if (GET_CODE (addr) == PRE_INC)
2368 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2369 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2372 if (optype1 == MEMOP)
2374 /* We have to output the address syntax ourselves, since print_operand
2375 doesn't deal with the addresses we want to use. Fix this later. */
2377 rtx addr = XEXP (operands[1], 0);
2378 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2380 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2382 operands[1] = XEXP (addr, 0);
2383 gcc_assert (GET_CODE (operands[0]) == REG
2384 && GET_CODE (operands[1]) == REG);
2386 if (!reg_overlap_mentioned_p (high_reg, addr))
2388 /* No overlap between high target register and address
2389 register. (We do this in a non-obvious way to
2390 save a register file writeback) */
2391 if (GET_CODE (addr) == POST_INC)
2392 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2393 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2395 else
2397 /* This is an undefined situation. We should load into the
2398 address register *and* update that register. Probably
2399 we don't need to handle this at all. */
2400 if (GET_CODE (addr) == POST_INC)
2401 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2402 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2405 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2407 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2409 operands[1] = XEXP (addr, 0);
2410 gcc_assert (GET_CODE (operands[0]) == REG
2411 && GET_CODE (operands[1]) == REG);
2413 if (!reg_overlap_mentioned_p (high_reg, addr))
2415 /* No overlap between high target register and address
2416 register. (We do this in a non-obvious way to
2417 save a register file writeback) */
2418 if (GET_CODE (addr) == PRE_INC)
2419 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2420 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2422 else
2424 /* This is an undefined situation. We should load into the
2425 address register *and* update that register. Probably
2426 we don't need to handle this at all. */
2427 if (GET_CODE (addr) == PRE_INC)
2428 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2429 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2432 else if (GET_CODE (addr) == PLUS
2433 && GET_CODE (XEXP (addr, 0)) == MULT)
2435 rtx xoperands[4];
2436 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2438 if (!reg_overlap_mentioned_p (high_reg, addr))
2440 xoperands[0] = high_reg;
2441 xoperands[1] = XEXP (addr, 1);
2442 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2443 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2444 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2445 xoperands);
2446 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2448 else
2450 xoperands[0] = high_reg;
2451 xoperands[1] = XEXP (addr, 1);
2452 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2453 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2454 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2455 xoperands);
2456 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2461 /* If an operand is an unoffsettable memory ref, find a register
2462 we can increment temporarily to make it refer to the second word. */
2464 if (optype0 == MEMOP)
2465 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2467 if (optype1 == MEMOP)
2468 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2470 /* Ok, we can do one word at a time.
2471 Normally we do the low-numbered word first.
2473 In either case, set up in LATEHALF the operands to use
2474 for the high-numbered word and in some cases alter the
2475 operands in OPERANDS to be suitable for the low-numbered word. */
2477 if (optype0 == REGOP)
2478 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2479 else if (optype0 == OFFSOP)
2480 latehalf[0] = adjust_address (operands[0], SImode, 4);
2481 else
2482 latehalf[0] = operands[0];
2484 if (optype1 == REGOP)
2485 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2486 else if (optype1 == OFFSOP)
2487 latehalf[1] = adjust_address (operands[1], SImode, 4);
2488 else if (optype1 == CNSTOP)
2489 split_double (operands[1], &operands[1], &latehalf[1]);
2490 else
2491 latehalf[1] = operands[1];
2493 /* If the first move would clobber the source of the second one,
2494 do them in the other order.
2496 This can happen in two cases:
2498 mem -> register where the first half of the destination register
2499 is the same register used in the memory's address. Reload
2500 can create such insns.
2502 mem in this case will be either register indirect or register
2503 indirect plus a valid offset.
2505 register -> register move where REGNO(dst) == REGNO(src) + 1.
2506 Someone (Tim/Tege?) claimed this can happen for parameter loads.
2508 Handle mem -> register case first. */
2509 if (optype0 == REGOP
2510 && (optype1 == MEMOP || optype1 == OFFSOP)
2511 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2512 operands[1], 0))
2514 /* Do the late half first. */
2515 if (addreg1)
2516 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2517 output_asm_insn (singlemove_string (latehalf), latehalf);
2519 /* Then clobber. */
2520 if (addreg1)
2521 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2522 return singlemove_string (operands);
2525 /* Now handle register -> register case. */
2526 if (optype0 == REGOP && optype1 == REGOP
2527 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2529 output_asm_insn (singlemove_string (latehalf), latehalf);
2530 return singlemove_string (operands);
2533 /* Normal case: do the two words, low-numbered first. */
2535 output_asm_insn (singlemove_string (operands), operands);
2537 /* Make any unoffsettable addresses point at high-numbered word. */
2538 if (addreg0)
2539 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2540 if (addreg1)
2541 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2543 /* Do that word. */
2544 output_asm_insn (singlemove_string (latehalf), latehalf);
2546 /* Undo the adds we just did. */
2547 if (addreg0)
2548 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2549 if (addreg1)
2550 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2552 return "";
2555 const char *
2556 output_fp_move_double (rtx *operands)
2558 if (FP_REG_P (operands[0]))
2560 if (FP_REG_P (operands[1])
2561 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2562 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2563 else
2564 output_asm_insn ("fldd%F1 %1,%0", operands);
2566 else if (FP_REG_P (operands[1]))
2568 output_asm_insn ("fstd%F0 %1,%0", operands);
2570 else
2572 rtx xoperands[2];
2574 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2576 /* This is a pain. You have to be prepared to deal with an
2577 arbitrary address here including pre/post increment/decrement,
2579 so avoid this in the MD. */
2580 gcc_assert (GET_CODE (operands[0]) == REG);
2582 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2583 xoperands[0] = operands[0];
2584 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2586 return "";
2589 /* Return a REG that occurs in ADDR with coefficient 1.
2590 ADDR can be effectively incremented by incrementing REG. */
2592 static rtx
2593 find_addr_reg (rtx addr)
2595 while (GET_CODE (addr) == PLUS)
2597 if (GET_CODE (XEXP (addr, 0)) == REG)
2598 addr = XEXP (addr, 0);
2599 else if (GET_CODE (XEXP (addr, 1)) == REG)
2600 addr = XEXP (addr, 1);
2601 else if (CONSTANT_P (XEXP (addr, 0)))
2602 addr = XEXP (addr, 1);
2603 else if (CONSTANT_P (XEXP (addr, 1)))
2604 addr = XEXP (addr, 0);
2605 else
2606 gcc_unreachable ();
2608 gcc_assert (GET_CODE (addr) == REG);
2609 return addr;
2612 /* Emit code to perform a block move.
2614 OPERANDS[0] is the destination pointer as a REG, clobbered.
2615 OPERANDS[1] is the source pointer as a REG, clobbered.
2616 OPERANDS[2] is a register for temporary storage.
2617 OPERANDS[3] is a register for temporary storage.
2618 OPERANDS[4] is the size as a CONST_INT
2619 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2620 OPERANDS[6] is another temporary register. */
2622 const char *
2623 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2625 int align = INTVAL (operands[5]);
2626 unsigned long n_bytes = INTVAL (operands[4]);
2628 /* We can't move more than a word at a time because the PA
2629 has no integer move insns longer than a word. (Could use fp mem ops?) */
2630 if (align > (TARGET_64BIT ? 8 : 4))
2631 align = (TARGET_64BIT ? 8 : 4);
2633 /* Note that we know each loop below will execute at least twice
2634 (else we would have open-coded the copy). */
2635 switch (align)
2637 case 8:
2638 /* Pre-adjust the loop counter. */
2639 operands[4] = GEN_INT (n_bytes - 16);
2640 output_asm_insn ("ldi %4,%2", operands);
2642 /* Copying loop. */
2643 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2644 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2645 output_asm_insn ("std,ma %3,8(%0)", operands);
2646 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2647 output_asm_insn ("std,ma %6,8(%0)", operands);
2649 /* Handle the residual. There could be up to 15 bytes of
2650 residual to copy! */
2651 if (n_bytes % 16 != 0)
2653 operands[4] = GEN_INT (n_bytes % 8);
2654 if (n_bytes % 16 >= 8)
2655 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2656 if (n_bytes % 8 != 0)
2657 output_asm_insn ("ldd 0(%1),%6", operands);
2658 if (n_bytes % 16 >= 8)
2659 output_asm_insn ("std,ma %3,8(%0)", operands);
2660 if (n_bytes % 8 != 0)
2661 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2663 return "";
2665 case 4:
2666 /* Pre-adjust the loop counter. */
2667 operands[4] = GEN_INT (n_bytes - 8);
2668 output_asm_insn ("ldi %4,%2", operands);
2670 /* Copying loop. */
2671 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2672 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2673 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2674 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2675 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2677 /* Handle the residual. There could be up to 7 bytes of
2678 residual to copy! */
2679 if (n_bytes % 8 != 0)
2681 operands[4] = GEN_INT (n_bytes % 4);
2682 if (n_bytes % 8 >= 4)
2683 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2684 if (n_bytes % 4 != 0)
2685 output_asm_insn ("ldw 0(%1),%6", operands);
2686 if (n_bytes % 8 >= 4)
2687 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2688 if (n_bytes % 4 != 0)
2689 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2691 return "";
2693 case 2:
2694 /* Pre-adjust the loop counter. */
2695 operands[4] = GEN_INT (n_bytes - 4);
2696 output_asm_insn ("ldi %4,%2", operands);
2698 /* Copying loop. */
2699 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2700 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2701 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2702 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2703 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2705 /* Handle the residual. */
2706 if (n_bytes % 4 != 0)
2708 if (n_bytes % 4 >= 2)
2709 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2710 if (n_bytes % 2 != 0)
2711 output_asm_insn ("ldb 0(%1),%6", operands);
2712 if (n_bytes % 4 >= 2)
2713 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2714 if (n_bytes % 2 != 0)
2715 output_asm_insn ("stb %6,0(%0)", operands);
2717 return "";
2719 case 1:
2720 /* Pre-adjust the loop counter. */
2721 operands[4] = GEN_INT (n_bytes - 2);
2722 output_asm_insn ("ldi %4,%2", operands);
2724 /* Copying loop. */
2725 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2726 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2727 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2728 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2729 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2731 /* Handle the residual. */
2732 if (n_bytes % 2 != 0)
2734 output_asm_insn ("ldb 0(%1),%3", operands);
2735 output_asm_insn ("stb %3,0(%0)", operands);
2737 return "";
2739 default:
2740 gcc_unreachable ();
2744 /* Count the number of insns necessary to handle this block move.
2746 Basic structure is the same as output_block_move, except that we
2747 count insns rather than emit them. */
2749 static int
2750 compute_movmem_length (rtx insn)
2752 rtx pat = PATTERN (insn);
2753 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2754 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2755 unsigned int n_insns = 0;
2757 /* We can't move more than a word at a time because the PA
2758 has no integer move insns longer than a word. (Could use fp mem ops?) */
2759 if (align > (TARGET_64BIT ? 8 : 4))
2760 align = (TARGET_64BIT ? 8 : 4);
2762 /* The basic copying loop. */
2763 n_insns = 6;
2765 /* Residuals. */
2766 if (n_bytes % (2 * align) != 0)
2768 if ((n_bytes % (2 * align)) >= align)
2769 n_insns += 2;
2771 if ((n_bytes % align) != 0)
2772 n_insns += 2;
2775 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2776 return n_insns * 4;
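/* A worked example (not in the original source): for n_bytes = 37
   and align = 4, the loop costs 6 insns; 37 % 8 = 5 >= 4 adds a
   load/store pair (2 insns) and 37 % 4 = 1 adds another 2 for the
   byte tail, so the returned length is 10 * 4 = 40 bytes. */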
2779 /* Emit code to perform a block clear.
2781 OPERANDS[0] is the destination pointer as a REG, clobbered.
2782 OPERANDS[1] is a register for temporary storage.
2783 OPERANDS[2] is the size as a CONST_INT
2784 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2786 const char *
2787 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2789 int align = INTVAL (operands[3]);
2790 unsigned long n_bytes = INTVAL (operands[2]);
2792 /* We can't clear more than a word at a time because the PA
2793 has no integer move insns longer than a word. */
2794 if (align > (TARGET_64BIT ? 8 : 4))
2795 align = (TARGET_64BIT ? 8 : 4);
2797 /* Note that we know each loop below will execute at least twice
2798 (else we would have open-coded the clear). */
2799 switch (align)
2801 case 8:
2802 /* Pre-adjust the loop counter. */
2803 operands[2] = GEN_INT (n_bytes - 16);
2804 output_asm_insn ("ldi %2,%1", operands);
2806 /* Loop. */
2807 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2808 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2809 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2811 /* Handle the residual. There could be up to 15 bytes of
2812 residual to clear! */
2813 if (n_bytes % 16 != 0)
2815 operands[2] = GEN_INT (n_bytes % 8);
2816 if (n_bytes % 16 >= 8)
2817 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2818 if (n_bytes % 8 != 0)
2819 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2821 return "";
2823 case 4:
2824 /* Pre-adjust the loop counter. */
2825 operands[2] = GEN_INT (n_bytes - 8);
2826 output_asm_insn ("ldi %2,%1", operands);
2828 /* Loop. */
2829 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2830 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2831 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2833 /* Handle the residual. There could be up to 7 bytes of
2834 residual to clear! */
2835 if (n_bytes % 8 != 0)
2837 operands[2] = GEN_INT (n_bytes % 4);
2838 if (n_bytes % 8 >= 4)
2839 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2840 if (n_bytes % 4 != 0)
2841 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2843 return "";
2845 case 2:
2846 /* Pre-adjust the loop counter. */
2847 operands[2] = GEN_INT (n_bytes - 4);
2848 output_asm_insn ("ldi %2,%1", operands);
2850 /* Loop. */
2851 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2852 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2853 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2855 /* Handle the residual. */
2856 if (n_bytes % 4 != 0)
2858 if (n_bytes % 4 >= 2)
2859 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2860 if (n_bytes % 2 != 0)
2861 output_asm_insn ("stb %%r0,0(%0)", operands);
2863 return "";
2865 case 1:
2866 /* Pre-adjust the loop counter. */
2867 operands[2] = GEN_INT (n_bytes - 2);
2868 output_asm_insn ("ldi %2,%1", operands);
2870 /* Loop. */
2871 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2872 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2873 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2875 /* Handle the residual. */
2876 if (n_bytes % 2 != 0)
2877 output_asm_insn ("stb %%r0,0(%0)", operands);
2879 return "";
2881 default:
2882 gcc_unreachable ();
2886 /* Count the number of insns necessary to handle this block clear.
2888 Basic structure is the same as output_block_clear, except that we
2889 count insns rather than emit them. */
2891 static int
2892 compute_clrmem_length (rtx insn)
2894 rtx pat = PATTERN (insn);
2895 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2896 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2897 unsigned int n_insns = 0;
2899 /* We can't clear more than a word at a time because the PA
2900 has no integer move insns longer than a word. */
2901 if (align > (TARGET_64BIT ? 8 : 4))
2902 align = (TARGET_64BIT ? 8 : 4);
2904 /* The basic loop. */
2905 n_insns = 4;
2907 /* Residuals. */
2908 if (n_bytes % (2 * align) != 0)
2910 if ((n_bytes % (2 * align)) >= align)
2911 n_insns++;
2913 if ((n_bytes % align) != 0)
2914 n_insns++;
2917 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2918 return n_insns * 4;
2922 const char *
2923 output_and (rtx *operands)
2925 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2927 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2928 int ls0, ls1, ms0, p, len;
2930 for (ls0 = 0; ls0 < 32; ls0++)
2931 if ((mask & (1 << ls0)) == 0)
2932 break;
2934 for (ls1 = ls0; ls1 < 32; ls1++)
2935 if ((mask & (1 << ls1)) != 0)
2936 break;
2938 for (ms0 = ls1; ms0 < 32; ms0++)
2939 if ((mask & (1 << ms0)) == 0)
2940 break;
2942 gcc_assert (ms0 == 32);
2944 if (ls1 == 32)
2946 len = ls0;
2948 gcc_assert (len);
2950 operands[2] = GEN_INT (len);
2951 return "{extru|extrw,u} %1,31,%2,%0";
2953 else
2955 /* We could use this `depi' for the case above as well, but `depi'
2956 requires one more register file access than an `extru'. */
2958 p = 31 - ls0;
2959 len = ls1 - ls0;
2961 operands[2] = GEN_INT (p);
2962 operands[3] = GEN_INT (len);
2963 return "{depi|depwi} 0,%2,%3,%0";
2966 else
2967 return "and %1,%2,%0";
2970 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2971 storing the result in operands[0]. */
2972 const char *
2973 output_64bit_and (rtx *operands)
2975 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2977 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2978 int ls0, ls1, ms0, p, len;
2980 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2981 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2982 break;
2984 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2985 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2986 break;
2988 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2989 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2990 break;
2992 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2994 if (ls1 == HOST_BITS_PER_WIDE_INT)
2996 len = ls0;
2998 gcc_assert (len);
3000 operands[2] = GEN_INT (len);
3001 return "extrd,u %1,63,%2,%0";
3003 else
3005 /* We could use this `depdi' for the case above as well, but `depdi'
3006 requires one more register file access than an `extrd,u'. */
3008 p = 63 - ls0;
3009 len = ls1 - ls0;
3011 operands[2] = GEN_INT (p);
3012 operands[3] = GEN_INT (len);
3013 return "depdi 0,%2,%3,%0";
3016 else
3017 return "and %1,%2,%0";
3020 const char *
3021 output_ior (rtx *operands)
3023 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3024 int bs0, bs1, p, len;
3026 if (INTVAL (operands[2]) == 0)
3027 return "copy %1,%0";
3029 for (bs0 = 0; bs0 < 32; bs0++)
3030 if ((mask & (1 << bs0)) != 0)
3031 break;
3033 for (bs1 = bs0; bs1 < 32; bs1++)
3034 if ((mask & (1 << bs1)) == 0)
3035 break;
3037 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3039 p = 31 - bs0;
3040 len = bs1 - bs0;
3042 operands[2] = GEN_INT (p);
3043 operands[3] = GEN_INT (len);
3044 return "{depi|depwi} -1,%2,%3,%0";
3047 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3048 storing the result in operands[0]. */
3049 const char *
3050 output_64bit_ior (rtx *operands)
3052 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3053 int bs0, bs1, p, len;
3055 if (INTVAL (operands[2]) == 0)
3056 return "copy %1,%0";
3058 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3059 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3060 break;
3062 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3063 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3064 break;
3066 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3067 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3069 p = 63 - bs0;
3070 len = bs1 - bs0;
3072 operands[2] = GEN_INT (p);
3073 operands[3] = GEN_INT (len);
3074 return "depdi -1,%2,%3,%0";
3077 /* Target hook for assembling integer objects. This code handles
3078 aligned SI and DI integers specially since function references
3079 must be preceded by P%. */
3081 static bool
3082 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3084 if (size == UNITS_PER_WORD
3085 && aligned_p
3086 && function_label_operand (x, VOIDmode))
3088 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3089 output_addr_const (asm_out_file, x);
3090 fputc ('\n', asm_out_file);
3091 return true;
3093 return default_assemble_integer (x, size, aligned_p);
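/* For example (illustrative; "foo" is a made-up symbol), a
   word-aligned reference to a function foo is emitted as
   "\t.word\tP%foo" (".dword" when size == 8), where the P% prefix
   asks the assembler and linker for a function pointer (plabel)
   rather than a plain code address. */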
3096 /* Output an ascii string. */
3097 void
3098 output_ascii (FILE *file, const char *p, int size)
3100 int i;
3101 int chars_output;
3102 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3104 /* The HP assembler can only take strings of 256 characters at one
3105 time. This is a limitation on input line length, *not* the
3106 length of the string. Sigh. Even worse, it seems that the
3107 restriction is in number of input characters (see \xnn &
3108 \whatever). So we have to do this very carefully. */
3110 fputs ("\t.STRING \"", file);
3112 chars_output = 0;
3113 for (i = 0; i < size; i += 4)
3115 int co = 0;
3116 int io = 0;
3117 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3119 register unsigned int c = (unsigned char) p[i + io];
3121 if (c == '\"' || c == '\\')
3122 partial_output[co++] = '\\';
3123 if (c >= ' ' && c < 0177)
3124 partial_output[co++] = c;
3125 else
3127 unsigned int hexd;
3128 partial_output[co++] = '\\';
3129 partial_output[co++] = 'x';
3130 hexd = c / 16 + '0';
3131 if (hexd > '9')
3132 hexd -= '9' - 'a' + 1;
3133 partial_output[co++] = hexd;
3134 hexd = c % 16 + '0';
3135 if (hexd > '9')
3136 hexd -= '9' - 'a' + 1;
3137 partial_output[co++] = hexd;
3140 if (chars_output + co > 243)
3142 fputs ("\"\n\t.STRING \"", file);
3143 chars_output = 0;
3145 fwrite (partial_output, 1, (size_t) co, file);
3146 chars_output += co;
3147 co = 0;
3149 fputs ("\"\n", file);
3152 /* Try to rewrite floating point comparisons & branches to avoid
3153 useless add,tr insns.
3155 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3156 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3157 first attempt to remove useless add,tr insns. It is zero
3158 for the second pass as reorg sometimes leaves bogus REG_DEAD
3159 notes lying around.
3161 When CHECK_NOTES is zero we can only eliminate add,tr insns
3162 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3163 instructions. */
3164 static void
3165 remove_useless_addtr_insns (int check_notes)
3167 rtx insn;
3168 static int pass = 0;
3170 /* This is fairly cheap, so always run it when optimizing. */
3171 if (optimize > 0)
3173 int fcmp_count = 0;
3174 int fbranch_count = 0;
3176 /* Walk all the insns in this function looking for fcmp & fbranch
3177 instructions. Keep track of how many of each we find. */
3178 for (insn = get_insns (); insn; insn = next_insn (insn))
3180 rtx tmp;
3182 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3183 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3184 continue;
3186 tmp = PATTERN (insn);
3188 /* It must be a set. */
3189 if (GET_CODE (tmp) != SET)
3190 continue;
3192 /* If the destination is CCFP, then we've found an fcmp insn. */
3193 tmp = SET_DEST (tmp);
3194 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3196 fcmp_count++;
3197 continue;
3200 tmp = PATTERN (insn);
3201 /* If this is an fbranch instruction, bump the fbranch counter. */
3202 if (GET_CODE (tmp) == SET
3203 && SET_DEST (tmp) == pc_rtx
3204 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3205 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3206 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3207 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3209 fbranch_count++;
3210 continue;
3215 /* Find all floating point compare + branch insns. If possible,
3216 reverse the comparison & the branch to avoid add,tr insns. */
3217 for (insn = get_insns (); insn; insn = next_insn (insn))
3219 rtx tmp, next;
3221 /* Ignore anything that isn't an INSN. */
3222 if (GET_CODE (insn) != INSN)
3223 continue;
3225 tmp = PATTERN (insn);
3227 /* It must be a set. */
3228 if (GET_CODE (tmp) != SET)
3229 continue;
3231 /* The destination must be CCFP, which is register zero. */
3232 tmp = SET_DEST (tmp);
3233 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3234 continue;
3236 /* INSN should be a set of CCFP.
3238 See if the result of this insn is used in a reversed FP
3239 conditional branch. If so, reverse our condition and
3240 the branch. Doing so avoids useless add,tr insns. */
3241 next = next_insn (insn);
3242 while (next)
3244 /* Jumps, calls and labels stop our search. */
3245 if (GET_CODE (next) == JUMP_INSN
3246 || GET_CODE (next) == CALL_INSN
3247 || GET_CODE (next) == CODE_LABEL)
3248 break;
3250 /* As does another fcmp insn. */
3251 if (GET_CODE (next) == INSN
3252 && GET_CODE (PATTERN (next)) == SET
3253 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3254 && REGNO (SET_DEST (PATTERN (next))) == 0)
3255 break;
3257 next = next_insn (next);
3260 /* Is NEXT_INSN a branch? */
3261 if (next
3262 && GET_CODE (next) == JUMP_INSN)
3264 rtx pattern = PATTERN (next);
3266 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3267 and CCFP dies, then reverse our conditional and the branch
3268 to avoid the add,tr. */
3269 if (GET_CODE (pattern) == SET
3270 && SET_DEST (pattern) == pc_rtx
3271 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3272 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3273 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3274 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3275 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3276 && (fcmp_count == fbranch_count
3277 || (check_notes
3278 && find_regno_note (next, REG_DEAD, 0))))
3280 /* Reverse the branch. */
3281 tmp = XEXP (SET_SRC (pattern), 1);
3282 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3283 XEXP (SET_SRC (pattern), 2) = tmp;
3284 INSN_CODE (next) = -1;
3286 /* Reverse our condition. */
3287 tmp = PATTERN (insn);
3288 PUT_CODE (XEXP (tmp, 1),
3289 (reverse_condition_maybe_unordered
3290 (GET_CODE (XEXP (tmp, 1)))));
3296 pass = !pass;
3300 /* You may have trouble believing this, but this is the 32 bit HP-PA
3301 stack layout. Wow.
3303 Offset Contents
3305 Variable arguments (optional; any number may be allocated)
3307 SP-(4*(N+9)) arg word N
3309 SP-56 arg word 5
3310 SP-52 arg word 4
3312 Fixed arguments (must be allocated; may remain unused)
3314 SP-48 arg word 3
3315 SP-44 arg word 2
3316 SP-40 arg word 1
3317 SP-36 arg word 0
3319 Frame Marker
3321 SP-32 External Data Pointer (DP)
3322 SP-28 External sr4
3323 SP-24 External/stub RP (RP')
3324 SP-20 Current RP
3325 SP-16 Static Link
3326 SP-12 Clean up
3327 SP-8 Calling Stub RP (RP'')
3328 SP-4 Previous SP
3330 Top of Frame
3332 SP-0 Stack Pointer (points to next available address)
3336 /* This function saves registers as follows. Registers marked with ' are
3337 this function's registers (as opposed to the previous function's).
3338 If a frame_pointer isn't needed, r4 is saved as a general register;
3339 the space for the frame pointer is still allocated, though, to keep
3340 things simple.
3343 Top of Frame
3345 SP (FP') Previous FP
3346 SP + 4 Alignment filler (sigh)
3347 SP + 8 Space for locals reserved here.
3351 SP + n All call-saved registers used.
3355 SP + o All call-saved fp registers used.
3359 SP + p (SP') points to next available address.
3363 /* Global variables set by hppa_expand_prologue(). */
3364 /* Size of frame. Need to know this to emit return insns from
3365 leaf procedures. */
3366 static HOST_WIDE_INT actual_fsize, local_fsize;
3367 static int save_fregs;
3369 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3370 Handle case where DISP > 8k by using the add_high_const patterns.
3372 Note in DISP > 8k case, we will leave the high part of the address
3373 in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
3375 static void
3376 store_reg (int reg, HOST_WIDE_INT disp, int base)
3378 rtx insn, dest, src, basereg;
3380 src = gen_rtx_REG (word_mode, reg);
3381 basereg = gen_rtx_REG (Pmode, base);
3382 if (VAL_14_BITS_P (disp))
3384 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3385 insn = emit_move_insn (dest, src);
3387 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3389 rtx delta = GEN_INT (disp);
3390 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3392 emit_move_insn (tmpreg, delta);
3393 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3394 if (DO_FRAME_NOTES)
3396 REG_NOTES (insn)
3397 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3398 gen_rtx_SET (VOIDmode, tmpreg,
3399 gen_rtx_PLUS (Pmode, basereg, delta)),
3400 REG_NOTES (insn));
3401 RTX_FRAME_RELATED_P (insn) = 1;
3403 dest = gen_rtx_MEM (word_mode, tmpreg);
3404 insn = emit_move_insn (dest, src);
3406 else
3408 rtx delta = GEN_INT (disp);
3409 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3410 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3412 emit_move_insn (tmpreg, high);
3413 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3414 insn = emit_move_insn (dest, src);
3415 if (DO_FRAME_NOTES)
3417 REG_NOTES (insn)
3418 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3419 gen_rtx_SET (VOIDmode,
3420 gen_rtx_MEM (word_mode,
3421 gen_rtx_PLUS (word_mode, basereg,
3422 delta)),
3423 src),
3424 REG_NOTES (insn));
3428 if (DO_FRAME_NOTES)
3429 RTX_FRAME_RELATED_P (insn) = 1;
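/* Roughly (illustrative only), the first and last cases above
   correspond to

   stw %r3,-20(%r30)                          ; DISP fits in 14 bits
   addil L'DISP,%r30 ; stw %r3,R'DISP(%r1)    ; large displacement

   while the middle case (64-bit, DISP wider than 32 bits) first
   builds the displacement in %r1 with a multi-insn constant load. */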
3432 /* Emit RTL to store REG at the memory location specified by BASE and then
3433 add MOD to BASE. MOD must be <= 8k. */
3435 static void
3436 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3438 rtx insn, basereg, srcreg, delta;
3440 gcc_assert (VAL_14_BITS_P (mod));
3442 basereg = gen_rtx_REG (Pmode, base);
3443 srcreg = gen_rtx_REG (word_mode, reg);
3444 delta = GEN_INT (mod);
3446 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3447 if (DO_FRAME_NOTES)
3449 RTX_FRAME_RELATED_P (insn) = 1;
3451 /* RTX_FRAME_RELATED_P must be set on each frame related set
3452 in a parallel with more than one element. */
3453 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3454 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3458 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3459 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3460 whether to add a frame note or not.
3462 In the DISP > 8k case, we leave the high part of the address in %r1.
3463 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3465 static void
3466 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3468 rtx insn;
3470 if (VAL_14_BITS_P (disp))
3472 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3473 plus_constant (gen_rtx_REG (Pmode, base), disp));
3475 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3477 rtx basereg = gen_rtx_REG (Pmode, base);
3478 rtx delta = GEN_INT (disp);
3479 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3481 emit_move_insn (tmpreg, delta);
3482 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3483 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3484 if (DO_FRAME_NOTES)
3485 REG_NOTES (insn)
3486 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3487 gen_rtx_SET (VOIDmode, tmpreg,
3488 gen_rtx_PLUS (Pmode, basereg, delta)),
3489 REG_NOTES (insn));
3491 else
3493 rtx basereg = gen_rtx_REG (Pmode, base);
3494 rtx delta = GEN_INT (disp);
3495 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3497 emit_move_insn (tmpreg,
3498 gen_rtx_PLUS (Pmode, basereg,
3499 gen_rtx_HIGH (Pmode, delta)));
3500 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3501 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3504 if (DO_FRAME_NOTES && note)
3505 RTX_FRAME_RELATED_P (insn) = 1;
3508 HOST_WIDE_INT
3509 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3511 int freg_saved = 0;
3512 int i, j;
3514 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3515 be consistent with the rounding and size calculation done here.
3516 Change them at the same time. */
3518 /* We do our own stack alignment. First, round the size of the
3519 stack locals up to a word boundary. */
3520 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3522 /* Space for previous frame pointer + filler. If any frame is
3523 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3524 waste some space here for the sake of HP compatibility. The
3525 first slot is only used when the frame pointer is needed. */
3526 if (size || frame_pointer_needed)
3527 size += STARTING_FRAME_OFFSET;
3529 /* If the current function calls __builtin_eh_return, then we need
3530 to allocate stack space for registers that will hold data for
3531 the exception handler. */
3532 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3534 unsigned int i;
3536 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3537 continue;
3538 size += i * UNITS_PER_WORD;
3541 /* Account for space used by the callee general register saves. */
3542 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3543 if (df_regs_ever_live_p (i))
3544 size += UNITS_PER_WORD;
3546 /* Account for space used by the callee floating point register saves. */
3547 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3548 if (df_regs_ever_live_p (i)
3549 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3551 freg_saved = 1;
3553 /* We always save both halves of the FP register, so always
3554 increment the frame size by 8 bytes. */
3555 size += 8;
3558 /* If any of the floating registers are saved, account for the
3559 alignment needed for the floating point register save block. */
3560 if (freg_saved)
3562 size = (size + 7) & ~7;
3563 if (fregs_live)
3564 *fregs_live = 1;
3567 /* The various ABIs include space for the outgoing parameters in the
3568 size of the current function's stack frame. We don't need to align
3569 for the outgoing arguments as their alignment is set by the final
3570 rounding for the frame as a whole. */
3571 size += crtl->outgoing_args_size;
3573 /* Allocate space for the fixed frame marker. This space must be
3574 allocated for any function that makes calls or allocates
3575 stack space. */
3576 if (!current_function_is_leaf || size)
3577 size += TARGET_64BIT ? 48 : 32;
3579 /* Finally, round to the preferred stack boundary. */
3580 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3581 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
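/* A worked example (illustrative; assumes the 32-bit ABI where
   STARTING_FRAME_OFFSET is 8 and PREFERRED_STACK_BOUNDARY is 64
   bytes).  For 50 bytes of locals: word rounding gives 52, the frame
   offset adds 8, two callee GR saves add 8, a hypothetical 16-byte
   outgoing-args area plus the 32-byte frame marker bring the total
   to 116, and the final rounding returns a 128-byte frame. */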
3584 /* Generate the assembly code for function entry. FILE is a stdio
3585 stream to output the code to. SIZE is an int: how many units of
3586 temporary storage to allocate.
3588 Refer to the array `regs_ever_live' to determine which registers to
3589 save; `regs_ever_live[I]' is nonzero if register number I is ever
3590 used in the function. This function is responsible for knowing
3591 which registers should not be saved even if used. */
3593 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3594 of memory. If any fpu reg is used in the function, we allocate
3595 such a block here, at the bottom of the frame, just in case it's needed.
3597 If this function is a leaf procedure, then we may choose not
3598 to do a "save" insn. The decision about whether or not
3599 to do this is made in regclass.c. */
3601 static void
3602 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3604 /* The function's label and associated .PROC must never be
3605 separated and must be output *after* any profiling declarations
3606 to avoid changing spaces/subspaces within a procedure. */
3607 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3608 fputs ("\t.PROC\n", file);
3610 /* hppa_expand_prologue does the dirty work now. We just need
3611 to output the assembler directives which denote the start
3612 of a function. */
3613 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3614 if (current_function_is_leaf)
3615 fputs (",NO_CALLS", file);
3616 else
3617 fputs (",CALLS", file);
3618 if (rp_saved)
3619 fputs (",SAVE_RP", file);
3621 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3622 at the beginning of the frame and that it is used as the frame
3623 pointer for the frame. We do this because our current frame
3624 layout doesn't conform to that specified in the HP runtime
3625 documentation and we need a way to indicate to programs such as
3626 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3627 isn't used by HP compilers but is supported by the assembler.
3628 However, SAVE_SP is supposed to indicate that the previous stack
3629 pointer has been saved in the frame marker. */
3630 if (frame_pointer_needed)
3631 fputs (",SAVE_SP", file);
3633 /* Pass on information about the number of callee register saves
3634 performed in the prologue.
3636 The compiler is supposed to pass the highest register number
3637 saved, the assembler then has to adjust that number before
3638 entering it into the unwind descriptor (to account for any
3639 caller saved registers with lower register numbers than the
3640 first callee saved register). */
3641 if (gr_saved)
3642 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3644 if (fr_saved)
3645 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3647 fputs ("\n\t.ENTRY\n", file);
3649 remove_useless_addtr_insns (0);
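/* Illustratively (made-up function and sizes), the output for a
   function foo with a 128-byte frame that makes calls, saves RP,
   and saves one callee GR looks like:

   foo
   .PROC
   .CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=3
   .ENTRY  */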
3652 void
3653 hppa_expand_prologue (void)
3655 int merge_sp_adjust_with_store = 0;
3656 HOST_WIDE_INT size = get_frame_size ();
3657 HOST_WIDE_INT offset;
3658 int i;
3659 rtx insn, tmpreg;
3661 gr_saved = 0;
3662 fr_saved = 0;
3663 save_fregs = 0;
3665 /* Compute total size for frame pointer, filler, locals and rounding to
3666 the next word boundary. Similar code appears in compute_frame_size
3667 and must be changed in tandem with this code. */
3668 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3669 if (local_fsize || frame_pointer_needed)
3670 local_fsize += STARTING_FRAME_OFFSET;
3672 actual_fsize = compute_frame_size (size, &save_fregs);
3674 /* Compute a few things we will use often. */
3675 tmpreg = gen_rtx_REG (word_mode, 1);
3677 /* Save RP first. The calling conventions manual states RP will
3678 always be stored into the caller's frame at sp - 20 or sp - 16
3679 depending on which ABI is in use. */
3680 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3682 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3683 rp_saved = true;
3685 else
3686 rp_saved = false;
3688 /* Allocate the local frame and set up the frame pointer if needed. */
3689 if (actual_fsize != 0)
3691 if (frame_pointer_needed)
3693 /* Copy the old frame pointer temporarily into %r1. Set up the
3694 new stack pointer, then store away the saved old frame pointer
3695 into the stack at sp and at the same time update the stack
3696 pointer by actual_fsize bytes. There are two versions: the
3697 first handles small (<8k) frames, the second handles large
3698 (>=8k) frames. */
3699 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3700 if (DO_FRAME_NOTES)
3701 RTX_FRAME_RELATED_P (insn) = 1;
3703 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3704 if (DO_FRAME_NOTES)
3705 RTX_FRAME_RELATED_P (insn) = 1;
3707 if (VAL_14_BITS_P (actual_fsize))
3708 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3709 else
3711 /* It is incorrect to store the saved frame pointer at *sp,
3712 then increment sp (writes beyond the current stack boundary).
3714 So instead use stwm to store at *sp and post-increment the
3715 stack pointer as an atomic operation. Then increment sp to
3716 finish allocating the new frame. */
3717 HOST_WIDE_INT adjust1 = 8192 - 64;
3718 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3720 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3721 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3722 adjust2, 1);
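/* A worked example (not in the original source): for actual_fsize =
   20000, adjust1 = 8192 - 64 = 8128, so the saved frame pointer is
   stored with "stwm %r1,8128(%sp)" -- store then post-increment %sp
   atomically -- and the remaining adjust2 = 11872 bytes are added to
   %sp afterwards. */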
3725 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3726 we need to store the previous stack pointer (frame pointer)
3727 into the frame marker on targets that use the HP unwind
3728 library. This allows the HP unwind library to be used to
3729 unwind GCC frames. However, we are not fully compatible
3730 with the HP library because our frame layout differs from
3731 that specified in the HP runtime specification.
3733 We don't want a frame note on this instruction as the frame
3734 marker moves during dynamic stack allocation.
3736 This instruction also serves as a blockage to prevent
3737 register spills from being scheduled before the stack
3738 pointer is raised. This is necessary as we store
3739 registers using the frame pointer as a base register,
3740 and the frame pointer is set before sp is raised. */
3741 if (TARGET_HPUX_UNWIND_LIBRARY)
3743 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3744 GEN_INT (TARGET_64BIT ? -8 : -4));
3746 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3747 frame_pointer_rtx);
3749 else
3750 emit_insn (gen_blockage ());
3752 /* No frame pointer needed. */
3753 else
3755 /* In some cases we can perform the first callee register save
3756 and allocate the stack frame at the same time. If so, just
3757 make a note of it and defer allocating the frame until saving
3758 the callee registers. */
3759 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3760 merge_sp_adjust_with_store = 1;
3761 /* Cannot optimize. Adjust the stack frame by actual_fsize
3762 bytes. */
3763 else
3764 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3765 actual_fsize, 1);
3769 /* Normal register save.
3771 Do not save the frame pointer in the frame_pointer_needed case. It
3772 was done earlier. */
3773 if (frame_pointer_needed)
3775 offset = local_fsize;
3777 /* Saving the EH return data registers in the frame is the simplest
3778 way to get the frame unwind information emitted. We put them
3779 just before the general registers. */
3780 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3782 unsigned int i, regno;
3784 for (i = 0; ; ++i)
3786 regno = EH_RETURN_DATA_REGNO (i);
3787 if (regno == INVALID_REGNUM)
3788 break;
3790 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3791 offset += UNITS_PER_WORD;
3795 for (i = 18; i >= 4; i--)
3796 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3798 store_reg (i, offset, FRAME_POINTER_REGNUM);
3799 offset += UNITS_PER_WORD;
3800 gr_saved++;
3802 /* Account for %r3 which is saved in a special place. */
3803 gr_saved++;
3805 /* No frame pointer needed. */
3806 else
3808 offset = local_fsize - actual_fsize;
3810 /* Saving the EH return data registers in the frame is the simplest
3811 way to get the frame unwind information emitted. */
3812 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3814 unsigned int i, regno;
3816 for (i = 0; ; ++i)
3818 regno = EH_RETURN_DATA_REGNO (i);
3819 if (regno == INVALID_REGNUM)
3820 break;
3822 /* If merge_sp_adjust_with_store is nonzero, then we can
3823 optimize the first save. */
3824 if (merge_sp_adjust_with_store)
3826 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3827 merge_sp_adjust_with_store = 0;
3829 else
3830 store_reg (regno, offset, STACK_POINTER_REGNUM);
3831 offset += UNITS_PER_WORD;
3835 for (i = 18; i >= 3; i--)
3836 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3838 /* If merge_sp_adjust_with_store is nonzero, then we can
3839 optimize the first GR save. */
3840 if (merge_sp_adjust_with_store)
3842 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3843 merge_sp_adjust_with_store = 0;
3845 else
3846 store_reg (i, offset, STACK_POINTER_REGNUM);
3847 offset += UNITS_PER_WORD;
3848 gr_saved++;
3851 /* If we wanted to merge the SP adjustment with a GR save, but we never
3852 did any GR saves, then just emit the adjustment here. */
3853 if (merge_sp_adjust_with_store)
3854 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3855 actual_fsize, 1);
3858 /* The hppa calling conventions say that %r19, the pic offset
3859 register, is saved at sp - 32 (in this function's frame)
3860 when generating PIC code. FIXME: What is the correct thing
3861 to do for functions which make no calls and allocate no
3862 frame? Do we need to allocate a frame, or can we just omit
3863 the save? For now we'll just omit the save.
3865 We don't want a note on this insn as the frame marker can
3866 move if there is a dynamic stack allocation. */
3867 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3869 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3871 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3875 /* Align pointer properly (doubleword boundary). */
3876 offset = (offset + 7) & ~7;
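/* E.g., an offset of 20 becomes 24; a multiple of 8 is unchanged. */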
3878 /* Floating point register store. */
3879 if (save_fregs)
3881 rtx base;
3883 /* First get the frame or stack pointer to the start of the FP register
3884 save area. */
3885 if (frame_pointer_needed)
3887 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3888 base = frame_pointer_rtx;
3890 else
3892 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3893 base = stack_pointer_rtx;
3896 /* Now actually save the FP registers. */
3897 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3899 if (df_regs_ever_live_p (i)
3900 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3902 rtx addr, insn, reg;
3903 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3904 reg = gen_rtx_REG (DFmode, i);
3905 insn = emit_move_insn (addr, reg);
3906 if (DO_FRAME_NOTES)
3908 RTX_FRAME_RELATED_P (insn) = 1;
3909 if (TARGET_64BIT)
3911 rtx mem = gen_rtx_MEM (DFmode,
3912 plus_constant (base, offset));
3913 REG_NOTES (insn)
3914 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3915 gen_rtx_SET (VOIDmode, mem, reg),
3916 REG_NOTES (insn));
3918 else
3920 rtx meml = gen_rtx_MEM (SFmode,
3921 plus_constant (base, offset));
3922 rtx memr = gen_rtx_MEM (SFmode,
3923 plus_constant (base, offset + 4));
3924 rtx regl = gen_rtx_REG (SFmode, i);
3925 rtx regr = gen_rtx_REG (SFmode, i + 1);
3926 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3927 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3928 rtvec vec;
3930 RTX_FRAME_RELATED_P (setl) = 1;
3931 RTX_FRAME_RELATED_P (setr) = 1;
3932 vec = gen_rtvec (2, setl, setr);
3933 REG_NOTES (insn)
3934 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3935 gen_rtx_SEQUENCE (VOIDmode, vec),
3936 REG_NOTES (insn));
3939 offset += GET_MODE_SIZE (DFmode);
3940 fr_saved++;
3946 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3947 Handle case where DISP > 8k by using the add_high_const patterns. */
3949 static void
3950 load_reg (int reg, HOST_WIDE_INT disp, int base)
3952 rtx dest = gen_rtx_REG (word_mode, reg);
3953 rtx basereg = gen_rtx_REG (Pmode, base);
3954 rtx src;
3956 if (VAL_14_BITS_P (disp))
3957 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3958 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3960 rtx delta = GEN_INT (disp);
3961 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3963 emit_move_insn (tmpreg, delta);
3964 if (TARGET_DISABLE_INDEXING)
3966 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3967 src = gen_rtx_MEM (word_mode, tmpreg);
3969 else
3970 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3972 else
3974 rtx delta = GEN_INT (disp);
3975 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3976 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3978 emit_move_insn (tmpreg, high);
3979 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
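/* For illustration only: on 32-bit targets this typically assembles
   to an addil/ldw pair, e.g. "addil L'disp,%base" (result lands in
   %r1) followed by "ldw R'disp(%r1),%dest". */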
3982 emit_move_insn (dest, src);
3985 /* Update the total code bytes output to the text section. */
3987 static void
3988 update_total_code_bytes (int nbytes)
3990 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3991 && !IN_NAMED_SECTION_P (cfun->decl))
3993 if (INSN_ADDRESSES_SET_P ())
3995 unsigned long old_total = total_code_bytes;
3997 total_code_bytes += nbytes;
3999 /* Be prepared to handle overflows. */
4000 if (old_total > total_code_bytes)
4001 total_code_bytes = -1;
4003 else
4004 total_code_bytes = -1;
4008 /* This function generates the assembly code for function exit.
4009 Args are as for output_function_prologue ().
4011 The function epilogue should not depend on the current stack
4012 pointer! It should use the frame pointer only. This is mandatory
4013 because of alloca; we also take advantage of it to omit stack
4014 adjustments before returning. */
4016 static void
4017 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4019 rtx insn = get_last_insn ();
4021 last_address = 0;
4023 /* hppa_expand_epilogue does the dirty work now. We just need
4024 to output the assembler directives which denote the end
4025 of a function.
4027 To make debuggers happy, emit a nop if the epilogue was completely
4028 eliminated due to a volatile call as the last insn in the
4029 current function. That way the return address (in %r2) will
4030 always point to a valid instruction in the current function. */
4032 /* Get the last real insn. */
4033 if (GET_CODE (insn) == NOTE)
4034 insn = prev_real_insn (insn);
4036 /* If it is a sequence, then look inside. */
4037 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4038 insn = XVECEXP (PATTERN (insn), 0, 0);
4040 /* If insn is a CALL_INSN, then it must be a call to a volatile
4041 function (otherwise there would be epilogue insns). */
4042 if (insn && GET_CODE (insn) == CALL_INSN)
4044 fputs ("\tnop\n", file);
4045 last_address += 4;
4048 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4050 if (TARGET_SOM && TARGET_GAS)
4052 /* We're done with this subspace except possibly for some additional
4053 debug information. Forget that we are in this subspace to ensure
4054 that the next function is output in its own subspace. */
4055 in_section = NULL;
4056 cfun->machine->in_nsubspa = 2;
4059 if (INSN_ADDRESSES_SET_P ())
4061 insn = get_last_nonnote_insn ();
4062 last_address += INSN_ADDRESSES (INSN_UID (insn));
4063 if (INSN_P (insn))
4064 last_address += insn_default_length (insn);
4065 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4066 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
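/* E.g., with a 4-byte FUNCTION_BOUNDARY, a last_address of 0x1d2
   rounds up to 0x1d4 (illustrative values). */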
4069 /* Finally, update the total number of code bytes output so far. */
4070 update_total_code_bytes (last_address);
4073 void
4074 hppa_expand_epilogue (void)
4076 rtx tmpreg;
4077 HOST_WIDE_INT offset;
4078 HOST_WIDE_INT ret_off = 0;
4079 int i;
4080 int merge_sp_adjust_with_load = 0;
4082 /* We will use this often. */
4083 tmpreg = gen_rtx_REG (word_mode, 1);
4085 /* Try to restore RP early to avoid load/use interlocks when
4086 RP gets used in the return (bv) instruction. This appears to still
4087 be necessary even when we schedule the prologue and epilogue. */
4088 if (rp_saved)
4090 ret_off = TARGET_64BIT ? -16 : -20;
4091 if (frame_pointer_needed)
4093 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4094 ret_off = 0;
4096 else
4098 /* No frame pointer, and stack is smaller than 8k. */
4099 if (VAL_14_BITS_P (ret_off - actual_fsize))
4101 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4102 ret_off = 0;
4107 /* General register restores. */
4108 if (frame_pointer_needed)
4110 offset = local_fsize;
4112 /* If the current function calls __builtin_eh_return, then we need
4113 to restore the saved EH data registers. */
4114 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4116 unsigned int i, regno;
4118 for (i = 0; ; ++i)
4120 regno = EH_RETURN_DATA_REGNO (i);
4121 if (regno == INVALID_REGNUM)
4122 break;
4124 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4125 offset += UNITS_PER_WORD;
4129 for (i = 18; i >= 4; i--)
4130 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4132 load_reg (i, offset, FRAME_POINTER_REGNUM);
4133 offset += UNITS_PER_WORD;
4136 else
4138 offset = local_fsize - actual_fsize;
4140 /* If the current function calls __builtin_eh_return, then we need
4141 to restore the saved EH data registers. */
4142 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4144 unsigned int i, regno;
4146 for (i = 0; ; ++i)
4148 regno = EH_RETURN_DATA_REGNO (i);
4149 if (regno == INVALID_REGNUM)
4150 break;
4152 /* Only for the first load.
4153 merge_sp_adjust_with_load holds the register load
4154 with which we will merge the sp adjustment. */
4155 if (merge_sp_adjust_with_load == 0
4156 && local_fsize == 0
4157 && VAL_14_BITS_P (-actual_fsize))
4158 merge_sp_adjust_with_load = regno;
4159 else
4160 load_reg (regno, offset, STACK_POINTER_REGNUM);
4161 offset += UNITS_PER_WORD;
4165 for (i = 18; i >= 3; i--)
4167 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4169 /* Only for the first load.
4170 merge_sp_adjust_with_load holds the register load
4171 with which we will merge the sp adjustment. */
4172 if (merge_sp_adjust_with_load == 0
4173 && local_fsize == 0
4174 && VAL_14_BITS_P (-actual_fsize))
4175 merge_sp_adjust_with_load = i;
4176 else
4177 load_reg (i, offset, STACK_POINTER_REGNUM);
4178 offset += UNITS_PER_WORD;
4183 /* Align pointer properly (doubleword boundary). */
4184 offset = (offset + 7) & ~7;
4186 /* FP register restores. */
4187 if (save_fregs)
4189 /* Adjust the register to index off of. */
4190 if (frame_pointer_needed)
4191 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4192 else
4193 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4195 /* Actually do the restores now. */
4196 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4197 if (df_regs_ever_live_p (i)
4198 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4200 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4201 rtx dest = gen_rtx_REG (DFmode, i);
4202 emit_move_insn (dest, src);
4206 /* Emit a blockage insn here to keep these insns from being moved to
4207 an earlier spot in the epilogue, or into the main instruction stream.
4209 This is necessary as we must not cut the stack back before all the
4210 restores are finished. */
4211 emit_insn (gen_blockage ());
4213 /* Reset stack pointer (and possibly frame pointer). The stack
4214 pointer is initially set to fp + 64 to avoid a race condition. */
4215 if (frame_pointer_needed)
4217 rtx delta = GEN_INT (-64);
4219 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4220 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4222 /* If we were deferring a callee register restore, do it now. */
4223 else if (merge_sp_adjust_with_load)
4225 rtx delta = GEN_INT (-actual_fsize);
4226 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4228 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4230 else if (actual_fsize != 0)
4231 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4232 - actual_fsize, 0);
4234 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4235 frame greater than 8k), do so now. */
4236 if (ret_off != 0)
4237 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4239 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4241 rtx sa = EH_RETURN_STACKADJ_RTX;
4243 emit_insn (gen_blockage ());
4244 emit_insn (TARGET_64BIT
4245 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4246 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4250 rtx
4251 hppa_pic_save_rtx (void)
4253 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4256 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4257 #define NO_DEFERRED_PROFILE_COUNTERS 0
4258 #endif
4261 /* Vector of funcdef numbers. */
4262 static VEC(int,heap) *funcdef_nos;
4264 /* Output deferred profile counters. */
4265 static void
4266 output_deferred_profile_counters (void)
4268 unsigned int i;
4269 int align, n;
4271 if (VEC_empty (int, funcdef_nos))
4272 return;
4274 switch_to_section (data_section);
4275 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4276 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4278 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4280 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4281 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4284 VEC_free (int, heap, funcdef_nos);
4287 void
4288 hppa_profile_hook (int label_no)
4290 /* We use SImode for the address of the function in both 32 and
4291 64-bit code to avoid having to provide DImode versions of the
4292 lcla2 and load_offset_label_address insn patterns. */
4293 rtx reg = gen_reg_rtx (SImode);
4294 rtx label_rtx = gen_label_rtx ();
4295 rtx begin_label_rtx, call_insn;
4296 char begin_label_name[16];
4298 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4299 label_no);
4300 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4302 if (TARGET_64BIT)
4303 emit_move_insn (arg_pointer_rtx,
4304 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4305 GEN_INT (64)));
4307 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4309 /* The address of the function is loaded into %r25 with an instruction-
4310 relative sequence that avoids the use of relocations. The sequence
4311 is split so that the load_offset_label_address instruction can
4312 occupy the delay slot of the call to _mcount. */
4313 if (TARGET_PA_20)
4314 emit_insn (gen_lcla2 (reg, label_rtx));
4315 else
4316 emit_insn (gen_lcla1 (reg, label_rtx));
4318 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4319 reg, begin_label_rtx, label_rtx));
4321 #if !NO_DEFERRED_PROFILE_COUNTERS
4323 rtx count_label_rtx, addr, r24;
4324 char count_label_name[16];
4326 VEC_safe_push (int, heap, funcdef_nos, label_no);
4327 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4328 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4330 addr = force_reg (Pmode, count_label_rtx);
4331 r24 = gen_rtx_REG (Pmode, 24);
4332 emit_move_insn (r24, addr);
4334 call_insn =
4335 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4336 gen_rtx_SYMBOL_REF (Pmode,
4337 "_mcount")),
4338 GEN_INT (TARGET_64BIT ? 24 : 12)));
4340 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4342 #else
4344 call_insn =
4345 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4346 gen_rtx_SYMBOL_REF (Pmode,
4347 "_mcount")),
4348 GEN_INT (TARGET_64BIT ? 16 : 8)));
4350 #endif
4352 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4353 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4355 /* Indicate the _mcount call cannot throw, nor will it execute a
4356 non-local goto. */
4357 REG_NOTES (call_insn)
4358 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4361 /* Fetch the return address for the frame COUNT steps up from
4362 the current frame, after the prologue. FRAMEADDR is the
4363 frame pointer of the COUNT frame.
4365 We want to ignore any export stub remnants here. To handle this,
4366 we examine the code at the return address, and if it is an export
4367 stub, we return a memory rtx for the stub return address stored
4368 at frame-24.
4370 The value returned is used in two different ways:
4372 1. To find a function's caller.
4374 2. To change the return address for a function.
4376 This function handles most instances of case 1; however, it will
4377 fail if there are two levels of stubs to execute on the return
4378 path. The only way I believe that can happen is if the return value
4379 needs a parameter relocation, which never happens for C code.
4381 This function handles most instances of case 2; however, it will
4382 fail if we did not originally have stub code on the return path
4383 but will need stub code on the new return path. This can happen if
4384 the caller & callee are both in the main program, but the new
4385 return location is in a shared library. */
4387 rtx
4388 return_addr_rtx (int count, rtx frameaddr)
4390 rtx label;
4391 rtx rp;
4392 rtx saved_rp;
4393 rtx ins;
4395 if (count != 0)
4396 return NULL_RTX;
4398 rp = get_hard_reg_initial_val (Pmode, 2);
4400 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4401 return rp;
4403 saved_rp = gen_reg_rtx (Pmode);
4404 emit_move_insn (saved_rp, rp);
4406 /* Get pointer to the instruction stream. We have to mask out the
4407 privilege level from the two low order bits of the return address
4408 pointer here so that ins will point to the start of the first
4409 instruction that would have been executed if we returned. */
4410 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4411 label = gen_label_rtx ();
4413 /* Check the instruction stream at the normal return address for the
4414 export stub:
4416 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4417 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4418 0x00011820 | stub+16: mtsp r1,sr0
4419 0xe0400002 | stub+20: be,n 0(sr0,rp)
4421 If it is an export stub, then our return address is really in
4422 -24[frameaddr]. */
4424 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4425 NULL_RTX, SImode, 1);
4426 emit_jump_insn (gen_bne (label));
4428 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4429 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4430 emit_jump_insn (gen_bne (label));
4432 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4433 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4434 emit_jump_insn (gen_bne (label));
4436 /* 0xe0400002 must be specified as -532676606 so that it won't be
4437 rejected as an invalid immediate operand on 64-bit hosts. */
4438 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4439 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
4441 /* If there is no export stub then just use the value saved from
4442 the return pointer register. */
4444 emit_jump_insn (gen_bne (label));
4446 /* Here we know that our return address points to an export
4447 stub. We don't want to return the address of the export stub,
4448 but rather the return address of the export stub. That return
4449 address is stored at -24[frameaddr]. */
4451 emit_move_insn (saved_rp,
4452 gen_rtx_MEM (Pmode,
4453 memory_address (Pmode,
4454 plus_constant (frameaddr,
4455 -24))));
4457 emit_label (label);
4458 return saved_rp;
4461 void
4462 emit_bcond_fp (enum rtx_code code, rtx operand0)
4464 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4465 gen_rtx_IF_THEN_ELSE (VOIDmode,
4466 gen_rtx_fmt_ee (code,
4467 VOIDmode,
4468 gen_rtx_REG (CCFPmode, 0),
4469 const0_rtx),
4470 gen_rtx_LABEL_REF (VOIDmode, operand0),
4471 pc_rtx)));
4475 rtx
4476 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4478 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4479 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4482 /* Adjust the cost of a scheduling dependency. Return the new cost of
4483 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4485 static int
4486 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4488 enum attr_type attr_type;
4490 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4491 true dependencies, as they are described with bypasses now. */
4492 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4493 return cost;
4495 if (! recog_memoized (insn))
4496 return 0;
4498 attr_type = get_attr_type (insn);
4500 switch (REG_NOTE_KIND (link))
4502 case REG_DEP_ANTI:
4503 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4504 cycles later. */
4506 if (attr_type == TYPE_FPLOAD)
4508 rtx pat = PATTERN (insn);
4509 rtx dep_pat = PATTERN (dep_insn);
4510 if (GET_CODE (pat) == PARALLEL)
4512 /* This happens for the fldXs,mb patterns. */
4513 pat = XVECEXP (pat, 0, 0);
4515 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4516 /* If this happens, we have to extend this to schedule
4517 optimally. Return 0 for now. */
4518 return 0;
4520 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4522 if (! recog_memoized (dep_insn))
4523 return 0;
4524 switch (get_attr_type (dep_insn))
4526 case TYPE_FPALU:
4527 case TYPE_FPMULSGL:
4528 case TYPE_FPMULDBL:
4529 case TYPE_FPDIVSGL:
4530 case TYPE_FPDIVDBL:
4531 case TYPE_FPSQRTSGL:
4532 case TYPE_FPSQRTDBL:
4533 /* A fpload can't be issued until one cycle before a
4534 preceding arithmetic operation has finished if
4535 the target of the fpload is any of the sources
4536 (or destination) of the arithmetic operation. */
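/* E.g., if the arithmetic operation has an 8-cycle latency, the
   adjusted cost below is 7 (illustrative figure only). */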
4537 return insn_default_latency (dep_insn) - 1;
4539 default:
4540 return 0;
4544 else if (attr_type == TYPE_FPALU)
4546 rtx pat = PATTERN (insn);
4547 rtx dep_pat = PATTERN (dep_insn);
4548 if (GET_CODE (pat) == PARALLEL)
4550 /* This happens for the fldXs,mb patterns. */
4551 pat = XVECEXP (pat, 0, 0);
4553 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4554 /* If this happens, we have to extend this to schedule
4555 optimally. Return 0 for now. */
4556 return 0;
4558 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4560 if (! recog_memoized (dep_insn))
4561 return 0;
4562 switch (get_attr_type (dep_insn))
4564 case TYPE_FPDIVSGL:
4565 case TYPE_FPDIVDBL:
4566 case TYPE_FPSQRTSGL:
4567 case TYPE_FPSQRTDBL:
4568 /* An ALU flop can't be issued until two cycles before a
4569 preceding divide or sqrt operation has finished if
4570 the target of the ALU flop is any of the sources
4571 (or destination) of the divide or sqrt operation. */
4572 return insn_default_latency (dep_insn) - 2;
4574 default:
4575 return 0;
4580 /* For other anti dependencies, the cost is 0. */
4581 return 0;
4583 case REG_DEP_OUTPUT:
4584 /* Output dependency; DEP_INSN writes a register that INSN writes some
4585 cycles later. */
4586 if (attr_type == TYPE_FPLOAD)
4588 rtx pat = PATTERN (insn);
4589 rtx dep_pat = PATTERN (dep_insn);
4590 if (GET_CODE (pat) == PARALLEL)
4592 /* This happens for the fldXs,mb patterns. */
4593 pat = XVECEXP (pat, 0, 0);
4595 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4596 /* If this happens, we have to extend this to schedule
4597 optimally. Return 0 for now. */
4598 return 0;
4600 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4602 if (! recog_memoized (dep_insn))
4603 return 0;
4604 switch (get_attr_type (dep_insn))
4606 case TYPE_FPALU:
4607 case TYPE_FPMULSGL:
4608 case TYPE_FPMULDBL:
4609 case TYPE_FPDIVSGL:
4610 case TYPE_FPDIVDBL:
4611 case TYPE_FPSQRTSGL:
4612 case TYPE_FPSQRTDBL:
4613 /* A fpload can't be issued until one cycle before a
4614 preceding arithmetic operation has finished if
4615 the target of the fpload is the destination of the
4616 arithmetic operation.
4618 Exception: For PA7100LC, PA7200 and PA7300, the cost
4619 is 3 cycles, unless they bundle together. We also
4620 pay the penalty if the second insn is a fpload. */
4621 return insn_default_latency (dep_insn) - 1;
4623 default:
4624 return 0;
4628 else if (attr_type == TYPE_FPALU)
4630 rtx pat = PATTERN (insn);
4631 rtx dep_pat = PATTERN (dep_insn);
4632 if (GET_CODE (pat) == PARALLEL)
4634 /* This happens for the fldXs,mb patterns. */
4635 pat = XVECEXP (pat, 0, 0);
4637 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4638 /* If this happens, we have to extend this to schedule
4639 optimally. Return 0 for now. */
4640 return 0;
4642 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4644 if (! recog_memoized (dep_insn))
4645 return 0;
4646 switch (get_attr_type (dep_insn))
4648 case TYPE_FPDIVSGL:
4649 case TYPE_FPDIVDBL:
4650 case TYPE_FPSQRTSGL:
4651 case TYPE_FPSQRTDBL:
4652 /* An ALU flop can't be issued until two cycles before a
4653 preceding divide or sqrt operation has finished if
4654 the target of the ALU flop is also the target of
4655 the divide or sqrt operation. */
4656 return insn_default_latency (dep_insn) - 2;
4658 default:
4659 return 0;
4664 /* For other output dependencies, the cost is 0. */
4665 return 0;
4667 default:
4668 gcc_unreachable ();
4672 /* Adjust scheduling priorities. We use this to try to keep addil
4673 and the next use of %r1 close together. */
4674 static int
4675 pa_adjust_priority (rtx insn, int priority)
4677 rtx set = single_set (insn);
4678 rtx src, dest;
4679 if (set)
4681 src = SET_SRC (set);
4682 dest = SET_DEST (set);
4683 if (GET_CODE (src) == LO_SUM
4684 && symbolic_operand (XEXP (src, 1), VOIDmode)
4685 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4686 priority >>= 3;
4688 else if (GET_CODE (src) == MEM
4689 && GET_CODE (XEXP (src, 0)) == LO_SUM
4690 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4691 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4692 priority >>= 1;
4694 else if (GET_CODE (dest) == MEM
4695 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4696 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4697 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4698 priority >>= 3;
4700 return priority;
4703 /* The 700 can only issue a single insn at a time.
4704 The 7XXX processors can issue two insns at a time.
4705 The 8000 can issue 4 insns at a time. */
4706 static int
4707 pa_issue_rate (void)
4709 switch (pa_cpu)
4711 case PROCESSOR_700: return 1;
4712 case PROCESSOR_7100: return 2;
4713 case PROCESSOR_7100LC: return 2;
4714 case PROCESSOR_7200: return 2;
4715 case PROCESSOR_7300: return 2;
4716 case PROCESSOR_8000: return 4;
4718 default:
4719 gcc_unreachable ();
4725 /* Return any length adjustment needed by INSN which already has its length
4726 computed as LENGTH. Return zero if no adjustment is necessary.
4728 For the PA: function calls, millicode calls, and backwards short
4729 conditional branches with unfilled delay slots need a 4-byte adjustment
4730 (to account for the NOP which will be inserted into the instruction stream).
4732 Also compute the length of an inline block move here as it is too
4733 complicated to express as a length attribute in pa.md. */
4734 int
4735 pa_adjust_insn_length (rtx insn, int length)
4737 rtx pat = PATTERN (insn);
4739 /* Jumps inside switch tables which have unfilled delay slots need
4740 adjustment. */
4741 if (GET_CODE (insn) == JUMP_INSN
4742 && GET_CODE (pat) == PARALLEL
4743 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4744 return 4;
4745 /* Millicode insn with an unfilled delay slot. */
4746 else if (GET_CODE (insn) == INSN
4747 && GET_CODE (pat) != SEQUENCE
4748 && GET_CODE (pat) != USE
4749 && GET_CODE (pat) != CLOBBER
4750 && get_attr_type (insn) == TYPE_MILLI)
4751 return 4;
4752 /* Block move pattern. */
4753 else if (GET_CODE (insn) == INSN
4754 && GET_CODE (pat) == PARALLEL
4755 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4756 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4757 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4758 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4759 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4760 return compute_movmem_length (insn) - 4;
4761 /* Block clear pattern. */
4762 else if (GET_CODE (insn) == INSN
4763 && GET_CODE (pat) == PARALLEL
4764 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4765 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4766 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4767 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4768 return compute_clrmem_length (insn) - 4;
4769 /* Conditional branch with an unfilled delay slot. */
4770 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4772 /* Adjust a short backwards conditional with an unfilled delay slot. */
4773 if (GET_CODE (pat) == SET
4774 && length == 4
4775 && ! forward_branch_p (insn))
4776 return 4;
4777 else if (GET_CODE (pat) == PARALLEL
4778 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4779 && length == 4)
4780 return 4;
4781 /* Adjust dbra insn with short backwards conditional branch with
4782 unfilled delay slot -- only for case where counter is in a
4783 general register. */
4784 else if (GET_CODE (pat) == PARALLEL
4785 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4786 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4787 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4788 && length == 4
4789 && ! forward_branch_p (insn))
4790 return 4;
4791 else
4792 return 0;
4794 return 0;
4797 /* Print operand X (an rtx) in assembler syntax to file FILE.
4798 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4799 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4801 void
4802 print_operand (FILE *file, rtx x, int code)
4804 switch (code)
4806 case '#':
4807 /* Output a 'nop' if there's nothing for the delay slot. */
4808 if (dbr_sequence_length () == 0)
4809 fputs ("\n\tnop", file);
4810 return;
4811 case '*':
4812 /* Output a nullification completer if there's nothing for the
4813 delay slot or nullification is requested. */
4814 if (dbr_sequence_length () == 0 ||
4815 (final_sequence &&
4816 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4817 fputs (",n", file);
4818 return;
4819 case 'R':
4820 /* Print out the second register name of a register pair.
4821 I.e., R (6) => 7. */
4822 fputs (reg_names[REGNO (x) + 1], file);
4823 return;
4824 case 'r':
4825 /* A register or zero. */
4826 if (x == const0_rtx
4827 || (x == CONST0_RTX (DFmode))
4828 || (x == CONST0_RTX (SFmode)))
4830 fputs ("%r0", file);
4831 return;
4833 else
4834 break;
4835 case 'f':
4836 /* A register or zero (floating point). */
4837 if (x == const0_rtx
4838 || (x == CONST0_RTX (DFmode))
4839 || (x == CONST0_RTX (SFmode)))
4841 fputs ("%fr0", file);
4842 return;
4844 else
4845 break;
4846 case 'A':
4848 rtx xoperands[2];
4850 xoperands[0] = XEXP (XEXP (x, 0), 0);
4851 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4852 output_global_address (file, xoperands[1], 0);
4853 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4854 return;
4857 case 'C': /* Plain (C)ondition */
4858 case 'X':
4859 switch (GET_CODE (x))
4861 case EQ:
4862 fputs ("=", file); break;
4863 case NE:
4864 fputs ("<>", file); break;
4865 case GT:
4866 fputs (">", file); break;
4867 case GE:
4868 fputs (">=", file); break;
4869 case GEU:
4870 fputs (">>=", file); break;
4871 case GTU:
4872 fputs (">>", file); break;
4873 case LT:
4874 fputs ("<", file); break;
4875 case LE:
4876 fputs ("<=", file); break;
4877 case LEU:
4878 fputs ("<<=", file); break;
4879 case LTU:
4880 fputs ("<<", file); break;
4881 default:
4882 gcc_unreachable ();
4884 return;
4885 case 'N': /* Condition, (N)egated */
4886 switch (GET_CODE (x))
4888 case EQ:
4889 fputs ("<>", file); break;
4890 case NE:
4891 fputs ("=", file); break;
4892 case GT:
4893 fputs ("<=", file); break;
4894 case GE:
4895 fputs ("<", file); break;
4896 case GEU:
4897 fputs ("<<", file); break;
4898 case GTU:
4899 fputs ("<<=", file); break;
4900 case LT:
4901 fputs (">=", file); break;
4902 case LE:
4903 fputs (">", file); break;
4904 case LEU:
4905 fputs (">>", file); break;
4906 case LTU:
4907 fputs (">>=", file); break;
4908 default:
4909 gcc_unreachable ();
4911 return;
4912 /* For floating point comparisons. Note that the output
4913 predicates are the complement of the desired mode. The
4914 conditions for GT, GE, LT, LE and LTGT cause an invalid
4915 operation exception if the result is unordered and this
4916 exception is enabled in the floating-point status register. */
4917 case 'Y':
4918 switch (GET_CODE (x))
4920 case EQ:
4921 fputs ("!=", file); break;
4922 case NE:
4923 fputs ("=", file); break;
4924 case GT:
4925 fputs ("!>", file); break;
4926 case GE:
4927 fputs ("!>=", file); break;
4928 case LT:
4929 fputs ("!<", file); break;
4930 case LE:
4931 fputs ("!<=", file); break;
4932 case LTGT:
4933 fputs ("!<>", file); break;
4934 case UNLE:
4935 fputs ("!?<=", file); break;
4936 case UNLT:
4937 fputs ("!?<", file); break;
4938 case UNGE:
4939 fputs ("!?>=", file); break;
4940 case UNGT:
4941 fputs ("!?>", file); break;
4942 case UNEQ:
4943 fputs ("!?=", file); break;
4944 case UNORDERED:
4945 fputs ("!?", file); break;
4946 case ORDERED:
4947 fputs ("?", file); break;
4948 default:
4949 gcc_unreachable ();
4951 return;
4952 case 'S': /* Condition, operands are (S)wapped. */
4953 switch (GET_CODE (x))
4955 case EQ:
4956 fputs ("=", file); break;
4957 case NE:
4958 fputs ("<>", file); break;
4959 case GT:
4960 fputs ("<", file); break;
4961 case GE:
4962 fputs ("<=", file); break;
4963 case GEU:
4964 fputs ("<<=", file); break;
4965 case GTU:
4966 fputs ("<<", file); break;
4967 case LT:
4968 fputs (">", file); break;
4969 case LE:
4970 fputs (">=", file); break;
4971 case LEU:
4972 fputs (">>=", file); break;
4973 case LTU:
4974 fputs (">>", file); break;
4975 default:
4976 gcc_unreachable ();
4978 return;
4979 case 'B': /* Condition, (B)oth swapped and negate. */
4980 switch (GET_CODE (x))
4982 case EQ:
4983 fputs ("<>", file); break;
4984 case NE:
4985 fputs ("=", file); break;
4986 case GT:
4987 fputs (">=", file); break;
4988 case GE:
4989 fputs (">", file); break;
4990 case GEU:
4991 fputs (">>", file); break;
4992 case GTU:
4993 fputs (">>=", file); break;
4994 case LT:
4995 fputs ("<=", file); break;
4996 case LE:
4997 fputs ("<", file); break;
4998 case LEU:
4999 fputs ("<<", file); break;
5000 case LTU:
5001 fputs ("<<=", file); break;
5002 default:
5003 gcc_unreachable ();
5005 return;
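/* The following codes print transformed forms of a CONST_INT X. For
   illustration, with INTVAL (x) == 3: 'k' prints -4 (~3), 'Q' prints
   61 (64 - 3), 'L' prints 29 (32 - 3), 'p' prints 60 (63 - 3) and
   'P' prints 28 (31 - 3). */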
5006 case 'k':
5007 gcc_assert (GET_CODE (x) == CONST_INT);
5008 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5009 return;
5010 case 'Q':
5011 gcc_assert (GET_CODE (x) == CONST_INT);
5012 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5013 return;
5014 case 'L':
5015 gcc_assert (GET_CODE (x) == CONST_INT);
5016 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5017 return;
5018 case 'O':
5019 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5020 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5021 return;
5022 case 'p':
5023 gcc_assert (GET_CODE (x) == CONST_INT);
5024 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5025 return;
5026 case 'P':
5027 gcc_assert (GET_CODE (x) == CONST_INT);
5028 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5029 return;
5030 case 'I':
5031 if (GET_CODE (x) == CONST_INT)
5032 fputs ("i", file);
5033 return;
5034 case 'M':
5035 case 'F':
5036 switch (GET_CODE (XEXP (x, 0)))
5038 case PRE_DEC:
5039 case PRE_INC:
5040 if (ASSEMBLER_DIALECT == 0)
5041 fputs ("s,mb", file);
5042 else
5043 fputs (",mb", file);
5044 break;
5045 case POST_DEC:
5046 case POST_INC:
5047 if (ASSEMBLER_DIALECT == 0)
5048 fputs ("s,ma", file);
5049 else
5050 fputs (",ma", file);
5051 break;
5052 case PLUS:
5053 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5054 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5056 if (ASSEMBLER_DIALECT == 0)
5057 fputs ("x", file);
5059 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5060 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5062 if (ASSEMBLER_DIALECT == 0)
5063 fputs ("x,s", file);
5064 else
5065 fputs (",s", file);
5067 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5068 fputs ("s", file);
5069 break;
5070 default:
5071 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5072 fputs ("s", file);
5073 break;
5075 return;
5076 case 'G':
5077 output_global_address (file, x, 0);
5078 return;
5079 case 'H':
5080 output_global_address (file, x, 1);
5081 return;
5082 case 0: /* Don't do anything special */
5083 break;
5084 case 'Z':
5086 unsigned op[3];
5087 compute_zdepwi_operands (INTVAL (x), op);
5088 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5089 return;
5091 case 'z':
5093 unsigned op[3];
5094 compute_zdepdi_operands (INTVAL (x), op);
5095 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5096 return;
5098 case 'c':
5099 /* We can get here from a .vtable_inherit due to our
5100 CONSTANT_ADDRESS_P rejecting perfectly good constant
5101 addresses. */
5102 break;
5103 default:
5104 gcc_unreachable ();
5106 if (GET_CODE (x) == REG)
5108 fputs (reg_names [REGNO (x)], file);
5109 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5111 fputs ("R", file);
5112 return;
5114 if (FP_REG_P (x)
5115 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5116 && (REGNO (x) & 1) == 0)
5117 fputs ("L", file);
5119 else if (GET_CODE (x) == MEM)
5121 int size = GET_MODE_SIZE (GET_MODE (x));
5122 rtx base = NULL_RTX;
5123 switch (GET_CODE (XEXP (x, 0)))
5125 case PRE_DEC:
5126 case POST_DEC:
5127 base = XEXP (XEXP (x, 0), 0);
5128 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5129 break;
5130 case PRE_INC:
5131 case POST_INC:
5132 base = XEXP (XEXP (x, 0), 0);
5133 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5134 break;
5135 case PLUS:
5136 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5137 fprintf (file, "%s(%s)",
5138 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5139 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5140 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5141 fprintf (file, "%s(%s)",
5142 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5143 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5144 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5145 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5147 /* Because the REG_POINTER flag can get lost during reload,
5148 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5149 index and base registers in the combined move patterns. */
5150 rtx base = XEXP (XEXP (x, 0), 1);
5151 rtx index = XEXP (XEXP (x, 0), 0);
5153 fprintf (file, "%s(%s)",
5154 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5156 else
5157 output_address (XEXP (x, 0));
5158 break;
5159 default:
5160 output_address (XEXP (x, 0));
5161 break;
5164 else
5165 output_addr_const (file, x);
5168 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5170 void
5171 output_global_address (FILE *file, rtx x, int round_constant)
5174 /* Imagine (high (const (plus ...))). */
5175 if (GET_CODE (x) == HIGH)
5176 x = XEXP (x, 0);
5178 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5179 output_addr_const (file, x);
5180 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5182 output_addr_const (file, x);
5183 fputs ("-$global$", file);
5185 else if (GET_CODE (x) == CONST)
5187 const char *sep = "";
5188 int offset = 0; /* assembler wants -$global$ at end */
5189 rtx base = NULL_RTX;
5191 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5193 case SYMBOL_REF:
5194 base = XEXP (XEXP (x, 0), 0);
5195 output_addr_const (file, base);
5196 break;
5197 case CONST_INT:
5198 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5199 break;
5200 default:
5201 gcc_unreachable ();
5204 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5206 case SYMBOL_REF:
5207 base = XEXP (XEXP (x, 0), 1);
5208 output_addr_const (file, base);
5209 break;
5210 case CONST_INT:
5211 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5212 break;
5213 default:
5214 gcc_unreachable ();
5217 /* How bogus. The compiler is apparently responsible for
5218 rounding the constant if it uses an LR field selector.
5220 The linker and/or assembler seem a better place since
5221 they have to do this kind of thing already.
5223 If we fail to do this, HP's optimizing linker may eliminate
5224 an addil, but not update the ldw/stw/ldo instruction that
5225 uses the result of the addil. */
5226 if (round_constant)
5227 offset = ((offset + 0x1000) & ~0x1fff);
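/* I.e., round to the nearest multiple of 0x2000; e.g., an offset of
   0x2345 becomes 0x2000 and 0x3456 becomes 0x4000 (illustrative
   values). */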
5229 switch (GET_CODE (XEXP (x, 0)))
5231 case PLUS:
5232 if (offset < 0)
5234 offset = -offset;
5235 sep = "-";
5237 else
5238 sep = "+";
5239 break;
5241 case MINUS:
5242 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5243 sep = "-";
5244 break;
5246 default:
5247 gcc_unreachable ();
5250 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5251 fputs ("-$global$", file);
5252 if (offset)
5253 fprintf (file, "%s%d", sep, offset);
5255 else
5256 output_addr_const (file, x);
5259 /* Output boilerplate text to appear at the beginning of the file.
5260 There are several possible versions. */
5261 #define aputs(x) fputs(x, asm_out_file)
5262 static inline void
5263 pa_file_start_level (void)
5265 if (TARGET_64BIT)
5266 aputs ("\t.LEVEL 2.0w\n");
5267 else if (TARGET_PA_20)
5268 aputs ("\t.LEVEL 2.0\n");
5269 else if (TARGET_PA_11)
5270 aputs ("\t.LEVEL 1.1\n");
5271 else
5272 aputs ("\t.LEVEL 1.0\n");
5275 static inline void
5276 pa_file_start_space (int sortspace)
5278 aputs ("\t.SPACE $PRIVATE$");
5279 if (sortspace)
5280 aputs (",SORT=16");
5281 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5282 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5283 "\n\t.SPACE $TEXT$");
5284 if (sortspace)
5285 aputs (",SORT=8");
5286 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5287 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5290 static inline void
5291 pa_file_start_file (int want_version)
5293 if (write_symbols != NO_DEBUG)
5295 output_file_directive (asm_out_file, main_input_filename);
5296 if (want_version)
5297 aputs ("\t.version\t\"01.01\"\n");
5301 static inline void
5302 pa_file_start_mcount (const char *aswhat)
5304 if (profile_flag)
5305 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5308 static void
5309 pa_elf_file_start (void)
5311 pa_file_start_level ();
5312 pa_file_start_mcount ("ENTRY");
5313 pa_file_start_file (0);
5316 static void
5317 pa_som_file_start (void)
5319 pa_file_start_level ();
5320 pa_file_start_space (0);
5321 aputs ("\t.IMPORT $global$,DATA\n"
5322 "\t.IMPORT $$dyncall,MILLICODE\n");
5323 pa_file_start_mcount ("CODE");
5324 pa_file_start_file (0);
5327 static void
5328 pa_linux_file_start (void)
5330 pa_file_start_file (1);
5331 pa_file_start_level ();
5332 pa_file_start_mcount ("CODE");
5335 static void
5336 pa_hpux64_gas_file_start (void)
5338 pa_file_start_level ();
5339 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5340 if (profile_flag)
5341 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5342 #endif
5343 pa_file_start_file (1);
5346 static void
5347 pa_hpux64_hpas_file_start (void)
5349 pa_file_start_level ();
5350 pa_file_start_space (1);
5351 pa_file_start_mcount ("CODE");
5352 pa_file_start_file (0);
5354 #undef aputs
5356 /* Search the deferred plabel list for SYMBOL and return its internal
5357 label. If an entry for SYMBOL is not found, a new entry is created. */
5359 static rtx
5360 get_deferred_plabel (rtx symbol)
5362 const char *fname = XSTR (symbol, 0);
5363 size_t i;
5365 /* See if we have already put this function on the list of deferred
5366 plabels. This list is generally small, so a linear search is not
5367 too ugly. If it proves too slow, replace it with something faster. */
5368 for (i = 0; i < n_deferred_plabels; i++)
5369 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5370 break;
5372 /* If the deferred plabel list is empty, or this entry was not found
5373 on the list, create a new entry on the list. */
5374 if (deferred_plabels == NULL || i == n_deferred_plabels)
5376 tree id;
5378 if (deferred_plabels == 0)
5379 deferred_plabels = (struct deferred_plabel *)
5380 ggc_alloc (sizeof (struct deferred_plabel));
5381 else
5382 deferred_plabels = (struct deferred_plabel *)
5383 ggc_realloc (deferred_plabels,
5384 ((n_deferred_plabels + 1)
5385 * sizeof (struct deferred_plabel)));
5387 i = n_deferred_plabels++;
5388 deferred_plabels[i].internal_label = gen_label_rtx ();
5389 deferred_plabels[i].symbol = symbol;
5391 /* Gross. We have just implicitly taken the address of this
5392 function. Mark it in the same manner as assemble_name. */
5393 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5394 if (id)
5395 mark_referenced (id);
5398 return deferred_plabels[i].internal_label;
5401 static void
5402 output_deferred_plabels (void)
5404 size_t i;
5406 /* If we have some deferred plabels, then we need to switch into the
5407 data or readonly data section, and align it to a 4 byte (8 byte
5408 for the 64-bit runtime) boundary before outputting the deferred plabels. */
5409 if (n_deferred_plabels)
5411 switch_to_section (flag_pic ? data_section : readonly_data_section);
5412 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5415 /* Now output the deferred plabels. */
5416 for (i = 0; i < n_deferred_plabels; i++)
5418 targetm.asm_out.internal_label (asm_out_file, "L",
5419 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5420 assemble_integer (deferred_plabels[i].symbol,
5421 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5425 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5426 /* Initialize optabs to point to HPUX long double emulation routines. */
5427 static void
5428 pa_hpux_init_libfuncs (void)
5430 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5431 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5432 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5433 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5434 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5435 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5436 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5437 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5438 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5440 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5441 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5442 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5443 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5444 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5445 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5446 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5448 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5449 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5450 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5451 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5453 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5454 ? "__U_Qfcnvfxt_quad_to_sgl"
5455 : "_U_Qfcnvfxt_quad_to_sgl");
5456 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5457 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5458 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5460 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5461 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5462 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5463 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5465 #endif
5467 /* HP's millicode routines mean something special to the assembler.
5468 Keep track of which ones we have used. */
5470 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5471 static void import_milli (enum millicodes);
5472 static char imported[(int) end1000];
5473 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5474 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5475 #define MILLI_START 10
5477 static void
5478 import_milli (enum millicodes code)
5480 char str[sizeof (import_string)];
5482 if (!imported[(int) code])
5484 imported[(int) code] = 1;
5485 strcpy (str, import_string);
5486 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5487 output_asm_insn (str, 0);
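/* E.g., import_milli (mulI) emits ".IMPORT $$mulI,MILLICODE". */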
5491 /* The register constraints have put the operands and return value in
5492 the proper registers. */
5494 const char *
5495 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5497 import_milli (mulI);
5498 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5501 /* Emit the rtl for doing a division by a constant. */
5503 /* Do magic division millicodes exist for this value? */
5504 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
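/* That is, magic millicode exists for the divisors 3, 5, 6, 7, 9, 10,
   12, 14 and 15. */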
5506 /* We'll use an array to keep track of the magic millicodes and
5507 whether or not we've used them already. [n][0] is signed, [n][1] is
5508 unsigned. */
5510 static int div_milli[16][2];
5512 int
5513 emit_hpdiv_const (rtx *operands, int unsignedp)
5515 if (GET_CODE (operands[2]) == CONST_INT
5516 && INTVAL (operands[2]) > 0
5517 && INTVAL (operands[2]) < 16
5518 && magic_milli[INTVAL (operands[2])])
5520 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5522 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5523 emit
5524 (gen_rtx_PARALLEL
5525 (VOIDmode,
5526 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5527 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5528 SImode,
5529 gen_rtx_REG (SImode, 26),
5530 operands[2])),
5531 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5532 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5533 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5534 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5535 gen_rtx_CLOBBER (VOIDmode, ret))));
5536 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5537 return 1;
5539 return 0;
5542 const char *
5543 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5545 int divisor;
5547 /* If the divisor is a constant, try to use one of the special
5548 opcodes. */
5549 if (GET_CODE (operands[0]) == CONST_INT)
5551 static char buf[100];
5552 divisor = INTVAL (operands[0]);
5553 if (!div_milli[divisor][unsignedp])
5555 div_milli[divisor][unsignedp] = 1;
5556 if (unsignedp)
5557 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5558 else
5559 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5561 if (unsignedp)
5563 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5564 INTVAL (operands[0]));
5565 return output_millicode_call (insn,
5566 gen_rtx_SYMBOL_REF (SImode, buf));
5568 else
5570 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5571 INTVAL (operands[0]));
5572 return output_millicode_call (insn,
5573 gen_rtx_SYMBOL_REF (SImode, buf));
5576 /* Divisor isn't a special constant. */
5577 else
5579 if (unsignedp)
5581 import_milli (divU);
5582 return output_millicode_call (insn,
5583 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5585 else
5587 import_milli (divI);
5588 return output_millicode_call (insn,
5589 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5594 /* Output a $$rem millicode to do mod. */
5596 const char *
5597 output_mod_insn (int unsignedp, rtx insn)
5599 if (unsignedp)
5601 import_milli (remU);
5602 return output_millicode_call (insn,
5603 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5605 else
5607 import_milli (remI);
5608 return output_millicode_call (insn,
5609 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5613 void
5614 output_arg_descriptor (rtx call_insn)
5616 const char *arg_regs[4];
5617 enum machine_mode arg_mode;
5618 rtx link;
5619 int i, output_flag = 0;
5620 int regno;
5622 /* We neither need nor want argument location descriptors for the
5623 64bit runtime environment or the ELF32 environment. */
5624 if (TARGET_64BIT || TARGET_ELF32)
5625 return;
5627 for (i = 0; i < 4; i++)
5628 arg_regs[i] = 0;
5630 /* Specify explicitly that no argument relocations should take place
5631 if using the portable runtime calling conventions. */
5632 if (TARGET_PORTABLE_RUNTIME)
5634 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5635 asm_out_file);
5636 return;
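/* For example (illustrative only), a call passing two ints in %r26
   and %r25 is annotated ".CALL ARGW0=GR,ARGW1=GR". */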
5639 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5640 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5641 link; link = XEXP (link, 1))
5643 rtx use = XEXP (link, 0);
5645 if (! (GET_CODE (use) == USE
5646 && GET_CODE (XEXP (use, 0)) == REG
5647 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5648 continue;
5650 arg_mode = GET_MODE (XEXP (use, 0));
5651 regno = REGNO (XEXP (use, 0));
5652 if (regno >= 23 && regno <= 26)
5654 arg_regs[26 - regno] = "GR";
5655 if (arg_mode == DImode)
5656 arg_regs[25 - regno] = "GR";
5658 else if (regno >= 32 && regno <= 39)
5660 if (arg_mode == SFmode)
5661 arg_regs[(regno - 32) / 2] = "FR";
5662 else
5664 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5665 arg_regs[(regno - 34) / 2] = "FR";
5666 arg_regs[(regno - 34) / 2 + 1] = "FU";
5667 #else
5668 arg_regs[(regno - 34) / 2] = "FU";
5669 arg_regs[(regno - 34) / 2 + 1] = "FR";
5670 #endif
5674 fputs ("\t.CALL ", asm_out_file);
5675 for (i = 0; i < 4; i++)
5677 if (arg_regs[i])
5679 if (output_flag++)
5680 fputc (',', asm_out_file);
5681 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5684 fputc ('\n', asm_out_file);
5687 static enum reg_class
5688 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5689 enum machine_mode mode, secondary_reload_info *sri)
5691 int is_symbolic, regno;
5693 /* Handle the easy stuff first. */
5694 if (class == R1_REGS)
5695 return NO_REGS;
5697 if (REG_P (x))
5699 regno = REGNO (x);
5700 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5701 return NO_REGS;
5703 else
5704 regno = -1;
5706 /* If we have something like (mem (mem (...))), we can safely assume the
5707 inner MEM will end up in a general register after reloading, so there's
5708 no need for a secondary reload. */
5709 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5710 return NO_REGS;
5712 /* Trying to load a constant into a FP register during PIC code
5713 generation requires %r1 as a scratch register. */
5714 if (flag_pic
5715 && (mode == SImode || mode == DImode)
5716 && FP_REG_CLASS_P (class)
5717 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5719 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5720 : CODE_FOR_reload_indi_r1);
5721 return NO_REGS;
5724 /* Profiling showed the PA port spends about 1.3% of its compilation
5725 time in true_regnum from calls inside pa_secondary_reload_class. */
5726 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5727 regno = true_regnum (x);
5729 /* In order to allow 14-bit displacements in integer loads and stores,
5730 we need to prevent reload from generating out of range integer mode
5731 loads and stores to the floating point registers. Previously, we
5732 used to call for a secondary reload and have emit_move_sequence()
5733 fix the instruction sequence. However, reload occasionally wouldn't
5734 generate the reload and we would end up with an invalid REG+D memory
5735 address. So, now we use an intermediate general register for most
5736 memory loads and stores. */
5737 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5738 && GET_MODE_CLASS (mode) == MODE_INT
5739 && FP_REG_CLASS_P (class))
5741 /* Reload passes (mem:SI (reg/f:DI 30 %r30)) when it wants to check
5742 the secondary reload needed for a pseudo. It never passes a
5743 REG+D address. */
5744 if (GET_CODE (x) == MEM)
5746 x = XEXP (x, 0);
5748 /* We don't need an intermediate for indexed and LO_SUM DLT
5749 memory addresses. When INT14_OK_STRICT is true, it might
5750 appear that we could directly allow register indirect
5751 memory addresses. However, this doesn't work because we
5752 don't support SUBREGs in floating-point register copies
5753 and reload doesn't tell us when it's going to use a SUBREG. */
5754 if (IS_INDEX_ADDR_P (x)
5755 || IS_LO_SUM_DLT_ADDR_P (x))
5756 return NO_REGS;
5758 /* Otherwise, we need an intermediate general register. */
5759 return GENERAL_REGS;
5762 /* Request a secondary reload with a general scratch register
5763 for everything else. ??? Could symbolic operands be handled
5764 directly when generating non-pic PA 2.0 code? */
5765 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5766 return NO_REGS;
5769 /* We need a secondary register (GPR) for copies between the SAR
5770 and anything other than a general register. */
5771 if (class == SHIFT_REGS && (regno <= 0 || regno >= 32))
5773 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5774 return NO_REGS;
5777 /* A SAR<->FP register copy requires a secondary register (GPR) as
5778 well as secondary memory. */
5779 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5780 && (REGNO_REG_CLASS (regno) == SHIFT_REGS
5781 && FP_REG_CLASS_P (class)))
5783 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5784 return NO_REGS;
5787 /* Secondary reloads of symbolic operands require %r1 as a scratch
5788 register when we're generating PIC code and when the operand isn't
5789 readonly. */
5790 if (GET_CODE (x) == HIGH)
5791 x = XEXP (x, 0);
5793 /* Profiling has shown GCC spends about 2.6% of its compilation
5794 time in symbolic_operand from calls inside pa_secondary_reload_class.
5795 So, we use an inline copy to avoid useless work. */
5796 switch (GET_CODE (x))
5798 rtx op;
5800 case SYMBOL_REF:
5801 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5802 break;
5803 case LABEL_REF:
5804 is_symbolic = 1;
5805 break;
5806 case CONST:
5807 op = XEXP (x, 0);
5808 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5809 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5810 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5811 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5812 break;
5813 default:
5814 is_symbolic = 0;
5815 break;
5818 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5820 gcc_assert (mode == SImode || mode == DImode);
5821 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5822 : CODE_FOR_reload_indi_r1);
5825 return NO_REGS;
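/* Illustrative example (not in the source): with -fPIC, a reload of
   (const_int 7) in SImode into an FP register class matches the
   constant-into-FP test above, so CODE_FOR_reload_insi_r1 is selected
   and %r1 serves as the required scratch register. */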
5828 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
5829 is only marked as live on entry by df-scan when it is a fixed
5830 register. It isn't a fixed register in the 64-bit runtime,
5831 so we need to mark it here. */
5833 static void
5834 pa_extra_live_on_entry (bitmap regs)
5836 if (TARGET_64BIT)
5837 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
5840 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
5841 to prevent it from being deleted. */
5843 rtx
5844 pa_eh_return_handler_rtx (void)
5846 rtx tmp;
5848 tmp = gen_rtx_PLUS (word_mode, frame_pointer_rtx,
5849 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
5850 tmp = gen_rtx_MEM (word_mode, tmp);
5851 tmp->volatil = 1;
5852 return tmp;
5855 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5856 by invisible reference. As a GCC extension, we also pass anything
5857 with a zero or variable size by reference.
5859 The 64-bit runtime does not describe passing any types by invisible
5860 reference. The internals of GCC can't currently handle passing
5861 empty structures and zero or variable length arrays when they are
5862 not passed entirely on the stack or by reference. Thus, as a GCC
5863 extension, we pass these types by reference. The HP compiler doesn't
5864 support these types, so hopefully there shouldn't be any compatibility
5865 issues. This may have to be revisited when HP releases a C99 compiler
5866 or updates the ABI. */
5868 static bool
5869 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5870 enum machine_mode mode, const_tree type,
5871 bool named ATTRIBUTE_UNUSED)
5873 HOST_WIDE_INT size;
5875 if (type)
5876 size = int_size_in_bytes (type);
5877 else
5878 size = GET_MODE_SIZE (mode);
5880 if (TARGET_64BIT)
5881 return size <= 0;
5882 else
5883 return size <= 0 || size > 8;
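/* A minimal standalone sketch (illustrative, not part of this file) of
   the rules implemented above: the 64-bit runtime forces only zero or
   variable sized objects by reference, while the 32-bit runtime also
   forces anything larger than eight bytes. */
static int
pa_pass_by_reference_example (long size, int is_64bit)
{
  if (is_64bit)
    return size <= 0;             /* zero or variable size only */
  return size <= 0 || size > 8;   /* also anything over eight bytes */
}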
5886 enum direction
5887 function_arg_padding (enum machine_mode mode, const_tree type)
5889 if (mode == BLKmode
5890 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5892 /* Return none if justification is not required. */
5893 if (type
5894 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5895 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5896 return none;
5898 /* The directions set here are ignored when a BLKmode argument larger
5899 than a word is placed in a register. Different code is used for
5900 the stack and registers. This makes it difficult to have a
5901 consistent data representation for both the stack and registers.
5902 For both runtimes, the justification and padding for arguments on
5903 the stack and in registers should be identical. */
5904 if (TARGET_64BIT)
5905 /* The 64-bit runtime specifies left justification for aggregates. */
5906 return upward;
5907 else
5908 /* The 32-bit runtime architecture specifies right justification.
5909 When the argument is passed on the stack, the argument is padded
5910 with garbage on the left. The HP compiler pads with zeros. */
5911 return downward;
5914 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5915 return downward;
5916 else
5917 return none;
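/* Illustrative example (not in the source): a 3-byte BLKmode struct
   passed on the 32-bit stack is right justified, so it occupies the
   three low-order bytes of its word with one byte of pad on the left;
   the same struct under the 64-bit runtime is left justified. */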
5921 /* Do what is necessary for `va_start'. We look at the current function
5922 to determine if stdargs or varargs is used and fill in an initial
5923 va_list. A pointer to this constructor is returned. */
5925 static rtx
5926 hppa_builtin_saveregs (void)
5928 rtx offset, dest;
5929 tree fntype = TREE_TYPE (current_function_decl);
5930 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5931 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5932 != void_type_node)))
5933 ? UNITS_PER_WORD : 0);
5935 if (argadj)
5936 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
5937 else
5938 offset = crtl->args.arg_offset_rtx;
5940 if (TARGET_64BIT)
5942 int i, off;
5944 /* Adjust for varargs/stdarg differences. */
5945 if (argadj)
5946 offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
5947 else
5948 offset = crtl->args.arg_offset_rtx;
5950 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5951 from the incoming arg pointer and growing to larger addresses. */
5952 for (i = 26, off = -64; i >= 19; i--, off += 8)
5953 emit_move_insn (gen_rtx_MEM (word_mode,
5954 plus_constant (arg_pointer_rtx, off)),
5955 gen_rtx_REG (word_mode, i));
5957 /* The incoming args pointer points just beyond the flushback area;
5958 normally this is not a serious concern. However, when we are doing
5959 varargs/stdargs we want to make the arg pointer point to the start
5960 of the incoming argument area. */
5961 emit_move_insn (virtual_incoming_args_rtx,
5962 plus_constant (arg_pointer_rtx, -64));
5964 /* Now return a pointer to the first anonymous argument. */
5965 return copy_to_reg (expand_binop (Pmode, add_optab,
5966 virtual_incoming_args_rtx,
5967 offset, 0, 0, OPTAB_LIB_WIDEN));
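/* Illustrative layout built by the save loop above (offsets are from
   the incoming arg pointer):
       -64: %r26   -56: %r25   -48: %r24   -40: %r23
       -32: %r22   -24: %r21   -16: %r20    -8: %r19  */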
5970 /* Store general registers on the stack. */
5971 dest = gen_rtx_MEM (BLKmode,
5972 plus_constant (crtl->args.internal_arg_pointer,
5973 -16));
5974 set_mem_alias_set (dest, get_varargs_alias_set ());
5975 set_mem_align (dest, BITS_PER_WORD);
5976 move_block_from_reg (23, dest, 4);
5978 /* move_block_from_reg will emit code to store the argument registers
5979 individually as scalar stores.
5981 However, other insns may later load from the same addresses for
5982 a structure load (passing a struct to a varargs routine).
5984 The alias code assumes that such aliasing can never happen, so we
5985 have to keep memory referencing insns from moving up beyond the
5986 last argument register store. So we emit a blockage insn here. */
5987 emit_insn (gen_blockage ());
5989 return copy_to_reg (expand_binop (Pmode, add_optab,
5990 crtl->args.internal_arg_pointer,
5991 offset, 0, 0, OPTAB_LIB_WIDEN));
5994 static void
5995 hppa_va_start (tree valist, rtx nextarg)
5997 nextarg = expand_builtin_saveregs ();
5998 std_expand_builtin_va_start (valist, nextarg);
6001 static tree
6002 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
6004 if (TARGET_64BIT)
6006 /* Args grow upward. We can use the generic routines. */
6007 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6009 else /* !TARGET_64BIT */
6011 tree ptr = build_pointer_type (type);
6012 tree valist_type;
6013 tree t, u;
6014 unsigned int size, ofs;
6015 bool indirect;
6017 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6018 if (indirect)
6020 type = ptr;
6021 ptr = build_pointer_type (type);
6023 size = int_size_in_bytes (type);
6024 valist_type = TREE_TYPE (valist);
6026 /* Args grow down. Not handled by generic routines. */
6028 u = fold_convert (sizetype, size_in_bytes (type));
6029 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6030 t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);
6032 /* Copied from va-pa.h, but we probably don't need to align to
6033 word size, since we generate and preserve that invariant. */
6034 u = size_int (size > 4 ? -8 : -4);
6035 t = fold_convert (sizetype, t);
6036 t = build2 (BIT_AND_EXPR, sizetype, t, u);
6037 t = fold_convert (valist_type, t);
6039 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6041 ofs = (8 - size) % 4;
6042 if (ofs != 0)
6044 u = size_int (ofs);
6045 t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
6048 t = fold_convert (ptr, t);
6049 t = build_va_arg_indirect_ref (t);
6051 if (indirect)
6052 t = build_va_arg_indirect_ref (t);
6054 return t;
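/* A minimal standalone sketch (illustrative, not part of this file) of
   the 32-bit pointer arithmetic built above. It assumes SIZE <= 8;
   larger objects were already replaced by a pointer by the indirect
   case. */
static char *
pa32_va_arg_addr_example (char *ap, unsigned long size)
{
  unsigned long p = (unsigned long) ap - size;   /* args grow down */
  p &= size > 4 ? ~7UL : ~3UL;                   /* the -8/-4 mask above */
  return (char *) (p + (8 - size) % 4);          /* right justify small args */
}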
6058 /* True if MODE is valid for the target. By "valid", we mean able to
6059 be manipulated in non-trivial ways. In particular, this means all
6060 the arithmetic is supported.
6062 Currently, TImode is not valid as the HP 64-bit runtime documentation
6063 doesn't document the alignment and calling conventions for this type.
6064 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6065 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6067 static bool
6068 pa_scalar_mode_supported_p (enum machine_mode mode)
6070 int precision = GET_MODE_PRECISION (mode);
6072 switch (GET_MODE_CLASS (mode))
6074 case MODE_PARTIAL_INT:
6075 case MODE_INT:
6076 if (precision == CHAR_TYPE_SIZE)
6077 return true;
6078 if (precision == SHORT_TYPE_SIZE)
6079 return true;
6080 if (precision == INT_TYPE_SIZE)
6081 return true;
6082 if (precision == LONG_TYPE_SIZE)
6083 return true;
6084 if (precision == LONG_LONG_TYPE_SIZE)
6085 return true;
6086 return false;
6088 case MODE_FLOAT:
6089 if (precision == FLOAT_TYPE_SIZE)
6090 return true;
6091 if (precision == DOUBLE_TYPE_SIZE)
6092 return true;
6093 if (precision == LONG_DOUBLE_TYPE_SIZE)
6094 return true;
6095 return false;
6097 case MODE_DECIMAL_FLOAT:
6098 return false;
6100 default:
6101 gcc_unreachable ();
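/* Illustrative mapping for the checks above, assuming the usual HP-UX
   type sizes (LONG_TYPE_SIZE is 32 in the 32-bit runtime and 64 in the
   64-bit runtime):
       QImode (8)  -> char        SFmode (32) -> float
       HImode (16) -> short       DFmode (64) -> double
       SImode (32) -> int
       DImode (64) -> long long  */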
6105 /* This routine handles all the normal conditional branch sequences we
6106 might need to generate. It handles compare immediate vs compare
6107 register, nullification of delay slots, varying length branches,
6108 negated branches, and all combinations of the above. It returns the
6109 output appropriate to emit the branch corresponding to all given
6110 parameters. */
6112 const char *
6113 output_cbranch (rtx *operands, int negated, rtx insn)
6115 static char buf[100];
6116 int useskip = 0;
6117 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6118 int length = get_attr_length (insn);
6119 int xdelay;
6121 /* A conditional branch to the following instruction (e.g. the delay slot)
6122 is asking for a disaster. This can happen when not optimizing and
6123 when jump optimization fails.
6125 While it is usually safe to emit nothing, this can fail if the
6126 preceding instruction is a nullified branch with an empty delay
6127 slot and the same branch target as this branch. We could check
6128 for this but jump optimization should eliminate nop jumps. It
6129 is always safe to emit a nop. */
6130 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6131 return "nop";
6133 /* The doubleword form of the cmpib instruction doesn't have the LEU
6134 and GTU conditions while the cmpb instruction does. Since we accept
6135 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6136 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6137 operands[2] = gen_rtx_REG (DImode, 0);
6138 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6139 operands[1] = gen_rtx_REG (DImode, 0);
6141 /* If this is a long branch with its delay slot unfilled, set `nullify'
6142 as it can nullify the delay slot and save a nop. */
6143 if (length == 8 && dbr_sequence_length () == 0)
6144 nullify = 1;
6146 /* If this is a short forward conditional branch which did not get
6147 its delay slot filled, the delay slot can still be nullified. */
6148 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6149 nullify = forward_branch_p (insn);
6151 /* A forward branch over a single nullified insn can be done with a
6152 comclr instruction. This avoids a single cycle penalty due to
6153 mis-predicted branch if we fall through (branch not taken). */
6154 if (length == 4
6155 && next_real_insn (insn) != 0
6156 && get_attr_length (next_real_insn (insn)) == 4
6157 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6158 && nullify)
6159 useskip = 1;
6161 switch (length)
6163 /* All short conditional branches except backwards with an unfilled
6164 delay slot. */
6165 case 4:
6166 if (useskip)
6167 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6168 else
6169 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6170 if (GET_MODE (operands[1]) == DImode)
6171 strcat (buf, "*");
6172 if (negated)
6173 strcat (buf, "%B3");
6174 else
6175 strcat (buf, "%S3");
6176 if (useskip)
6177 strcat (buf, " %2,%r1,%%r0");
6178 else if (nullify)
6179 strcat (buf, ",n %2,%r1,%0");
6180 else
6181 strcat (buf, " %2,%r1,%0");
6182 break;
6184 /* All long conditionals. Note a short backward branch with an
6185 unfilled delay slot is treated just like a long backward branch
6186 with an unfilled delay slot. */
6187 case 8:
6188 /* Handle weird backwards branch with a filled delay slot
6189 which is nullified. */
6190 if (dbr_sequence_length () != 0
6191 && ! forward_branch_p (insn)
6192 && nullify)
6194 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6195 if (GET_MODE (operands[1]) == DImode)
6196 strcat (buf, "*");
6197 if (negated)
6198 strcat (buf, "%S3");
6199 else
6200 strcat (buf, "%B3");
6201 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6203 /* Handle short backwards branch with an unfilled delay slot.
6204 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6205 taken and untaken branches. */
6206 else if (dbr_sequence_length () == 0
6207 && ! forward_branch_p (insn)
6208 && INSN_ADDRESSES_SET_P ()
6209 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6210 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6212 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6213 if (GET_MODE (operands[1]) == DImode)
6214 strcat (buf, "*");
6215 if (negated)
6216 strcat (buf, "%B3 %2,%r1,%0%#");
6217 else
6218 strcat (buf, "%S3 %2,%r1,%0%#");
6220 else
6222 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6223 if (GET_MODE (operands[1]) == DImode)
6224 strcat (buf, "*");
6225 if (negated)
6226 strcat (buf, "%S3");
6227 else
6228 strcat (buf, "%B3");
6229 if (nullify)
6230 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6231 else
6232 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6234 break;
6236 default:
6237 /* The reversed conditional branch must branch over one additional
6238 instruction if the delay slot is filled and needs to be extracted
6239 by output_lbranch. If the delay slot is empty or this is a
6240 nullified forward branch, the instruction after the reversed
6241 condition branch must be nullified. */
6242 if (dbr_sequence_length () == 0
6243 || (nullify && forward_branch_p (insn)))
6245 nullify = 1;
6246 xdelay = 0;
6247 operands[4] = GEN_INT (length);
6249 else
6251 xdelay = 1;
6252 operands[4] = GEN_INT (length + 4);
6255 /* Create a reversed conditional branch which branches around
6256 the following insns. */
6257 if (GET_MODE (operands[1]) != DImode)
6259 if (nullify)
6261 if (negated)
6262 strcpy (buf,
6263 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6264 else
6265 strcpy (buf,
6266 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6268 else
6270 if (negated)
6271 strcpy (buf,
6272 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6273 else
6274 strcpy (buf,
6275 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6278 else
6280 if (nullify)
6282 if (negated)
6283 strcpy (buf,
6284 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6285 else
6286 strcpy (buf,
6287 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6289 else
6291 if (negated)
6292 strcpy (buf,
6293 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6294 else
6295 strcpy (buf,
6296 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6300 output_asm_insn (buf, operands);
6301 return output_lbranch (operands[0], insn, xdelay);
6303 return buf;
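/* Illustrative output (not in the source): a short forward SImode
   branch with an unfilled, nullified delay slot takes the length-4
   case above and emits something like
       cmpb,=,n %r25,%r26,.L4
   using the cmp mnemonics on PA 2.0 and the com forms on PA 1.x. */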
6306 /* This routine handles output of long unconditional branches that
6307 exceed the maximum range of a simple branch instruction. Since
6308 we don't have a register available for the branch, we save register
6309 %r1 in the frame marker, load the branch destination DEST into %r1,
6310 execute the branch, and restore %r1 in the delay slot of the branch.
6312 Since long branches may have an insn in the delay slot and the
6313 delay slot is used to restore %r1, we in general need to extract
6314 this insn and execute it before the branch. However, to facilitate
6315 use of this function by conditional branches, we also provide an
6316 option to not extract the delay insn so that it will be emitted
6317 after the long branch. So, if there is an insn in the delay slot,
6318 it is extracted if XDELAY is nonzero.
6320 The lengths of the various long-branch sequences are 20, 16 and 24
6321 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6323 const char *
6324 output_lbranch (rtx dest, rtx insn, int xdelay)
6326 rtx xoperands[2];
6328 xoperands[0] = dest;
6330 /* First, free up the delay slot. */
6331 if (xdelay && dbr_sequence_length () != 0)
6333 /* We can't handle a jump in the delay slot. */
6334 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6336 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6337 optimize, 0, NULL);
6339 /* Now delete the delay insn. */
6340 SET_INSN_DELETED (NEXT_INSN (insn));
6343 /* Output an insn to save %r1. The runtime documentation doesn't
6344 specify whether the "Clean Up" slot in the caller's frame can
6345 be clobbered by the callee. It isn't copied by HP's builtin
6346 alloca, so this suggests that it can be clobbered if necessary.
6347 The "Static Link" location is copied by HP builtin alloca, so
6348 we avoid using it. Using the cleanup slot might be a problem
6349 if we have to interoperate with languages that pass cleanup
6350 information. However, it should be possible to handle these
6351 situations with GCC's asm feature.
6353 The "Current RP" slot is reserved for the called procedure, so
6354 we try to use it when we don't have a frame of our own. It's
6355 rather unlikely that we won't have a frame when we need to emit
6356 a very long branch.
6358 Really the way to go long term is a register scavenger; goto
6359 the target of the jump and find a register which we can use
6360 as a scratch to hold the value in %r1. Then, we wouldn't have
6361 to free up the delay slot or clobber a slot that may be needed
6362 for other purposes. */
6363 if (TARGET_64BIT)
6365 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6366 /* Use the return pointer slot in the frame marker. */
6367 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6368 else
6369 /* Use the slot at -40 in the frame marker since HP builtin
6370 alloca doesn't copy it. */
6371 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6373 else
6375 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6376 /* Use the return pointer slot in the frame marker. */
6377 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6378 else
6379 /* Use the "Clean Up" slot in the frame marker. In GCC,
6380 the only other use of this location is for copying a
6381 floating point double argument from a floating-point
6382 register to two general registers. The copy is done
6383 as an "atomic" operation when outputting a call, so it
6384 won't interfere with our using the location here. */
6385 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6388 if (TARGET_PORTABLE_RUNTIME)
6390 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6391 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6392 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6394 else if (flag_pic)
6396 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6397 if (TARGET_SOM || !TARGET_GAS)
6399 xoperands[1] = gen_label_rtx ();
6400 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6401 targetm.asm_out.internal_label (asm_out_file, "L",
6402 CODE_LABEL_NUMBER (xoperands[1]));
6403 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6405 else
6407 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6408 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6410 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6412 else
6413 /* Now output a very long branch to the original target. */
6414 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6416 /* Now restore the value of %r1 in the delay slot. */
6417 if (TARGET_64BIT)
6419 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6420 return "ldd -16(%%r30),%%r1";
6421 else
6422 return "ldd -40(%%r30),%%r1";
6424 else
6426 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6427 return "ldw -20(%%r30),%%r1";
6428 else
6429 return "ldw -12(%%r30),%%r1";
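/* Illustrative output (not in the source): for non-PIC code with a
   frame, the sequence built above is
       stw %r1,-12(%r30)
       ldil L'target,%r1
       be R'target(%sr4,%r1)
       ldw -12(%r30),%r1    ; delay slot restores %r1
   which is the 16 byte non-PIC case noted in the header comment. */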
6433 /* This routine handles all the branch-on-bit conditional branch sequences we
6434 might need to generate. It handles nullification of delay slots,
6435 varying length branches, negated branches and all combinations of the
6436 above. It returns the appropriate output template to emit the branch. */
6438 const char *
6439 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6441 static char buf[100];
6442 int useskip = 0;
6443 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6444 int length = get_attr_length (insn);
6445 int xdelay;
6447 /* A conditional branch to the following instruction (e.g. the delay slot) is
6448 asking for a disaster. I do not think this can happen as this pattern
6449 is only used when optimizing; jump optimization should eliminate the
6450 jump. But be prepared just in case. */
6452 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6453 return "nop";
6455 /* If this is a long branch with its delay slot unfilled, set `nullify'
6456 as it can nullify the delay slot and save a nop. */
6457 if (length == 8 && dbr_sequence_length () == 0)
6458 nullify = 1;
6460 /* If this is a short forward conditional branch which did not get
6461 its delay slot filled, the delay slot can still be nullified. */
6462 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6463 nullify = forward_branch_p (insn);
6465 /* A forward branch over a single nullified insn can be done with an
6466 extrs instruction. This avoids a single cycle penalty due to
6467 mis-predicted branch if we fall through (branch not taken). */
6469 if (length == 4
6470 && next_real_insn (insn) != 0
6471 && get_attr_length (next_real_insn (insn)) == 4
6472 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6473 && nullify)
6474 useskip = 1;
6476 switch (length)
6479 /* All short conditional branches except backwards with an unfilled
6480 delay slot. */
6481 case 4:
6482 if (useskip)
6483 strcpy (buf, "{extrs,|extrw,s,}");
6484 else
6485 strcpy (buf, "bb,");
6486 if (useskip && GET_MODE (operands[0]) == DImode)
6487 strcpy (buf, "extrd,s,*");
6488 else if (GET_MODE (operands[0]) == DImode)
6489 strcpy (buf, "bb,*");
6490 if ((which == 0 && negated)
6491 || (which == 1 && ! negated))
6492 strcat (buf, ">=");
6493 else
6494 strcat (buf, "<");
6495 if (useskip)
6496 strcat (buf, " %0,%1,1,%%r0");
6497 else if (nullify && negated)
6498 strcat (buf, ",n %0,%1,%3");
6499 else if (nullify && ! negated)
6500 strcat (buf, ",n %0,%1,%2");
6501 else if (! nullify && negated)
6502 strcat (buf, "%0,%1,%3");
6503 else if (! nullify && ! negated)
6504 strcat (buf, " %0,%1,%2");
6505 break;
6507 /* All long conditionals. Note a short backward branch with an
6508 unfilled delay slot is treated just like a long backward branch
6509 with an unfilled delay slot. */
6510 case 8:
6511 /* Handle weird backwards branch with a filled delay slot
6512 which is nullified. */
6513 if (dbr_sequence_length () != 0
6514 && ! forward_branch_p (insn)
6515 && nullify)
6517 strcpy (buf, "bb,");
6518 if (GET_MODE (operands[0]) == DImode)
6519 strcat (buf, "*");
6520 if ((which == 0 && negated)
6521 || (which == 1 && ! negated))
6522 strcat (buf, "<");
6523 else
6524 strcat (buf, ">=");
6525 if (negated)
6526 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6527 else
6528 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6530 /* Handle short backwards branch with an unfilled delay slot.
6531 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6532 taken and untaken branches. */
6533 else if (dbr_sequence_length () == 0
6534 && ! forward_branch_p (insn)
6535 && INSN_ADDRESSES_SET_P ()
6536 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6537 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6539 strcpy (buf, "bb,");
6540 if (GET_MODE (operands[0]) == DImode)
6541 strcat (buf, "*");
6542 if ((which == 0 && negated)
6543 || (which == 1 && ! negated))
6544 strcat (buf, ">=");
6545 else
6546 strcat (buf, "<");
6547 if (negated)
6548 strcat (buf, " %0,%1,%3%#");
6549 else
6550 strcat (buf, " %0,%1,%2%#");
6552 else
6554 if (GET_MODE (operands[0]) == DImode)
6555 strcpy (buf, "extrd,s,*");
6556 else
6557 strcpy (buf, "{extrs,|extrw,s,}");
6558 if ((which == 0 && negated)
6559 || (which == 1 && ! negated))
6560 strcat (buf, "<");
6561 else
6562 strcat (buf, ">=");
6563 if (nullify && negated)
6564 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6565 else if (nullify && ! negated)
6566 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6567 else if (negated)
6568 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6569 else
6570 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6572 break;
6574 default:
6575 /* The reversed conditional branch must branch over one additional
6576 instruction if the delay slot is filled and needs to be extracted
6577 by output_lbranch. If the delay slot is empty or this is a
6578 nullified forward branch, the instruction after the reversed
6579 condition branch must be nullified. */
6580 if (dbr_sequence_length () == 0
6581 || (nullify && forward_branch_p (insn)))
6583 nullify = 1;
6584 xdelay = 0;
6585 operands[4] = GEN_INT (length);
6587 else
6589 xdelay = 1;
6590 operands[4] = GEN_INT (length + 4);
6593 if (GET_MODE (operands[0]) == DImode)
6594 strcpy (buf, "bb,*");
6595 else
6596 strcpy (buf, "bb,");
6597 if ((which == 0 && negated)
6598 || (which == 1 && !negated))
6599 strcat (buf, "<");
6600 else
6601 strcat (buf, ">=");
6602 if (nullify)
6603 strcat (buf, ",n %0,%1,.+%4");
6604 else
6605 strcat (buf, " %0,%1,.+%4");
6606 output_asm_insn (buf, operands);
6607 return output_lbranch (negated ? operands[3] : operands[2],
6608 insn, xdelay);
6610 return buf;
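/* Illustrative output (not in the source): the length-4 case above for
   a word mode operand emits something like
       bb,< %r4,5,.L6
   which branches when bit 5 of %r4 (numbered from the most significant
   bit) is set. */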
6613 /* This routine handles all the branch-on-variable-bit conditional branch
6614 sequences we might need to generate. It handles nullification of delay
6615 slots, varying length branches, negated branches and all combinations
6616 of the above. It returns the appropriate output template to emit the
6617 branch. */
6619 const char *
6620 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6622 static char buf[100];
6623 int useskip = 0;
6624 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6625 int length = get_attr_length (insn);
6626 int xdelay;
6628 /* A conditional branch to the following instruction (e.g. the delay slot) is
6629 asking for a disaster. I do not think this can happen as this pattern
6630 is only used when optimizing; jump optimization should eliminate the
6631 jump. But be prepared just in case. */
6633 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6634 return "nop";
6636 /* If this is a long branch with its delay slot unfilled, set `nullify'
6637 as it can nullify the delay slot and save a nop. */
6638 if (length == 8 && dbr_sequence_length () == 0)
6639 nullify = 1;
6641 /* If this is a short forward conditional branch which did not get
6642 its delay slot filled, the delay slot can still be nullified. */
6643 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6644 nullify = forward_branch_p (insn);
6646 /* A forward branch over a single nullified insn can be done with an
6647 extrs instruction. This avoids a single cycle penalty due to
6648 mis-predicted branch if we fall through (branch not taken). */
6650 if (length == 4
6651 && next_real_insn (insn) != 0
6652 && get_attr_length (next_real_insn (insn)) == 4
6653 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6654 && nullify)
6655 useskip = 1;
6657 switch (length)
6660 /* All short conditional branches except backwards with an unfilled
6661 delay slot. */
6662 case 4:
6663 if (useskip)
6664 strcpy (buf, "{vextrs,|extrw,s,}");
6665 else
6666 strcpy (buf, "{bvb,|bb,}");
6667 if (useskip && GET_MODE (operands[0]) == DImode)
6668 strcpy (buf, "extrd,s,*");
6669 else if (GET_MODE (operands[0]) == DImode)
6670 strcpy (buf, "bb,*");
6671 if ((which == 0 && negated)
6672 || (which == 1 && ! negated))
6673 strcat (buf, ">=");
6674 else
6675 strcat (buf, "<");
6676 if (useskip)
6677 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6678 else if (nullify && negated)
6679 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6680 else if (nullify && ! negated)
6681 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6682 else if (! nullify && negated)
6683 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6684 else if (! nullify && ! negated)
6685 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6686 break;
6688 /* All long conditionals. Note a short backward branch with an
6689 unfilled delay slot is treated just like a long backward branch
6690 with an unfilled delay slot. */
6691 case 8:
6692 /* Handle weird backwards branch with a filled delay slot
6693 which is nullified. */
6694 if (dbr_sequence_length () != 0
6695 && ! forward_branch_p (insn)
6696 && nullify)
6698 strcpy (buf, "{bvb,|bb,}");
6699 if (GET_MODE (operands[0]) == DImode)
6700 strcat (buf, "*");
6701 if ((which == 0 && negated)
6702 || (which == 1 && ! negated))
6703 strcat (buf, "<");
6704 else
6705 strcat (buf, ">=");
6706 if (negated)
6707 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6708 else
6709 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6711 /* Handle short backwards branch with an unfilled delay slot.
6712 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6713 taken and untaken branches. */
6714 else if (dbr_sequence_length () == 0
6715 && ! forward_branch_p (insn)
6716 && INSN_ADDRESSES_SET_P ()
6717 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6718 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6720 strcpy (buf, "{bvb,|bb,}");
6721 if (GET_MODE (operands[0]) == DImode)
6722 strcat (buf, "*");
6723 if ((which == 0 && negated)
6724 || (which == 1 && ! negated))
6725 strcat (buf, ">=");
6726 else
6727 strcat (buf, "<");
6728 if (negated)
6729 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6730 else
6731 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6733 else
6735 strcpy (buf, "{vextrs,|extrw,s,}");
6736 if (GET_MODE (operands[0]) == DImode)
6737 strcpy (buf, "extrd,s,*");
6738 if ((which == 0 && negated)
6739 || (which == 1 && ! negated))
6740 strcat (buf, "<");
6741 else
6742 strcat (buf, ">=");
6743 if (nullify && negated)
6744 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6745 else if (nullify && ! negated)
6746 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6747 else if (negated)
6748 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6749 else
6750 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6752 break;
6754 default:
6755 /* The reversed conditional branch must branch over one additional
6756 instruction if the delay slot is filled and needs to be extracted
6757 by output_lbranch. If the delay slot is empty or this is a
6758 nullified forward branch, the instruction after the reversed
6759 condition branch must be nullified. */
6760 if (dbr_sequence_length () == 0
6761 || (nullify && forward_branch_p (insn)))
6763 nullify = 1;
6764 xdelay = 0;
6765 operands[4] = GEN_INT (length);
6767 else
6769 xdelay = 1;
6770 operands[4] = GEN_INT (length + 4);
6773 if (GET_MODE (operands[0]) == DImode)
6774 strcpy (buf, "bb,*");
6775 else
6776 strcpy (buf, "{bvb,|bb,}");
6777 if ((which == 0 && negated)
6778 || (which == 1 && !negated))
6779 strcat (buf, "<");
6780 else
6781 strcat (buf, ">=");
6782 if (nullify)
6783 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6784 else
6785 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6786 output_asm_insn (buf, operands);
6787 return output_lbranch (negated ? operands[3] : operands[2],
6788 insn, xdelay);
6790 return buf;
6793 /* Return the output template for emitting a dbra type insn.
6795 Note it may perform some output operations on its own before
6796 returning the final output string. */
6797 const char *
6798 output_dbra (rtx *operands, rtx insn, int which_alternative)
6800 int length = get_attr_length (insn);
6802 /* A conditional branch to the following instruction (e.g. the delay slot) is
6803 asking for a disaster. Be prepared! */
6805 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6807 if (which_alternative == 0)
6808 return "ldo %1(%0),%0";
6809 else if (which_alternative == 1)
6811 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6812 output_asm_insn ("ldw -16(%%r30),%4", operands);
6813 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6814 return "{fldws|fldw} -16(%%r30),%0";
6816 else
6818 output_asm_insn ("ldw %0,%4", operands);
6819 return "ldo %1(%4),%4\n\tstw %4,%0";
6823 if (which_alternative == 0)
6825 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6826 int xdelay;
6828 /* If this is a long branch with its delay slot unfilled, set `nullify'
6829 as it can nullify the delay slot and save a nop. */
6830 if (length == 8 && dbr_sequence_length () == 0)
6831 nullify = 1;
6833 /* If this is a short forward conditional branch which did not get
6834 its delay slot filled, the delay slot can still be nullified. */
6835 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6836 nullify = forward_branch_p (insn);
6838 switch (length)
6840 case 4:
6841 if (nullify)
6842 return "addib,%C2,n %1,%0,%3";
6843 else
6844 return "addib,%C2 %1,%0,%3";
6846 case 8:
6847 /* Handle weird backwards branch with a filled delay slot
6848 which is nullified. */
6849 if (dbr_sequence_length () != 0
6850 && ! forward_branch_p (insn)
6851 && nullify)
6852 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6853 /* Handle short backwards branch with an unfilled delay slot.
6854 Using a addb;nop rather than addi;bl saves 1 cycle for both
6855 taken and untaken branches. */
6856 else if (dbr_sequence_length () == 0
6857 && ! forward_branch_p (insn)
6858 && INSN_ADDRESSES_SET_P ()
6859 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6860 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6861 return "addib,%C2 %1,%0,%3%#";
6863 /* Handle normal cases. */
6864 if (nullify)
6865 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6866 else
6867 return "addi,%N2 %1,%0,%0\n\tb %3";
6869 default:
6870 /* The reversed conditional branch must branch over one additional
6871 instruction if the delay slot is filled and needs to be extracted
6872 by output_lbranch. If the delay slot is empty or this is a
6873 nullified forward branch, the instruction after the reversed
6874 condition branch must be nullified. */
6875 if (dbr_sequence_length () == 0
6876 || (nullify && forward_branch_p (insn)))
6878 nullify = 1;
6879 xdelay = 0;
6880 operands[4] = GEN_INT (length);
6882 else
6884 xdelay = 1;
6885 operands[4] = GEN_INT (length + 4);
6888 if (nullify)
6889 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6890 else
6891 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6893 return output_lbranch (operands[3], insn, xdelay);
6897 /* Deal with gross reload from FP register case. */
6898 else if (which_alternative == 1)
6900 /* Move loop counter from FP register to MEM then into a GR,
6901 increment the GR, store the GR into MEM, and finally reload
6902 the FP register from MEM from within the branch's delay slot. */
6903 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6904 operands);
6905 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6906 if (length == 24)
6907 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6908 else if (length == 28)
6909 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6910 else
6912 operands[5] = GEN_INT (length - 16);
6913 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6914 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6915 return output_lbranch (operands[3], insn, 0);
6918 /* Deal with gross reload from memory case. */
6919 else
6921 /* Reload loop counter from memory, the store back to memory
6922 happens in the branch's delay slot. */
6923 output_asm_insn ("ldw %0,%4", operands);
6924 if (length == 12)
6925 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6926 else if (length == 16)
6927 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6928 else
6930 operands[5] = GEN_INT (length - 4);
6931 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6932 return output_lbranch (operands[3], insn, 0);
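/* Illustrative output (not in the source): for a loop counter held in
   a general register, the length-4 case above emits something like
       addib,> -1,%r3,.L2
   which adds -1 to %r3 and branches while the result is positive. */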
6937 /* Return the output template for emitting a movb type insn.
6939 Note it may perform some output operations on its own before
6940 returning the final output string. */
6941 const char *
6942 output_movb (rtx *operands, rtx insn, int which_alternative,
6943 int reverse_comparison)
6945 int length = get_attr_length (insn);
6947 /* A conditional branch to the following instruction (e.g. the delay slot) is
6948 asking for a disaster. Be prepared! */
6950 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6952 if (which_alternative == 0)
6953 return "copy %1,%0";
6954 else if (which_alternative == 1)
6956 output_asm_insn ("stw %1,-16(%%r30)", operands);
6957 return "{fldws|fldw} -16(%%r30),%0";
6959 else if (which_alternative == 2)
6960 return "stw %1,%0";
6961 else
6962 return "mtsar %r1";
6965 /* Support the second variant. */
6966 if (reverse_comparison)
6967 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6969 if (which_alternative == 0)
6971 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6972 int xdelay;
6974 /* If this is a long branch with its delay slot unfilled, set `nullify'
6975 as it can nullify the delay slot and save a nop. */
6976 if (length == 8 && dbr_sequence_length () == 0)
6977 nullify = 1;
6979 /* If this is a short forward conditional branch which did not get
6980 its delay slot filled, the delay slot can still be nullified. */
6981 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6982 nullify = forward_branch_p (insn);
6984 switch (length)
6986 case 4:
6987 if (nullify)
6988 return "movb,%C2,n %1,%0,%3";
6989 else
6990 return "movb,%C2 %1,%0,%3";
6992 case 8:
6993 /* Handle weird backwards branch with a filled delay slot
6994 which is nullified. */
6995 if (dbr_sequence_length () != 0
6996 && ! forward_branch_p (insn)
6997 && nullify)
6998 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7000 /* Handle short backwards branch with an unfilled delay slot.
7001 Using a movb;nop rather than or;bl saves 1 cycle for both
7002 taken and untaken branches. */
7003 else if (dbr_sequence_length () == 0
7004 && ! forward_branch_p (insn)
7005 && INSN_ADDRESSES_SET_P ()
7006 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7007 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7008 return "movb,%C2 %1,%0,%3%#";
7009 /* Handle normal cases. */
7010 if (nullify)
7011 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7012 else
7013 return "or,%N2 %1,%%r0,%0\n\tb %3";
7015 default:
7016 /* The reversed conditional branch must branch over one additional
7017 instruction if the delay slot is filled and needs to be extracted
7018 by output_lbranch. If the delay slot is empty or this is a
7019 nullified forward branch, the instruction after the reversed
7020 condition branch must be nullified. */
7021 if (dbr_sequence_length () == 0
7022 || (nullify && forward_branch_p (insn)))
7024 nullify = 1;
7025 xdelay = 0;
7026 operands[4] = GEN_INT (length);
7028 else
7030 xdelay = 1;
7031 operands[4] = GEN_INT (length + 4);
7034 if (nullify)
7035 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7036 else
7037 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7039 return output_lbranch (operands[3], insn, xdelay);
7042 /* Deal with gross reload for FP destination register case. */
7043 else if (which_alternative == 1)
7045 /* Move source register to MEM, perform the branch test, then
7046 finally load the FP register from MEM from within the branch's
7047 delay slot. */
7048 output_asm_insn ("stw %1,-16(%%r30)", operands);
7049 if (length == 12)
7050 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7051 else if (length == 16)
7052 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7053 else
7055 operands[4] = GEN_INT (length - 4);
7056 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7057 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7058 return output_lbranch (operands[3], insn, 0);
7061 /* Deal with gross reload from memory case. */
7062 else if (which_alternative == 2)
7064 /* Reload loop counter from memory, the store back to memory
7065 happens in the branch's delay slot. */
7066 if (length == 8)
7067 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7068 else if (length == 12)
7069 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7070 else
7072 operands[4] = GEN_INT (length);
7073 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7074 operands);
7075 return output_lbranch (operands[3], insn, 0);
7078 /* Handle SAR as a destination. */
7079 else
7081 if (length == 8)
7082 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7083 else if (length == 12)
7084 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7085 else
7087 operands[4] = GEN_INT (length);
7088 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7089 operands);
7090 return output_lbranch (operands[3], insn, 0);
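/* Illustrative output (not in the source): the length-4 case above
   emits something like
       movb,= %r5,%r3,.L9
   which copies %r5 into %r3 and branches when the moved value is
   zero. */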
7095 /* Copy any FP arguments in INSN into integer registers. */
7096 static void
7097 copy_fp_args (rtx insn)
7099 rtx link;
7100 rtx xoperands[2];
7102 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7104 int arg_mode, regno;
7105 rtx use = XEXP (link, 0);
7107 if (! (GET_CODE (use) == USE
7108 && GET_CODE (XEXP (use, 0)) == REG
7109 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7110 continue;
7112 arg_mode = GET_MODE (XEXP (use, 0));
7113 regno = REGNO (XEXP (use, 0));
7115 /* Is it a floating point register? */
7116 if (regno >= 32 && regno <= 39)
7118 /* Copy the FP register into an integer register via memory. */
7119 if (arg_mode == SFmode)
7121 xoperands[0] = XEXP (use, 0);
7122 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7123 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7124 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7126 else
7128 xoperands[0] = XEXP (use, 0);
7129 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7130 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7131 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7132 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
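/* Illustrative output (not in the source): an SFmode argument in FP
   register 34 (conventionally printed as %fr5) would be copied with
       fstw %fr5,-16(%sr0,%r30)
       ldw -16(%sr0,%r30),%r25
   matching the 26 - (regno - 32) / 2 mapping above. */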
7138 /* Compute length of the FP argument copy sequence for INSN. */
7139 static int
7140 length_fp_args (rtx insn)
7142 int length = 0;
7143 rtx link;
7145 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7147 int arg_mode, regno;
7148 rtx use = XEXP (link, 0);
7150 if (! (GET_CODE (use) == USE
7151 && GET_CODE (XEXP (use, 0)) == REG
7152 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7153 continue;
7155 arg_mode = GET_MODE (XEXP (use, 0));
7156 regno = REGNO (XEXP (use, 0));
7158 /* Is it a floating point register? */
7159 if (regno >= 32 && regno <= 39)
7161 if (arg_mode == SFmode)
7162 length += 8;
7163 else
7164 length += 12;
7168 return length;
7171 /* Return the attribute length for the millicode call instruction INSN.
7172 The length must match the code generated by output_millicode_call.
7173 We include the delay slot in the returned length as it is better to
7174 overestimate the length than to underestimate it. */
7176 int
7177 attr_length_millicode_call (rtx insn)
7179 unsigned long distance = -1;
7180 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7182 if (INSN_ADDRESSES_SET_P ())
7184 distance = (total + insn_current_reference_address (insn));
7185 if (distance < total)
7186 distance = -1;
7189 if (TARGET_64BIT)
7191 if (!TARGET_LONG_CALLS && distance < 7600000)
7192 return 8;
7194 return 20;
7196 else if (TARGET_PORTABLE_RUNTIME)
7197 return 24;
7198 else
7200 if (!TARGET_LONG_CALLS && distance < 240000)
7201 return 8;
7203 if (TARGET_LONG_ABS_CALL && !flag_pic)
7204 return 12;
7206 return 24;
7210 /* INSN is a function call. It may have an unconditional jump
7211 in its delay slot.
7213 CALL_DEST is the routine we are calling. */
7215 const char *
7216 output_millicode_call (rtx insn, rtx call_dest)
7218 int attr_length = get_attr_length (insn);
7219 int seq_length = dbr_sequence_length ();
7220 int distance;
7221 rtx seq_insn;
7222 rtx xoperands[3];
7224 xoperands[0] = call_dest;
7225 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7227 /* Handle the common case where we are sure that the branch will
7228 reach the beginning of the $CODE$ subspace. The within reach
7229 form of the $$sh_func_adrs call has a length of 28. Because
7230 it has an attribute type of multi, it never has a nonzero
7231 sequence length. The length of the $$sh_func_adrs is the same
7232 as certain out of reach PIC calls to other routines. */
7233 if (!TARGET_LONG_CALLS
7234 && ((seq_length == 0
7235 && (attr_length == 12
7236 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7237 || (seq_length != 0 && attr_length == 8)))
7239 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7241 else
7243 if (TARGET_64BIT)
7245 /* It might seem that one insn could be saved by accessing
7246 the millicode function using the linkage table. However,
7247 this doesn't work in shared libraries and other dynamically
7248 loaded objects. Using a pc-relative sequence also avoids
7249 problems related to the implicit use of the gp register. */
7250 output_asm_insn ("b,l .+8,%%r1", xoperands);
7252 if (TARGET_GAS)
7254 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7255 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7257 else
7259 xoperands[1] = gen_label_rtx ();
7260 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7261 targetm.asm_out.internal_label (asm_out_file, "L",
7262 CODE_LABEL_NUMBER (xoperands[1]));
7263 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7266 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7268 else if (TARGET_PORTABLE_RUNTIME)
7270 /* Pure portable runtime doesn't allow be/ble; we also don't
7271 have PIC support in the assembler/linker, so this sequence
7272 is needed. */
7274 /* Get the address of our target into %r1. */
7275 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7276 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7278 /* Get our return address into %r31. */
7279 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7280 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7282 /* Jump to our target address in %r1. */
7283 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7285 else if (!flag_pic)
7287 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7288 if (TARGET_PA_20)
7289 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7290 else
7291 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7293 else
7295 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7296 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7298 if (TARGET_SOM || !TARGET_GAS)
7300 /* The HP assembler can generate relocations for the
7301 difference of two symbols. GAS can do this for a
7302 millicode symbol but not an arbitrary external
7303 symbol when generating SOM output. */
7304 xoperands[1] = gen_label_rtx ();
7305 targetm.asm_out.internal_label (asm_out_file, "L",
7306 CODE_LABEL_NUMBER (xoperands[1]));
7307 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7308 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7310 else
7312 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7313 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7314 xoperands);
7317 /* Jump to our target address in %r1. */
7318 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7322 if (seq_length == 0)
7323 output_asm_insn ("nop", xoperands);
7325 /* We are done if there isn't a jump in the delay slot. */
7326 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7327 return "";
7329 /* This call has an unconditional jump in its delay slot. */
7330 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7332 /* See if the return address can be adjusted. Use the containing
7333 sequence insn's address. */
7334 if (INSN_ADDRESSES_SET_P ())
7336 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7337 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7338 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7340 if (VAL_14_BITS_P (distance))
7342 xoperands[1] = gen_label_rtx ();
7343 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7344 targetm.asm_out.internal_label (asm_out_file, "L",
7345 CODE_LABEL_NUMBER (xoperands[1]));
7347 else
7348 /* ??? This branch may not reach its target. */
7349 output_asm_insn ("nop\n\tb,n %0", xoperands);
7351 else
7352 /* ??? This branch may not reach its target. */
7353 output_asm_insn ("nop\n\tb,n %0", xoperands);
7355 /* Delete the jump. */
7356 SET_INSN_DELETED (NEXT_INSN (insn));
7358 return "";
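/* Illustrative output (not in the source): a short-reach 32-bit call
   to a representative millicode routine takes the common case above
   and emits something like
       bl $$mulI,%r31
       nop
   with %r31 (not %r2) holding the millicode return address. */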
7361 /* Return the attribute length of the call instruction INSN. The SIBCALL
7362 flag indicates whether INSN is a regular call or a sibling call. The
7363 length returned must be longer than the code actually generated by
7364 output_call. Since branch shortening is done before delay branch
7365 sequencing, there is no way to determine whether or not the delay
7366 slot will be filled during branch shortening. Even when the delay
7367 slot is filled, we may have to add a nop if the delay slot contains
7368 a branch that can't reach its target. Thus, we always have to include
7369 the delay slot in the length estimate. This used to be done in
7370 pa_adjust_insn_length but we do it here now as some sequences always
7371 fill the delay slot and we can save four bytes in the estimate for
7372 these sequences. */
7374 int
7375 attr_length_call (rtx insn, int sibcall)
7377 int local_call;
7378 rtx call_dest;
7379 tree call_decl;
7380 int length = 0;
7381 rtx pat = PATTERN (insn);
7382 unsigned long distance = -1;
7384 if (INSN_ADDRESSES_SET_P ())
7386 unsigned long total;
7388 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7389 distance = (total + insn_current_reference_address (insn));
7390 if (distance < total)
7391 distance = -1;
7394 /* Determine if this is a local call. */
7395 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7396 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7397 else
7398 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7400 call_decl = SYMBOL_REF_DECL (call_dest);
7401 local_call = call_decl && targetm.binds_local_p (call_decl);
7403 /* pc-relative branch. */
7404 if (!TARGET_LONG_CALLS
7405 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7406 || distance < 240000))
7407 length += 8;
7409 /* 64-bit plabel sequence. */
7410 else if (TARGET_64BIT && !local_call)
7411 length += sibcall ? 28 : 24;
7413 /* non-pic long absolute branch sequence. */
7414 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7415 length += 12;
7417 /* long pc-relative branch sequence. */
7418 else if (TARGET_LONG_PIC_SDIFF_CALL
7419 || (TARGET_GAS && !TARGET_SOM
7420 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7422 length += 20;
7424 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && flag_pic)
7425 length += 8;
7428 /* 32-bit plabel sequence. */
7429 else
7431 length += 32;
7433 if (TARGET_SOM)
7434 length += length_fp_args (insn);
7436 if (flag_pic)
7437 length += 4;
7439 if (!TARGET_PA_20)
7441 if (!sibcall)
7442 length += 8;
7444 if (!TARGET_NO_SPACE_REGS && flag_pic)
7445 length += 8;
7449 return length;
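/* Illustrative example (not in the source): a 32-bit PIC plabel call
   on PA 2.0 adds up as 32 (base) + 4 (PIC) = 36 bytes, plus the FP
   argument copy bytes when generating SOM output; PA 1.x adds a
   further 8 for a non-sibcall and 8 more when space registers are in
   use with PIC. */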
7452 /* INSN is a function call. It may have an unconditional jump
7453 in its delay slot.
7455 CALL_DEST is the routine we are calling. */
7457 const char *
7458 output_call (rtx insn, rtx call_dest, int sibcall)
7460 int delay_insn_deleted = 0;
7461 int delay_slot_filled = 0;
7462 int seq_length = dbr_sequence_length ();
7463 tree call_decl = SYMBOL_REF_DECL (call_dest);
7464 int local_call = call_decl && targetm.binds_local_p (call_decl);
7465 rtx xoperands[2];
7467 xoperands[0] = call_dest;
7469 /* Handle the common case where we're sure that the branch will reach
7470 the beginning of the "$CODE$" subspace. This is the beginning of
7471 the current function if we are in a named section. */
7472 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7474 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7475 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7477 else
7479 if (TARGET_64BIT && !local_call)
7481 /* ??? As far as I can tell, the HP linker doesn't support the
7482 long pc-relative sequence described in the 64-bit runtime
7483 architecture. So, we use a slightly longer indirect call. */
7484 xoperands[0] = get_deferred_plabel (call_dest);
7485 xoperands[1] = gen_label_rtx ();
7487 /* If this isn't a sibcall, we put the load of %r27 into the
7488 delay slot. We can't do this in a sibcall as we don't
7489 have a second call-clobbered scratch register available. */
7490 if (seq_length != 0
7491 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7492 && !sibcall)
7494 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7495 optimize, 0, NULL);
7497 /* Now delete the delay insn. */
7498 SET_INSN_DELETED (NEXT_INSN (insn));
7499 delay_insn_deleted = 1;
7502 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7503 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7504 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7506 if (sibcall)
7508 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7509 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7510 output_asm_insn ("bve (%%r1)", xoperands);
7512 else
7514 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7515 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7516 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7517 delay_slot_filled = 1;
7520 else
7522 int indirect_call = 0;
7524 /* Emit a long call. There are several different sequences
7525 of increasing length and complexity. In most cases,
7526 they don't allow an instruction in the delay slot. */
7527 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7528 && !TARGET_LONG_PIC_SDIFF_CALL
7529 && !(TARGET_GAS && !TARGET_SOM
7530 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7531 && !TARGET_64BIT)
7532 indirect_call = 1;
7534 if (seq_length != 0
7535 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7536 && !sibcall
7537 && (!TARGET_PA_20 || indirect_call))
7539 /* A non-jump insn in the delay slot. By definition we can
7540 emit this insn before the call (and in fact before argument
7541 relocating). */
7542 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7543 NULL);
7545 /* Now delete the delay insn. */
7546 SET_INSN_DELETED (NEXT_INSN (insn));
7547 delay_insn_deleted = 1;
7550 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7552 /* This is the best sequence for making long calls in
7553 non-pic code. Unfortunately, GNU ld doesn't provide
7554 the stub needed for external calls, and GAS's support
7555 for this with the SOM linker is buggy. It is safe
7556 to use this for local calls. */
7557 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7558 if (sibcall)
7559 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7560 else
7562 if (TARGET_PA_20)
7563 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7564 xoperands);
7565 else
7566 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7568 output_asm_insn ("copy %%r31,%%r2", xoperands);
7569 delay_slot_filled = 1;
7572 else
7574 if (TARGET_LONG_PIC_SDIFF_CALL)
7576 /* The HP assembler and linker can handle relocations
7577 for the difference of two symbols. The HP assembler
7578 recognizes the sequence as a pc-relative call and
7579 the linker provides stubs when needed. */
7580 xoperands[1] = gen_label_rtx ();
7581 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7582 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7583 targetm.asm_out.internal_label (asm_out_file, "L",
7584 CODE_LABEL_NUMBER (xoperands[1]));
7585 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7587 else if (TARGET_GAS && !TARGET_SOM
7588 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7590 /* GAS currently can't generate the relocations that
7591 are needed for the SOM linker under HP-UX using this
7592 sequence. The GNU linker doesn't generate the stubs
7593 that are needed for external calls on TARGET_ELF32
7594 with this sequence. For now, we have to use a
7595 longer plabel sequence when using GAS. */
7596 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7597 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7598 xoperands);
7599 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7600 xoperands);
7602 else
7604 /* Emit a long plabel-based call sequence. This is
7605 essentially an inline implementation of $$dyncall.
7606 We don't actually try to call $$dyncall as this is
7607 as difficult as calling the function itself. */
7608 xoperands[0] = get_deferred_plabel (call_dest);
7609 xoperands[1] = gen_label_rtx ();
7611 /* Since the call is indirect, FP arguments in registers
7612 need to be copied to the general registers. Then, the
7613 argument relocation stub will copy them back. */
7614 if (TARGET_SOM)
7615 copy_fp_args (insn);
7617 if (flag_pic)
7619 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7620 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7621 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7623 else
7625 output_asm_insn ("addil LR'%0-$global$,%%r27",
7626 xoperands);
7627 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7628 xoperands);
7631 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7632 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7633 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7634 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7636 if (!sibcall && !TARGET_PA_20)
7638 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7639 if (TARGET_NO_SPACE_REGS)
7640 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7641 else
7642 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7646 if (TARGET_PA_20)
7648 if (sibcall)
7649 output_asm_insn ("bve (%%r1)", xoperands);
7650 else
7652 if (indirect_call)
7654 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7655 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7656 delay_slot_filled = 1;
7658 else
7659 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7662 else
7664 if (!TARGET_NO_SPACE_REGS && flag_pic)
7665 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7666 xoperands);
7668 if (sibcall)
7670 if (TARGET_NO_SPACE_REGS || !flag_pic)
7671 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7672 else
7673 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7675 else
7677 if (TARGET_NO_SPACE_REGS || !flag_pic)
7678 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7679 else
7680 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7682 if (indirect_call)
7683 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7684 else
7685 output_asm_insn ("copy %%r31,%%r2", xoperands);
7686 delay_slot_filled = 1;
7693 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7694 output_asm_insn ("nop", xoperands);
7696 /* We are done if there isn't a jump in the delay slot. */
7697 if (seq_length == 0
7698 || delay_insn_deleted
7699 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7700 return "";
7702 /* A sibcall should never have a branch in the delay slot. */
7703 gcc_assert (!sibcall);
7705 /* This call has an unconditional jump in its delay slot. */
7706 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7708 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7710 /* See if the return address can be adjusted. Use the containing
7711 sequence insn's address. */
7712 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7713 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7714 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7716 if (VAL_14_BITS_P (distance))
7718 xoperands[1] = gen_label_rtx ();
7719 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7720 targetm.asm_out.internal_label (asm_out_file, "L",
7721 CODE_LABEL_NUMBER (xoperands[1]));
7723 else
7724 output_asm_insn ("nop\n\tb,n %0", xoperands);
7726 else
7727 output_asm_insn ("b,n %0", xoperands);
7729 /* Delete the jump. */
7730 SET_INSN_DELETED (NEXT_INSN (insn));
7732 return "";
7735 /* Return the attribute length of the indirect call instruction INSN.
7736 The length must match the code generated by output_indirect_call.
7737 The returned length includes the delay slot. Currently, the delay
7738 slot of an indirect call sequence is not exposed and it is used by
7739 the sequence itself. */
7741 int
7742 attr_length_indirect_call (rtx insn)
7744 unsigned long distance = -1;
7745 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7747 if (INSN_ADDRESSES_SET_P ())
7749 distance = (total + insn_current_reference_address (insn));
7750 if (distance < total)
7751 distance = -1;
7754 if (TARGET_64BIT)
7755 return 12;
7757 if (TARGET_FAST_INDIRECT_CALLS
7758 || (!TARGET_PORTABLE_RUNTIME
7759 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7760 || distance < 240000)))
7761 return 8;
7763 if (flag_pic)
7764 return 24;
7766 if (TARGET_PORTABLE_RUNTIME)
7767 return 20;
7769 /* Out of reach, can use ble. */
7770 return 12;
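/* An informal cross-check of the lengths above against the sequences
   emitted by output_indirect_call below (each PA instruction is 4
   bytes, so the attribute length is 4 times the instruction count;
   the .CALL directives contribute no bytes):

     8   {bl|b,l} $$dyncall + copy                    2 insns
     12  ldil/ble/copy, or the 64-bit ldd/bve,l/ldd   3 insns
     20  portable runtime ldil/ldo/blr/bv,n/nop       5 insns
     24  long PIC sequence ending in blr/bv,n/nop     6 insns  */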
7773 const char *
7774 output_indirect_call (rtx insn, rtx call_dest)
7776 rtx xoperands[1];
7778 if (TARGET_64BIT)
7780 xoperands[0] = call_dest;
7781 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7782 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7783 return "";
7786 /* First the special case for kernels, level 0 systems, etc. */
7787 if (TARGET_FAST_INDIRECT_CALLS)
7788 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7790 /* Now the normal case -- we can reach $$dyncall directly or
7791 we're sure that we can get there via a long-branch stub.
7793 No need to check target flags as the length uniquely identifies
7794 the remaining cases. */
7795 if (attr_length_indirect_call (insn) == 8)
7797 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7798 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7799 variant of the B,L instruction can't be used on the SOM target. */
7800 if (TARGET_PA_20 && !TARGET_SOM)
7801 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7802 else
7803 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7806 /* Long millicode call, but we are not generating PIC or portable runtime
7807 code. */
7808 if (attr_length_indirect_call (insn) == 12)
7809 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7811 /* Long millicode call for portable runtime. */
7812 if (attr_length_indirect_call (insn) == 20)
7813 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7815 /* We need a long PIC call to $$dyncall. */
7816 xoperands[0] = NULL_RTX;
7817 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7818 if (TARGET_SOM || !TARGET_GAS)
7820 xoperands[0] = gen_label_rtx ();
7821 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7822 targetm.asm_out.internal_label (asm_out_file, "L",
7823 CODE_LABEL_NUMBER (xoperands[0]));
7824 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7826 else
7828 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7829 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7830 xoperands);
7832 output_asm_insn ("blr %%r0,%%r2", xoperands);
7833 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7834 return "";
7837 /* Return the total length of the save and restore instructions needed for
7838 the data linkage table pointer (i.e., the PIC register) across the call
7839 instruction INSN. No-return calls do not require a save and restore.
7840 In addition, we may be able to avoid the save and restore for calls
7841 within the same translation unit. */
7843 int
7844 attr_length_save_restore_dltp (rtx insn)
7846 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7847 return 0;
7849 return 8;
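/* The 8 bytes presumably cover one 4-byte insn to save the PIC
   register %r19 before the call and one 4-byte insn to restore it
   afterwards; the stack slot used is determined elsewhere.  */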
7852 /* In HPUX 8.0's shared library scheme, special relocations are needed
7853 for function labels if they might be passed to a function
7854 in a shared library (because shared libraries don't live in code
7855 space), and special magic is needed to construct their address. */
7857 void
7858 hppa_encode_label (rtx sym)
7860 const char *str = XSTR (sym, 0);
7861 int len = strlen (str) + 1;
7862 char *newstr, *p;
7864 p = newstr = alloca (len + 1);
7865 *p++ = '@';
7866 strcpy (p, str);
7868 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
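/* For example, the symbol "foo" becomes "@foo" here; the '@' prefix
   is what FUNCTION_NAME_P tests for, and pa_strip_name_encoding below
   removes it again before the name is actually written out.  */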
7871 static void
7872 pa_encode_section_info (tree decl, rtx rtl, int first)
7874 int old_referenced = 0;
7876 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
7877 old_referenced
7878 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
7880 default_encode_section_info (decl, rtl, first);
7882 if (first && TEXT_SPACE_P (decl))
7884 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7885 if (TREE_CODE (decl) == FUNCTION_DECL)
7886 hppa_encode_label (XEXP (rtl, 0));
7888 else if (old_referenced)
7889 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
7892 /* This is sort of the inverse of pa_encode_section_info. */
7894 static const char *
7895 pa_strip_name_encoding (const char *str)
7897 str += (*str == '@');
7898 str += (*str == '*');
7899 return str;
7902 int
7903 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7905 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7908 /* Returns 1 if OP is a function label involved in a simple addition
7909 with a constant. Used to keep certain patterns from matching
7910 during instruction combination. */
7911 int
7912 is_function_label_plus_const (rtx op)
7914 /* Strip off any CONST. */
7915 if (GET_CODE (op) == CONST)
7916 op = XEXP (op, 0);
7918 return (GET_CODE (op) == PLUS
7919 && function_label_operand (XEXP (op, 0), Pmode)
7920 && GET_CODE (XEXP (op, 1)) == CONST_INT);
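/* An RTL sketch (symbol name hypothetical) of what the predicate
   above accepts -- the address four bytes past the start of the
   encoded function "@foo":

     (const (plus (symbol_ref "@foo") (const_int 4)))

   The CONST wrapper is optional; a bare PLUS matches as well.  */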
7923 /* Output assembly code for a thunk to FUNCTION. */
7925 static void
7926 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7927 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7928 tree function)
7930 static unsigned int current_thunk_number;
7931 int val_14 = VAL_14_BITS_P (delta);
7932 int nbytes = 0;
7933 char label[16];
7934 rtx xoperands[4];
7936 xoperands[0] = XEXP (DECL_RTL (function), 0);
7937 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7938 xoperands[2] = GEN_INT (delta);
7940 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7941 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7943 /* Output the thunk. We know that the function is in the same
7944 translation unit (i.e., the same space) as the thunk, and that
7945 thunks are output after their method. Thus, we don't need an
7946 external branch to reach the function. With SOM and GAS,
7947 functions and thunks are effectively in different sections.
7948 Thus, we can always use an IA-relative branch and the linker
7949 will add a long branch stub if necessary.
7951 However, we have to be careful when generating PIC code on the
7952 SOM port to ensure that the sequence does not transfer to an
7953 import stub for the target function as this could clobber the
7954 return value saved at SP-24. This would also apply to the
7955 32-bit linux port if the multi-space model is implemented. */
7956 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7957 && !(flag_pic && TREE_PUBLIC (function))
7958 && (TARGET_GAS || last_address < 262132))
7959 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7960 && ((targetm.have_named_sections
7961 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7962 /* The GNU 64-bit linker has rather poor stub management.
7963 So, we use a long branch from thunks that aren't in
7964 the same section as the target function. */
7965 && ((!TARGET_64BIT
7966 && (DECL_SECTION_NAME (thunk_fndecl)
7967 != DECL_SECTION_NAME (function)))
7968 || ((DECL_SECTION_NAME (thunk_fndecl)
7969 == DECL_SECTION_NAME (function))
7970 && last_address < 262132)))
7971 || (!targetm.have_named_sections && last_address < 262132))))
7973 if (!val_14)
7974 output_asm_insn ("addil L'%2,%%r26", xoperands);
7976 output_asm_insn ("b %0", xoperands);
7978 if (val_14)
7980 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7981 nbytes += 8;
7983 else
7985 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7986 nbytes += 12;
7989 else if (TARGET_64BIT)
7991 /* We only have one call-clobbered scratch register, so we can't
7992 make use of the delay slot if delta doesn't fit in 14 bits. */
7993 if (!val_14)
7995 output_asm_insn ("addil L'%2,%%r26", xoperands);
7996 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7999 output_asm_insn ("b,l .+8,%%r1", xoperands);
8001 if (TARGET_GAS)
8003 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8004 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8006 else
8008 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8009 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8012 if (val_14)
8014 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8015 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8016 nbytes += 20;
8018 else
8020 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8021 nbytes += 24;
8024 else if (TARGET_PORTABLE_RUNTIME)
8026 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8027 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8029 if (!val_14)
8030 output_asm_insn ("addil L'%2,%%r26", xoperands);
8032 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8034 if (val_14)
8036 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8037 nbytes += 16;
8039 else
8041 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8042 nbytes += 20;
8045 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8047 /* The function is accessible from outside this module. The only
8048 way to avoid an import stub between the thunk and function is to
8049 call the function directly with an indirect sequence similar to
8050 that used by $$dyncall. This is possible because $$dyncall acts
8051 as the import stub in an indirect call. */
8052 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8053 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8054 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8055 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8056 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8057 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8058 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8059 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8060 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8062 if (!val_14)
8064 output_asm_insn ("addil L'%2,%%r26", xoperands);
8065 nbytes += 4;
8068 if (TARGET_PA_20)
8070 output_asm_insn ("bve (%%r22)", xoperands);
8071 nbytes += 36;
8073 else if (TARGET_NO_SPACE_REGS)
8075 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8076 nbytes += 36;
8078 else
8080 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8081 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8082 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8083 nbytes += 44;
8086 if (val_14)
8087 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8088 else
8089 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8091 else if (flag_pic)
8093 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8095 if (TARGET_SOM || !TARGET_GAS)
8097 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8098 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8100 else
8102 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8103 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8106 if (!val_14)
8107 output_asm_insn ("addil L'%2,%%r26", xoperands);
8109 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8111 if (val_14)
8113 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8114 nbytes += 20;
8116 else
8118 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8119 nbytes += 24;
8122 else
8124 if (!val_14)
8125 output_asm_insn ("addil L'%2,%%r26", xoperands);
8127 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8128 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8130 if (val_14)
8132 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8133 nbytes += 12;
8135 else
8137 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8138 nbytes += 16;
8142 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8144 if (TARGET_SOM && TARGET_GAS)
8146 /* We're done with this subspace except possibly for some additional
8147 debug information. Forget that we are in this subspace to ensure
8148 that the next function is output in its own subspace. */
8149 in_section = NULL;
8150 cfun->machine->in_nsubspa = 2;
8153 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8155 switch_to_section (data_section);
8156 output_asm_insn (".align 4", xoperands);
8157 ASM_OUTPUT_LABEL (file, label);
8158 output_asm_insn (".word P'%0", xoperands);
8161 current_thunk_number++;
8162 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8163 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8164 last_address += nbytes;
8165 update_total_code_bytes (nbytes);
8168 /* Only direct calls to static functions are allowed to be sibling (tail)
8169 call optimized.
8171 This restriction is necessary because some linker generated stubs will
8172 store return pointers into rp' in some cases which might clobber a
8173 live value already in rp'.
8175 In a sibcall the current function and the target function share stack
8176 space. Thus if the path to the current function and the path to the
8177 target function save a value in rp', they save the value into the
8178 same stack slot, which has undesirable consequences.
8180 Because of the deferred binding nature of shared libraries any function
8181 with external scope could be in a different load module and thus require
8182 rp' to be saved when calling that function. So sibcall optimizations
8183 can only be safe for static functions.
8185 Note that GCC never needs return value relocations, so we don't have to
8186 worry about static calls with return value relocations (which require
8187 saving rp').
8189 It is safe to perform a sibcall optimization when the target function
8190 will never return. */
8191 static bool
8192 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8194 if (TARGET_PORTABLE_RUNTIME)
8195 return false;
8197 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8198 single subspace mode and the call is not indirect. As far as I know,
8199 there is no operating system support for the multiple subspace mode.
8200 It might be possible to support indirect calls if we didn't use
8201 $$dyncall (see the indirect sequence generated in output_call). */
8202 if (TARGET_ELF32)
8203 return (decl != NULL_TREE);
8205 /* Sibcalls are not ok because the arg pointer register is not a fixed
8206 register. This prevents the sibcall optimization from occurring. In
8207 addition, there are problems with stub placement using GNU ld. This
8208 is because a normal sibcall branch uses a 17-bit relocation while
8209 a regular call branch uses a 22-bit relocation. As a result, more
8210 care needs to be taken in the placement of long-branch stubs. */
8211 if (TARGET_64BIT)
8212 return false;
8214 /* Sibcalls are only ok within a translation unit. */
8215 return (decl && !TREE_PUBLIC (decl));
8218 /* ??? Addition is not commutative on the PA due to the weird implicit
8219 space register selection rules for memory addresses. Therefore, we
8220 don't consider a + b == b + a, as this might be inside a MEM. */
8221 static bool
8222 pa_commutative_p (const_rtx x, int outer_code)
8224 return (COMMUTATIVE_P (x)
8225 && (TARGET_NO_SPACE_REGS
8226 || (outer_code != UNKNOWN && outer_code != MEM)
8227 || GET_CODE (x) != PLUS));
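/* Example: a (plus (reg) (reg)) that might sit inside a MEM is not
   reported as commutative, because the space register for the access
   is selected from the base operand; swapping base and index could
   select a different space register and hence a different address.
   When TARGET_NO_SPACE_REGS is set there is only one space, so the
   swap is harmless.  */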
8230 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8231 use in fmpyadd instructions. */
8232 int
8233 fmpyaddoperands (rtx *operands)
8235 enum machine_mode mode = GET_MODE (operands[0]);
8237 /* Must be a floating point mode. */
8238 if (mode != SFmode && mode != DFmode)
8239 return 0;
8241 /* All modes must be the same. */
8242 if (! (mode == GET_MODE (operands[1])
8243 && mode == GET_MODE (operands[2])
8244 && mode == GET_MODE (operands[3])
8245 && mode == GET_MODE (operands[4])
8246 && mode == GET_MODE (operands[5])))
8247 return 0;
8249 /* All operands must be registers. */
8250 if (! (GET_CODE (operands[1]) == REG
8251 && GET_CODE (operands[2]) == REG
8252 && GET_CODE (operands[3]) == REG
8253 && GET_CODE (operands[4]) == REG
8254 && GET_CODE (operands[5]) == REG))
8255 return 0;
8257 /* Only 2 real operands to the addition. One of the input operands must
8258 be the same as the output operand. */
8259 if (! rtx_equal_p (operands[3], operands[4])
8260 && ! rtx_equal_p (operands[3], operands[5]))
8261 return 0;
8263 /* Inout operand of add cannot conflict with any operands from multiply. */
8264 if (rtx_equal_p (operands[3], operands[0])
8265 || rtx_equal_p (operands[3], operands[1])
8266 || rtx_equal_p (operands[3], operands[2]))
8267 return 0;
8269 /* The multiply cannot feed into the addition operands. */
8270 if (rtx_equal_p (operands[4], operands[0])
8271 || rtx_equal_p (operands[5], operands[0]))
8272 return 0;
8274 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8275 if (mode == SFmode
8276 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8277 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8278 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8279 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8280 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8281 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8282 return 0;
8284 /* Passed. Operands are suitable for fmpyadd. */
8285 return 1;
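/* A sketch of an SFmode operand set that satisfies all of the checks
   above (register choices illustrative only, drawn from the upper
   FP registers %fr16-%fr31):

     operands[0..2] = fr22, fr23, fr24   multiply result and inputs
     operands[3..5] = fr25, fr25, fr26   add result/inout and input

   operands[3] matches operands[4], does not overlap the multiply, and
   neither add input is the multiply result, so the two insns can be
   fused into a single fmpyadd.  */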
8288 #if !defined(USE_COLLECT2)
8289 static void
8290 pa_asm_out_constructor (rtx symbol, int priority)
8292 if (!function_label_operand (symbol, VOIDmode))
8293 hppa_encode_label (symbol);
8295 #ifdef CTORS_SECTION_ASM_OP
8296 default_ctor_section_asm_out_constructor (symbol, priority);
8297 #else
8298 # ifdef TARGET_ASM_NAMED_SECTION
8299 default_named_section_asm_out_constructor (symbol, priority);
8300 # else
8301 default_stabs_asm_out_constructor (symbol, priority);
8302 # endif
8303 #endif
8306 static void
8307 pa_asm_out_destructor (rtx symbol, int priority)
8309 if (!function_label_operand (symbol, VOIDmode))
8310 hppa_encode_label (symbol);
8312 #ifdef DTORS_SECTION_ASM_OP
8313 default_dtor_section_asm_out_destructor (symbol, priority);
8314 #else
8315 # ifdef TARGET_ASM_NAMED_SECTION
8316 default_named_section_asm_out_destructor (symbol, priority);
8317 # else
8318 default_stabs_asm_out_destructor (symbol, priority);
8319 # endif
8320 #endif
8322 #endif
8324 /* This function places uninitialized global data in the bss section.
8325 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8326 function on the SOM port to prevent uninitialized global data from
8327 being placed in the data section. */
8329 void
8330 pa_asm_output_aligned_bss (FILE *stream,
8331 const char *name,
8332 unsigned HOST_WIDE_INT size,
8333 unsigned int align)
8335 switch_to_section (bss_section);
8336 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8338 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8339 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8340 #endif
8342 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8343 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8344 #endif
8346 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8347 ASM_OUTPUT_LABEL (stream, name);
8348 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8351 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8352 that doesn't allow the alignment of global common storage to be directly
8353 specified. The SOM linker aligns common storage based on the rounded
8354 value of the NUM_BYTES parameter in the .comm directive. It's not
8355 possible to use the .align directive as it doesn't affect the alignment
8356 of the label associated with a .comm directive. */
8358 void
8359 pa_asm_output_aligned_common (FILE *stream,
8360 const char *name,
8361 unsigned HOST_WIDE_INT size,
8362 unsigned int align)
8364 unsigned int max_common_align;
8366 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8367 if (align > max_common_align)
8369 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8370 "for global common data. Using %u",
8371 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8372 align = max_common_align;
8375 switch_to_section (bss_section);
8377 assemble_name (stream, name);
8378 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8379 MAX (size, align / BITS_PER_UNIT));
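/* Example: requesting 24 bytes of common storage for "foo" with
   64-bit (8-byte) alignment emits

     foo	.comm 24

   since MAX (24, 8) == 24.  The SOM linker then derives the alignment
   from the rounded size, which is why no .align directive is issued
   here.  */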
8382 /* We can't use .comm for local common storage as the SOM linker effectively
8383 treats the symbol as universal and uses the same storage for local symbols
8384 with the same name in different object files. The .block directive
8385 reserves an uninitialized block of storage. However, it's not common
8386 storage. Fortunately, GCC never requests common storage with the same
8387 name in any given translation unit. */
8389 void
8390 pa_asm_output_aligned_local (FILE *stream,
8391 const char *name,
8392 unsigned HOST_WIDE_INT size,
8393 unsigned int align)
8395 switch_to_section (bss_section);
8396 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8398 #ifdef LOCAL_ASM_OP
8399 fprintf (stream, "%s", LOCAL_ASM_OP);
8400 assemble_name (stream, name);
8401 fprintf (stream, "\n");
8402 #endif
8404 ASM_OUTPUT_LABEL (stream, name);
8405 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
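/* Illustrative output for a 16-byte local object "buf" with 8-byte
   alignment (the .LOCAL line is hypothetical and appears only when
   LOCAL_ASM_OP is defined):

	.align 8
	.LOCAL buf
     buf:
	.block 16  */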
8408 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8409 use in fmpysub instructions. */
8410 int
8411 fmpysuboperands (rtx *operands)
8413 enum machine_mode mode = GET_MODE (operands[0]);
8415 /* Must be a floating point mode. */
8416 if (mode != SFmode && mode != DFmode)
8417 return 0;
8419 /* All modes must be the same. */
8420 if (! (mode == GET_MODE (operands[1])
8421 && mode == GET_MODE (operands[2])
8422 && mode == GET_MODE (operands[3])
8423 && mode == GET_MODE (operands[4])
8424 && mode == GET_MODE (operands[5])))
8425 return 0;
8427 /* All operands must be registers. */
8428 if (! (GET_CODE (operands[1]) == REG
8429 && GET_CODE (operands[2]) == REG
8430 && GET_CODE (operands[3]) == REG
8431 && GET_CODE (operands[4]) == REG
8432 && GET_CODE (operands[5]) == REG))
8433 return 0;
8435 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8436 operation, so operands[4] must be the same as operands[3]. */
8437 if (! rtx_equal_p (operands[3], operands[4]))
8438 return 0;
8440 /* The multiply cannot feed into the subtraction. */
8441 if (rtx_equal_p (operands[5], operands[0]))
8442 return 0;
8444 /* Inout operand of sub cannot conflict with any operands from multiply. */
8445 if (rtx_equal_p (operands[3], operands[0])
8446 || rtx_equal_p (operands[3], operands[1])
8447 || rtx_equal_p (operands[3], operands[2]))
8448 return 0;
8450 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8451 if (mode == SFmode
8452 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8453 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8454 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8455 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8456 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8457 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8458 return 0;
8460 /* Passed. Operands are suitable for fmpysub. */
8461 return 1;
8464 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8465 constants for shadd instructions. */
8466 int
8467 shadd_constant_p (int val)
8469 if (val == 2 || val == 4 || val == 8)
8470 return 1;
8471 else
8472 return 0;
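/* These are exactly the scale factors of the PA shift-and-add
   instructions: 2 maps to sh1add, 4 to sh2add and 8 to sh3add.  For
   example, "sh2add %r25,%r26,%r28" computes %r28 = %r25 * 4 + %r26
   in a single instruction.  */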
8475 /* Return 1 if OP is valid as a base or index register in a
8476 REG+REG address. */
8478 int
8479 borx_reg_operand (rtx op, enum machine_mode mode)
8481 if (GET_CODE (op) != REG)
8482 return 0;
8484 /* We must reject virtual registers as the only expressions that
8485 can be instantiated are REG and REG+CONST. */
8486 if (op == virtual_incoming_args_rtx
8487 || op == virtual_stack_vars_rtx
8488 || op == virtual_stack_dynamic_rtx
8489 || op == virtual_outgoing_args_rtx
8490 || op == virtual_cfa_rtx)
8491 return 0;
8493 /* While it's always safe to index off the frame pointer, it's not
8494 profitable to do so when the frame pointer is being eliminated. */
8495 if (!reload_completed
8496 && flag_omit_frame_pointer
8497 && !cfun->calls_alloca
8498 && op == frame_pointer_rtx)
8499 return 0;
8501 return register_operand (op, mode);
8504 /* Return 1 if this operand is anything other than a hard register. */
8506 int
8507 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8509 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8512 /* Return 1 if INSN branches forward. Should be using insn_addresses
8513 to avoid walking through all the insns... */
8514 static int
8515 forward_branch_p (rtx insn)
8517 rtx label = JUMP_LABEL (insn);
8519 while (insn)
8521 if (insn == label)
8522 break;
8523 else
8524 insn = NEXT_INSN (insn);
8527 return (insn == label);
8530 /* Return 1 if OP is an equality comparison, else return 0. */
8531 int
8532 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8534 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8537 /* Return 1 if INSN is in the delay slot of a call instruction. */
8538 int
8539 jump_in_call_delay (rtx insn)
8542 if (GET_CODE (insn) != JUMP_INSN)
8543 return 0;
8545 if (PREV_INSN (insn)
8546 && PREV_INSN (PREV_INSN (insn))
8547 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8549 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8551 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8552 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8555 else
8556 return 0;
8559 /* Output an unconditional move and branch insn. */
8561 const char *
8562 output_parallel_movb (rtx *operands, rtx insn)
8564 int length = get_attr_length (insn);
8566 /* These are the cases in which we win. */
8567 if (length == 4)
8568 return "mov%I1b,tr %1,%0,%2";
8570 /* None of the following cases win, but they don't lose either. */
8571 if (length == 8)
8573 if (dbr_sequence_length () == 0)
8575 /* Nothing in the delay slot, fake it by putting the combined
8576 insn (the copy or add) in the delay slot of a bl. */
8577 if (GET_CODE (operands[1]) == CONST_INT)
8578 return "b %2\n\tldi %1,%0";
8579 else
8580 return "b %2\n\tcopy %1,%0";
8582 else
8584 /* Something in the delay slot, but we've got a long branch. */
8585 if (GET_CODE (operands[1]) == CONST_INT)
8586 return "ldi %1,%0\n\tb %2";
8587 else
8588 return "copy %1,%0\n\tb %2";
8592 if (GET_CODE (operands[1]) == CONST_INT)
8593 output_asm_insn ("ldi %1,%0", operands);
8594 else
8595 output_asm_insn ("copy %1,%0", operands);
8596 return output_lbranch (operands[2], insn, 1);
8599 /* Output an unconditional add and branch insn. */
8601 const char *
8602 output_parallel_addb (rtx *operands, rtx insn)
8604 int length = get_attr_length (insn);
8606 /* To make life easy we want operand0 to be the shared input/output
8607 operand and operand1 to be the readonly operand. */
8608 if (operands[0] == operands[1])
8609 operands[1] = operands[2];
8611 /* These are the cases in which we win. */
8612 if (length == 4)
8613 return "add%I1b,tr %1,%0,%3";
8615 /* None of the following cases win, but they don't lose either. */
8616 if (length == 8)
8618 if (dbr_sequence_length () == 0)
8619 /* Nothing in the delay slot, fake it by putting the combined
8620 insn (the copy or add) in the delay slot of a bl. */
8621 return "b %3\n\tadd%I1 %1,%0,%0";
8622 else
8623 /* Something in the delay slot, but we've got a long branch. */
8624 return "add%I1 %1,%0,%0\n\tb %3";
8627 output_asm_insn ("add%I1 %1,%0,%0", operands);
8628 return output_lbranch (operands[3], insn, 1);
8631 /* Return nonzero if INSN (a jump insn) immediately follows a call
8632 to a named function. This is used to avoid filling the delay slot
8633 of the jump since it can usually be eliminated by modifying RP in
8634 the delay slot of the call. */
8636 int
8637 following_call (rtx insn)
8639 if (! TARGET_JUMP_IN_DELAY)
8640 return 0;
8642 /* Find the previous real insn, skipping NOTEs. */
8643 insn = PREV_INSN (insn);
8644 while (insn && GET_CODE (insn) == NOTE)
8645 insn = PREV_INSN (insn);
8647 /* Check for CALL_INSNs and millicode calls. */
8648 if (insn
8649 && ((GET_CODE (insn) == CALL_INSN
8650 && get_attr_type (insn) != TYPE_DYNCALL)
8651 || (GET_CODE (insn) == INSN
8652 && GET_CODE (PATTERN (insn)) != SEQUENCE
8653 && GET_CODE (PATTERN (insn)) != USE
8654 && GET_CODE (PATTERN (insn)) != CLOBBER
8655 && get_attr_type (insn) == TYPE_MILLI)))
8656 return 1;
8658 return 0;
8661 /* We use this hook to perform a PA specific optimization which is difficult
8662 to do in earlier passes.
8664 We want the delay slots of branches within jump tables to be filled.
8665 None of the compiler passes at the moment even has the notion that a
8666 PA jump table doesn't contain addresses, but instead contains actual
8667 instructions!
8669 Because we actually jump into the table, the addresses of each entry
8670 must stay constant in relation to the beginning of the table (which
8671 itself must stay constant relative to the instruction to jump into
8672 it). I don't believe we can guarantee earlier passes of the compiler
8673 will adhere to those rules.
8675 So, late in the compilation process we find all the jump tables, and
8676 expand them into real code -- e.g. each entry in the jump table vector
8677 will get an appropriate label followed by a jump to the final target.
8679 Reorg and the final jump pass can then optimize these branches and
8680 fill their delay slots. We end up with smaller, more efficient code.
8682 The jump instructions within the table are special; we must be able
8683 to identify them during assembly output (if the jumps don't get filled
8684 we need to emit a nop rather than nullifying the delay slot). We
8685 identify jumps in switch tables by using insns with the attribute
8686 type TYPE_BTABLE_BRANCH.
8688 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8689 insns. This serves two purposes, first it prevents jump.c from
8690 noticing that the last N entries in the table jump to the instruction
8691 immediately after the table and deleting the jumps. Second, those
8692 insns mark where we should emit .begin_brtab and .end_brtab directives
8693 when using GAS (allows for better link time optimizations). */
8695 static void
8696 pa_reorg (void)
8698 rtx insn;
8700 remove_useless_addtr_insns (1);
8702 if (pa_cpu < PROCESSOR_8000)
8703 pa_combine_instructions ();
8706 /* This is fairly cheap, so always run it if optimizing. */
8707 if (optimize > 0 && !TARGET_BIG_SWITCH)
8709 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8710 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8712 rtx pattern, tmp, location, label;
8713 unsigned int length, i;
8715 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8716 if (GET_CODE (insn) != JUMP_INSN
8717 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8718 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8719 continue;
8721 /* Emit marker for the beginning of the branch table. */
8722 emit_insn_before (gen_begin_brtab (), insn);
8724 pattern = PATTERN (insn);
8725 location = PREV_INSN (insn);
8726 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8728 for (i = 0; i < length; i++)
8730 /* Emit a label before each jump to keep jump.c from
8731 removing this code. */
8732 tmp = gen_label_rtx ();
8733 LABEL_NUSES (tmp) = 1;
8734 emit_label_after (tmp, location);
8735 location = NEXT_INSN (location);
8737 if (GET_CODE (pattern) == ADDR_VEC)
8738 label = XEXP (XVECEXP (pattern, 0, i), 0);
8739 else
8740 label = XEXP (XVECEXP (pattern, 1, i), 0);
8742 tmp = gen_short_jump (label);
8744 /* Emit the jump itself. */
8745 tmp = emit_jump_insn_after (tmp, location);
8746 JUMP_LABEL (tmp) = label;
8747 LABEL_NUSES (label)++;
8748 location = NEXT_INSN (location);
8750 /* Emit a BARRIER after the jump. */
8751 emit_barrier_after (location);
8752 location = NEXT_INSN (location);
8755 /* Emit marker for the end of the branch table. */
8756 emit_insn_before (gen_end_brtab (), location);
8757 location = NEXT_INSN (location);
8758 emit_barrier_after (location);
8760 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8761 delete_insn (insn);
8764 else
8766 /* Still need brtab marker insns. FIXME: the presence of these
8767 markers disables output of the branch table to readonly memory,
8768 and any alignment directives that might be needed. Possibly,
8769 the begin_brtab insn should be output before the label for the
8770 table. This doesn't matter at the moment since the tables are
8771 always output in the text section. */
8772 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8774 /* Find an ADDR_VEC insn. */
8775 if (GET_CODE (insn) != JUMP_INSN
8776 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8777 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8778 continue;
8780 /* Now generate markers for the beginning and end of the
8781 branch table. */
8782 emit_insn_before (gen_begin_brtab (), insn);
8783 emit_insn_after (gen_end_brtab (), insn);
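/* A sketch of the expansion performed above (label names
   hypothetical): a jump table such as

     (addr_vec [(label_ref L$100) (label_ref L$200)])

   becomes real branch instructions bracketed by the brtab markers,

	.begin_brtab
     L$900:	b L$100		; delay slot filled later, or a nop
     L$901:	b L$200
	.end_brtab

   so each entry presumably stays a fixed 8 bytes (branch plus its
   delay slot) and the offsets used to jump into the table remain
   valid.  */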
8788 /* The PA has a number of odd instructions which can perform multiple
8789 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8790 it may be profitable to combine two instructions into one instruction
8791 with two outputs. It's not profitable on PA2.0 machines because the
8792 two outputs would take two slots in the reorder buffers.
8794 This routine finds instructions which can be combined and combines
8795 them. We only support some of the potential combinations, and we
8796 only try common ways to find suitable instructions.
8798 * addb can add two registers or a register and a small integer
8799 and jump to a nearby (+-8k) location. Normally the jump to the
8800 nearby location is conditional on the result of the add, but by
8801 using the "true" condition we can make the jump unconditional.
8802 Thus addb can perform two independent operations in one insn.
8804 * movb is similar to addb in that it can perform a reg->reg
8805 or small immediate->reg copy and jump to a nearby (+-8k) location.
8807 * fmpyadd and fmpysub can perform a FP multiply and either an
8808 FP add or FP sub if the operands of the multiply and add/sub are
8809 independent (there are other minor restrictions). Note both
8810 the fmpy and fadd/fsub can in theory move to better spots according
8811 to data dependencies, but for now we require the fmpy stay at a
8812 fixed location.
8814 * Many of the memory operations can perform pre & post updates
8815 of index registers. GCC's pre/post increment/decrement addressing
8816 is far too simple to take advantage of all the possibilities. This
8817 pass may not be suitable since those insns may not be independent.
8819 * comclr can compare two ints or an int and a register, nullify
8820 the following instruction and zero some other register. This
8821 is more difficult to use as it's harder to find an insn which
8822 will generate a comclr than finding something like an unconditional
8823 branch. (conditional moves & long branches create comclr insns).
8825 * Most arithmetic operations can conditionally skip the next
8826 instruction. They can be viewed as "perform this operation
8827 and conditionally jump to this nearby location" (where nearby
8828 is an insn away). These are difficult to use due to the
8829 branch length restrictions. */
8831 static void
8832 pa_combine_instructions (void)
8834 rtx anchor, new;
8836 /* This can get expensive since the basic algorithm is on the
8837 order of O(n^2) (or worse). Only do it for -O2 or higher
8838 levels of optimization. */
8839 if (optimize < 2)
8840 return;
8842 /* Walk down the list of insns looking for "anchor" insns which
8843 may be combined with "floating" insns. As the name implies,
8844 "anchor" instructions don't move, while "floating" insns may
8845 move around. */
8846 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8847 new = make_insn_raw (new);
8849 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8851 enum attr_pa_combine_type anchor_attr;
8852 enum attr_pa_combine_type floater_attr;
8854 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8855 Also ignore any special USE insns. */
8856 if ((GET_CODE (anchor) != INSN
8857 && GET_CODE (anchor) != JUMP_INSN
8858 && GET_CODE (anchor) != CALL_INSN)
8859 || GET_CODE (PATTERN (anchor)) == USE
8860 || GET_CODE (PATTERN (anchor)) == CLOBBER
8861 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8862 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8863 continue;
8865 anchor_attr = get_attr_pa_combine_type (anchor);
8866 /* See if anchor is an insn suitable for combination. */
8867 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8868 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8869 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8870 && ! forward_branch_p (anchor)))
8872 rtx floater;
8874 for (floater = PREV_INSN (anchor);
8875 floater;
8876 floater = PREV_INSN (floater))
8878 if (GET_CODE (floater) == NOTE
8879 || (GET_CODE (floater) == INSN
8880 && (GET_CODE (PATTERN (floater)) == USE
8881 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8882 continue;
8884 /* Anything except a regular INSN will stop our search. */
8885 if (GET_CODE (floater) != INSN
8886 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8887 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8889 floater = NULL_RTX;
8890 break;
8893 /* See if FLOATER is suitable for combination with the
8894 anchor. */
8895 floater_attr = get_attr_pa_combine_type (floater);
8896 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8897 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8898 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8899 && floater_attr == PA_COMBINE_TYPE_FMPY))
8901 /* If ANCHOR and FLOATER can be combined, then we're
8902 done with this pass. */
8903 if (pa_can_combine_p (new, anchor, floater, 0,
8904 SET_DEST (PATTERN (floater)),
8905 XEXP (SET_SRC (PATTERN (floater)), 0),
8906 XEXP (SET_SRC (PATTERN (floater)), 1)))
8907 break;
8910 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8911 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8913 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8915 if (pa_can_combine_p (new, anchor, floater, 0,
8916 SET_DEST (PATTERN (floater)),
8917 XEXP (SET_SRC (PATTERN (floater)), 0),
8918 XEXP (SET_SRC (PATTERN (floater)), 1)))
8919 break;
8921 else
8923 if (pa_can_combine_p (new, anchor, floater, 0,
8924 SET_DEST (PATTERN (floater)),
8925 SET_SRC (PATTERN (floater)),
8926 SET_SRC (PATTERN (floater))))
8927 break;
8932 /* If we didn't find anything on the backwards scan try forwards. */
8933 if (!floater
8934 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8935 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8937 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8939 if (GET_CODE (floater) == NOTE
8940 || (GET_CODE (floater) == INSN
8941 && (GET_CODE (PATTERN (floater)) == USE
8942 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8944 continue;
8946 /* Anything except a regular INSN will stop our search. */
8947 if (GET_CODE (floater) != INSN
8948 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8949 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8951 floater = NULL_RTX;
8952 break;
8955 /* See if FLOATER is suitable for combination with the
8956 anchor. */
8957 floater_attr = get_attr_pa_combine_type (floater);
8958 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8959 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8960 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8961 && floater_attr == PA_COMBINE_TYPE_FMPY))
8963 /* If ANCHOR and FLOATER can be combined, then we're
8964 done with this pass. */
8965 if (pa_can_combine_p (new, anchor, floater, 1,
8966 SET_DEST (PATTERN (floater)),
8967 XEXP (SET_SRC (PATTERN (floater)),
8969 XEXP (SET_SRC (PATTERN (floater)),
8970 1)))
8971 break;
8976 /* FLOATER will be nonzero if we found a suitable floating
8977 insn for combination with ANCHOR. */
8978 if (floater
8979 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8980 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8982 /* Emit the new instruction and delete the old anchor. */
8983 emit_insn_before (gen_rtx_PARALLEL
8984 (VOIDmode,
8985 gen_rtvec (2, PATTERN (anchor),
8986 PATTERN (floater))),
8987 anchor);
8989 SET_INSN_DELETED (anchor);
8991 /* Emit a special USE insn for FLOATER, then delete
8992 the floating insn. */
8993 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8994 delete_insn (floater);
8996 continue;
8998 else if (floater
8999 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9001 rtx temp;
9002 /* Emit the new_jump instruction and delete the old anchor. */
9003 temp
9004 = emit_jump_insn_before (gen_rtx_PARALLEL
9005 (VOIDmode,
9006 gen_rtvec (2, PATTERN (anchor),
9007 PATTERN (floater))),
9008 anchor);
9010 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9011 SET_INSN_DELETED (anchor);
9013 /* Emit a special USE insn for FLOATER, then delete
9014 the floating insn. */
9015 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9016 delete_insn (floater);
9017 continue;
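/* For instance (a hedged sketch), pairing a backward unconditional
   branch with a preceding add -- possibly with unrelated insns in
   between, as checked by pa_can_combine_p -- turns

	add %r4,%r5,%r4
	b L$0012

   into the single "branch on true" form emitted by
   output_parallel_addb,

	addb,tr %r5,%r4,L$0012

   which performs the add and the jump in one insn; movb handles the
   analogous copy case.  */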
9023 static int
9024 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
9025 rtx src1, rtx src2)
9027 int insn_code_number;
9028 rtx start, end;
9030 /* Create a PARALLEL with the patterns of ANCHOR and
9031 FLOATER, try to recognize it, then test constraints
9032 for the resulting pattern.
9034 If the pattern doesn't match or the constraints
9035 aren't met keep searching for a suitable floater
9036 insn. */
9037 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
9038 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
9039 INSN_CODE (new) = -1;
9040 insn_code_number = recog_memoized (new);
9041 if (insn_code_number < 0
9042 || (extract_insn (new), ! constrain_operands (1)))
9043 return 0;
9045 if (reversed)
9047 start = anchor;
9048 end = floater;
9050 else
9052 start = floater;
9053 end = anchor;
9056 /* There are up to three operands to consider. One
9057 output and two inputs.
9059 The output must not be used between FLOATER & ANCHOR
9060 exclusive. The inputs must not be set between
9061 FLOATER and ANCHOR exclusive. */
9063 if (reg_used_between_p (dest, start, end))
9064 return 0;
9066 if (reg_set_between_p (src1, start, end))
9067 return 0;
9069 if (reg_set_between_p (src2, start, end))
9070 return 0;
9072 /* If we get here, then everything is good. */
9073 return 1;
9076 /* Return nonzero if references for INSN are delayed.
9078 Millicode insns are actually function calls with some special
9079 constraints on arguments and register usage.
9081 Millicode calls always expect their arguments in the integer argument
9082 registers, and always return their result in %r29 (ret1). They
9083 are expected to clobber their arguments, %r1, %r29, and the return
9084 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9086 This function tells reorg that the references to arguments and
9087 millicode calls do not appear to happen until after the millicode call.
9088 This allows reorg to put insns which set the argument registers into the
9089 delay slot of the millicode call -- thus they act more like traditional
9090 CALL_INSNs.
9092 Note we cannot consider side effects of the insn to be delayed because
9093 the branch and link insn will clobber the return pointer. If we happened
9094 to use the return pointer in the delay slot of the call, then we lose.
9096 get_attr_type will try to recognize the given insn, so make sure to
9097 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9098 in particular. */
9099 int
9100 insn_refs_are_delayed (rtx insn)
9102 return ((GET_CODE (insn) == INSN
9103 && GET_CODE (PATTERN (insn)) != SEQUENCE
9104 && GET_CODE (PATTERN (insn)) != USE
9105 && GET_CODE (PATTERN (insn)) != CLOBBER
9106 && get_attr_type (insn) == TYPE_MILLI));
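/* For example, a 32-bit signed division expands into a call to the
   $$divI millicode routine: the operands go in the first two argument
   registers (%r26 and %r25), the result comes back in %r29, and with
   the hook above reorg is free to move the argument setup into the
   call's delay slot.  */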
9109 /* On the HP-PA the value is found in register(s) 28(-29), unless
9110 the mode is SF or DF. Then the value is returned in fr4 (32).
9112 This must perform the same promotions as PROMOTE_MODE, else
9113 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
9115 Small structures must be returned in a PARALLEL on PA64 in order
9116 to match the HP Compiler ABI. */
9118 rtx
9119 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
9121 enum machine_mode valmode;
9123 if (AGGREGATE_TYPE_P (valtype)
9124 || TREE_CODE (valtype) == COMPLEX_TYPE
9125 || TREE_CODE (valtype) == VECTOR_TYPE)
9127 if (TARGET_64BIT)
9129 /* Aggregates with a size less than or equal to 128 bits are
9130 returned in GR 28(-29). They are left justified. The pad
9131 bits are undefined. Larger aggregates are returned in
9132 memory. */
9133 rtx loc[2];
9134 int i, offset = 0;
9135 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9137 for (i = 0; i < ub; i++)
9139 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9140 gen_rtx_REG (DImode, 28 + i),
9141 GEN_INT (offset));
9142 offset += 8;
9145 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9147 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9149 /* Aggregates 5 to 8 bytes in size are returned in general
9150 registers r28-r29 in the same manner as other non
9151 floating-point objects. The data is right-justified and
9152 zero-extended to 64 bits. This is opposite to the normal
9153 justification used on big endian targets and requires
9154 special treatment. */
9155 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9156 gen_rtx_REG (DImode, 28), const0_rtx);
9157 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9161 if ((INTEGRAL_TYPE_P (valtype)
9162 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9163 || POINTER_TYPE_P (valtype))
9164 valmode = word_mode;
9165 else
9166 valmode = TYPE_MODE (valtype);
9168 if (TREE_CODE (valtype) == REAL_TYPE
9169 && !AGGREGATE_TYPE_P (valtype)
9170 && TYPE_MODE (valtype) != TFmode
9171 && !TARGET_SOFT_FLOAT)
9172 return gen_rtx_REG (valmode, 32);
9174 return gen_rtx_REG (valmode, 28);
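/* Illustrative results (hard float): a "short" is promoted to
   word_mode and returned in %r28; a "double" is returned in %fr4
   (hard register 32); and on TARGET_64BIT a 12-byte struct comes
   back as a PARALLEL spanning %r28 and %r29.  */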
9177 /* Return the location of a parameter that is passed in a register or NULL
9178 if the parameter has any component that is passed in memory.
9180 This is new code and will be pushed into the net sources after
9181 further testing.
9183 ??? We might want to restructure this so that it looks more like other
9184 ports. */
9185 rtx
9186 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9187 int named ATTRIBUTE_UNUSED)
9189 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9190 int alignment = 0;
9191 int arg_size;
9192 int fpr_reg_base;
9193 int gpr_reg_base;
9194 rtx retval;
9196 if (mode == VOIDmode)
9197 return NULL_RTX;
9199 arg_size = FUNCTION_ARG_SIZE (mode, type);
9201 /* If this arg would be passed partially or totally on the stack, then
9202 this routine should return zero. pa_arg_partial_bytes will
9203 handle arguments which are split between regs and stack slots if
9204 the ABI mandates split arguments. */
9205 if (!TARGET_64BIT)
9207 /* The 32-bit ABI does not split arguments. */
9208 if (cum->words + arg_size > max_arg_words)
9209 return NULL_RTX;
9211 else
9213 if (arg_size > 1)
9214 alignment = cum->words & 1;
9215 if (cum->words + alignment >= max_arg_words)
9216 return NULL_RTX;
9219 /* The 32bit ABIs and the 64bit ABIs are rather different,
9220 particularly in their handling of FP registers. We might
9221 be able to cleverly share code between them, but I'm not
9222 going to bother in the hope that splitting them up results
9223 in code that is more easily understood. */
9225 if (TARGET_64BIT)
9227 /* Advance the base registers to their current locations.
9229 Remember, gprs grow towards smaller register numbers while
9230 fprs grow to higher register numbers. Also remember that
9231 although FP regs are 32-bit addressable, we pretend that
9232 the registers are 64-bits wide. */
9233 gpr_reg_base = 26 - cum->words;
9234 fpr_reg_base = 32 + cum->words;
9236 /* Arguments wider than one word and small aggregates need special
9237 treatment. */
9238 if (arg_size > 1
9239 || mode == BLKmode
9240 || (type && (AGGREGATE_TYPE_P (type)
9241 || TREE_CODE (type) == COMPLEX_TYPE
9242 || TREE_CODE (type) == VECTOR_TYPE)))
9244 /* Double-extended precision (80-bit), quad-precision (128-bit)
9245 and aggregates including complex numbers are aligned on
9246 128-bit boundaries. The first eight 64-bit argument slots
9247 are associated one-to-one, with general registers r26
9248 through r19, and also with floating-point registers fr4
9249 through fr11. Arguments larger than one word are always
9250 passed in general registers.
9252 Using a PARALLEL with a word mode register results in left
9253 justified data on a big-endian target. */
9255 rtx loc[8];
9256 int i, offset = 0, ub = arg_size;
9258 /* Align the base register. */
9259 gpr_reg_base -= alignment;
9261 ub = MIN (ub, max_arg_words - cum->words - alignment);
9262 for (i = 0; i < ub; i++)
9264 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9265 gen_rtx_REG (DImode, gpr_reg_base),
9266 GEN_INT (offset));
9267 gpr_reg_base -= 1;
9268 offset += 8;
9271 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9274 else
9276 /* If the argument is larger than a word, then we know precisely
9277 which registers we must use. */
9278 if (arg_size > 1)
9280 if (cum->words)
9282 gpr_reg_base = 23;
9283 fpr_reg_base = 38;
9285 else
9287 gpr_reg_base = 25;
9288 fpr_reg_base = 34;
9291 /* Structures 5 to 8 bytes in size are passed in the general
9292 registers in the same manner as other non floating-point
9293 objects. The data is right-justified and zero-extended
9294 to 64 bits. This is opposite to the normal justification
9295 used on big endian targets and requires special treatment.
9296 We now define BLOCK_REG_PADDING to pad these objects.
9297 Aggregates, complex and vector types are passed in the same
9298 manner as structures. */
9299 if (mode == BLKmode
9300 || (type && (AGGREGATE_TYPE_P (type)
9301 || TREE_CODE (type) == COMPLEX_TYPE
9302 || TREE_CODE (type) == VECTOR_TYPE)))
9304 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9305 gen_rtx_REG (DImode, gpr_reg_base),
9306 const0_rtx);
9307 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9310 else
9312 /* We have a single word (32 bits). A simple computation
9313 will get us the register #s we need. */
9314 gpr_reg_base = 26 - cum->words;
9315 fpr_reg_base = 32 + 2 * cum->words;
9319 /* Determine if the argument needs to be passed in both general and
9320 floating point registers. */
9321 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9322 /* If we are doing soft-float with portable runtime, then there
9323 is no need to worry about FP regs. */
9324 && !TARGET_SOFT_FLOAT
9325 /* The parameter must be some kind of scalar float, else we just
9326 pass it in integer registers. */
9327 && GET_MODE_CLASS (mode) == MODE_FLOAT
9328 /* The target function must not have a prototype. */
9329 && cum->nargs_prototype <= 0
9330 /* libcalls do not need to pass items in both FP and general
9331 registers. */
9332 && type != NULL_TREE
9333 /* All this hair applies to "outgoing" args only. This includes
9334 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9335 && !cum->incoming)
9336 /* Also pass outgoing floating arguments in both registers in indirect
9337 calls with the 32 bit ABI and the HP assembler since there is no
9338 way to specify the argument locations in static functions. */
9339 || (!TARGET_64BIT
9340 && !TARGET_GAS
9341 && !cum->incoming
9342 && cum->indirect
9343 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9345 retval
9346 = gen_rtx_PARALLEL
9347 (mode,
9348 gen_rtvec (2,
9349 gen_rtx_EXPR_LIST (VOIDmode,
9350 gen_rtx_REG (mode, fpr_reg_base),
9351 const0_rtx),
9352 gen_rtx_EXPR_LIST (VOIDmode,
9353 gen_rtx_REG (mode, gpr_reg_base),
9354 const0_rtx)));
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
          /* Indirect calls in the normal 32-bit ABI require all arguments
             to be passed in general registers.  */
          || (!TARGET_PORTABLE_RUNTIME
              && !TARGET_64BIT
              && !TARGET_ELF32
              && cum->indirect)
          /* If the parameter is not a scalar floating-point parameter,
             then it belongs in GPRs.  */
          || GET_MODE_CLASS (mode) != MODE_FLOAT
          /* A structure with a single SFmode field belongs in a GPR.  */
          || (type && AGGREGATE_TYPE_P (type)))
        retval = gen_rtx_REG (mode, gpr_reg_base);
      else
        retval = gen_rtx_REG (mode, fpr_reg_base);
    }

  return retval;
}
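
/* Illustrative return shapes (editor's sketch, not from the original
   source): when the conditions tested above hold for an unprototyped
   call, an SFmode argument comes back as a two-element PARALLEL naming
   both an FP register and a general register; otherwise a single REG
   such as (reg:SI 25) is returned.  */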

/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
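
/* Worked example (illustrative, not part of the original source): with
   cum->words == 7 and a two-word argument, offset becomes 1, so
   cum->words + offset >= 8 and the argument lives entirely on the
   stack (return 0).  With cum->words == 6 and a three-word argument,
   6 + 0 + 3 > 8 while 6 < 8, so the argument is split and
   (8 - 6) * UNITS_PER_WORD == 16 bytes are passed in registers.  */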

/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            {
              output_section_asm_op ("\t.SPACE $TEXT$\n"
                                     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
                                     "ACCESS=44,SORT=24,COMDAT");
              return;
            }
        }
      else
        {
          /* There isn't a current function, or the body of the current
             function has been completed.  So, we are changing to the
             text section to output debugging information.  Thus, we
             need to forget that we are in the text section so that
             varasm.c will call us when text_section is selected again.  */
          gcc_assert (!cfun || !cfun->machine
                      || cfun->machine->in_nsubspa == 2);
          in_section = NULL;
        }
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}

/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}

/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example, constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems handling
     the difference of two symbols, which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead, place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not place a reference to an external symbol defined in a
     shared library in a read-only section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}

/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc cannot
   be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
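
/* Illustrative behavior (editor's sketch, not from the original source):
   a TREE_READONLY VAR_DECL with a constant initializer and no relocs is
   placed in readonly_data_section (or, for a one-only, non-weak decl on
   SOM, in som_one_only_readonly_data_section); anything needing a reloc
   falls through to data_section or som_one_only_data_section.  */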

static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here; functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
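
/* Illustrative output (not from the original source): for a data symbol
   named "foo", the directive emitted above is "\t.EXPORT foo,DATA\n".  */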

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* The SOM ABI says that objects larger than 64 bits are returned in
     memory.  The PA64 ABI says that objects larger than 128 bits are
     returned in memory.  Note that int_size_in_bytes can return -1 if
     the size of the object is variable or larger than the maximum value
     that can be expressed as a HOST_WIDE_INT.  It can also return zero
     for an empty type.  The simplest way to handle variable and empty
     types is to pass them in memory.  This avoids problems in defining
     the boundaries of argument slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
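
/* Illustrative consequence (editor's sketch, not from the original
   source): a 12-byte struct is returned in memory under the 32-bit
   ABI (12 > 8) but in registers under PA64 (12 <= 16); empty and
   variable-sized types are always returned in memory.  */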

/* Structure to hold the declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct extern_symbol GTY(())
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol objects.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;

#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol *p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}

/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif

/* Return true if a change from mode FROM to mode TO for a register
   in register class CLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                             enum reg_class class)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (class))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
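
/* Illustrative cases (editor's sketch, not from the original source):
   an SImode <-> DImode change is rejected for any class that may
   include FP registers since the sizes differ, while SImode <-> SFmode
   is allowed because the sizes match.  */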

/* Return TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and
   MODE2 for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}

#include "gt-pa.h"