/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
                                           enum machine_mode,
                                           secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}* where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
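
/* Illustrative example (not from the original sources): with
   "-mfixed-range=fr4-fr31", fr4 through fr31 are marked fixed and
   call-used.  If the requested range covers every register in
   FP_REG_FIRST .. FP_REG_LAST, the loop above falls through and
   MASK_DISABLE_FPREGS is set, disabling FP register use entirely.  */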
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
        pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
        pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
        pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
        pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
        pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
        pa_cpu = PROCESSOR_7300;
      else
        return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}
void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX_11
  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
#endif
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return GGC_CNEW (machine_function);
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || ldil_cint_p (ival)
          || zdepi_cint_p (ival));
}
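
/* Illustrative examples (not from the original sources): 5 is accepted
   via VAL_14_BITS_P (an ldo), 0x12345000 via ldil_cint_p (its low 11
   bits are clear), and 0x3c00 (= 15 << 10) only via zdepi_cint_p, since
   it is too big for 14 bits and its low 11 bits are not all zero.  */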
/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}
/* True iff the operand OP can be used as the destination operand of
   an integer store.  This also implies the operand could be used as
   the source operand of an integer load.  Symbolic, lo_sum and indexed
   memory operands are not allowed.  We accept reloading pseudos and
   other memory operands.  */
int
integer_store_memory_operand (rtx op, enum machine_mode mode)
{
  return ((reload_in_progress
           && REG_P (op)
           && REGNO (op) >= FIRST_PSEUDO_REGISTER
           && reg_renumber [REGNO (op)] < 0)
          || (GET_CODE (op) == MEM
              && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
              && !symbolic_memory_operand (op, VOIDmode)
              && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
              && !IS_INDEX_ADDR_P (XEXP (op, 0))));
}
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
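
/* Worked examples (illustrative): 0x12345000 passes, since its low 11
   bits are clear and bit 31 is clear, so X above is 0.  0x12345678
   fails because 0x678 survives the mask.  0x92345000 also fails, since
   it changes sign when widened from 32 to 64 bits.  */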
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
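
/* Worked example (illustrative): for X = 0x70 (= 7 << 4), LSB_MASK is
   0x10 and T = (0x7 + 0x10) & ~0xf = 0x10, a power of two, so 0x70 is
   accepted.  For X = 0x101 the set bits are too far apart to come from
   a 5-bit field: T = 0x11, not a power of two, so it is rejected.  */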
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
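
/* Illustrative examples: MASK = ~(unsigned HOST_WIDE_INT) 0xf
   (1....10000) is accepted: ~MASK = 0xf, and adding its lowest set bit
   yields 0x10, a power of two.  MASK = 0xff0, a run of ones starting
   and ending mid-word, matches none of the patterns above and is
   rejected.  */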
/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
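
/* Illustrative examples: MASK = 0x3e0, a single contiguous run of
   ones, is accepted (0x3e0 + 0x20 = 0x400, a power of two), while
   MASK = 0x101 is rejected because its set bits are not contiguous.  */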
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, mode))
        {
          /* Force function label into memory.  */
          orig = XEXP (force_const_mem (mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
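
/* A worked instance with illustrative numbers: for a word access to
   X + 70000 the mask is 0x3fff; 70000 & 0x3fff is 4464, which is less
   than halfway (8192), so Y rounds down to 65536.  We compute
   Z = X + 65536 and emit memory (Z + 4464), where 4464 fits in a
   14-bit displacement.  */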
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (ptr_reg, offset - newoffset);
    }
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          orig_base = XEXP (XEXP (x, 0), 1);
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          orig_base = XEXP (x, 1);
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}
/* For the HPPA, REG, REG+CONST and LO_SUM addresses cost 1,
   a lone HIGH costs 2, and anything else costs 4.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
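
/* Illustrative readings of the table above: a floating point multiply
   is costed at 3 insns; an integer multiply costs 8 on PA-1.1 and
   later when the FP unit is usable (the hardware performs integer
   multiplies there) and 20 when it is not; integer adds, subtracts
   and shifts each cost a single insn.  */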
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
           && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
                                 XEXP (operand1, 0)))
          || ((GET_CODE (operand1) == SUBREG
               && GET_CODE (XEXP (operand1, 0)) == MEM
               && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                      ? SFmode : DFmode),
                                     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && ((GET_CODE (operand0) == MEM
                && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                       ? SFmode : DFmode),
                                      XEXP (operand0, 0)))
               || ((GET_CODE (operand0) == SUBREG)
                   && GET_CODE (XEXP (operand0, 0)) == MEM
                   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                          ? SFmode : DFmode),
                                         XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
                                                                       0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand0, 0),
                                                             0),
                                                       scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
                              replace_equiv_address (operand0, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM
               || GET_CODE (operand1) == CONST_INT
               || (GET_CODE (operand1) == REG
                   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
                                                                       0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand1, 0),
                                                             0),
                                                       scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             OPERAND0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg,
                          replace_equiv_address (operand1, scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
          || (GET_CODE (operand1) == CONST_INT
              && cint_ok_for_move (INTVAL (operand1)))
          || (operand1 == CONST0_RTX (mode))
          || (GET_CODE (operand1) == HIGH
              && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
          /* Only `general_operands' can come here, so MEM is ok.  */
          || GET_CODE (operand1) == MEM)
        {
          /* Various sets are created during RTL generation which don't
             have the REG_POINTER flag correctly set.  After the CSE pass,
             instruction recognition can fail if we don't consistently
             set this flag when performing register copies.  This should
             also improve the opportunities for creating insns that use
             unscaled indexing.  */
          if (REG_P (operand0) && REG_P (operand1))
            {
              if (REG_POINTER (operand1)
                  && !REG_POINTER (operand0)
                  && !HARD_REGISTER_P (operand0))
                copy_reg_pointer (operand0, operand1);
              else if (REG_POINTER (operand0)
                       && !REG_POINTER (operand1)
                       && !HARD_REGISTER_P (operand1))
                copy_reg_pointer (operand1, operand0);
            }

          /* When MEMs are broken out, the REG_POINTER flag doesn't
             get set.  In some cases, we can set the REG_POINTER flag
             from the declaration for the MEM.  */
          if (REG_P (operand0)
              && GET_CODE (operand1) == MEM
              && !REG_POINTER (operand0))
            {
              tree decl = MEM_EXPR (operand1);

              /* Set the register pointer flag and register alignment
                 if the declaration for this memory reference is a
                 pointer type.  Fortran indirect argument references
                 are ignored.  */
              if (decl
                  && !(flag_argument_noalias > 1
                       && TREE_CODE (decl) == INDIRECT_REF
                       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
                {
                  tree type;

                  /* If this is a COMPONENT_REF, use the FIELD_DECL from
                     tree operand 1.  */
                  if (TREE_CODE (decl) == COMPONENT_REF)
                    decl = TREE_OPERAND (decl, 1);

                  type = TREE_TYPE (decl);
                  type = strip_array_types (type);

                  if (POINTER_TYPE_P (type))
                    {
                      int align;

                      type = TREE_TYPE (type);
                      /* Using TYPE_ALIGN_OK is rather conservative as
                         only the ada frontend actually sets it.  */
                      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
                               : BITS_PER_UNIT);
                      mark_reg_pointer (operand0, align);
                    }
                }
            }

          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
          && !(reload_in_progress || reload_completed))
        {
          rtx temp = gen_reg_rtx (DFmode);

          emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
          emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
          return 1;
        }
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
        {
          /* Run this case quickly.  */
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
      if (! (reload_in_progress || reload_completed))
        {
          operands[0] = validize_mem (operand0);
          operands[1] = operand1 = force_reg (mode, operand1);
        }
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
          && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
        {
          ishighonly = 1;
          operand1 = XEXP (operand1, 0);
        }
      if (symbolic_operand (operand1, mode))
        {
          /* Argh.  The assembler and linker can't handle arithmetic
             involving plabels.

             So we force the plabel into memory, load operand0 from
             the memory location, then add in the constant part.  */
          if ((GET_CODE (operand1) == CONST
               && GET_CODE (XEXP (operand1, 0)) == PLUS
               && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
              || function_label_operand (operand1, mode))
            {
              rtx temp, const_part;

              /* Figure out what (if any) scratch register to use.  */
              if (reload_in_progress || reload_completed)
                {
                  scratch_reg = scratch_reg ? scratch_reg : operand0;
                  /* SCRATCH_REG will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  scratch_reg = force_mode (word_mode, scratch_reg);
                }
              else if (flag_pic)
                scratch_reg = gen_reg_rtx (Pmode);

              if (GET_CODE (operand1) == CONST)
                {
                  /* Save away the constant part of the expression.  */
                  const_part = XEXP (XEXP (operand1, 0), 1);
                  gcc_assert (GET_CODE (const_part) == CONST_INT);

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
                }
              else
                {
                  /* No constant part.  */
                  const_part = NULL_RTX;

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, operand1);
                }

              /* Get the address of the memory location.  PIC-ify it if
                 necessary.  */
              temp = XEXP (temp, 0);
              if (flag_pic)
                temp = legitimize_pic_address (temp, mode, scratch_reg);

              /* Put the address of the memory location into our destination
                 register.  */
              operands[1] = temp;
              emit_move_sequence (operands, mode, scratch_reg);

              /* Now load from the memory location into our destination
                 register.  */
              operands[1] = gen_rtx_MEM (Pmode, operands[0]);
              emit_move_sequence (operands, mode, scratch_reg);

              /* And add back in the constant part.  */
              if (const_part != NULL_RTX)
                expand_inc (operand0, const_part);

              return 1;
            }

          if (flag_pic)
            {
              rtx temp;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (Pmode);

              /* (const (plus (symbol) (const_int))) must be forced to
                 memory during/after reload if the const_int will not fit
                 in 14 bits.  */
              if (GET_CODE (operand1) == CONST
                  && GET_CODE (XEXP (operand1, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
                  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
                  && (reload_completed || reload_in_progress)
                  && flag_pic)
                {
                  rtx const_mem = force_const_mem (mode, operand1);
                  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
                                                        mode, temp);
                  operands[1] = replace_equiv_address (const_mem, operands[1]);
                  emit_move_sequence (operands, mode, temp);
                }
              else
                {
                  operands[1] = legitimize_pic_address (operand1, mode, temp);
                  if (REG_P (operand0) && REG_P (operands[1]))
                    copy_reg_pointer (operand0, operands[1]);
                  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
                }
            }
          /* On the HPPA, references to data space are supposed to use dp,
             register 27, but showing it in the RTL inhibits various cse
             and loop optimizations.  */
          else
            {
              rtx temp, set;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (mode);

              /* Loading a SYMBOL_REF into a register makes that register
                 safe to be used as the base in an indexed address.

                 Don't mark hard registers though.  That loses.  */
              if (GET_CODE (operand0) == REG
                  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (operand0, BITS_PER_UNIT);
              if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (temp, BITS_PER_UNIT);

              if (ishighonly)
                set = gen_rtx_SET (mode, operand0, temp);
              else
                set = gen_rtx_SET (VOIDmode,
                                   operand0,
                                   gen_rtx_LO_SUM (mode, temp, operand1));

              emit_insn (gen_rtx_SET (VOIDmode,
                                      temp,
                                      gen_rtx_HIGH (mode, operand1)));
              emit_insn (set);
            }

          return 1;
        }
1924 else if (pa_tls_referenced_p (operand1))
1926 rtx tmp = operand1;
1927 rtx addend = NULL;
1929 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
1931 addend = XEXP (XEXP (tmp, 0), 1);
1932 tmp = XEXP (XEXP (tmp, 0), 0);
1935 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
1936 tmp = legitimize_tls_address (tmp);
1937 if (addend)
1939 tmp = gen_rtx_PLUS (mode, tmp, addend);
1940 tmp = force_operand (tmp, operands[0]);
1942 operands[1] = tmp;
1944 else if (GET_CODE (operand1) != CONST_INT
1945 || !cint_ok_for_move (INTVAL (operand1)))
1947 rtx insn, temp;
1948 rtx op1 = operand1;
1949 HOST_WIDE_INT value = 0;
1950 HOST_WIDE_INT insv = 0;
1951 int insert = 0;
1953 if (GET_CODE (operand1) == CONST_INT)
1954 value = INTVAL (operand1);
1956 if (TARGET_64BIT
1957 && GET_CODE (operand1) == CONST_INT
1958 && HOST_BITS_PER_WIDE_INT > 32
1959 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1961 HOST_WIDE_INT nval;
1963 /* Extract the low order 32 bits of the value and sign extend.
1964 If the new value is the same as the original value, we can
1965 use the original value as-is. If the new value is
1966 different, we use it and insert the most significant 32 bits
1967 of the original value into the final result. */
1968 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1969 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
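/* For example, with the 64-bit value 0x123456789, nval is the
sign-extended low word 0x23456789; since it differs from value,
insv becomes 0x1 and the upper word is inserted afterwards. */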
1970 if (value != nval)
1972 #if HOST_BITS_PER_WIDE_INT > 32
1973 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1974 #endif
1975 insert = 1;
1976 value = nval;
1977 operand1 = GEN_INT (nval);
1981 if (reload_in_progress || reload_completed)
1982 temp = scratch_reg ? scratch_reg : operand0;
1983 else
1984 temp = gen_reg_rtx (mode);
1986 /* We don't directly split DImode constants on 32-bit targets
1987 because PLUS uses an 11-bit immediate and the insn sequence
1988 generated is not as efficient as the one using HIGH/LO_SUM. */
1989 if (GET_CODE (operand1) == CONST_INT
1990 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
1991 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1992 && !insert)
1994 /* Directly break constant into high and low parts. This
1995 provides better optimization opportunities because various
1996 passes recognize constants split with PLUS but not LO_SUM.
1997 We use a 14-bit signed low part except when the addition
1998 of 0x4000 to the high part might change the sign of the
1999 high part. */
2000 HOST_WIDE_INT low = value & 0x3fff;
2001 HOST_WIDE_INT high = value & ~ 0x3fff;
2003 if (low >= 0x2000)
2005 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2006 high += 0x2000;
2007 else
2008 high += 0x4000;
2011 low = value - high;
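/* For example, for value 0x3fff: low (0x3fff) is >= 0x2000, so
high becomes 0x4000 and low becomes -1; the constant is then
materialized as 0x4000 plus -1. */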
2013 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2014 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2016 else
2018 emit_insn (gen_rtx_SET (VOIDmode, temp,
2019 gen_rtx_HIGH (mode, operand1)));
2020 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2023 insn = emit_move_insn (operands[0], operands[1]);
2025 /* Now insert the most significant 32 bits of the value
2026 into the register. When we don't have a second register
2027 available, it could take up to nine instructions to load
2028 a 64-bit integer constant. Prior to reload, we force
2029 constants that would take more than three instructions
2030 to load to the constant pool. During and after reload,
2031 we have to handle all possible values. */
2032 if (insert)
2034 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2035 register and the value to be inserted is outside the
2036 range that can be loaded with three depdi instructions. */
2037 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2039 operand1 = GEN_INT (insv);
2041 emit_insn (gen_rtx_SET (VOIDmode, temp,
2042 gen_rtx_HIGH (mode, operand1)));
2043 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2044 emit_insn (gen_insv (operand0, GEN_INT (32),
2045 const0_rtx, temp));
2047 else
2049 int len = 5, pos = 27;
2051 /* Insert the bits using the depdi instruction. */
2052 while (pos >= 0)
2054 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
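/* V5 is the low-order five bits of INSV, sign-extended. */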
2055 HOST_WIDE_INT sign = v5 < 0;
2057 /* Left extend the insertion. */
2058 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2059 while (pos > 0 && (insv & 1) == sign)
2061 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2062 len += 1;
2063 pos -= 1;
2066 emit_insn (gen_insv (operand0, GEN_INT (len),
2067 GEN_INT (pos), GEN_INT (v5)));
2069 len = pos > 0 && pos < 5 ? pos : 5;
2070 pos -= len;
2075 set_unique_reg_note (insn, REG_EQUAL, op1);
2077 return 1;
2080 /* Now have insn-emit do whatever it normally does. */
2081 return 0;
2084 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2085 it will need a link/runtime reloc). */
2087 int
2088 reloc_needed (tree exp)
2090 int reloc = 0;
2092 switch (TREE_CODE (exp))
2094 case ADDR_EXPR:
2095 return 1;
2097 case POINTER_PLUS_EXPR:
2098 case PLUS_EXPR:
2099 case MINUS_EXPR:
2100 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2101 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2102 break;
2104 CASE_CONVERT:
2105 case NON_LVALUE_EXPR:
2106 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2107 break;
2109 case CONSTRUCTOR:
2111 tree value;
2112 unsigned HOST_WIDE_INT ix;
2114 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2115 if (value)
2116 reloc |= reloc_needed (value);
2118 break;
2120 case ERROR_MARK:
2121 break;
2123 default:
2124 break;
2126 return reloc;
2129 /* Does operand (which is a symbolic_operand) live in text space?
2130 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2131 will be true. */
2133 int
2134 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2136 if (GET_CODE (operand) == CONST)
2137 operand = XEXP (XEXP (operand, 0), 0);
2138 if (flag_pic)
2140 if (GET_CODE (operand) == SYMBOL_REF)
2141 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2143 else
2145 if (GET_CODE (operand) == SYMBOL_REF)
2146 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2148 return 1;
2152 /* Return the best assembler insn template
2153 for moving operands[1] into operands[0] as a fullword. */
2154 const char *
2155 singlemove_string (rtx *operands)
2157 HOST_WIDE_INT intval;
2159 if (GET_CODE (operands[0]) == MEM)
2160 return "stw %r1,%0";
2161 if (GET_CODE (operands[1]) == MEM)
2162 return "ldw %1,%0";
2163 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2165 long i;
2166 REAL_VALUE_TYPE d;
2168 gcc_assert (GET_MODE (operands[1]) == SFmode);
2170 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2171 bit pattern. */
2172 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2173 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2175 operands[1] = GEN_INT (i);
2176 /* Fall through to CONST_INT case. */
2178 if (GET_CODE (operands[1]) == CONST_INT)
2180 intval = INTVAL (operands[1]);
2182 if (VAL_14_BITS_P (intval))
2183 return "ldi %1,%0";
2184 else if ((intval & 0x7ff) == 0)
2185 return "ldil L'%1,%0";
2186 else if (zdepi_cint_p (intval))
2187 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2188 else
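/* Fall back to a two-insn sequence: ldil sets the left (high)
21 bits via the L' selector and ldo adds the right (low)
11 bits via R'. */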
2189 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2191 return "copy %1,%0";
2195 /* Compute position (in OP[1]) and width (in OP[2])
2196 useful for copying IMM to a register using the zdepi
2197 instructions. Store the immediate value to insert in OP[0]. */
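/* For example, IMM 0x00ff0000 yields op[0] = -1, op[1] = 15 and
op[2] = 8: an 8-bit field of ones deposited so that it ends at
bit 15 in PA bit numbering (bit 0 is the most significant). */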
2198 static void
2199 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2201 int lsb, len;
2203 /* Find the least significant set bit in IMM. */
2204 for (lsb = 0; lsb < 32; lsb++)
2206 if ((imm & 1) != 0)
2207 break;
2208 imm >>= 1;
2211 /* Choose variants based on *sign* of the 5-bit field. */
2212 if ((imm & 0x10) == 0)
2213 len = (lsb <= 28) ? 4 : 32 - lsb;
2214 else
2216 /* Find the width of the bitstring in IMM. */
2217 for (len = 5; len < 32; len++)
2219 if ((imm & (1 << len)) == 0)
2220 break;
2223 /* Sign extend IMM as a 5-bit value. */
2224 imm = (imm & 0xf) - 0x10;
2227 op[0] = imm;
2228 op[1] = 31 - lsb;
2229 op[2] = len;
2232 /* Compute position (in OP[1]) and width (in OP[2])
2233 useful for copying IMM to a register using the depdi,z
2234 instructions. Store the immediate value to insert in OP[0]. */
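/* For example, IMM 0x1f00 yields op[0] = -1, op[1] = 55 and
op[2] = 5: a 5-bit field of ones ending at bit 55 in PA bit
numbering. */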
2235 void
2236 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2238 HOST_WIDE_INT lsb, len;
2240 /* Find the least significant set bit in IMM. */
2241 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2243 if ((imm & 1) != 0)
2244 break;
2245 imm >>= 1;
2248 /* Choose variants based on *sign* of the 5-bit field. */
2249 if ((imm & 0x10) == 0)
2250 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2251 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2252 else
2254 /* Find the width of the bitstring in IMM. */
2255 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2257 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2258 break;
2261 /* Sign extend IMM as a 5-bit value. */
2262 imm = (imm & 0xf) - 0x10;
2265 op[0] = imm;
2266 op[1] = 63 - lsb;
2267 op[2] = len;
2270 /* Output assembler code to perform a doubleword move insn
2271 with operands OPERANDS. */
2273 const char *
2274 output_move_double (rtx *operands)
2276 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2277 rtx latehalf[2];
2278 rtx addreg0 = 0, addreg1 = 0;
2280 /* First classify both operands. */
2282 if (REG_P (operands[0]))
2283 optype0 = REGOP;
2284 else if (offsettable_memref_p (operands[0]))
2285 optype0 = OFFSOP;
2286 else if (GET_CODE (operands[0]) == MEM)
2287 optype0 = MEMOP;
2288 else
2289 optype0 = RNDOP;
2291 if (REG_P (operands[1]))
2292 optype1 = REGOP;
2293 else if (CONSTANT_P (operands[1]))
2294 optype1 = CNSTOP;
2295 else if (offsettable_memref_p (operands[1]))
2296 optype1 = OFFSOP;
2297 else if (GET_CODE (operands[1]) == MEM)
2298 optype1 = MEMOP;
2299 else
2300 optype1 = RNDOP;
2302 /* Check for the cases that the operand constraints are not
2303 supposed to allow. */
2304 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2306 /* Handle copies between general and floating registers. */
2308 if (optype0 == REGOP && optype1 == REGOP
2309 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2311 if (FP_REG_P (operands[0]))
2313 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2314 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2315 return "{fldds|fldd} -16(%%sp),%0";
2317 else
2319 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2320 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2321 return "{ldws|ldw} -12(%%sp),%R0";
2325 /* Handle auto decrementing and incrementing loads and stores
2326 specifically, since the structure of the function doesn't work
2327 for them without major modification. Do it better when we teach
2328 this port about the general inc/dec addressing of the PA.
2329 (This was written by tege. Chide him if it doesn't work.) */
2331 if (optype0 == MEMOP)
2333 /* We have to output the address syntax ourselves, since print_operand
2334 doesn't deal with the addresses we want to use. Fix this later. */
2336 rtx addr = XEXP (operands[0], 0);
2337 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2339 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2341 operands[0] = XEXP (addr, 0);
2342 gcc_assert (GET_CODE (operands[1]) == REG
2343 && GET_CODE (operands[0]) == REG);
2345 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2347 /* No overlap between high target register and address
2348 register. (We do this in a non-obvious way to
2349 save a register file writeback) */
2350 if (GET_CODE (addr) == POST_INC)
2351 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2352 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2354 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2356 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2358 operands[0] = XEXP (addr, 0);
2359 gcc_assert (GET_CODE (operands[1]) == REG
2360 && GET_CODE (operands[0]) == REG);
2362 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2363 /* No overlap between high target register and address
2364 register. (We do this in a non-obvious way to save a
2365 register file writeback) */
2366 if (GET_CODE (addr) == PRE_INC)
2367 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2368 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2371 if (optype1 == MEMOP)
2373 /* We have to output the address syntax ourselves, since print_operand
2374 doesn't deal with the addresses we want to use. Fix this later. */
2376 rtx addr = XEXP (operands[1], 0);
2377 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2379 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2381 operands[1] = XEXP (addr, 0);
2382 gcc_assert (GET_CODE (operands[0]) == REG
2383 && GET_CODE (operands[1]) == REG);
2385 if (!reg_overlap_mentioned_p (high_reg, addr))
2387 /* No overlap between high target register and address
2388 register. (We do this in a non-obvious way to
2389 save a register file writeback) */
2390 if (GET_CODE (addr) == POST_INC)
2391 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2392 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2394 else
2396 /* This is an undefined situation. We should load into the
2397 address register *and* update that register. Probably
2398 we don't need to handle this at all. */
2399 if (GET_CODE (addr) == POST_INC)
2400 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2401 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2404 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2406 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2408 operands[1] = XEXP (addr, 0);
2409 gcc_assert (GET_CODE (operands[0]) == REG
2410 && GET_CODE (operands[1]) == REG);
2412 if (!reg_overlap_mentioned_p (high_reg, addr))
2414 /* No overlap between high target register and address
2415 register. (We do this in a non-obvious way to
2416 save a register file writeback) */
2417 if (GET_CODE (addr) == PRE_INC)
2418 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2419 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2421 else
2423 /* This is an undefined situation. We should load into the
2424 address register *and* update that register. Probably
2425 we don't need to handle this at all. */
2426 if (GET_CODE (addr) == PRE_INC)
2427 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2428 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2431 else if (GET_CODE (addr) == PLUS
2432 && GET_CODE (XEXP (addr, 0)) == MULT)
2434 rtx xoperands[4];
2435 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2437 if (!reg_overlap_mentioned_p (high_reg, addr))
2439 xoperands[0] = high_reg;
2440 xoperands[1] = XEXP (addr, 1);
2441 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2442 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2443 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2444 xoperands);
2445 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2447 else
2449 xoperands[0] = high_reg;
2450 xoperands[1] = XEXP (addr, 1);
2451 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2452 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2453 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2454 xoperands);
2455 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2460 /* If an operand is an unoffsettable memory ref, find a register
2461 we can increment temporarily to make it refer to the second word. */
2463 if (optype0 == MEMOP)
2464 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2466 if (optype1 == MEMOP)
2467 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2469 /* Ok, we can do one word at a time.
2470 Normally we do the low-numbered word first.
2472 In either case, set up in LATEHALF the operands to use
2473 for the high-numbered word and in some cases alter the
2474 operands in OPERANDS to be suitable for the low-numbered word. */
2476 if (optype0 == REGOP)
2477 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2478 else if (optype0 == OFFSOP)
2479 latehalf[0] = adjust_address (operands[0], SImode, 4);
2480 else
2481 latehalf[0] = operands[0];
2483 if (optype1 == REGOP)
2484 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2485 else if (optype1 == OFFSOP)
2486 latehalf[1] = adjust_address (operands[1], SImode, 4);
2487 else if (optype1 == CNSTOP)
2488 split_double (operands[1], &operands[1], &latehalf[1]);
2489 else
2490 latehalf[1] = operands[1];
2492 /* If the first move would clobber the source of the second one,
2493 do them in the other order.
2495 This can happen in two cases:
2497 mem -> register where the first half of the destination register
2498 is the same register used in the memory's address. Reload
2499 can create such insns.
2501 mem in this case will be either register indirect or register
2502 indirect plus a valid offset.
2504 register -> register move where REGNO(dst) == REGNO(src + 1)
2505 someone (Tim/Tege?) claimed this can happen for parameter loads.
2507 Handle mem -> register case first. */
2508 if (optype0 == REGOP
2509 && (optype1 == MEMOP || optype1 == OFFSOP)
2510 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2511 operands[1], 0))
2513 /* Do the late half first. */
2514 if (addreg1)
2515 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2516 output_asm_insn (singlemove_string (latehalf), latehalf);
2518 /* Then clobber. */
2519 if (addreg1)
2520 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2521 return singlemove_string (operands);
2524 /* Now handle register -> register case. */
2525 if (optype0 == REGOP && optype1 == REGOP
2526 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2528 output_asm_insn (singlemove_string (latehalf), latehalf);
2529 return singlemove_string (operands);
2532 /* Normal case: do the two words, low-numbered first. */
2534 output_asm_insn (singlemove_string (operands), operands);
2536 /* Make any unoffsettable addresses point at high-numbered word. */
2537 if (addreg0)
2538 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2539 if (addreg1)
2540 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2542 /* Do that word. */
2543 output_asm_insn (singlemove_string (latehalf), latehalf);
2545 /* Undo the adds we just did. */
2546 if (addreg0)
2547 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2548 if (addreg1)
2549 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2551 return "";
2554 const char *
2555 output_fp_move_double (rtx *operands)
2557 if (FP_REG_P (operands[0]))
2559 if (FP_REG_P (operands[1])
2560 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2561 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2562 else
2563 output_asm_insn ("fldd%F1 %1,%0", operands);
2565 else if (FP_REG_P (operands[1]))
2567 output_asm_insn ("fstd%F0 %1,%0", operands);
2569 else
2571 rtx xoperands[2];
2573 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2575 /* This is a pain. You have to be prepared to deal with an
2576 arbitrary address here including pre/post increment/decrement.
2578 So avoid this in the MD. */
2579 gcc_assert (GET_CODE (operands[0]) == REG);
2581 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2582 xoperands[0] = operands[0];
2583 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2585 return "";
2588 /* Return a REG that occurs in ADDR with coefficient 1.
2589 ADDR can be effectively incremented by incrementing REG. */
2591 static rtx
2592 find_addr_reg (rtx addr)
2594 while (GET_CODE (addr) == PLUS)
2596 if (GET_CODE (XEXP (addr, 0)) == REG)
2597 addr = XEXP (addr, 0);
2598 else if (GET_CODE (XEXP (addr, 1)) == REG)
2599 addr = XEXP (addr, 1);
2600 else if (CONSTANT_P (XEXP (addr, 0)))
2601 addr = XEXP (addr, 1);
2602 else if (CONSTANT_P (XEXP (addr, 1)))
2603 addr = XEXP (addr, 0);
2604 else
2605 gcc_unreachable ();
2607 gcc_assert (GET_CODE (addr) == REG);
2608 return addr;
2611 /* Emit code to perform a block move.
2613 OPERANDS[0] is the destination pointer as a REG, clobbered.
2614 OPERANDS[1] is the source pointer as a REG, clobbered.
2615 OPERANDS[2] is a register for temporary storage.
2616 OPERANDS[3] is a register for temporary storage.
2617 OPERANDS[4] is the size as a CONST_INT
2618 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2619 OPERANDS[6] is another temporary register. */
2621 const char *
2622 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2624 int align = INTVAL (operands[5]);
2625 unsigned long n_bytes = INTVAL (operands[4]);
2627 /* We can't move more than a word at a time because the PA
2628 has no integer move insns wider than a word. (Could use fp mem ops?) */
2629 if (align > (TARGET_64BIT ? 8 : 4))
2630 align = (TARGET_64BIT ? 8 : 4);
2632 /* Note that we know each loop below will execute at least twice
2633 (else we would have open-coded the copy). */
2634 switch (align)
2636 case 8:
2637 /* Pre-adjust the loop counter. */
2638 operands[4] = GEN_INT (n_bytes - 16);
2639 output_asm_insn ("ldi %4,%2", operands);
2641 /* Copying loop. */
2642 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2643 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2644 output_asm_insn ("std,ma %3,8(%0)", operands);
2645 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2646 output_asm_insn ("std,ma %6,8(%0)", operands);
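/* The addib,>= above decrements the byte count by 16 and, while it
stays non-negative, branches back 12 bytes (three insns) to the
first ldd; the std that follows executes in the delay slot. */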
2648 /* Handle the residual. There could be up to 7 bytes of
2649 residual to copy! */
2650 if (n_bytes % 16 != 0)
2652 operands[4] = GEN_INT (n_bytes % 8);
2653 if (n_bytes % 16 >= 8)
2654 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2655 if (n_bytes % 8 != 0)
2656 output_asm_insn ("ldd 0(%1),%6", operands);
2657 if (n_bytes % 16 >= 8)
2658 output_asm_insn ("std,ma %3,8(%0)", operands);
2659 if (n_bytes % 8 != 0)
2660 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2662 return "";
2664 case 4:
2665 /* Pre-adjust the loop counter. */
2666 operands[4] = GEN_INT (n_bytes - 8);
2667 output_asm_insn ("ldi %4,%2", operands);
2669 /* Copying loop. */
2670 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2671 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2672 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2673 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2674 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2676 /* Handle the residual. There could be up to 7 bytes of
2677 residual to copy! */
2678 if (n_bytes % 8 != 0)
2680 operands[4] = GEN_INT (n_bytes % 4);
2681 if (n_bytes % 8 >= 4)
2682 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2683 if (n_bytes % 4 != 0)
2684 output_asm_insn ("ldw 0(%1),%6", operands);
2685 if (n_bytes % 8 >= 4)
2686 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2687 if (n_bytes % 4 != 0)
2688 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2690 return "";
2692 case 2:
2693 /* Pre-adjust the loop counter. */
2694 operands[4] = GEN_INT (n_bytes - 4);
2695 output_asm_insn ("ldi %4,%2", operands);
2697 /* Copying loop. */
2698 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2699 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2700 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2701 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2702 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2704 /* Handle the residual. */
2705 if (n_bytes % 4 != 0)
2707 if (n_bytes % 4 >= 2)
2708 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2709 if (n_bytes % 2 != 0)
2710 output_asm_insn ("ldb 0(%1),%6", operands);
2711 if (n_bytes % 4 >= 2)
2712 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2713 if (n_bytes % 2 != 0)
2714 output_asm_insn ("stb %6,0(%0)", operands);
2716 return "";
2718 case 1:
2719 /* Pre-adjust the loop counter. */
2720 operands[4] = GEN_INT (n_bytes - 2);
2721 output_asm_insn ("ldi %4,%2", operands);
2723 /* Copying loop. */
2724 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2725 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2726 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2727 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2728 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2730 /* Handle the residual. */
2731 if (n_bytes % 2 != 0)
2733 output_asm_insn ("ldb 0(%1),%3", operands);
2734 output_asm_insn ("stb %3,0(%0)", operands);
2736 return "";
2738 default:
2739 gcc_unreachable ();
2743 /* Count the number of insns necessary to handle this block move.
2745 Basic structure is the same as output_block_move, except that we
2746 count insns rather than emit them. */
2748 static int
2749 compute_movmem_length (rtx insn)
2751 rtx pat = PATTERN (insn);
2752 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2753 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2754 unsigned int n_insns = 0;
2756 /* We can't move more than a word at a time because the PA
2757 has no integer move insns wider than a word. (Could use fp mem ops?) */
2758 if (align > (TARGET_64BIT ? 8 : 4))
2759 align = (TARGET_64BIT ? 8 : 4);
2761 /* The basic copying loop. */
2762 n_insns = 6;
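/* ldi + two loads + two stores + addib (see output_block_move). */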
2764 /* Residuals. */
2765 if (n_bytes % (2 * align) != 0)
2767 if ((n_bytes % (2 * align)) >= align)
2768 n_insns += 2;
2770 if ((n_bytes % align) != 0)
2771 n_insns += 2;
2774 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2775 return n_insns * 4;
2778 /* Emit code to perform a block clear.
2780 OPERANDS[0] is the destination pointer as a REG, clobbered.
2781 OPERANDS[1] is a register for temporary storage.
2782 OPERANDS[2] is the size as a CONST_INT
2783 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2785 const char *
2786 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2788 int align = INTVAL (operands[3]);
2789 unsigned long n_bytes = INTVAL (operands[2]);
2791 /* We can't clear more than a word at a time because the PA
2792 has no integer move insns wider than a word. */
2793 if (align > (TARGET_64BIT ? 8 : 4))
2794 align = (TARGET_64BIT ? 8 : 4);
2796 /* Note that we know each loop below will execute at least twice
2797 (else we would have open-coded the clear). */
2798 switch (align)
2800 case 8:
2801 /* Pre-adjust the loop counter. */
2802 operands[2] = GEN_INT (n_bytes - 16);
2803 output_asm_insn ("ldi %2,%1", operands);
2805 /* Loop. */
2806 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2807 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2808 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2810 /* Handle the residual. There could be up to 7 bytes of
2811 residual to copy! */
2812 if (n_bytes % 16 != 0)
2814 operands[2] = GEN_INT (n_bytes % 8);
2815 if (n_bytes % 16 >= 8)
2816 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2817 if (n_bytes % 8 != 0)
2818 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2820 return "";
2822 case 4:
2823 /* Pre-adjust the loop counter. */
2824 operands[2] = GEN_INT (n_bytes - 8);
2825 output_asm_insn ("ldi %2,%1", operands);
2827 /* Loop. */
2828 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2829 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2830 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2832 /* Handle the residual. There could be up to 7 bytes of
2833 residual to copy! */
2834 if (n_bytes % 8 != 0)
2836 operands[2] = GEN_INT (n_bytes % 4);
2837 if (n_bytes % 8 >= 4)
2838 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2839 if (n_bytes % 4 != 0)
2840 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2842 return "";
2844 case 2:
2845 /* Pre-adjust the loop counter. */
2846 operands[2] = GEN_INT (n_bytes - 4);
2847 output_asm_insn ("ldi %2,%1", operands);
2849 /* Loop. */
2850 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2851 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2852 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2854 /* Handle the residual. */
2855 if (n_bytes % 4 != 0)
2857 if (n_bytes % 4 >= 2)
2858 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2859 if (n_bytes % 2 != 0)
2860 output_asm_insn ("stb %%r0,0(%0)", operands);
2862 return "";
2864 case 1:
2865 /* Pre-adjust the loop counter. */
2866 operands[2] = GEN_INT (n_bytes - 2);
2867 output_asm_insn ("ldi %2,%1", operands);
2869 /* Loop. */
2870 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2871 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2872 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2874 /* Handle the residual. */
2875 if (n_bytes % 2 != 0)
2876 output_asm_insn ("stb %%r0,0(%0)", operands);
2878 return "";
2880 default:
2881 gcc_unreachable ();
2885 /* Count the number of insns necessary to handle this block clear.
2887 Basic structure is the same as output_block_clear, except that we
2888 count insns rather than emit them. */
2890 static int
2891 compute_clrmem_length (rtx insn)
2893 rtx pat = PATTERN (insn);
2894 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2895 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2896 unsigned int n_insns = 0;
2898 /* We can't clear more than a word at a time because the PA
2899 has no integer move insns wider than a word. */
2900 if (align > (TARGET_64BIT ? 8 : 4))
2901 align = (TARGET_64BIT ? 8 : 4);
2903 /* The basic loop. */
2904 n_insns = 4;
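/* ldi + store + addib + the store in its delay slot
(see output_block_clear). */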
2906 /* Residuals. */
2907 if (n_bytes % (2 * align) != 0)
2909 if ((n_bytes % (2 * align)) >= align)
2910 n_insns++;
2912 if ((n_bytes % align) != 0)
2913 n_insns++;
2916 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2917 return n_insns * 4;
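/* Return an assembler template to AND operands[1] with the constant
mask in operands[2], storing the result in operands[0]. A nonzero
mask must be a block of ones with at most one interior run of
zeros: if the zeros extend through the most significant bit
(e.g. 0x0000ffff), an unsigned extract is used; otherwise the
run is cleared with a deposit of zeros (e.g. 0xffff00ff gives
"{depi|depwi} 0,23,8,%0"). */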
2921 const char *
2922 output_and (rtx *operands)
2924 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2926 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2927 int ls0, ls1, ms0, p, len;
2929 for (ls0 = 0; ls0 < 32; ls0++)
2930 if ((mask & (1 << ls0)) == 0)
2931 break;
2933 for (ls1 = ls0; ls1 < 32; ls1++)
2934 if ((mask & (1 << ls1)) != 0)
2935 break;
2937 for (ms0 = ls1; ms0 < 32; ms0++)
2938 if ((mask & (1 << ms0)) == 0)
2939 break;
2941 gcc_assert (ms0 == 32);
2943 if (ls1 == 32)
2945 len = ls0;
2947 gcc_assert (len);
2949 operands[2] = GEN_INT (len);
2950 return "{extru|extrw,u} %1,31,%2,%0";
2952 else
2954 /* We could use this `depi' for the case above as well, but `depi'
2955 requires one more register file access than an `extru'. */
2957 p = 31 - ls0;
2958 len = ls1 - ls0;
2960 operands[2] = GEN_INT (p);
2961 operands[3] = GEN_INT (len);
2962 return "{depi|depwi} 0,%2,%3,%0";
2965 else
2966 return "and %1,%2,%0";
2969 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2970 storing the result in operands[0]. */
2971 const char *
2972 output_64bit_and (rtx *operands)
2974 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2976 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2977 int ls0, ls1, ms0, p, len;
2979 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2980 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2981 break;
2983 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2984 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2985 break;
2987 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2988 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2989 break;
2991 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2993 if (ls1 == HOST_BITS_PER_WIDE_INT)
2995 len = ls0;
2997 gcc_assert (len);
2999 operands[2] = GEN_INT (len);
3000 return "extrd,u %1,63,%2,%0";
3002 else
3004 /* We could use this `depdi' for the case above as well, but `depdi'
3005 requires one more register file access than an `extrd,u'. */
3007 p = 63 - ls0;
3008 len = ls1 - ls0;
3010 operands[2] = GEN_INT (p);
3011 operands[3] = GEN_INT (len);
3012 return "depdi 0,%2,%3,%0";
3015 else
3016 return "and %1,%2,%0";
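/* Return an assembler template to OR the constant mask in operands[2]
into operands[1], storing the result in operands[0]. The mask must
be a single contiguous block of ones; e.g. 0x00000ff0 gives
"{depi|depwi} -1,27,8,%0". */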
3019 const char *
3020 output_ior (rtx *operands)
3022 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3023 int bs0, bs1, p, len;
3025 if (INTVAL (operands[2]) == 0)
3026 return "copy %1,%0";
3028 for (bs0 = 0; bs0 < 32; bs0++)
3029 if ((mask & (1 << bs0)) != 0)
3030 break;
3032 for (bs1 = bs0; bs1 < 32; bs1++)
3033 if ((mask & (1 << bs1)) == 0)
3034 break;
3036 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3038 p = 31 - bs0;
3039 len = bs1 - bs0;
3041 operands[2] = GEN_INT (p);
3042 operands[3] = GEN_INT (len);
3043 return "{depi|depwi} -1,%2,%3,%0";
3046 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3047 storing the result in operands[0]. */
3048 const char *
3049 output_64bit_ior (rtx *operands)
3051 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3052 int bs0, bs1, p, len;
3054 if (INTVAL (operands[2]) == 0)
3055 return "copy %1,%0";
3057 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3058 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3059 break;
3061 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3062 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3063 break;
3065 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3066 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3068 p = 63 - bs0;
3069 len = bs1 - bs0;
3071 operands[2] = GEN_INT (p);
3072 operands[3] = GEN_INT (len);
3073 return "depdi -1,%2,%3,%0";
3076 /* Target hook for assembling integer objects. This code handles
3077 aligned SI and DI integers specially since function references
3078 must be preceded by P%. */
3080 static bool
3081 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3083 if (size == UNITS_PER_WORD
3084 && aligned_p
3085 && function_label_operand (x, VOIDmode))
3087 fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3088 output_addr_const (asm_out_file, x);
3089 fputc ('\n', asm_out_file);
3090 return true;
3092 return default_assemble_integer (x, size, aligned_p);
3095 /* Output an ascii string. */
3096 void
3097 output_ascii (FILE *file, const char *p, int size)
3099 int i;
3100 int chars_output;
3101 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3103 /* The HP assembler can only take strings of 256 characters at one
3104 time. This is a limitation on input line length, *not* the
3105 length of the string. Sigh. Even worse, it seems that the
3106 restriction is in number of input characters (see \xnn &
3107 \whatever). So we have to do this very carefully. */
3109 fputs ("\t.STRING \"", file);
3111 chars_output = 0;
3112 for (i = 0; i < size; i += 4)
3114 int co = 0;
3115 int io = 0;
3116 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3118 register unsigned int c = (unsigned char) p[i + io];
3120 if (c == '\"' || c == '\\')
3121 partial_output[co++] = '\\';
3122 if (c >= ' ' && c < 0177)
3123 partial_output[co++] = c;
3124 else
3126 unsigned int hexd;
3127 partial_output[co++] = '\\';
3128 partial_output[co++] = 'x';
3129 hexd = c / 16 + '0';
3130 if (hexd > '9')
3131 hexd -= '9' - 'a' + 1;
3132 partial_output[co++] = hexd;
3133 hexd = c % 16 + '0';
3134 if (hexd > '9')
3135 hexd -= '9' - 'a' + 1;
3136 partial_output[co++] = hexd;
3139 if (chars_output + co > 243)
3141 fputs ("\"\n\t.STRING \"", file);
3142 chars_output = 0;
3144 fwrite (partial_output, 1, (size_t) co, file);
3145 chars_output += co;
3146 co = 0;
3148 fputs ("\"\n", file);
3151 /* Try to rewrite floating point comparisons & branches to avoid
3152 useless add,tr insns.
3154 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3155 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3156 first attempt to remove useless add,tr insns. It is zero
3157 for the second pass as reorg sometimes leaves bogus REG_DEAD
3158 notes lying around.
3160 When CHECK_NOTES is zero we can only eliminate add,tr insns
3161 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3162 instructions. */
3163 static void
3164 remove_useless_addtr_insns (int check_notes)
3166 rtx insn;
3167 static int pass = 0;
3169 /* This is fairly cheap, so always run it when optimizing. */
3170 if (optimize > 0)
3172 int fcmp_count = 0;
3173 int fbranch_count = 0;
3175 /* Walk all the insns in this function looking for fcmp & fbranch
3176 instructions. Keep track of how many of each we find. */
3177 for (insn = get_insns (); insn; insn = next_insn (insn))
3179 rtx tmp;
3181 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3182 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3183 continue;
3185 tmp = PATTERN (insn);
3187 /* It must be a set. */
3188 if (GET_CODE (tmp) != SET)
3189 continue;
3191 /* If the destination is CCFP, then we've found an fcmp insn. */
3192 tmp = SET_DEST (tmp);
3193 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3195 fcmp_count++;
3196 continue;
3199 tmp = PATTERN (insn);
3200 /* If this is an fbranch instruction, bump the fbranch counter. */
3201 if (GET_CODE (tmp) == SET
3202 && SET_DEST (tmp) == pc_rtx
3203 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3204 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3205 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3206 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3208 fbranch_count++;
3209 continue;
3214 /* Find all floating point compare + branch insns. If possible,
3215 reverse the comparison & the branch to avoid add,tr insns. */
3216 for (insn = get_insns (); insn; insn = next_insn (insn))
3218 rtx tmp, next;
3220 /* Ignore anything that isn't an INSN. */
3221 if (GET_CODE (insn) != INSN)
3222 continue;
3224 tmp = PATTERN (insn);
3226 /* It must be a set. */
3227 if (GET_CODE (tmp) != SET)
3228 continue;
3230 /* The destination must be CCFP, which is register zero. */
3231 tmp = SET_DEST (tmp);
3232 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3233 continue;
3235 /* INSN should be a set of CCFP.
3237 See if the result of this insn is used in a reversed FP
3238 conditional branch. If so, reverse our condition and
3239 the branch. Doing so avoids useless add,tr insns. */
3240 next = next_insn (insn);
3241 while (next)
3243 /* Jumps, calls and labels stop our search. */
3244 if (GET_CODE (next) == JUMP_INSN
3245 || GET_CODE (next) == CALL_INSN
3246 || GET_CODE (next) == CODE_LABEL)
3247 break;
3249 /* As does another fcmp insn. */
3250 if (GET_CODE (next) == INSN
3251 && GET_CODE (PATTERN (next)) == SET
3252 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3253 && REGNO (SET_DEST (PATTERN (next))) == 0)
3254 break;
3256 next = next_insn (next);
3259 /* Is NEXT_INSN a branch? */
3260 if (next
3261 && GET_CODE (next) == JUMP_INSN)
3263 rtx pattern = PATTERN (next);
3265 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3266 and CCFP dies, then reverse our conditional and the branch
3267 to avoid the add,tr. */
3268 if (GET_CODE (pattern) == SET
3269 && SET_DEST (pattern) == pc_rtx
3270 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3271 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3272 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3273 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3274 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3275 && (fcmp_count == fbranch_count
3276 || (check_notes
3277 && find_regno_note (next, REG_DEAD, 0))))
3279 /* Reverse the branch. */
3280 tmp = XEXP (SET_SRC (pattern), 1);
3281 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3282 XEXP (SET_SRC (pattern), 2) = tmp;
3283 INSN_CODE (next) = -1;
3285 /* Reverse our condition. */
3286 tmp = PATTERN (insn);
3287 PUT_CODE (XEXP (tmp, 1),
3288 (reverse_condition_maybe_unordered
3289 (GET_CODE (XEXP (tmp, 1)))));
3295 pass = !pass;
3299 /* You may have trouble believing this, but this is the 32-bit HP-PA
3300 stack layout. Wow.
3302 Offset Contents
3304 Variable arguments (optional; any number may be allocated)
3306 SP-(4*(N+9)) arg word N
3308 SP-56 arg word 5
3309 SP-52 arg word 4
3311 Fixed arguments (must be allocated; may remain unused)
3313 SP-48 arg word 3
3314 SP-44 arg word 2
3315 SP-40 arg word 1
3316 SP-36 arg word 0
3318 Frame Marker
3320 SP-32 External Data Pointer (DP)
3321 SP-28 External sr4
3322 SP-24 External/stub RP (RP')
3323 SP-20 Current RP
3324 SP-16 Static Link
3325 SP-12 Clean up
3326 SP-8 Calling Stub RP (RP'')
3327 SP-4 Previous SP
3329 Top of Frame
3331 SP-0 Stack Pointer (points to next available address)
3335 /* This function saves registers as follows. Registers marked with ' are
3336 this function's registers (as opposed to the previous function's).
3337 If a frame_pointer isn't needed, r4 is saved as a general register;
3338 the space for the frame pointer is still allocated, though, to keep
3339 things simple.
3342 Top of Frame
3344 SP (FP') Previous FP
3345 SP + 4 Alignment filler (sigh)
3346 SP + 8 Space for locals reserved here.
3350 SP + n All call-saved registers used.
3354 SP + o All call-saved fp registers used.
3358 SP + p (SP') points to next available address.
3362 /* Global variables set by hppa_expand_prologue(). */
3363 /* Size of frame. Need to know this to emit return insns from
3364 leaf procedures. */
3365 static HOST_WIDE_INT actual_fsize, local_fsize;
3366 static int save_fregs;
3368 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3369 Handle case where DISP > 8k by using the add_high_const patterns.
3371 Note in DISP > 8k case, we will leave the high part of the address
3372 in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
3374 static void
3375 store_reg (int reg, HOST_WIDE_INT disp, int base)
3377 rtx insn, dest, src, basereg;
3379 src = gen_rtx_REG (word_mode, reg);
3380 basereg = gen_rtx_REG (Pmode, base);
3381 if (VAL_14_BITS_P (disp))
3383 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3384 insn = emit_move_insn (dest, src);
3386 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3388 rtx delta = GEN_INT (disp);
3389 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3391 emit_move_insn (tmpreg, delta);
3392 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3393 if (DO_FRAME_NOTES)
3395 REG_NOTES (insn)
3396 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3397 gen_rtx_SET (VOIDmode, tmpreg,
3398 gen_rtx_PLUS (Pmode, basereg, delta)),
3399 REG_NOTES (insn));
3400 RTX_FRAME_RELATED_P (insn) = 1;
3402 dest = gen_rtx_MEM (word_mode, tmpreg);
3403 insn = emit_move_insn (dest, src);
3405 else
3407 rtx delta = GEN_INT (disp);
3408 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3409 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3411 emit_move_insn (tmpreg, high);
3412 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3413 insn = emit_move_insn (dest, src);
3414 if (DO_FRAME_NOTES)
3416 REG_NOTES (insn)
3417 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3418 gen_rtx_SET (VOIDmode,
3419 gen_rtx_MEM (word_mode,
3420 gen_rtx_PLUS (word_mode, basereg,
3421 delta)),
3422 src),
3423 REG_NOTES (insn));
3427 if (DO_FRAME_NOTES)
3428 RTX_FRAME_RELATED_P (insn) = 1;
3431 /* Emit RTL to store REG at the memory location specified by BASE and then
3432 add MOD to BASE. MOD must be <= 8k. */
3434 static void
3435 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3437 rtx insn, basereg, srcreg, delta;
3439 gcc_assert (VAL_14_BITS_P (mod));
3441 basereg = gen_rtx_REG (Pmode, base);
3442 srcreg = gen_rtx_REG (word_mode, reg);
3443 delta = GEN_INT (mod);
3445 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3446 if (DO_FRAME_NOTES)
3448 RTX_FRAME_RELATED_P (insn) = 1;
3450 /* RTX_FRAME_RELATED_P must be set on each frame related set
3451 in a parallel with more than one element. */
3452 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3453 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3457 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3458 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3459 whether to add a frame note or not.
3461 In the DISP > 8k case, we leave the high part of the address in %r1.
3462 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3464 static void
3465 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3467 rtx insn;
3469 if (VAL_14_BITS_P (disp))
3471 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3472 plus_constant (gen_rtx_REG (Pmode, base), disp));
3474 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3476 rtx basereg = gen_rtx_REG (Pmode, base);
3477 rtx delta = GEN_INT (disp);
3478 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3480 emit_move_insn (tmpreg, delta);
3481 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3482 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3483 if (DO_FRAME_NOTES)
3484 REG_NOTES (insn)
3485 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3486 gen_rtx_SET (VOIDmode, tmpreg,
3487 gen_rtx_PLUS (Pmode, basereg, delta)),
3488 REG_NOTES (insn));
3490 else
3492 rtx basereg = gen_rtx_REG (Pmode, base);
3493 rtx delta = GEN_INT (disp);
3494 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3496 emit_move_insn (tmpreg,
3497 gen_rtx_PLUS (Pmode, basereg,
3498 gen_rtx_HIGH (Pmode, delta)));
3499 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3500 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3503 if (DO_FRAME_NOTES && note)
3504 RTX_FRAME_RELATED_P (insn) = 1;
3507 HOST_WIDE_INT
3508 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3510 int freg_saved = 0;
3511 int i, j;
3513 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3514 be consistent with the rounding and size calculation done here.
3515 Change them at the same time. */
3517 /* We do our own stack alignment. First, round the size of the
3518 stack locals up to a word boundary. */
3519 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3521 /* Space for previous frame pointer + filler. If any frame is
3522 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3523 waste some space here for the sake of HP compatibility. The
3524 first slot is only used when the frame pointer is needed. */
3525 if (size || frame_pointer_needed)
3526 size += STARTING_FRAME_OFFSET;
3528 /* If the current function calls __builtin_eh_return, then we need
3529 to allocate stack space for registers that will hold data for
3530 the exception handler. */
3531 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3533 unsigned int i;
3535 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3536 continue;
3537 size += i * UNITS_PER_WORD;
3540 /* Account for space used by the callee general register saves. */
3541 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3542 if (df_regs_ever_live_p (i))
3543 size += UNITS_PER_WORD;
3545 /* Account for space used by the callee floating point register saves. */
3546 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3547 if (df_regs_ever_live_p (i)
3548 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3550 freg_saved = 1;
3552 /* We always save both halves of the FP register, so always
3553 increment the frame size by 8 bytes. */
3554 size += 8;
3557 /* If any of the floating registers are saved, account for the
3558 alignment needed for the floating point register save block. */
3559 if (freg_saved)
3561 size = (size + 7) & ~7;
3562 if (fregs_live)
3563 *fregs_live = 1;
3566 /* The various ABIs include space for the outgoing parameters in the
3567 size of the current function's stack frame. We don't need to align
3568 for the outgoing arguments as their alignment is set by the final
3569 rounding for the frame as a whole. */
3570 size += crtl->outgoing_args_size;
3572 /* Allocate space for the fixed frame marker. This space must be
3573 allocated for any function that makes calls or allocates
3574 stack space. */
3575 if (!current_function_is_leaf || size)
3576 size += TARGET_64BIT ? 48 : 32;
3578 /* Finally, round to the preferred stack boundary. */
3579 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3580 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3583 /* Generate the assembly code for function entry. FILE is a stdio
3584 stream to output the code to. SIZE is an int: how many units of
3585 temporary storage to allocate.
3587 Refer to the array `regs_ever_live' to determine which registers to
3588 save; `regs_ever_live[I]' is nonzero if register number I is ever
3589 used in the function. This function is responsible for knowing
3590 which registers should not be saved even if used. */
3592 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3593 of memory. If any fpu reg is used in the function, we allocate
3594 such a block here, at the bottom of the frame, just in case it's needed.
3596 If this function is a leaf procedure, then we may choose not
3597 to do a "save" insn. The decision about whether or not
3598 to do this is made in regclass.c. */
3600 static void
3601 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3603 /* The function's label and associated .PROC must never be
3604 separated and must be output *after* any profiling declarations
3605 to avoid changing spaces/subspaces within a procedure. */
3606 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3607 fputs ("\t.PROC\n", file);
3609 /* hppa_expand_prologue does the dirty work now. We just need
3610 to output the assembler directives which denote the start
3611 of a function. */
3612 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3613 if (current_function_is_leaf)
3614 fputs (",NO_CALLS", file);
3615 else
3616 fputs (",CALLS", file);
3617 if (rp_saved)
3618 fputs (",SAVE_RP", file);
3620 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3621 at the beginning of the frame and that it is used as the frame
3622 pointer for the frame. We do this because our current frame
3623 layout doesn't conform to that specified in the HP runtime
3624 documentation and we need a way to indicate to programs such as
3625 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3626 isn't used by HP compilers but is supported by the assembler.
3627 However, SAVE_SP is supposed to indicate that the previous stack
3628 pointer has been saved in the frame marker. */
3629 if (frame_pointer_needed)
3630 fputs (",SAVE_SP", file);
3632 /* Pass on information about the number of callee register saves
3633 performed in the prologue.
3635 The compiler is supposed to pass the highest register number
3636 saved, the assembler then has to adjust that number before
3637 entering it into the unwind descriptor (to account for any
3638 caller saved registers with lower register numbers than the
3639 first callee saved register). */
3640 if (gr_saved)
3641 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3643 if (fr_saved)
3644 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3646 fputs ("\n\t.ENTRY\n", file);
3648 remove_useless_addtr_insns (0);
3651 void
3652 hppa_expand_prologue (void)
3654 int merge_sp_adjust_with_store = 0;
3655 HOST_WIDE_INT size = get_frame_size ();
3656 HOST_WIDE_INT offset;
3657 int i;
3658 rtx insn, tmpreg;
3660 gr_saved = 0;
3661 fr_saved = 0;
3662 save_fregs = 0;
3664 /* Compute total size for frame pointer, filler, locals and rounding to
3665 the next word boundary. Similar code appears in compute_frame_size
3666 and must be changed in tandem with this code. */
3667 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3668 if (local_fsize || frame_pointer_needed)
3669 local_fsize += STARTING_FRAME_OFFSET;
3671 actual_fsize = compute_frame_size (size, &save_fregs);
3673 /* Compute a few things we will use often. */
3674 tmpreg = gen_rtx_REG (word_mode, 1);
3676 /* Save RP first. The calling conventions manual states RP will
3677 always be stored into the caller's frame at sp - 20 or sp - 16
3678 depending on which ABI is in use. */
3679 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3681 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3682 rp_saved = true;
3684 else
3685 rp_saved = false;
3687 /* Allocate the local frame and set up the frame pointer if needed. */
3688 if (actual_fsize != 0)
3690 if (frame_pointer_needed)
3692 /* Copy the old frame pointer temporarily into %r1. Set up the
3693 new stack pointer, then store away the saved old frame pointer
3694 into the stack at sp and at the same time update the stack
3695 pointer by actual_fsize bytes. Two versions, first
3696 handles small (<8k) frames. The second handles large (>=8k)
3697 frames. */
3698 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3699 if (DO_FRAME_NOTES)
3700 RTX_FRAME_RELATED_P (insn) = 1;
3702 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3703 if (DO_FRAME_NOTES)
3704 RTX_FRAME_RELATED_P (insn) = 1;
3706 if (VAL_14_BITS_P (actual_fsize))
3707 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3708 else
3710 /* It is incorrect to store the saved frame pointer at *sp,
3711 then increment sp (writes beyond the current stack boundary).
3713 So instead use stwm to store at *sp and post-increment the
3714 stack pointer as an atomic operation. Then increment sp to
3715 finish allocating the new frame. */
3716 HOST_WIDE_INT adjust1 = 8192 - 64;
3717 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3719 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3720 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3721 adjust2, 1);
3724 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3725 we need to store the previous stack pointer (frame pointer)
3726 into the frame marker on targets that use the HP unwind
3727 library. This allows the HP unwind library to be used to
3728 unwind GCC frames. However, we are not fully compatible
3729 with the HP library because our frame layout differs from
3730 that specified in the HP runtime specification.
3732 We don't want a frame note on this instruction as the frame
3733 marker moves during dynamic stack allocation.
3735 This instruction also serves as a blockage to prevent
3736 register spills from being scheduled before the stack
3737 pointer is raised. This is necessary as we store
3738 registers using the frame pointer as a base register,
3739 and the frame pointer is set before sp is raised. */
3740 if (TARGET_HPUX_UNWIND_LIBRARY)
3742 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3743 GEN_INT (TARGET_64BIT ? -8 : -4));
3745 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3746 frame_pointer_rtx);
3748 else
3749 emit_insn (gen_blockage ());
3751 /* no frame pointer needed. */
3752 else
3754 /* In some cases we can perform the first callee register save
3755 and allocate the stack frame at the same time. If so, just
3756 make a note of it and defer allocating the frame until saving
3757 the callee registers. */
3758 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3759 merge_sp_adjust_with_store = 1;
3760 /* Cannot optimize. Adjust the stack frame by actual_fsize
3761 bytes. */
3762 else
3763 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3764 actual_fsize, 1);
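/* Illustrative sketch of the merge case above: with local_fsize == 0
   and, say, actual_fsize == 128, the deferred adjustment lets the
   first callee save below become one store-and-modify (roughly
   "stwm %rN,128(%sp)" in 32-bit code), saving the register and
   allocating the frame in a single instruction. */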
3768 /* Normal register save.
3770 Do not save the frame pointer in the frame_pointer_needed case. It
3771 was done earlier. */
3772 if (frame_pointer_needed)
3774 offset = local_fsize;
3776 /* Saving the EH return data registers in the frame is the simplest
3777 way to get the frame unwind information emitted. We put them
3778 just before the general registers. */
3779 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3781 unsigned int i, regno;
3783 for (i = 0; ; ++i)
3785 regno = EH_RETURN_DATA_REGNO (i);
3786 if (regno == INVALID_REGNUM)
3787 break;
3789 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3790 offset += UNITS_PER_WORD;
3794 for (i = 18; i >= 4; i--)
3795 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3797 store_reg (i, offset, FRAME_POINTER_REGNUM);
3798 offset += UNITS_PER_WORD;
3799 gr_saved++;
3801 /* Account for %r3 which is saved in a special place. */
3802 gr_saved++;
3804 /* No frame pointer needed. */
3805 else
3807 offset = local_fsize - actual_fsize;
3809 /* Saving the EH return data registers in the frame is the simplest
3810 way to get the frame unwind information emitted. */
3811 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3813 unsigned int i, regno;
3815 for (i = 0; ; ++i)
3817 regno = EH_RETURN_DATA_REGNO (i);
3818 if (regno == INVALID_REGNUM)
3819 break;
3821 /* If merge_sp_adjust_with_store is nonzero, then we can
3822 optimize the first save. */
3823 if (merge_sp_adjust_with_store)
3825 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3826 merge_sp_adjust_with_store = 0;
3828 else
3829 store_reg (regno, offset, STACK_POINTER_REGNUM);
3830 offset += UNITS_PER_WORD;
3834 for (i = 18; i >= 3; i--)
3835 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3837 /* If merge_sp_adjust_with_store is nonzero, then we can
3838 optimize the first GR save. */
3839 if (merge_sp_adjust_with_store)
3841 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3842 merge_sp_adjust_with_store = 0;
3844 else
3845 store_reg (i, offset, STACK_POINTER_REGNUM);
3846 offset += UNITS_PER_WORD;
3847 gr_saved++;
3850 /* If we wanted to merge the SP adjustment with a GR save, but we never
3851 did any GR saves, then just emit the adjustment here. */
3852 if (merge_sp_adjust_with_store)
3853 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3854 actual_fsize, 1);
3857 /* The hppa calling conventions say that %r19, the pic offset
3858 register, is saved at sp - 32 (in this function's frame)
3859 when generating PIC code. FIXME: What is the correct thing
3860 to do for functions which make no calls and allocate no
3861 frame? Do we need to allocate a frame, or can we just omit
3862 the save? For now we'll just omit the save.
3864 We don't want a note on this insn as the frame marker can
3865 move if there is a dynamic stack allocation. */
3866 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3868 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3870 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3874 /* Align pointer properly (doubleword boundary). */
3875 offset = (offset + 7) & ~7;
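/* Alignment example (illustrative): offset == 20 becomes
   (20 + 7) & ~7 == 24, the next doubleword boundary, so the DFmode
   saves below stay 8-byte aligned. */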
3877 /* Floating point register store. */
3878 if (save_fregs)
3880 rtx base;
3882 /* First get the frame or stack pointer to the start of the FP register
3883 save area. */
3884 if (frame_pointer_needed)
3886 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3887 base = frame_pointer_rtx;
3889 else
3891 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3892 base = stack_pointer_rtx;
3895 /* Now actually save the FP registers. */
3896 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3898 if (df_regs_ever_live_p (i)
3899 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3901 rtx addr, insn, reg;
3902 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3903 reg = gen_rtx_REG (DFmode, i);
3904 insn = emit_move_insn (addr, reg);
3905 if (DO_FRAME_NOTES)
3907 RTX_FRAME_RELATED_P (insn) = 1;
3908 if (TARGET_64BIT)
3910 rtx mem = gen_rtx_MEM (DFmode,
3911 plus_constant (base, offset));
3912 REG_NOTES (insn)
3913 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3914 gen_rtx_SET (VOIDmode, mem, reg),
3915 REG_NOTES (insn));
3917 else
3919 rtx meml = gen_rtx_MEM (SFmode,
3920 plus_constant (base, offset));
3921 rtx memr = gen_rtx_MEM (SFmode,
3922 plus_constant (base, offset + 4));
3923 rtx regl = gen_rtx_REG (SFmode, i);
3924 rtx regr = gen_rtx_REG (SFmode, i + 1);
3925 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3926 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3927 rtvec vec;
3929 RTX_FRAME_RELATED_P (setl) = 1;
3930 RTX_FRAME_RELATED_P (setr) = 1;
3931 vec = gen_rtvec (2, setl, setr);
3932 REG_NOTES (insn)
3933 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3934 gen_rtx_SEQUENCE (VOIDmode, vec),
3935 REG_NOTES (insn));
3938 offset += GET_MODE_SIZE (DFmode);
3939 fr_saved++;
3945 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3946 Handle case where DISP > 8k by using the add_high_const patterns. */
3948 static void
3949 load_reg (int reg, HOST_WIDE_INT disp, int base)
3951 rtx dest = gen_rtx_REG (word_mode, reg);
3952 rtx basereg = gen_rtx_REG (Pmode, base);
3953 rtx src;
3955 if (VAL_14_BITS_P (disp))
3956 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3957 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3959 rtx delta = GEN_INT (disp);
3960 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3962 emit_move_insn (tmpreg, delta);
3963 if (TARGET_DISABLE_INDEXING)
3965 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3966 src = gen_rtx_MEM (word_mode, tmpreg);
3968 else
3969 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3971 else
3973 rtx delta = GEN_INT (disp);
3974 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3975 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3977 emit_move_insn (tmpreg, high);
3978 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3981 emit_move_insn (dest, src);
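/* Illustrative sketch of the HIGH/LO_SUM path above: for
   disp == 0x12345, too large for a 14-bit displacement, the RTL
   splits the offset so that something like "addil L'0x12345,%base"
   leaves the high part plus the base in %r1 and the load then uses
   the low part, "R'0x12345(%r1)".  The assembly spelling here is an
   assumption for illustration; the RTL above is authoritative. */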
3984 /* Update the total code bytes output to the text section. */
3986 static void
3987 update_total_code_bytes (int nbytes)
3989 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3990 && !IN_NAMED_SECTION_P (cfun->decl))
3992 if (INSN_ADDRESSES_SET_P ())
3994 unsigned long old_total = total_code_bytes;
3996 total_code_bytes += nbytes;
3998 /* Be prepared to handle overflows. */
3999 if (old_total > total_code_bytes)
4000 total_code_bytes = -1;
4002 else
4003 total_code_bytes = -1;
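/* Illustrative overflow case: total_code_bytes is unsigned, so a
   wraparound makes the new total compare smaller than old_total; the
   code then latches -1 (all ones) as a "size unknown" sentinel
   instead of reporting a bogus small total. */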
4007 /* This function generates the assembly code for function exit.
4008 Args are as for output_function_prologue ().
4010 The function epilogue should not depend on the current stack
4011 pointer! It should use the frame pointer only. This is mandatory
4012 because of alloca; we also take advantage of it to omit stack
4013 adjustments before returning. */
4015 static void
4016 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4018 rtx insn = get_last_insn ();
4020 last_address = 0;
4022 /* hppa_expand_epilogue does the dirty work now. We just need
4023 to output the assembler directives which denote the end
4024 of a function.
4026 To make debuggers happy, emit a nop if the epilogue was completely
4027 eliminated due to a volatile call as the last insn in the
4028 current function. That way the return address (in %r2) will
4029 always point to a valid instruction in the current function. */
4031 /* Get the last real insn. */
4032 if (GET_CODE (insn) == NOTE)
4033 insn = prev_real_insn (insn);
4035 /* If it is a sequence, then look inside. */
4036 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4037 insn = XVECEXP (PATTERN (insn), 0, 0);
4039 /* If insn is a CALL_INSN, then it must be a call to a volatile
4040 function (otherwise there would be epilogue insns). */
4041 if (insn && GET_CODE (insn) == CALL_INSN)
4043 fputs ("\tnop\n", file);
4044 last_address += 4;
4047 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4049 if (TARGET_SOM && TARGET_GAS)
4051 /* We're done with this subspace except possibly for some additional
4052 debug information. Forget that we are in this subspace to ensure
4053 that the next function is output in its own subspace. */
4054 in_section = NULL;
4055 cfun->machine->in_nsubspa = 2;
4058 if (INSN_ADDRESSES_SET_P ())
4060 insn = get_last_nonnote_insn ();
4061 last_address += INSN_ADDRESSES (INSN_UID (insn));
4062 if (INSN_P (insn))
4063 last_address += insn_default_length (insn);
4064 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4065 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4068 /* Finally, update the total number of code bytes output so far. */
4069 update_total_code_bytes (last_address);
4072 void
4073 hppa_expand_epilogue (void)
4075 rtx tmpreg;
4076 HOST_WIDE_INT offset;
4077 HOST_WIDE_INT ret_off = 0;
4078 int i;
4079 int merge_sp_adjust_with_load = 0;
4081 /* We will use this often. */
4082 tmpreg = gen_rtx_REG (word_mode, 1);
4084 /* Try to restore RP early to avoid load/use interlocks when
4085 RP gets used in the return (bv) instruction. This appears to still
4086 be necessary even when we schedule the prologue and epilogue. */
4087 if (rp_saved)
4089 ret_off = TARGET_64BIT ? -16 : -20;
4090 if (frame_pointer_needed)
4092 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4093 ret_off = 0;
4095 else
4097 /* No frame pointer, and stack is smaller than 8k. */
4098 if (VAL_14_BITS_P (ret_off - actual_fsize))
4100 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4101 ret_off = 0;
4106 /* General register restores. */
4107 if (frame_pointer_needed)
4109 offset = local_fsize;
4111 /* If the current function calls __builtin_eh_return, then we need
4112 to restore the saved EH data registers. */
4113 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4115 unsigned int i, regno;
4117 for (i = 0; ; ++i)
4119 regno = EH_RETURN_DATA_REGNO (i);
4120 if (regno == INVALID_REGNUM)
4121 break;
4123 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4124 offset += UNITS_PER_WORD;
4128 for (i = 18; i >= 4; i--)
4129 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4131 load_reg (i, offset, FRAME_POINTER_REGNUM);
4132 offset += UNITS_PER_WORD;
4135 else
4137 offset = local_fsize - actual_fsize;
4139 /* If the current function calls __builtin_eh_return, then we need
4140 to restore the saved EH data registers. */
4141 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4143 unsigned int i, regno;
4145 for (i = 0; ; ++i)
4147 regno = EH_RETURN_DATA_REGNO (i);
4148 if (regno == INVALID_REGNUM)
4149 break;
4151 /* Only for the first load.
4152 merge_sp_adjust_with_load holds the register load
4153 with which we will merge the sp adjustment. */
4154 if (merge_sp_adjust_with_load == 0
4155 && local_fsize == 0
4156 && VAL_14_BITS_P (-actual_fsize))
4157 merge_sp_adjust_with_load = regno;
4158 else
4159 load_reg (regno, offset, STACK_POINTER_REGNUM);
4160 offset += UNITS_PER_WORD;
4164 for (i = 18; i >= 3; i--)
4166 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4168 /* Only for the first load.
4169 merge_sp_adjust_with_load holds the register load
4170 with which we will merge the sp adjustment. */
4171 if (merge_sp_adjust_with_load == 0
4172 && local_fsize == 0
4173 && VAL_14_BITS_P (-actual_fsize))
4174 merge_sp_adjust_with_load = i;
4175 else
4176 load_reg (i, offset, STACK_POINTER_REGNUM);
4177 offset += UNITS_PER_WORD;
4182 /* Align pointer properly (doubleword boundary). */
4183 offset = (offset + 7) & ~7;
4185 /* FP register restores. */
4186 if (save_fregs)
4188 /* Adjust the register to index off of. */
4189 if (frame_pointer_needed)
4190 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4191 else
4192 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4194 /* Actually do the restores now. */
4195 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4196 if (df_regs_ever_live_p (i)
4197 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4199 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4200 rtx dest = gen_rtx_REG (DFmode, i);
4201 emit_move_insn (dest, src);
4205 /* Emit a blockage insn here to keep these insns from being moved to
4206 an earlier spot in the epilogue, or into the main instruction stream.
4208 This is necessary as we must not cut the stack back before all the
4209 restores are finished. */
4210 emit_insn (gen_blockage ());
4212 /* Reset stack pointer (and possibly frame pointer). The stack
4213 pointer is initially set to fp + 64 to avoid a race condition. */
4214 if (frame_pointer_needed)
4216 rtx delta = GEN_INT (-64);
4218 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4219 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4221 /* If we were deferring a callee register restore, do it now. */
4222 else if (merge_sp_adjust_with_load)
4224 rtx delta = GEN_INT (-actual_fsize);
4225 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4227 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4229 else if (actual_fsize != 0)
4230 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4231 - actual_fsize, 0);
4233 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4234 frame greater than 8k), do so now. */
4235 if (ret_off != 0)
4236 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4238 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4240 rtx sa = EH_RETURN_STACKADJ_RTX;
4242 emit_insn (gen_blockage ());
4243 emit_insn (TARGET_64BIT
4244 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4245 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4250 hppa_pic_save_rtx (void)
4252 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4255 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4256 #define NO_DEFERRED_PROFILE_COUNTERS 0
4257 #endif
4260 /* Vector of funcdef numbers. */
4261 static VEC(int,heap) *funcdef_nos;
4263 /* Output deferred profile counters. */
4264 static void
4265 output_deferred_profile_counters (void)
4267 unsigned int i;
4268 int align, n;
4270 if (VEC_empty (int, funcdef_nos))
4271 return;
4273 switch_to_section (data_section);
4274 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4275 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4277 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4279 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4280 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4283 VEC_free (int, heap, funcdef_nos);
4286 void
4287 hppa_profile_hook (int label_no)
4289 /* We use SImode for the address of the function in both 32 and
4290 64-bit code to avoid having to provide DImode versions of the
4291 lcla2 and load_offset_label_address insn patterns. */
4292 rtx reg = gen_reg_rtx (SImode);
4293 rtx label_rtx = gen_label_rtx ();
4294 rtx begin_label_rtx, call_insn;
4295 char begin_label_name[16];
4297 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4298 label_no);
4299 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4301 if (TARGET_64BIT)
4302 emit_move_insn (arg_pointer_rtx,
4303 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4304 GEN_INT (64)));
4306 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4308 /* The address of the function is loaded into %r25 with an instruction-
4309 relative sequence that avoids the use of relocations. The sequence
4310 is split so that the load_offset_label_address instruction can
4311 occupy the delay slot of the call to _mcount. */
4312 if (TARGET_PA_20)
4313 emit_insn (gen_lcla2 (reg, label_rtx));
4314 else
4315 emit_insn (gen_lcla1 (reg, label_rtx));
4317 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4318 reg, begin_label_rtx, label_rtx));
4320 #if !NO_DEFERRED_PROFILE_COUNTERS
4322 rtx count_label_rtx, addr, r24;
4323 char count_label_name[16];
4325 VEC_safe_push (int, heap, funcdef_nos, label_no);
4326 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4327 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4329 addr = force_reg (Pmode, count_label_rtx);
4330 r24 = gen_rtx_REG (Pmode, 24);
4331 emit_move_insn (r24, addr);
4333 call_insn =
4334 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4335 gen_rtx_SYMBOL_REF (Pmode,
4336 "_mcount")),
4337 GEN_INT (TARGET_64BIT ? 24 : 12)));
4339 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4341 #else
4343 call_insn =
4344 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4345 gen_rtx_SYMBOL_REF (Pmode,
4346 "_mcount")),
4347 GEN_INT (TARGET_64BIT ? 16 : 8)));
4349 #endif
4351 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4352 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4354 /* Indicate the _mcount call cannot throw, nor will it execute a
4355 non-local goto. */
4356 REG_NOTES (call_insn)
4357 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4360 /* Fetch the return address for the frame COUNT steps up from
4361 the current frame, after the prologue. FRAMEADDR is the
4362 frame pointer of the COUNT frame.
4364 We want to ignore any export stub remnants here. To handle this,
4365 we examine the code at the return address, and if it is an export
4366 stub, we return a memory rtx for the stub return address stored
4367 at frame-24.
4369 The value returned is used in two different ways:
4371 1. To find a function's caller.
4373 2. To change the return address for a function.
4375 This function handles most instances of case 1; however, it will
4376 fail if there are two levels of stubs to execute on the return
4377 path. The only way I believe that can happen is if the return value
4378 needs a parameter relocation, which never happens for C code.
4380 This function handles most instances of case 2; however, it will
4381 fail if we did not originally have stub code on the return path
4382 but will need stub code on the new return path. This can happen if
4383 the caller & callee are both in the main program, but the new
4384 return location is in a shared library. */
4387 return_addr_rtx (int count, rtx frameaddr)
4389 rtx label;
4390 rtx rp;
4391 rtx saved_rp;
4392 rtx ins;
4394 if (count != 0)
4395 return NULL_RTX;
4397 rp = get_hard_reg_initial_val (Pmode, 2);
4399 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4400 return rp;
4402 saved_rp = gen_reg_rtx (Pmode);
4403 emit_move_insn (saved_rp, rp);
4405 /* Get pointer to the instruction stream. We have to mask out the
4406 privilege level from the two low order bits of the return address
4407 pointer here so that ins will point to the start of the first
4408 instruction that would have been executed if we returned. */
4409 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
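/* Illustrative example: a return pointer of 0x00012347 (privilege
   level 3 in the two low-order bits) masks to 0x00012344, the
   address of the first instruction that would execute on return. */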
4410 label = gen_label_rtx ();
4412 /* Check the instruction stream at the normal return address for the
4413 export stub:
4415 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4416 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4417 0x00011820 | stub+16: mtsp r1,sr0
4418 0xe0400002 | stub+20: be,n 0(sr0,rp)
4420 If it is an export stub, then our return address is really in
4421 -24[frameaddr]. */
4423 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4424 NULL_RTX, SImode, 1);
4425 emit_jump_insn (gen_bne (label));
4427 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4428 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4429 emit_jump_insn (gen_bne (label));
4431 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4432 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4433 emit_jump_insn (gen_bne (label));
4435 /* 0xe0400002 must be specified as -532676606 so that it won't be
4436 rejected as an invalid immediate operand on 64-bit hosts. */
4437 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4438 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
4440 /* If there is no export stub then just use the value saved from
4441 the return pointer register. */
4443 emit_jump_insn (gen_bne (label));
4445 /* Here we know that our return address points to an export
4446 stub. We don't want to return the address of the export stub,
4447 but rather the return address of the export stub. That return
4448 address is stored at -24[frameaddr]. */
4450 emit_move_insn (saved_rp,
4451 gen_rtx_MEM (Pmode,
4452 memory_address (Pmode,
4453 plus_constant (frameaddr,
4454 -24))));
4456 emit_label (label);
4457 return saved_rp;
4460 void
4461 emit_bcond_fp (enum rtx_code code, rtx operand0)
4463 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4464 gen_rtx_IF_THEN_ELSE (VOIDmode,
4465 gen_rtx_fmt_ee (code,
4466 VOIDmode,
4467 gen_rtx_REG (CCFPmode, 0),
4468 const0_rtx),
4469 gen_rtx_LABEL_REF (VOIDmode, operand0),
4470 pc_rtx)));
4475 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4477 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4478 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4481 /* Adjust the cost of a scheduling dependency. Return the new cost of
4482 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4484 static int
4485 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4487 enum attr_type attr_type;
4489 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4490 true dependencies as they are described with bypasses now. */
4491 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4492 return cost;
4494 if (! recog_memoized (insn))
4495 return 0;
4497 attr_type = get_attr_type (insn);
4499 switch (REG_NOTE_KIND (link))
4501 case REG_DEP_ANTI:
4502 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4503 cycles later. */
4505 if (attr_type == TYPE_FPLOAD)
4507 rtx pat = PATTERN (insn);
4508 rtx dep_pat = PATTERN (dep_insn);
4509 if (GET_CODE (pat) == PARALLEL)
4511 /* This happens for the fldXs,mb patterns. */
4512 pat = XVECEXP (pat, 0, 0);
4514 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4515 /* If this happens, we have to extend this to schedule
4516 optimally. Return 0 for now. */
4517 return 0;
4519 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4521 if (! recog_memoized (dep_insn))
4522 return 0;
4523 switch (get_attr_type (dep_insn))
4525 case TYPE_FPALU:
4526 case TYPE_FPMULSGL:
4527 case TYPE_FPMULDBL:
4528 case TYPE_FPDIVSGL:
4529 case TYPE_FPDIVDBL:
4530 case TYPE_FPSQRTSGL:
4531 case TYPE_FPSQRTDBL:
4532 /* A fpload can't be issued until one cycle before a
4533 preceding arithmetic operation has finished if
4534 the target of the fpload is any of the sources
4535 (or destination) of the arithmetic operation. */
4536 return insn_default_latency (dep_insn) - 1;
4538 default:
4539 return 0;
4543 else if (attr_type == TYPE_FPALU)
4545 rtx pat = PATTERN (insn);
4546 rtx dep_pat = PATTERN (dep_insn);
4547 if (GET_CODE (pat) == PARALLEL)
4549 /* This happens for the fldXs,mb patterns. */
4550 pat = XVECEXP (pat, 0, 0);
4552 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4553 /* If this happens, we have to extend this to schedule
4554 optimally. Return 0 for now. */
4555 return 0;
4557 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4559 if (! recog_memoized (dep_insn))
4560 return 0;
4561 switch (get_attr_type (dep_insn))
4563 case TYPE_FPDIVSGL:
4564 case TYPE_FPDIVDBL:
4565 case TYPE_FPSQRTSGL:
4566 case TYPE_FPSQRTDBL:
4567 /* An ALU flop can't be issued until two cycles before a
4568 preceding divide or sqrt operation has finished if
4569 the target of the ALU flop is any of the sources
4570 (or destination) of the divide or sqrt operation. */
4571 return insn_default_latency (dep_insn) - 2;
4573 default:
4574 return 0;
4579 /* For other anti dependencies, the cost is 0. */
4580 return 0;
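/* Worked example (illustrative): if DEP_INSN is an FPALU op with a
   default latency of 3 and INSN is an fpload that writes one of its
   sources, the anti-dependency cost computed above is 3 - 1 == 2,
   so the load issues no earlier than one cycle before the flop
   finishes. */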
4582 case REG_DEP_OUTPUT:
4583 /* Output dependency; DEP_INSN writes a register that INSN writes some
4584 cycles later. */
4585 if (attr_type == TYPE_FPLOAD)
4587 rtx pat = PATTERN (insn);
4588 rtx dep_pat = PATTERN (dep_insn);
4589 if (GET_CODE (pat) == PARALLEL)
4591 /* This happens for the fldXs,mb patterns. */
4592 pat = XVECEXP (pat, 0, 0);
4594 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4595 /* If this happens, we have to extend this to schedule
4596 optimally. Return 0 for now. */
4597 return 0;
4599 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4601 if (! recog_memoized (dep_insn))
4602 return 0;
4603 switch (get_attr_type (dep_insn))
4605 case TYPE_FPALU:
4606 case TYPE_FPMULSGL:
4607 case TYPE_FPMULDBL:
4608 case TYPE_FPDIVSGL:
4609 case TYPE_FPDIVDBL:
4610 case TYPE_FPSQRTSGL:
4611 case TYPE_FPSQRTDBL:
4612 /* A fpload can't be issued until one cycle before a
4613 preceding arithmetic operation has finished if
4614 the target of the fpload is the destination of the
4615 arithmetic operation.
4617 Exception: For PA7100LC, PA7200 and PA7300, the cost
4618 is 3 cycles, unless they bundle together. We also
4619 pay the penalty if the second insn is a fpload. */
4620 return insn_default_latency (dep_insn) - 1;
4622 default:
4623 return 0;
4627 else if (attr_type == TYPE_FPALU)
4629 rtx pat = PATTERN (insn);
4630 rtx dep_pat = PATTERN (dep_insn);
4631 if (GET_CODE (pat) == PARALLEL)
4633 /* This happens for the fldXs,mb patterns. */
4634 pat = XVECEXP (pat, 0, 0);
4636 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4637 /* If this happens, we have to extend this to schedule
4638 optimally. Return 0 for now. */
4639 return 0;
4641 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4643 if (! recog_memoized (dep_insn))
4644 return 0;
4645 switch (get_attr_type (dep_insn))
4647 case TYPE_FPDIVSGL:
4648 case TYPE_FPDIVDBL:
4649 case TYPE_FPSQRTSGL:
4650 case TYPE_FPSQRTDBL:
4651 /* An ALU flop can't be issued until two cycles before a
4652 preceding divide or sqrt operation has finished if
4653 the target of the ALU flop is also the target of
4654 the divide or sqrt operation. */
4655 return insn_default_latency (dep_insn) - 2;
4657 default:
4658 return 0;
4663 /* For other output dependencies, the cost is 0. */
4664 return 0;
4666 default:
4667 gcc_unreachable ();
4671 /* Adjust scheduling priorities. We use this to try and keep addil
4672 and the next use of %r1 close together. */
4673 static int
4674 pa_adjust_priority (rtx insn, int priority)
4676 rtx set = single_set (insn);
4677 rtx src, dest;
4678 if (set)
4680 src = SET_SRC (set);
4681 dest = SET_DEST (set);
4682 if (GET_CODE (src) == LO_SUM
4683 && symbolic_operand (XEXP (src, 1), VOIDmode)
4684 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4685 priority >>= 3;
4687 else if (GET_CODE (src) == MEM
4688 && GET_CODE (XEXP (src, 0)) == LO_SUM
4689 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4690 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4691 priority >>= 1;
4693 else if (GET_CODE (dest) == MEM
4694 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4695 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4696 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4697 priority >>= 3;
4699 return priority;
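/* Illustrative example: an insn whose SET_SRC is a LO_SUM of a
   writable symbol (the use of %r1 produced by an addil) has its
   priority cut by a factor of 8, e.g. 40 >> 3 == 5, so the scheduler
   is discouraged from hoisting it far away from the addil that
   computes %r1. */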
4702 /* The 700 can only issue a single insn at a time.
4703 The 7XXX processors can issue two insns at a time.
4704 The 8000 can issue 4 insns at a time. */
4705 static int
4706 pa_issue_rate (void)
4708 switch (pa_cpu)
4710 case PROCESSOR_700: return 1;
4711 case PROCESSOR_7100: return 2;
4712 case PROCESSOR_7100LC: return 2;
4713 case PROCESSOR_7200: return 2;
4714 case PROCESSOR_7300: return 2;
4715 case PROCESSOR_8000: return 4;
4717 default:
4718 gcc_unreachable ();
4724 /* Return any length adjustment needed by INSN which already has its length
4725 computed as LENGTH. Return zero if no adjustment is necessary.
4727 For the PA: function calls, millicode calls, and backwards short
4728 conditional branches with unfilled delay slots need an adjustment by +1
4729 (to account for the NOP which will be inserted into the instruction stream).
4731 Also compute the length of an inline block move here as it is too
4732 complicated to express as a length attribute in pa.md. */
4734 pa_adjust_insn_length (rtx insn, int length)
4736 rtx pat = PATTERN (insn);
4738 /* Jumps inside switch tables which have unfilled delay slots need
4739 adjustment. */
4740 if (GET_CODE (insn) == JUMP_INSN
4741 && GET_CODE (pat) == PARALLEL
4742 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4743 return 4;
4744 /* Millicode insn with an unfilled delay slot. */
4745 else if (GET_CODE (insn) == INSN
4746 && GET_CODE (pat) != SEQUENCE
4747 && GET_CODE (pat) != USE
4748 && GET_CODE (pat) != CLOBBER
4749 && get_attr_type (insn) == TYPE_MILLI)
4750 return 4;
4751 /* Block move pattern. */
4752 else if (GET_CODE (insn) == INSN
4753 && GET_CODE (pat) == PARALLEL
4754 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4755 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4756 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4757 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4758 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4759 return compute_movmem_length (insn) - 4;
4760 /* Block clear pattern. */
4761 else if (GET_CODE (insn) == INSN
4762 && GET_CODE (pat) == PARALLEL
4763 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4764 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4765 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4766 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4767 return compute_clrmem_length (insn) - 4;
4768 /* Conditional branch with an unfilled delay slot. */
4769 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4771 /* Adjust a short backwards conditional with an unfilled delay slot. */
4772 if (GET_CODE (pat) == SET
4773 && length == 4
4774 && ! forward_branch_p (insn))
4775 return 4;
4776 else if (GET_CODE (pat) == PARALLEL
4777 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4778 && length == 4)
4779 return 4;
4780 /* Adjust dbra insn with short backwards conditional branch with
4781 unfilled delay slot -- only for the case where the counter is in a
4782 general register. */
4783 else if (GET_CODE (pat) == PARALLEL
4784 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4785 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4786 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4787 && length == 4
4788 && ! forward_branch_p (insn))
4789 return 4;
4790 else
4791 return 0;
4793 return 0;
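/* Illustrative example: a short backwards conditional branch with an
   unfilled delay slot reports length 4; the +4 returned above
   accounts for the nop that will fill the slot, so distance
   calculations see 8 bytes for the branch. */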
4796 /* Print operand X (an rtx) in assembler syntax to file FILE.
4797 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4798 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4800 void
4801 print_operand (FILE *file, rtx x, int code)
4803 switch (code)
4805 case '#':
4806 /* Output a 'nop' if there's nothing for the delay slot. */
4807 if (dbr_sequence_length () == 0)
4808 fputs ("\n\tnop", file);
4809 return;
4810 case '*':
4811 /* Output a nullification completer if there's nothing for the
4812 delay slot or nullification is requested. */
4813 if (dbr_sequence_length () == 0 ||
4814 (final_sequence &&
4815 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4816 fputs (",n", file);
4817 return;
4818 case 'R':
4819 /* Print out the second register name of a register pair.
4820 I.e., R (6) => 7. */
4821 fputs (reg_names[REGNO (x) + 1], file);
4822 return;
4823 case 'r':
4824 /* A register or zero. */
4825 if (x == const0_rtx
4826 || (x == CONST0_RTX (DFmode))
4827 || (x == CONST0_RTX (SFmode)))
4829 fputs ("%r0", file);
4830 return;
4832 else
4833 break;
4834 case 'f':
4835 /* A register or zero (floating point). */
4836 if (x == const0_rtx
4837 || (x == CONST0_RTX (DFmode))
4838 || (x == CONST0_RTX (SFmode)))
4840 fputs ("%fr0", file);
4841 return;
4843 else
4844 break;
4845 case 'A':
4847 rtx xoperands[2];
4849 xoperands[0] = XEXP (XEXP (x, 0), 0);
4850 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4851 output_global_address (file, xoperands[1], 0);
4852 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4853 return;
4856 case 'C': /* Plain (C)ondition */
4857 case 'X':
4858 switch (GET_CODE (x))
4860 case EQ:
4861 fputs ("=", file); break;
4862 case NE:
4863 fputs ("<>", file); break;
4864 case GT:
4865 fputs (">", file); break;
4866 case GE:
4867 fputs (">=", file); break;
4868 case GEU:
4869 fputs (">>=", file); break;
4870 case GTU:
4871 fputs (">>", file); break;
4872 case LT:
4873 fputs ("<", file); break;
4874 case LE:
4875 fputs ("<=", file); break;
4876 case LEU:
4877 fputs ("<<=", file); break;
4878 case LTU:
4879 fputs ("<<", file); break;
4880 default:
4881 gcc_unreachable ();
4883 return;
4884 case 'N': /* Condition, (N)egated */
4885 switch (GET_CODE (x))
4887 case EQ:
4888 fputs ("<>", file); break;
4889 case NE:
4890 fputs ("=", file); break;
4891 case GT:
4892 fputs ("<=", file); break;
4893 case GE:
4894 fputs ("<", file); break;
4895 case GEU:
4896 fputs ("<<", file); break;
4897 case GTU:
4898 fputs ("<<=", file); break;
4899 case LT:
4900 fputs (">=", file); break;
4901 case LE:
4902 fputs (">", file); break;
4903 case LEU:
4904 fputs (">>", file); break;
4905 case LTU:
4906 fputs (">>=", file); break;
4907 default:
4908 gcc_unreachable ();
4910 return;
4911 /* For floating point comparisons. Note that the output
4912 predicates are the complement of the desired mode. The
4913 conditions for GT, GE, LT, LE and LTGT cause an invalid
4914 operation exception if the result is unordered and this
4915 exception is enabled in the floating-point status register. */
4916 case 'Y':
4917 switch (GET_CODE (x))
4919 case EQ:
4920 fputs ("!=", file); break;
4921 case NE:
4922 fputs ("=", file); break;
4923 case GT:
4924 fputs ("!>", file); break;
4925 case GE:
4926 fputs ("!>=", file); break;
4927 case LT:
4928 fputs ("!<", file); break;
4929 case LE:
4930 fputs ("!<=", file); break;
4931 case LTGT:
4932 fputs ("!<>", file); break;
4933 case UNLE:
4934 fputs ("!?<=", file); break;
4935 case UNLT:
4936 fputs ("!?<", file); break;
4937 case UNGE:
4938 fputs ("!?>=", file); break;
4939 case UNGT:
4940 fputs ("!?>", file); break;
4941 case UNEQ:
4942 fputs ("!?=", file); break;
4943 case UNORDERED:
4944 fputs ("!?", file); break;
4945 case ORDERED:
4946 fputs ("?", file); break;
4947 default:
4948 gcc_unreachable ();
4950 return;
4951 case 'S': /* Condition, operands are (S)wapped. */
4952 switch (GET_CODE (x))
4954 case EQ:
4955 fputs ("=", file); break;
4956 case NE:
4957 fputs ("<>", file); break;
4958 case GT:
4959 fputs ("<", file); break;
4960 case GE:
4961 fputs ("<=", file); break;
4962 case GEU:
4963 fputs ("<<=", file); break;
4964 case GTU:
4965 fputs ("<<", file); break;
4966 case LT:
4967 fputs (">", file); break;
4968 case LE:
4969 fputs (">=", file); break;
4970 case LEU:
4971 fputs (">>=", file); break;
4972 case LTU:
4973 fputs (">>", file); break;
4974 default:
4975 gcc_unreachable ();
4977 return;
4978 case 'B': /* Condition, (B)oth swapped and negate. */
4979 switch (GET_CODE (x))
4981 case EQ:
4982 fputs ("<>", file); break;
4983 case NE:
4984 fputs ("=", file); break;
4985 case GT:
4986 fputs (">=", file); break;
4987 case GE:
4988 fputs (">", file); break;
4989 case GEU:
4990 fputs (">>", file); break;
4991 case GTU:
4992 fputs (">>=", file); break;
4993 case LT:
4994 fputs ("<=", file); break;
4995 case LE:
4996 fputs ("<", file); break;
4997 case LEU:
4998 fputs ("<<", file); break;
4999 case LTU:
5000 fputs ("<<=", file); break;
5001 default:
5002 gcc_unreachable ();
5004 return;
5005 case 'k':
5006 gcc_assert (GET_CODE (x) == CONST_INT);
5007 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5008 return;
5009 case 'Q':
5010 gcc_assert (GET_CODE (x) == CONST_INT);
5011 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5012 return;
5013 case 'L':
5014 gcc_assert (GET_CODE (x) == CONST_INT);
5015 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5016 return;
5017 case 'O':
5018 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5019 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5020 return;
5021 case 'p':
5022 gcc_assert (GET_CODE (x) == CONST_INT);
5023 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5024 return;
5025 case 'P':
5026 gcc_assert (GET_CODE (x) == CONST_INT);
5027 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5028 return;
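/* Worked examples for the deposit/extract codes above (illustrative):
   for x == 5, %Q prints 59 (64 - 5), %L prints 27 (32 - 5), %p
   prints 58 (63 - 5), and %P prints 26 (31 - 5); the masks reduce
   larger operands modulo the register width in bits. */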
5029 case 'I':
5030 if (GET_CODE (x) == CONST_INT)
5031 fputs ("i", file);
5032 return;
5033 case 'M':
5034 case 'F':
5035 switch (GET_CODE (XEXP (x, 0)))
5037 case PRE_DEC:
5038 case PRE_INC:
5039 if (ASSEMBLER_DIALECT == 0)
5040 fputs ("s,mb", file);
5041 else
5042 fputs (",mb", file);
5043 break;
5044 case POST_DEC:
5045 case POST_INC:
5046 if (ASSEMBLER_DIALECT == 0)
5047 fputs ("s,ma", file);
5048 else
5049 fputs (",ma", file);
5050 break;
5051 case PLUS:
5052 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5053 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5055 if (ASSEMBLER_DIALECT == 0)
5056 fputs ("x", file);
5058 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5059 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5061 if (ASSEMBLER_DIALECT == 0)
5062 fputs ("x,s", file);
5063 else
5064 fputs (",s", file);
5066 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5067 fputs ("s", file);
5068 break;
5069 default:
5070 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5071 fputs ("s", file);
5072 break;
5074 return;
5075 case 'G':
5076 output_global_address (file, x, 0);
5077 return;
5078 case 'H':
5079 output_global_address (file, x, 1);
5080 return;
5081 case 0: /* Don't do anything special */
5082 break;
5083 case 'Z':
5085 unsigned op[3];
5086 compute_zdepwi_operands (INTVAL (x), op);
5087 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5088 return;
5090 case 'z':
5092 unsigned op[3];
5093 compute_zdepdi_operands (INTVAL (x), op);
5094 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5095 return;
5097 case 'c':
5098 /* We can get here from a .vtable_inherit due to our
5099 CONSTANT_ADDRESS_P rejecting perfectly good constant
5100 addresses. */
5101 break;
5102 default:
5103 gcc_unreachable ();
5105 if (GET_CODE (x) == REG)
5107 fputs (reg_names [REGNO (x)], file);
5108 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5110 fputs ("R", file);
5111 return;
5113 if (FP_REG_P (x)
5114 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5115 && (REGNO (x) & 1) == 0)
5116 fputs ("L", file);
5118 else if (GET_CODE (x) == MEM)
5120 int size = GET_MODE_SIZE (GET_MODE (x));
5121 rtx base = NULL_RTX;
5122 switch (GET_CODE (XEXP (x, 0)))
5124 case PRE_DEC:
5125 case POST_DEC:
5126 base = XEXP (XEXP (x, 0), 0);
5127 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5128 break;
5129 case PRE_INC:
5130 case POST_INC:
5131 base = XEXP (XEXP (x, 0), 0);
5132 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5133 break;
5134 case PLUS:
5135 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5136 fprintf (file, "%s(%s)",
5137 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5138 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5139 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5140 fprintf (file, "%s(%s)",
5141 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5142 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5143 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5144 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5146 /* Because the REG_POINTER flag can get lost during reload,
5147 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5148 index and base registers in the combined move patterns. */
5149 rtx base = XEXP (XEXP (x, 0), 1);
5150 rtx index = XEXP (XEXP (x, 0), 0);
5152 fprintf (file, "%s(%s)",
5153 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5155 else
5156 output_address (XEXP (x, 0));
5157 break;
5158 default:
5159 output_address (XEXP (x, 0));
5160 break;
5163 else
5164 output_addr_const (file, x);
5167 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5169 void
5170 output_global_address (FILE *file, rtx x, int round_constant)
5173 /* Imagine (high (const (plus ...))). */
5174 if (GET_CODE (x) == HIGH)
5175 x = XEXP (x, 0);
5177 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5178 output_addr_const (file, x);
5179 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5181 output_addr_const (file, x);
5182 fputs ("-$global$", file);
5184 else if (GET_CODE (x) == CONST)
5186 const char *sep = "";
5187 int offset = 0; /* assembler wants -$global$ at end */
5188 rtx base = NULL_RTX;
5190 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5192 case SYMBOL_REF:
5193 base = XEXP (XEXP (x, 0), 0);
5194 output_addr_const (file, base);
5195 break;
5196 case CONST_INT:
5197 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5198 break;
5199 default:
5200 gcc_unreachable ();
5203 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5205 case SYMBOL_REF:
5206 base = XEXP (XEXP (x, 0), 1);
5207 output_addr_const (file, base);
5208 break;
5209 case CONST_INT:
5210 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5211 break;
5212 default:
5213 gcc_unreachable ();
5216 /* How bogus. The compiler is apparently responsible for
5217 rounding the constant if it uses an LR field selector.
5219 The linker and/or assembler seem a better place since
5220 they have to do this kind of thing already.
5222 If we fail to do this, HP's optimizing linker may eliminate
5223 an addil, but not update the ldw/stw/ldo instruction that
5224 uses the result of the addil. */
5225 if (round_constant)
5226 offset = ((offset + 0x1000) & ~0x1fff);
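/* Rounding example (illustrative): offset == 0x1234 becomes
   (0x1234 + 0x1000) & ~0x1fff == 0x2000, i.e. the offset is rounded
   to the nearest multiple of 0x2000 to match the LR field
   selector's rounding. */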
5228 switch (GET_CODE (XEXP (x, 0)))
5230 case PLUS:
5231 if (offset < 0)
5233 offset = -offset;
5234 sep = "-";
5236 else
5237 sep = "+";
5238 break;
5240 case MINUS:
5241 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5242 sep = "-";
5243 break;
5245 default:
5246 gcc_unreachable ();
5249 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5250 fputs ("-$global$", file);
5251 if (offset)
5252 fprintf (file, "%s%d", sep, offset);
5254 else
5255 output_addr_const (file, x);
5258 /* Output boilerplate text to appear at the beginning of the file.
5259 There are several possible versions. */
5260 #define aputs(x) fputs(x, asm_out_file)
5261 static inline void
5262 pa_file_start_level (void)
5264 if (TARGET_64BIT)
5265 aputs ("\t.LEVEL 2.0w\n");
5266 else if (TARGET_PA_20)
5267 aputs ("\t.LEVEL 2.0\n");
5268 else if (TARGET_PA_11)
5269 aputs ("\t.LEVEL 1.1\n");
5270 else
5271 aputs ("\t.LEVEL 1.0\n");
5274 static inline void
5275 pa_file_start_space (int sortspace)
5277 aputs ("\t.SPACE $PRIVATE$");
5278 if (sortspace)
5279 aputs (",SORT=16");
5280 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5281 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5282 "\n\t.SPACE $TEXT$");
5283 if (sortspace)
5284 aputs (",SORT=8");
5285 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5286 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5289 static inline void
5290 pa_file_start_file (int want_version)
5292 if (write_symbols != NO_DEBUG)
5294 output_file_directive (asm_out_file, main_input_filename);
5295 if (want_version)
5296 aputs ("\t.version\t\"01.01\"\n");
5300 static inline void
5301 pa_file_start_mcount (const char *aswhat)
5303 if (profile_flag)
5304 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5307 static void
5308 pa_elf_file_start (void)
5310 pa_file_start_level ();
5311 pa_file_start_mcount ("ENTRY");
5312 pa_file_start_file (0);
5315 static void
5316 pa_som_file_start (void)
5318 pa_file_start_level ();
5319 pa_file_start_space (0);
5320 aputs ("\t.IMPORT $global$,DATA\n"
5321 "\t.IMPORT $$dyncall,MILLICODE\n");
5322 pa_file_start_mcount ("CODE");
5323 pa_file_start_file (0);
5326 static void
5327 pa_linux_file_start (void)
5329 pa_file_start_file (1);
5330 pa_file_start_level ();
5331 pa_file_start_mcount ("CODE");
5334 static void
5335 pa_hpux64_gas_file_start (void)
5337 pa_file_start_level ();
5338 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5339 if (profile_flag)
5340 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5341 #endif
5342 pa_file_start_file (1);
5345 static void
5346 pa_hpux64_hpas_file_start (void)
5348 pa_file_start_level ();
5349 pa_file_start_space (1);
5350 pa_file_start_mcount ("CODE");
5351 pa_file_start_file (0);
5353 #undef aputs
5355 /* Search the deferred plabel list for SYMBOL and return its internal
5356 label. If an entry for SYMBOL is not found, a new entry is created. */
5359 get_deferred_plabel (rtx symbol)
5361 const char *fname = XSTR (symbol, 0);
5362 size_t i;
5364 /* See if we have already put this function on the list of deferred
5365 plabels. This list is generally small, so a linear search is not
5366 too ugly. If it proves too slow, replace it with something faster. */
5367 for (i = 0; i < n_deferred_plabels; i++)
5368 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5369 break;
5371 /* If the deferred plabel list is empty, or this entry was not found
5372 on the list, create a new entry on the list. */
5373 if (deferred_plabels == NULL || i == n_deferred_plabels)
5375 tree id;
5377 if (deferred_plabels == 0)
5378 deferred_plabels = (struct deferred_plabel *)
5379 ggc_alloc (sizeof (struct deferred_plabel));
5380 else
5381 deferred_plabels = (struct deferred_plabel *)
5382 ggc_realloc (deferred_plabels,
5383 ((n_deferred_plabels + 1)
5384 * sizeof (struct deferred_plabel)));
5386 i = n_deferred_plabels++;
5387 deferred_plabels[i].internal_label = gen_label_rtx ();
5388 deferred_plabels[i].symbol = symbol;
5390 /* Gross. We have just implicitly taken the address of this
5391 function. Mark it in the same manner as assemble_name. */
5392 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5393 if (id)
5394 mark_referenced (id);
5397 return deferred_plabels[i].internal_label;
5400 static void
5401 output_deferred_plabels (void)
5403 size_t i;
5405 /* If we have some deferred plabels, then we need to switch into the
5406 data or readonly data section, and align it to a 4 byte boundary
5407 before outputting the deferred plabels. */
5408 if (n_deferred_plabels)
5410 switch_to_section (flag_pic ? data_section : readonly_data_section);
5411 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5414 /* Now output the deferred plabels. */
5415 for (i = 0; i < n_deferred_plabels; i++)
5417 targetm.asm_out.internal_label (asm_out_file, "L",
5418 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5419 assemble_integer (deferred_plabels[i].symbol,
5420 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5424 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5425 /* Initialize optabs to point to HPUX long double emulation routines. */
5426 static void
5427 pa_hpux_init_libfuncs (void)
5429 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5430 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5431 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5432 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5433 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5434 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5435 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5436 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5437 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5439 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5440 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5441 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5442 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5443 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5444 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5445 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5447 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5448 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5449 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5450 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5452 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5453 ? "__U_Qfcnvfxt_quad_to_sgl"
5454 : "_U_Qfcnvfxt_quad_to_sgl");
5455 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5456 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5457 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5459 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5460 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5461 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5462 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5464 #endif
5466 /* HP's millicode routines mean something special to the assembler.
5467 Keep track of which ones we have used. */
5469 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5470 static void import_milli (enum millicodes);
5471 static char imported[(int) end1000];
5472 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5473 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5474 #define MILLI_START 10
5476 static void
5477 import_milli (enum millicodes code)
5479 char str[sizeof (import_string)];
5481 if (!imported[(int) code])
5483 imported[(int) code] = 1;
5484 strcpy (str, import_string);
5485 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5486 output_asm_insn (str, 0);
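/* Illustrative trace: import_milli (mulI) copies the template and
   overwrites the four dots at MILLI_START (offset 10, just past
   ".IMPORT $$") with the routine name, emitting
   ".IMPORT $$mulI,MILLICODE" exactly once per compilation unit. */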
5490 /* The register constraints have put the operands and return value in
5491 the proper registers. */
5493 const char *
5494 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5496 import_milli (mulI);
5497 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5500 /* Emit the rtl for doing a division by a constant. */
5502 /* Do magic division millicodes exist for this value? */
5503 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
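/* Reading the table (illustrative): magic_milli[7] == 1, so a divide
   by 7 can call the dedicated $$divI_7/$$divU_7 millicode.  The
   power-of-two entries (1, 2, 4, 8) are 0, presumably because those
   divisions are turned into shifts before this point. */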
5505 /* We'll use an array to keep track of the magic millicodes and
5506 whether or not we've used them already. [n][0] is signed, [n][1] is
5507 unsigned. */
5509 static int div_milli[16][2];
5512 emit_hpdiv_const (rtx *operands, int unsignedp)
5514 if (GET_CODE (operands[2]) == CONST_INT
5515 && INTVAL (operands[2]) > 0
5516 && INTVAL (operands[2]) < 16
5517 && magic_milli[INTVAL (operands[2])])
5519 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5521 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5522 emit
5523 (gen_rtx_PARALLEL
5524 (VOIDmode,
5525 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5526 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5527 SImode,
5528 gen_rtx_REG (SImode, 26),
5529 operands[2])),
5530 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5531 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5532 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5533 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5534 gen_rtx_CLOBBER (VOIDmode, ret))));
5535 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5536 return 1;
5538 return 0;
5541 const char *
5542 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5544 int divisor;
5546 /* If the divisor is a constant, try to use one of the special
5547 opcodes. */
5548 if (GET_CODE (operands[0]) == CONST_INT)
5550 static char buf[100];
5551 divisor = INTVAL (operands[0]);
5552 if (!div_milli[divisor][unsignedp])
5554 div_milli[divisor][unsignedp] = 1;
5555 if (unsignedp)
5556 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5557 else
5558 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5560 if (unsignedp)
5562 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5563 INTVAL (operands[0]));
5564 return output_millicode_call (insn,
5565 gen_rtx_SYMBOL_REF (SImode, buf));
5567 else
5569 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5570 INTVAL (operands[0]));
5571 return output_millicode_call (insn,
5572 gen_rtx_SYMBOL_REF (SImode, buf));
5575 /* Divisor isn't a special constant. */
5576 else
5578 if (unsignedp)
5580 import_milli (divU);
5581 return output_millicode_call (insn,
5582 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5584 else
5586 import_milli (divI);
5587 return output_millicode_call (insn,
5588 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5593 /* Output a $$rem millicode to do mod. */
5595 const char *
5596 output_mod_insn (int unsignedp, rtx insn)
5598 if (unsignedp)
5600 import_milli (remU);
5601 return output_millicode_call (insn,
5602 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5604 else
5606 import_milli (remI);
5607 return output_millicode_call (insn,
5608 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5612 void
5613 output_arg_descriptor (rtx call_insn)
5615 const char *arg_regs[4];
5616 enum machine_mode arg_mode;
5617 rtx link;
5618 int i, output_flag = 0;
5619 int regno;
5621 /* We neither need nor want argument location descriptors for the
5622 64-bit runtime environment or the ELF32 environment. */
5623 if (TARGET_64BIT || TARGET_ELF32)
5624 return;
5626 for (i = 0; i < 4; i++)
5627 arg_regs[i] = 0;
5629 /* Specify explicitly that no argument relocations should take place
5630 if using the portable runtime calling conventions. */
5631 if (TARGET_PORTABLE_RUNTIME)
5633 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5634 asm_out_file);
5635 return;
5638 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5639 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5640 link; link = XEXP (link, 1))
5642 rtx use = XEXP (link, 0);
5644 if (! (GET_CODE (use) == USE
5645 && GET_CODE (XEXP (use, 0)) == REG
5646 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5647 continue;
5649 arg_mode = GET_MODE (XEXP (use, 0));
5650 regno = REGNO (XEXP (use, 0));
5651 if (regno >= 23 && regno <= 26)
5653 arg_regs[26 - regno] = "GR";
5654 if (arg_mode == DImode)
5655 arg_regs[25 - regno] = "GR";
5657 else if (regno >= 32 && regno <= 39)
5659 if (arg_mode == SFmode)
5660 arg_regs[(regno - 32) / 2] = "FR";
5661 else
5663 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5664 arg_regs[(regno - 34) / 2] = "FR";
5665 arg_regs[(regno - 34) / 2 + 1] = "FU";
5666 #else
5667 arg_regs[(regno - 34) / 2] = "FU";
5668 arg_regs[(regno - 34) / 2 + 1] = "FR";
5669 #endif
5673 fputs ("\t.CALL ", asm_out_file);
5674 for (i = 0; i < 4; i++)
5676 if (arg_regs[i])
5678 if (output_flag++)
5679 fputc (',', asm_out_file);
5680 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5683 fputc ('\n', asm_out_file);
5686 static enum reg_class
5687 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5688 enum machine_mode mode, secondary_reload_info *sri)
5690 int is_symbolic, regno;
5692 /* Handle the easy stuff first. */
5693 if (class == R1_REGS)
5694 return NO_REGS;
5696 if (REG_P (x))
5698 regno = REGNO (x);
5699 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5700 return NO_REGS;
5702 else
5703 regno = -1;
5705 /* If we have something like (mem (mem (...)), we can safely assume the
5706 inner MEM will end up in a general register after reloading, so there's
5707 no need for a secondary reload. */
5708 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5709 return NO_REGS;
5711 /* Trying to load a constant into a FP register during PIC code
5712 generation requires %r1 as a scratch register. */
5713 if (flag_pic
5714 && (mode == SImode || mode == DImode)
5715 && FP_REG_CLASS_P (class)
5716 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5718 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5719 : CODE_FOR_reload_indi_r1);
5720 return NO_REGS;
5723 /* Profiling showed the PA port spends about 1.3% of its compilation
5724 time in true_regnum from calls inside pa_secondary_reload_class. */
5725 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5726 regno = true_regnum (x);
5728 /* In order to allow 14-bit displacements in integer loads and stores,
5729 we need to prevent reload from generating out of range integer mode
5730 loads and stores to the floating point registers. Previously, we
5731 used to call for a secondary reload and have emit_move_sequence()
5732 fix the instruction sequence. However, reload occasionally wouldn't
5733 generate the reload and we would end up with an invalid REG+D memory
5734 address. So, now we use an intermediate general register for most
5735 memory loads and stores. */
5736 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5737 && GET_MODE_CLASS (mode) == MODE_INT
5738 && FP_REG_CLASS_P (class))
5740 /* Reload passes (mem:SI (reg/f:DI 30 %r30) when it wants to check
5741 the secondary reload needed for a pseudo. It never passes a
5742 REG+D address. */
5743 if (GET_CODE (x) == MEM)
5745 x = XEXP (x, 0);
5747 /* We don't need an intermediate for indexed and LO_SUM DLT
5748 memory addresses. When INT14_OK_STRICT is true, it might
5749 appear that we could directly allow register indirect
5750 memory addresses. However, this doesn't work because we
5751 don't support SUBREGs in floating-point register copies
5752 and reload doesn't tell us when it's going to use a SUBREG. */
5753 if (IS_INDEX_ADDR_P (x)
5754 || IS_LO_SUM_DLT_ADDR_P (x))
5755 return NO_REGS;
5757 /* Otherwise, we need an intermediate general register. */
5758 return GENERAL_REGS;
5761 /* Request a secondary reload with a general scratch register
5762 for everything else. ??? Could symbolic operands be handled
5763 directly when generating non-pic PA 2.0 code? */
5764 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5765 return NO_REGS;
5768 /* We need a secondary register (GPR) for copies between the SAR
5769 and anything other than a general register. */
5770 if (class == SHIFT_REGS && (regno <= 0 || regno >= 32))
5772 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5773 return NO_REGS;
5776 /* A SAR<->FP register copy requires a secondary register (GPR) as
5777 well as secondary memory. */
5778 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5779 && (REGNO_REG_CLASS (regno) == SHIFT_REGS
5780 && FP_REG_CLASS_P (class)))
5782 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5783 return NO_REGS;
5786 /* Secondary reloads of symbolic operands require %r1 as a scratch
5787 register when we're generating PIC code and when the operand isn't
5788 readonly. */
5789 if (GET_CODE (x) == HIGH)
5790 x = XEXP (x, 0);
5792 /* Profiling has shown GCC spends about 2.6% of its compilation
5793 time in symbolic_operand from calls inside pa_secondary_reload_class.
5794 So, we use an inline copy to avoid useless work. */
5795 switch (GET_CODE (x))
5797 rtx op;
5799 case SYMBOL_REF:
5800 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5801 break;
5802 case LABEL_REF:
5803 is_symbolic = 1;
5804 break;
5805 case CONST:
5806 op = XEXP (x, 0);
5807 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5808 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5809 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5810 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5811 break;
5812 default:
5813 is_symbolic = 0;
5814 break;
5817 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5819 gcc_assert (mode == SImode || mode == DImode);
5820 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5821 : CODE_FOR_reload_indi_r1);
5824 return NO_REGS;
5827 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
5828 is only marked as live on entry by df-scan when it is a fixed
5829 register. It isn't a fixed register in the 64-bit runtime,
5830 so we need to mark it here. */
5832 static void
5833 pa_extra_live_on_entry (bitmap regs)
5835 if (TARGET_64BIT)
5836 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
5839 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
5840 to prevent it from being deleted. */
5842 rtx
5843 pa_eh_return_handler_rtx (void)
5845 rtx tmp;
5847 tmp = gen_rtx_PLUS (word_mode, frame_pointer_rtx,
5848 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
5849 tmp = gen_rtx_MEM (word_mode, tmp);
5850 tmp->volatil = 1;
5851 return tmp;
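/* Illustrative: with the 32-bit runtime, the rtx built above has the
   shape (mem/v:SI (plus:SI (reg:SI frame) (const_int -20))), where
   the /v flag is the volatile bit set through tmp->volatil; the
   64-bit runtime uses DImode and an offset of -16.  */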
5854 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5855 by invisible reference. As a GCC extension, we also pass anything
5856 with a zero or variable size by reference.
5858 The 64-bit runtime does not describe passing any types by invisible
5859 reference. The internals of GCC can't currently handle passing
5860 empty structures, and zero or variable length arrays when they are
5861 not passed entirely on the stack or by reference. Thus, as a GCC
5862 extension, we pass these types by reference. The HP compiler doesn't
5863 support these types, so hopefully there shouldn't be any compatibility
5864 issues. This may have to be revisited when HP releases a C99 compiler
5865 or updates the ABI. */
5867 static bool
5868 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5869 enum machine_mode mode, const_tree type,
5870 bool named ATTRIBUTE_UNUSED)
5872 HOST_WIDE_INT size;
5874 if (type)
5875 size = int_size_in_bytes (type);
5876 else
5877 size = GET_MODE_SIZE (mode);
5879 if (TARGET_64BIT)
5880 return size <= 0;
5881 else
5882 return size <= 0 || size > 8;
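/* A minimal illustration of the rule above (hypothetical types, not
   part of the build):  */
#if 0
struct s8  { int a, b; };	/* 8 bytes: passed by value in the
				   32-bit runtime.  */
struct s12 { int a, b, c; };	/* 12 bytes: passed by invisible
				   reference in the 32-bit runtime.  */
struct s0  { };			/* Zero size (itself a GCC extension):
				   passed by reference in both
				   runtimes.  */
#endif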
5885 enum direction
5886 function_arg_padding (enum machine_mode mode, const_tree type)
5888 if (mode == BLKmode
5889 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5891 /* Return none if justification is not required. */
5892 if (type
5893 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5894 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5895 return none;
5897 /* The directions set here are ignored when a BLKmode argument larger
5898 than a word is placed in a register. Different code is used for
5899 the stack and registers. This makes it difficult to have a
5900 consistent data representation for both the stack and registers.
5901 For both runtimes, the justification and padding for arguments on
5902 the stack and in registers should be identical. */
5903 if (TARGET_64BIT)
5904 /* The 64-bit runtime specifies left justification for aggregates. */
5905 return upward;
5906 else
5907 /* The 32-bit runtime architecture specifies right justification.
5908 When the argument is passed on the stack, the argument is padded
5909 with garbage on the left. The HP compiler pads with zeros. */
5910 return downward;
5913 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5914 return downward;
5915 else
5916 return none;
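/* Worked example (illustrative): in the 32-bit runtime, a 3-byte
   BLKmode argument gets downward padding, i.e. it is right justified
   within its word with the pad bytes on the left; a 4-byte aggregate
   already fills a PARM_BOUNDARY multiple, so none is returned.  */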
5920 /* Do what is necessary for `va_start'. We look at the current function
5921 to determine if stdargs or varargs is used and fill in an initial
5922 va_list. A pointer to this constructor is returned. */
5924 static rtx
5925 hppa_builtin_saveregs (void)
5927 rtx offset, dest;
5928 tree fntype = TREE_TYPE (current_function_decl);
5929 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5930 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5931 != void_type_node)))
5932 ? UNITS_PER_WORD : 0);
5934 if (argadj)
5935 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
5936 else
5937 offset = crtl->args.arg_offset_rtx;
5939 if (TARGET_64BIT)
5941 int i, off;
5943 /* Adjust for varargs/stdarg differences. */
5944 if (argadj)
5945 offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
5946 else
5947 offset = crtl->args.arg_offset_rtx;
5949 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5950 from the incoming arg pointer and growing to larger addresses. */
5951 for (i = 26, off = -64; i >= 19; i--, off += 8)
5952 emit_move_insn (gen_rtx_MEM (word_mode,
5953 plus_constant (arg_pointer_rtx, off)),
5954 gen_rtx_REG (word_mode, i));
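      /* The loop above produces this layout (offsets from the incoming
	 arg pointer): %r26 at -64, %r25 at -56, %r24 at -48, %r23 at
	 -40, %r22 at -32, %r21 at -24, %r20 at -16 and %r19 at -8.  */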
5956 /* The incoming args pointer points just beyond the flushback area;
5957 normally this is not a serious concern. However, when we are doing
5958 varargs/stdargs we want to make the arg pointer point to the start
5959 of the incoming argument area. */
5960 emit_move_insn (virtual_incoming_args_rtx,
5961 plus_constant (arg_pointer_rtx, -64));
5963 /* Now return a pointer to the first anonymous argument. */
5964 return copy_to_reg (expand_binop (Pmode, add_optab,
5965 virtual_incoming_args_rtx,
5966 offset, 0, 0, OPTAB_LIB_WIDEN));
5969 /* Store general registers on the stack. */
5970 dest = gen_rtx_MEM (BLKmode,
5971 plus_constant (crtl->args.internal_arg_pointer,
5972 -16));
5973 set_mem_alias_set (dest, get_varargs_alias_set ());
5974 set_mem_align (dest, BITS_PER_WORD);
5975 move_block_from_reg (23, dest, 4);
5977 /* move_block_from_reg will emit code to store the argument registers
5978 individually as scalar stores.
5980 However, other insns may later load from the same addresses for
5981 a structure load (passing a struct to a varargs routine).
5983 The alias code assumes that such aliasing can never happen, so we
5984 have to keep memory referencing insns from moving up beyond the
5985 last argument register store. So we emit a blockage insn here. */
5986 emit_insn (gen_blockage ());
5988 return copy_to_reg (expand_binop (Pmode, add_optab,
5989 crtl->args.internal_arg_pointer,
5990 offset, 0, 0, OPTAB_LIB_WIDEN));
5993 static void
5994 hppa_va_start (tree valist, rtx nextarg)
5996 nextarg = expand_builtin_saveregs ();
5997 std_expand_builtin_va_start (valist, nextarg);
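/* Caller-side view of the expanders above and below (a hypothetical
   example, not part of the build; assumes <stdarg.h>):  */
#if 0
int
sum (int n, ...)
{
  va_list ap;
  int i, s = 0;

  va_start (ap, n);		/* Expanded through hppa_va_start.  */
  for (i = 0; i < n; i++)
    s += va_arg (ap, int);	/* Expanded via hppa_gimplify_va_arg_expr.  */
  va_end (ap);
  return s;
}
#endif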
6000 static tree
6001 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6002 gimple_seq *post_p)
6004 if (TARGET_64BIT)
6006 /* Args grow upward. We can use the generic routines. */
6007 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6009 else /* !TARGET_64BIT */
6011 tree ptr = build_pointer_type (type);
6012 tree valist_type;
6013 tree t, u;
6014 unsigned int size, ofs;
6015 bool indirect;
6017 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6018 if (indirect)
6020 type = ptr;
6021 ptr = build_pointer_type (type);
6023 size = int_size_in_bytes (type);
6024 valist_type = TREE_TYPE (valist);
6026 /* Args grow down. Not handled by generic routines. */
6028 u = fold_convert (sizetype, size_in_bytes (type));
6029 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6030 t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);
6032 /* Copied from va-pa.h, but we probably don't need to align to
6033 word size, since we generate and preserve that invariant. */
6034 u = size_int (size > 4 ? -8 : -4);
6035 t = fold_convert (sizetype, t);
6036 t = build2 (BIT_AND_EXPR, sizetype, t, u);
6037 t = fold_convert (valist_type, t);
6039 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6041 ofs = (8 - size) % 4;
6042 if (ofs != 0)
6044 u = size_int (ofs);
6045 t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
6048 t = fold_convert (ptr, t);
6049 t = build_va_arg_indirect_ref (t);
6051 if (indirect)
6052 t = build_va_arg_indirect_ref (t);
6054 return t;
6058 /* True if MODE is valid for the target. By "valid", we mean able to
6059 be manipulated in non-trivial ways. In particular, this means all
6060 the arithmetic is supported.
6062 Currently, TImode is not valid as the HP 64-bit runtime documentation
6063 doesn't document the alignment and calling conventions for this type.
6064 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6065 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6067 static bool
6068 pa_scalar_mode_supported_p (enum machine_mode mode)
6070 int precision = GET_MODE_PRECISION (mode);
6072 switch (GET_MODE_CLASS (mode))
6074 case MODE_PARTIAL_INT:
6075 case MODE_INT:
6076 if (precision == CHAR_TYPE_SIZE)
6077 return true;
6078 if (precision == SHORT_TYPE_SIZE)
6079 return true;
6080 if (precision == INT_TYPE_SIZE)
6081 return true;
6082 if (precision == LONG_TYPE_SIZE)
6083 return true;
6084 if (precision == LONG_LONG_TYPE_SIZE)
6085 return true;
6086 return false;
6088 case MODE_FLOAT:
6089 if (precision == FLOAT_TYPE_SIZE)
6090 return true;
6091 if (precision == DOUBLE_TYPE_SIZE)
6092 return true;
6093 if (precision == LONG_DOUBLE_TYPE_SIZE)
6094 return true;
6095 return false;
6097 case MODE_DECIMAL_FLOAT:
6098 return false;
6100 default:
6101 gcc_unreachable ();
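/* For instance, under typical HP-UX settings (illustrative figures:
   CHAR=8, SHORT=16, INT=32, LONG=32 or 64, LONG LONG=64, FLOAT=32,
   DOUBLE=64, LONG DOUBLE=128), SImode and DFmode are accepted while
   TImode matches none of the MODE_INT checks above and is rejected.  */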
6105 /* This routine handles all the normal conditional branch sequences we
6106 might need to generate. It handles compare immediate vs compare
6107 register, nullification of delay slots, varying length branches,
6108 negated branches, and all combinations of the above. It returns the
6109 output appropriate to emit the branch corresponding to all given
6110 parameters. */
6112 const char *
6113 output_cbranch (rtx *operands, int negated, rtx insn)
6115 static char buf[100];
6116 int useskip = 0;
6117 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6118 int length = get_attr_length (insn);
6119 int xdelay;
6121 /* A conditional branch to the following instruction (e.g. the delay slot)
6122 is asking for a disaster. This can happen when not optimizing and
6123 when jump optimization fails.
6125 While it is usually safe to emit nothing, this can fail if the
6126 preceding instruction is a nullified branch with an empty delay
6127 slot and the same branch target as this branch. We could check
6128 for this but jump optimization should eliminate nop jumps. It
6129 is always safe to emit a nop. */
6130 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6131 return "nop";
6133 /* The doubleword form of the cmpib instruction doesn't have the LEU
6134 and GTU conditions while the cmpb instruction does. Since we accept
6135 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6136 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6137 operands[2] = gen_rtx_REG (DImode, 0);
6138 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6139 operands[1] = gen_rtx_REG (DImode, 0);
6141 /* If this is a long branch with its delay slot unfilled, set `nullify'
6142 as it can nullify the delay slot and save a nop. */
6143 if (length == 8 && dbr_sequence_length () == 0)
6144 nullify = 1;
6146 /* If this is a short forward conditional branch which did not get
6147 its delay slot filled, the delay slot can still be nullified. */
6148 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6149 nullify = forward_branch_p (insn);
6151 /* A forward branch over a single nullified insn can be done with a
6152 comclr instruction. This avoids a single cycle penalty due to
6153 mis-predicted branch if we fall through (branch not taken). */
6154 if (length == 4
6155 && next_real_insn (insn) != 0
6156 && get_attr_length (next_real_insn (insn)) == 4
6157 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6158 && nullify)
6159 useskip = 1;
6161 switch (length)
6163 /* All short conditional branches except backwards with an unfilled
6164 delay slot. */
6165 case 4:
6166 if (useskip)
6167 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6168 else
6169 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6170 if (GET_MODE (operands[1]) == DImode)
6171 strcat (buf, "*");
6172 if (negated)
6173 strcat (buf, "%B3");
6174 else
6175 strcat (buf, "%S3");
6176 if (useskip)
6177 strcat (buf, " %2,%r1,%%r0");
6178 else if (nullify)
6179 strcat (buf, ",n %2,%r1,%0");
6180 else
6181 strcat (buf, " %2,%r1,%0");
6182 break;
6184 /* All long conditionals. Note a short backward branch with an
6185 unfilled delay slot is treated just like a long backward branch
6186 with an unfilled delay slot. */
6187 case 8:
6188 /* Handle weird backwards branch with a filled delay slot
6189 which is nullified. */
6190 if (dbr_sequence_length () != 0
6191 && ! forward_branch_p (insn)
6192 && nullify)
6194 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6195 if (GET_MODE (operands[1]) == DImode)
6196 strcat (buf, "*");
6197 if (negated)
6198 strcat (buf, "%S3");
6199 else
6200 strcat (buf, "%B3");
6201 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6203 /* Handle short backwards branch with an unfilled delay slot.
6204 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6205 taken and untaken branches. */
6206 else if (dbr_sequence_length () == 0
6207 && ! forward_branch_p (insn)
6208 && INSN_ADDRESSES_SET_P ()
6209 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6210 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6212 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6213 if (GET_MODE (operands[1]) == DImode)
6214 strcat (buf, "*");
6215 if (negated)
6216 strcat (buf, "%B3 %2,%r1,%0%#");
6217 else
6218 strcat (buf, "%S3 %2,%r1,%0%#");
6220 else
6222 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6223 if (GET_MODE (operands[1]) == DImode)
6224 strcat (buf, "*");
6225 if (negated)
6226 strcat (buf, "%S3");
6227 else
6228 strcat (buf, "%B3");
6229 if (nullify)
6230 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6231 else
6232 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6234 break;
6236 default:
6237 /* The reversed conditional branch must branch over one additional
6238 instruction if the delay slot is filled and needs to be extracted
6239 by output_lbranch. If the delay slot is empty or this is a
6240 nullified forward branch, the instruction after the reversed
6241 condition branch must be nullified. */
6242 if (dbr_sequence_length () == 0
6243 || (nullify && forward_branch_p (insn)))
6245 nullify = 1;
6246 xdelay = 0;
6247 operands[4] = GEN_INT (length);
6249 else
6251 xdelay = 1;
6252 operands[4] = GEN_INT (length + 4);
6255 /* Create a reversed conditional branch which branches around
6256 the following insns. */
6257 if (GET_MODE (operands[1]) != DImode)
6259 if (nullify)
6261 if (negated)
6262 strcpy (buf,
6263 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6264 else
6265 strcpy (buf,
6266 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6268 else
6270 if (negated)
6271 strcpy (buf,
6272 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6273 else
6274 strcpy (buf,
6275 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6278 else
6280 if (nullify)
6282 if (negated)
6283 strcpy (buf,
6284 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6285 else
6286 strcpy (buf,
6287 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6289 else
6291 if (negated)
6292 strcpy (buf,
6293 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6294 else
6295 strcpy (buf,
6296 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6300 output_asm_insn (buf, operands);
6301 return output_lbranch (operands[0], insn, xdelay);
6303 return buf;
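/* As an illustration, a short (length 4) nullified, non-negated SImode
   comparison builds the template

	{com%I2b,|cmp%I2b,}%S3,n %2,%r1,%0

   which resolves at final output to a single comb (pre-2.0) or cmpb
   (PA 2.0) instruction with a nullified delay slot.  */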
6306 /* This routine handles output of long unconditional branches that
6307 exceed the maximum range of a simple branch instruction. Since
6308 we don't have a register available for the branch, we save register
6309 %r1 in the frame marker, load the branch destination DEST into %r1,
6310 execute the branch, and restore %r1 in the delay slot of the branch.
6312 Since long branches may have an insn in the delay slot and the
6313 delay slot is used to restore %r1, we in general need to extract
6314 this insn and execute it before the branch. However, to facilitate
6315 use of this function by conditional branches, we also provide an
6316 option to not extract the delay insn so that it will be emitted
6317 after the long branch. So, if there is an insn in the delay slot,
6318 it is extracted if XDELAY is nonzero.
6320 The lengths of the various long-branch sequences are 20, 16 and 24
6321 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6323 const char *
6324 output_lbranch (rtx dest, rtx insn, int xdelay)
6326 rtx xoperands[2];
6328 xoperands[0] = dest;
6330 /* First, free up the delay slot. */
6331 if (xdelay && dbr_sequence_length () != 0)
6333 /* We can't handle a jump in the delay slot. */
6334 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6336 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6337 optimize, 0, NULL);
6339 /* Now delete the delay insn. */
6340 SET_INSN_DELETED (NEXT_INSN (insn));
6343 /* Output an insn to save %r1. The runtime documentation doesn't
6344 specify whether the "Clean Up" slot in the caller's frame can
6345 be clobbered by the callee. It isn't copied by HP's builtin
6346 alloca, so this suggests that it can be clobbered if necessary.
6347 The "Static Link" location is copied by HP builtin alloca, so
6348 we avoid using it. Using the cleanup slot might be a problem
6349 if we have to interoperate with languages that pass cleanup
6350 information. However, it should be possible to handle these
6351 situations with GCC's asm feature.
6353 The "Current RP" slot is reserved for the called procedure, so
6354 we try to use it when we don't have a frame of our own. It's
6355 rather unlikely that we won't have a frame when we need to emit
6356 a very long branch.
6358 Really the way to go long term is a register scavenger; go to
6359 the target of the jump and find a register which we can use
6360 as a scratch to hold the value in %r1. Then, we wouldn't have
6361 to free up the delay slot or clobber a slot that may be needed
6362 for other purposes. */
6363 if (TARGET_64BIT)
6365 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6366 /* Use the return pointer slot in the frame marker. */
6367 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6368 else
6369 /* Use the slot at -40 in the frame marker since HP builtin
6370 alloca doesn't copy it. */
6371 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6373 else
6375 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6376 /* Use the return pointer slot in the frame marker. */
6377 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6378 else
6379 /* Use the "Clean Up" slot in the frame marker. In GCC,
6380 the only other use of this location is for copying a
6381 floating point double argument from a floating-point
6382 register to two general registers. The copy is done
6383 as an "atomic" operation when outputting a call, so it
6384 won't interfere with our using the location here. */
6385 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6388 if (TARGET_PORTABLE_RUNTIME)
6390 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6391 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6392 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6394 else if (flag_pic)
6396 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6397 if (TARGET_SOM || !TARGET_GAS)
6399 xoperands[1] = gen_label_rtx ();
6400 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6401 targetm.asm_out.internal_label (asm_out_file, "L",
6402 CODE_LABEL_NUMBER (xoperands[1]));
6403 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6405 else
6407 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6408 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6410 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6412 else
6413 /* Now output a very long branch to the original target. */
6414 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6416 /* Now restore the value of %r1 in the delay slot. */
6417 if (TARGET_64BIT)
6419 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6420 return "ldd -16(%%r30),%%r1";
6421 else
6422 return "ldd -40(%%r30),%%r1";
6424 else
6426 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6427 return "ldw -20(%%r30),%%r1";
6428 else
6429 return "ldw -12(%%r30),%%r1";
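/* Putting the pieces together, the 32-bit PIC sequence when the
   function has a frame looks roughly like (illustrative, GAS non-SOM
   path, with a hypothetical label "target"):

	stw %r1,-12(%r30)
	bl .+8,%r1
	addil L'target-$PIC_pcrel$0+4,%r1
	ldo R'target-$PIC_pcrel$0+8(%r1),%r1
	bv %r0(%r1)
	ldw -12(%r30),%r1

   with the final load sitting in the delay slot, matching the 24-byte
   PIC figure quoted above.  */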
6433 /* This routine handles all the branch-on-bit conditional branch sequences we
6434 might need to generate. It handles nullification of delay slots,
6435 varying length branches, negated branches and all combinations of the
6436 above. It returns the appropriate output template to emit the branch. */
6438 const char *
6439 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6441 static char buf[100];
6442 int useskip = 0;
6443 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6444 int length = get_attr_length (insn);
6445 int xdelay;
6447 /* A conditional branch to the following instruction (e.g. the delay slot) is
6448 asking for a disaster. I do not think this can happen as this pattern
6449 is only used when optimizing; jump optimization should eliminate the
6450 jump. But be prepared just in case. */
6452 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6453 return "nop";
6455 /* If this is a long branch with its delay slot unfilled, set `nullify'
6456 as it can nullify the delay slot and save a nop. */
6457 if (length == 8 && dbr_sequence_length () == 0)
6458 nullify = 1;
6460 /* If this is a short forward conditional branch which did not get
6461 its delay slot filled, the delay slot can still be nullified. */
6462 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6463 nullify = forward_branch_p (insn);
6465 /* A forward branch over a single nullified insn can be done with an
6466 extrs instruction. This avoids a single cycle penalty due to
6467 mis-predicted branch if we fall through (branch not taken). */
6469 if (length == 4
6470 && next_real_insn (insn) != 0
6471 && get_attr_length (next_real_insn (insn)) == 4
6472 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6473 && nullify)
6474 useskip = 1;
6476 switch (length)
6479 /* All short conditional branches except backwards with an unfilled
6480 delay slot. */
6481 case 4:
6482 if (useskip)
6483 strcpy (buf, "{extrs,|extrw,s,}");
6484 else
6485 strcpy (buf, "bb,");
6486 if (useskip && GET_MODE (operands[0]) == DImode)
6487 strcpy (buf, "extrd,s,*");
6488 else if (GET_MODE (operands[0]) == DImode)
6489 strcpy (buf, "bb,*");
6490 if ((which == 0 && negated)
6491 || (which == 1 && ! negated))
6492 strcat (buf, ">=");
6493 else
6494 strcat (buf, "<");
6495 if (useskip)
6496 strcat (buf, " %0,%1,1,%%r0");
6497 else if (nullify && negated)
6498 strcat (buf, ",n %0,%1,%3");
6499 else if (nullify && ! negated)
6500 strcat (buf, ",n %0,%1,%2");
6501 else if (! nullify && negated)
6502 strcat (buf, "%0,%1,%3");
6503 else if (! nullify && ! negated)
6504 strcat (buf, " %0,%1,%2");
6505 break;
6507 /* All long conditionals. Note a short backward branch with an
6508 unfilled delay slot is treated just like a long backward branch
6509 with an unfilled delay slot. */
6510 case 8:
6511 /* Handle weird backwards branch with a filled delay slot
6512 which is nullified. */
6513 if (dbr_sequence_length () != 0
6514 && ! forward_branch_p (insn)
6515 && nullify)
6517 strcpy (buf, "bb,");
6518 if (GET_MODE (operands[0]) == DImode)
6519 strcat (buf, "*");
6520 if ((which == 0 && negated)
6521 || (which == 1 && ! negated))
6522 strcat (buf, "<");
6523 else
6524 strcat (buf, ">=");
6525 if (negated)
6526 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6527 else
6528 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6530 /* Handle short backwards branch with an unfilled delay slot.
6531 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6532 taken and untaken branches. */
6533 else if (dbr_sequence_length () == 0
6534 && ! forward_branch_p (insn)
6535 && INSN_ADDRESSES_SET_P ()
6536 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6537 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6539 strcpy (buf, "bb,");
6540 if (GET_MODE (operands[0]) == DImode)
6541 strcat (buf, "*");
6542 if ((which == 0 && negated)
6543 || (which == 1 && ! negated))
6544 strcat (buf, ">=");
6545 else
6546 strcat (buf, "<");
6547 if (negated)
6548 strcat (buf, " %0,%1,%3%#");
6549 else
6550 strcat (buf, " %0,%1,%2%#");
6552 else
6554 if (GET_MODE (operands[0]) == DImode)
6555 strcpy (buf, "extrd,s,*");
6556 else
6557 strcpy (buf, "{extrs,|extrw,s,}");
6558 if ((which == 0 && negated)
6559 || (which == 1 && ! negated))
6560 strcat (buf, "<");
6561 else
6562 strcat (buf, ">=");
6563 if (nullify && negated)
6564 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6565 else if (nullify && ! negated)
6566 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6567 else if (negated)
6568 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6569 else
6570 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6572 break;
6574 default:
6575 /* The reversed conditional branch must branch over one additional
6576 instruction if the delay slot is filled and needs to be extracted
6577 by output_lbranch. If the delay slot is empty or this is a
6578 nullified forward branch, the instruction after the reversed
6579 condition branch must be nullified. */
6580 if (dbr_sequence_length () == 0
6581 || (nullify && forward_branch_p (insn)))
6583 nullify = 1;
6584 xdelay = 0;
6585 operands[4] = GEN_INT (length);
6587 else
6589 xdelay = 1;
6590 operands[4] = GEN_INT (length + 4);
6593 if (GET_MODE (operands[0]) == DImode)
6594 strcpy (buf, "bb,*");
6595 else
6596 strcpy (buf, "bb,");
6597 if ((which == 0 && negated)
6598 || (which == 1 && !negated))
6599 strcat (buf, "<");
6600 else
6601 strcat (buf, ">=");
6602 if (nullify)
6603 strcat (buf, ",n %0,%1,.+%4");
6604 else
6605 strcat (buf, " %0,%1,.+%4");
6606 output_asm_insn (buf, operands);
6607 return output_lbranch (negated ? operands[3] : operands[2],
6608 insn, xdelay);
6610 return buf;
6613 /* This routine handles all the branch-on-variable-bit conditional branch
6614 sequences we might need to generate. It handles nullification of delay
6615 slots, varying length branches, negated branches and all combinations
6616 of the above. It returns the appropriate output template to emit the
6617 branch. */
6619 const char *
6620 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6622 static char buf[100];
6623 int useskip = 0;
6624 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6625 int length = get_attr_length (insn);
6626 int xdelay;
6628 /* A conditional branch to the following instruction (e.g. the delay slot) is
6629 asking for a disaster. I do not think this can happen as this pattern
6630 is only used when optimizing; jump optimization should eliminate the
6631 jump. But be prepared just in case. */
6633 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6634 return "nop";
6636 /* If this is a long branch with its delay slot unfilled, set `nullify'
6637 as it can nullify the delay slot and save a nop. */
6638 if (length == 8 && dbr_sequence_length () == 0)
6639 nullify = 1;
6641 /* If this is a short forward conditional branch which did not get
6642 its delay slot filled, the delay slot can still be nullified. */
6643 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6644 nullify = forward_branch_p (insn);
6646 /* A forward branch over a single nullified insn can be done with an
6647 extrs instruction. This avoids a single cycle penalty due to
6648 mis-predicted branch if we fall through (branch not taken). */
6650 if (length == 4
6651 && next_real_insn (insn) != 0
6652 && get_attr_length (next_real_insn (insn)) == 4
6653 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6654 && nullify)
6655 useskip = 1;
6657 switch (length)
6660 /* All short conditional branches except backwards with an unfilled
6661 delay slot. */
6662 case 4:
6663 if (useskip)
6664 strcpy (buf, "{vextrs,|extrw,s,}");
6665 else
6666 strcpy (buf, "{bvb,|bb,}");
6667 if (useskip && GET_MODE (operands[0]) == DImode)
6668 strcpy (buf, "extrd,s,*");
6669 else if (GET_MODE (operands[0]) == DImode)
6670 strcpy (buf, "bb,*");
6671 if ((which == 0 && negated)
6672 || (which == 1 && ! negated))
6673 strcat (buf, ">=");
6674 else
6675 strcat (buf, "<");
6676 if (useskip)
6677 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6678 else if (nullify && negated)
6679 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6680 else if (nullify && ! negated)
6681 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6682 else if (! nullify && negated)
6683 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6684 else if (! nullify && ! negated)
6685 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6686 break;
6688 /* All long conditionals. Note a short backward branch with an
6689 unfilled delay slot is treated just like a long backward branch
6690 with an unfilled delay slot. */
6691 case 8:
6692 /* Handle weird backwards branch with a filled delay slot
6693 which is nullified. */
6694 if (dbr_sequence_length () != 0
6695 && ! forward_branch_p (insn)
6696 && nullify)
6698 strcpy (buf, "{bvb,|bb,}");
6699 if (GET_MODE (operands[0]) == DImode)
6700 strcat (buf, "*");
6701 if ((which == 0 && negated)
6702 || (which == 1 && ! negated))
6703 strcat (buf, "<");
6704 else
6705 strcat (buf, ">=");
6706 if (negated)
6707 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6708 else
6709 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6711 /* Handle short backwards branch with an unfilled delay slot.
6712 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6713 taken and untaken branches. */
6714 else if (dbr_sequence_length () == 0
6715 && ! forward_branch_p (insn)
6716 && INSN_ADDRESSES_SET_P ()
6717 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6718 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6720 strcpy (buf, "{bvb,|bb,}");
6721 if (GET_MODE (operands[0]) == DImode)
6722 strcat (buf, "*");
6723 if ((which == 0 && negated)
6724 || (which == 1 && ! negated))
6725 strcat (buf, ">=");
6726 else
6727 strcat (buf, "<");
6728 if (negated)
6729 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6730 else
6731 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6733 else
6735 strcpy (buf, "{vextrs,|extrw,s,}");
6736 if (GET_MODE (operands[0]) == DImode)
6737 strcpy (buf, "extrd,s,*");
6738 if ((which == 0 && negated)
6739 || (which == 1 && ! negated))
6740 strcat (buf, "<");
6741 else
6742 strcat (buf, ">=");
6743 if (nullify && negated)
6744 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6745 else if (nullify && ! negated)
6746 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6747 else if (negated)
6748 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6749 else
6750 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6752 break;
6754 default:
6755 /* The reversed conditional branch must branch over one additional
6756 instruction if the delay slot is filled and needs to be extracted
6757 by output_lbranch. If the delay slot is empty or this is a
6758 nullified forward branch, the instruction after the reversed
6759 condition branch must be nullified. */
6760 if (dbr_sequence_length () == 0
6761 || (nullify && forward_branch_p (insn)))
6763 nullify = 1;
6764 xdelay = 0;
6765 operands[4] = GEN_INT (length);
6767 else
6769 xdelay = 1;
6770 operands[4] = GEN_INT (length + 4);
6773 if (GET_MODE (operands[0]) == DImode)
6774 strcpy (buf, "bb,*");
6775 else
6776 strcpy (buf, "{bvb,|bb,}");
6777 if ((which == 0 && negated)
6778 || (which == 1 && !negated))
6779 strcat (buf, "<");
6780 else
6781 strcat (buf, ">=");
6782 if (nullify)
6783 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6784 else
6785 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6786 output_asm_insn (buf, operands);
6787 return output_lbranch (negated ? operands[3] : operands[2],
6788 insn, xdelay);
6790 return buf;
6793 /* Return the output template for emitting a dbra type insn.
6795 Note it may perform some output operations on its own before
6796 returning the final output string. */
6797 const char *
6798 output_dbra (rtx *operands, rtx insn, int which_alternative)
6800 int length = get_attr_length (insn);
6802 /* A conditional branch to the following instruction (e.g. the delay slot) is
6803 asking for a disaster. Be prepared! */
6805 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6807 if (which_alternative == 0)
6808 return "ldo %1(%0),%0";
6809 else if (which_alternative == 1)
6811 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6812 output_asm_insn ("ldw -16(%%r30),%4", operands);
6813 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6814 return "{fldws|fldw} -16(%%r30),%0";
6816 else
6818 output_asm_insn ("ldw %0,%4", operands);
6819 return "ldo %1(%4),%4\n\tstw %4,%0";
6823 if (which_alternative == 0)
6825 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6826 int xdelay;
6828 /* If this is a long branch with its delay slot unfilled, set `nullify'
6829 as it can nullify the delay slot and save a nop. */
6830 if (length == 8 && dbr_sequence_length () == 0)
6831 nullify = 1;
6833 /* If this is a short forward conditional branch which did not get
6834 its delay slot filled, the delay slot can still be nullified. */
6835 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6836 nullify = forward_branch_p (insn);
6838 switch (length)
6840 case 4:
6841 if (nullify)
6842 return "addib,%C2,n %1,%0,%3";
6843 else
6844 return "addib,%C2 %1,%0,%3";
6846 case 8:
6847 /* Handle weird backwards branch with a filled delay slot
6848 which is nullified. */
6849 if (dbr_sequence_length () != 0
6850 && ! forward_branch_p (insn)
6851 && nullify)
6852 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6853 /* Handle short backwards branch with an unfilled delay slot.
6854 Using a addb;nop rather than addi;bl saves 1 cycle for both
6855 taken and untaken branches. */
6856 else if (dbr_sequence_length () == 0
6857 && ! forward_branch_p (insn)
6858 && INSN_ADDRESSES_SET_P ()
6859 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6860 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6861 return "addib,%C2 %1,%0,%3%#";
6863 /* Handle normal cases. */
6864 if (nullify)
6865 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6866 else
6867 return "addi,%N2 %1,%0,%0\n\tb %3";
6869 default:
6870 /* The reversed conditional branch must branch over one additional
6871 instruction if the delay slot is filled and needs to be extracted
6872 by output_lbranch. If the delay slot is empty or this is a
6873 nullified forward branch, the instruction after the reversed
6874 condition branch must be nullified. */
6875 if (dbr_sequence_length () == 0
6876 || (nullify && forward_branch_p (insn)))
6878 nullify = 1;
6879 xdelay = 0;
6880 operands[4] = GEN_INT (length);
6882 else
6884 xdelay = 1;
6885 operands[4] = GEN_INT (length + 4);
6888 if (nullify)
6889 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6890 else
6891 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6893 return output_lbranch (operands[3], insn, xdelay);
6897 /* Deal with gross reload from FP register case. */
6898 else if (which_alternative == 1)
6900 /* Move loop counter from FP register to MEM then into a GR,
6901 increment the GR, store the GR into MEM, and finally reload
6902 the FP register from MEM from within the branch's delay slot. */
6903 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6904 operands);
6905 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6906 if (length == 24)
6907 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6908 else if (length == 28)
6909 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6910 else
6912 operands[5] = GEN_INT (length - 16);
6913 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6914 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6915 return output_lbranch (operands[3], insn, 0);
6918 /* Deal with gross reload from memory case. */
6919 else
6921 /* Reload loop counter from memory, the store back to memory
6922 happens in the branch's delay slot. */
6923 output_asm_insn ("ldw %0,%4", operands);
6924 if (length == 12)
6925 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6926 else if (length == 16)
6927 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6928 else
6930 operands[5] = GEN_INT (length - 4);
6931 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6932 return output_lbranch (operands[3], insn, 0);
6937 /* Return the output template for emitting a movb type insn.
6939 Note it may perform some output operations on its own before
6940 returning the final output string. */
6941 const char *
6942 output_movb (rtx *operands, rtx insn, int which_alternative,
6943 int reverse_comparison)
6945 int length = get_attr_length (insn);
6947 /* A conditional branch to the following instruction (e.g. the delay slot) is
6948 asking for a disaster. Be prepared! */
6950 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6952 if (which_alternative == 0)
6953 return "copy %1,%0";
6954 else if (which_alternative == 1)
6956 output_asm_insn ("stw %1,-16(%%r30)", operands);
6957 return "{fldws|fldw} -16(%%r30),%0";
6959 else if (which_alternative == 2)
6960 return "stw %1,%0";
6961 else
6962 return "mtsar %r1";
6965 /* Support the second variant. */
6966 if (reverse_comparison)
6967 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6969 if (which_alternative == 0)
6971 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6972 int xdelay;
6974 /* If this is a long branch with its delay slot unfilled, set `nullify'
6975 as it can nullify the delay slot and save a nop. */
6976 if (length == 8 && dbr_sequence_length () == 0)
6977 nullify = 1;
6979 /* If this is a short forward conditional branch which did not get
6980 its delay slot filled, the delay slot can still be nullified. */
6981 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6982 nullify = forward_branch_p (insn);
6984 switch (length)
6986 case 4:
6987 if (nullify)
6988 return "movb,%C2,n %1,%0,%3";
6989 else
6990 return "movb,%C2 %1,%0,%3";
6992 case 8:
6993 /* Handle weird backwards branch with a filled delay slot
6994 which is nullified. */
6995 if (dbr_sequence_length () != 0
6996 && ! forward_branch_p (insn)
6997 && nullify)
6998 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7000 /* Handle short backwards branch with an unfilled delay slot.
7001 Using a movb;nop rather than or;bl saves 1 cycle for both
7002 taken and untaken branches. */
7003 else if (dbr_sequence_length () == 0
7004 && ! forward_branch_p (insn)
7005 && INSN_ADDRESSES_SET_P ()
7006 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7007 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7008 return "movb,%C2 %1,%0,%3%#";
7009 /* Handle normal cases. */
7010 if (nullify)
7011 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7012 else
7013 return "or,%N2 %1,%%r0,%0\n\tb %3";
7015 default:
7016 /* The reversed conditional branch must branch over one additional
7017 instruction if the delay slot is filled and needs to be extracted
7018 by output_lbranch. If the delay slot is empty or this is a
7019 nullified forward branch, the instruction after the reversed
7020 condition branch must be nullified. */
7021 if (dbr_sequence_length () == 0
7022 || (nullify && forward_branch_p (insn)))
7024 nullify = 1;
7025 xdelay = 0;
7026 operands[4] = GEN_INT (length);
7028 else
7030 xdelay = 1;
7031 operands[4] = GEN_INT (length + 4);
7034 if (nullify)
7035 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7036 else
7037 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7039 return output_lbranch (operands[3], insn, xdelay);
7042 /* Deal with gross reload for FP destination register case. */
7043 else if (which_alternative == 1)
7045 /* Move source register to MEM, perform the branch test, then
7046 finally load the FP register from MEM from within the branch's
7047 delay slot. */
7048 output_asm_insn ("stw %1,-16(%%r30)", operands);
7049 if (length == 12)
7050 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7051 else if (length == 16)
7052 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7053 else
7055 operands[4] = GEN_INT (length - 4);
7056 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7057 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7058 return output_lbranch (operands[3], insn, 0);
7061 /* Deal with gross reload from memory case. */
7062 else if (which_alternative == 2)
7064 /* Reload loop counter from memory, the store back to memory
7065 happens in the branch's delay slot. */
7066 if (length == 8)
7067 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7068 else if (length == 12)
7069 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7070 else
7072 operands[4] = GEN_INT (length);
7073 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7074 operands);
7075 return output_lbranch (operands[3], insn, 0);
7078 /* Handle SAR as a destination. */
7079 else
7081 if (length == 8)
7082 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7083 else if (length == 12)
7084 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7085 else
7087 operands[4] = GEN_INT (length);
7088 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7089 operands);
7090 return output_lbranch (operands[3], insn, 0);
7095 /* Copy any FP arguments in INSN into integer registers. */
7096 static void
7097 copy_fp_args (rtx insn)
7099 rtx link;
7100 rtx xoperands[2];
7102 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7104 int arg_mode, regno;
7105 rtx use = XEXP (link, 0);
7107 if (! (GET_CODE (use) == USE
7108 && GET_CODE (XEXP (use, 0)) == REG
7109 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7110 continue;
7112 arg_mode = GET_MODE (XEXP (use, 0));
7113 regno = REGNO (XEXP (use, 0));
7115 /* Is it a floating point register? */
7116 if (regno >= 32 && regno <= 39)
7118 /* Copy the FP register into an integer register via memory. */
7119 if (arg_mode == SFmode)
7121 xoperands[0] = XEXP (use, 0);
7122 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7123 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7124 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7126 else
7128 xoperands[0] = XEXP (use, 0);
7129 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7130 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7131 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7132 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
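/* For an SFmode argument (illustrative register choice, %fr4 say,
   copied to %r26), the sequence emitted above is simply:

	fstw %fr4,-16(%sr0,%r30)
	ldw -16(%sr0,%r30),%r26

   i.e. a store to the stack slot at -16 followed by a reload into the
   corresponding general argument register; DFmode needs one store and
   two word loads.  */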
7138 /* Compute length of the FP argument copy sequence for INSN. */
7139 static int
7140 length_fp_args (rtx insn)
7142 int length = 0;
7143 rtx link;
7145 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7147 int arg_mode, regno;
7148 rtx use = XEXP (link, 0);
7150 if (! (GET_CODE (use) == USE
7151 && GET_CODE (XEXP (use, 0)) == REG
7152 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7153 continue;
7155 arg_mode = GET_MODE (XEXP (use, 0));
7156 regno = REGNO (XEXP (use, 0));
7158 /* Is it a floating point register? */
7159 if (regno >= 32 && regno <= 39)
7161 if (arg_mode == SFmode)
7162 length += 8;
7163 else
7164 length += 12;
7168 return length;
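/* Worked example: a call passing one SFmode and one DFmode value in FP
   registers gets 8 + 12 = 20 bytes of copy code, matching the two- and
   three-instruction sequences emitted by copy_fp_args above.  */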
7171 /* Return the attribute length for the millicode call instruction INSN.
7172 The length must match the code generated by output_millicode_call.
7173 We include the delay slot in the returned length as it is better to
7174 overestimate the length than to underestimate it. */
7176 int
7177 attr_length_millicode_call (rtx insn)
7179 unsigned long distance = -1;
7180 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7182 if (INSN_ADDRESSES_SET_P ())
7184 distance = (total + insn_current_reference_address (insn));
7185 if (distance < total)
7186 distance = -1;
7189 if (TARGET_64BIT)
7191 if (!TARGET_LONG_CALLS && distance < 7600000)
7192 return 8;
7194 return 20;
7196 else if (TARGET_PORTABLE_RUNTIME)
7197 return 24;
7198 else
7200 if (!TARGET_LONG_CALLS && distance < 240000)
7201 return 8;
7203 if (TARGET_LONG_ABS_CALL && !flag_pic)
7204 return 12;
7206 return 24;
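/* For instance, with the default 32-bit options and a millicode
   routine within 240000 bytes, the result is 8 (branch plus delay
   slot); a long absolute call (TARGET_LONG_ABS_CALL, non-PIC) costs
   12, and everything else is assumed to need the full 24-byte
   sequence.  */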
7210 /* INSN is a function call. It may have an unconditional jump
7211 in its delay slot.
7213 CALL_DEST is the routine we are calling. */
7215 const char *
7216 output_millicode_call (rtx insn, rtx call_dest)
7218 int attr_length = get_attr_length (insn);
7219 int seq_length = dbr_sequence_length ();
7220 int distance;
7221 rtx seq_insn;
7222 rtx xoperands[3];
7224 xoperands[0] = call_dest;
7225 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7227 /* Handle the common case where we are sure that the branch will
7228 reach the beginning of the $CODE$ subspace. The within reach
7229 form of the $$sh_func_adrs call has a length of 28. Because
7230 it has an attribute type of multi, it never has a nonzero
7231 sequence length. The length of the $$sh_func_adrs call is the same
7232 as certain out of reach PIC calls to other routines. */
7233 if (!TARGET_LONG_CALLS
7234 && ((seq_length == 0
7235 && (attr_length == 12
7236 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7237 || (seq_length != 0 && attr_length == 8)))
7239 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7241 else
7243 if (TARGET_64BIT)
7245 /* It might seem that one insn could be saved by accessing
7246 the millicode function using the linkage table. However,
7247 this doesn't work in shared libraries and other dynamically
7248 loaded objects. Using a pc-relative sequence also avoids
7249 problems related to the implicit use of the gp register. */
7250 output_asm_insn ("b,l .+8,%%r1", xoperands);
7252 if (TARGET_GAS)
7254 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7255 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7257 else
7259 xoperands[1] = gen_label_rtx ();
7260 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7261 targetm.asm_out.internal_label (asm_out_file, "L",
7262 CODE_LABEL_NUMBER (xoperands[1]));
7263 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7266 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7268 else if (TARGET_PORTABLE_RUNTIME)
7270 /* Pure portable runtime doesn't allow be/ble; we also don't
7271 have PIC support in the assembler/linker, so this sequence
7272 is needed. */
7274 /* Get the address of our target into %r1. */
7275 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7276 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7278 /* Get our return address into %r31. */
7279 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7280 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7282 /* Jump to our target address in %r1. */
7283 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7285 else if (!flag_pic)
7287 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7288 if (TARGET_PA_20)
7289 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7290 else
7291 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7293 else
7295 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7296 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7298 if (TARGET_SOM || !TARGET_GAS)
7300 /* The HP assembler can generate relocations for the
7301 difference of two symbols. GAS can do this for a
7302 millicode symbol but not an arbitrary external
7303 symbol when generating SOM output. */
7304 xoperands[1] = gen_label_rtx ();
7305 targetm.asm_out.internal_label (asm_out_file, "L",
7306 CODE_LABEL_NUMBER (xoperands[1]));
7307 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7308 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7310 else
7312 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7313 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7314 xoperands);
7317 /* Jump to our target address in %r1. */
7318 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7322 if (seq_length == 0)
7323 output_asm_insn ("nop", xoperands);
7325 /* We are done if there isn't a jump in the delay slot. */
7326 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7327 return "";
7329 /* This call has an unconditional jump in its delay slot. */
7330 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7332 /* See if the return address can be adjusted. Use the containing
7333 sequence insn's address. */
7334 if (INSN_ADDRESSES_SET_P ())
7336 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7337 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7338 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7340 if (VAL_14_BITS_P (distance))
7342 xoperands[1] = gen_label_rtx ();
7343 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7344 targetm.asm_out.internal_label (asm_out_file, "L",
7345 CODE_LABEL_NUMBER (xoperands[1]));
7347 else
7348 /* ??? This branch may not reach its target. */
7349 output_asm_insn ("nop\n\tb,n %0", xoperands);
7351 else
7352 /* ??? This branch may not reach its target. */
7353 output_asm_insn ("nop\n\tb,n %0", xoperands);
7355 /* Delete the jump. */
7356 SET_INSN_DELETED (NEXT_INSN (insn));
7358 return "";
7361 /* Return the attribute length of the call instruction INSN. The SIBCALL
7362 flag indicates whether INSN is a regular call or a sibling call. The
7363 length returned must be longer than the code actually generated by
7364 output_call. Since branch shortening is done before delay branch
7365 sequencing, there is no way to determine whether or not the delay
7366 slot will be filled during branch shortening. Even when the delay
7367 slot is filled, we may have to add a nop if the delay slot contains
7368 a branch that can't reach its target. Thus, we always have to include
7369 the delay slot in the length estimate. This used to be done in
7370 pa_adjust_insn_length but we do it here now as some sequences always
7371 fill the delay slot and we can save four bytes in the estimate for
7372 these sequences. */
7374 int
7375 attr_length_call (rtx insn, int sibcall)
7377 int local_call;
7378 rtx call_dest;
7379 tree call_decl;
7380 int length = 0;
7381 rtx pat = PATTERN (insn);
7382 unsigned long distance = -1;
7384 if (INSN_ADDRESSES_SET_P ())
7386 unsigned long total;
7388 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7389 distance = (total + insn_current_reference_address (insn));
7390 if (distance < total)
7391 distance = -1;
7394 /* Determine if this is a local call. */
7395 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7396 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7397 else
7398 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7400 call_decl = SYMBOL_REF_DECL (call_dest);
7401 local_call = call_decl && targetm.binds_local_p (call_decl);
7403 /* pc-relative branch. */
7404 if (!TARGET_LONG_CALLS
7405 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7406 || distance < 240000))
7407 length += 8;
7409 /* 64-bit plabel sequence. */
7410 else if (TARGET_64BIT && !local_call)
7411 length += sibcall ? 28 : 24;
7413 /* non-pic long absolute branch sequence. */
7414 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7415 length += 12;
7417 /* long pc-relative branch sequence. */
7418 else if (TARGET_LONG_PIC_SDIFF_CALL
7419 || (TARGET_GAS && !TARGET_SOM
7420 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7422 length += 20;
7424 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && flag_pic)
7425 length += 8;
7428 /* 32-bit plabel sequence. */
7429 else
7431 length += 32;
7433 if (TARGET_SOM)
7434 length += length_fp_args (insn);
7436 if (flag_pic)
7437 length += 4;
7439 if (!TARGET_PA_20)
7441 if (!sibcall)
7442 length += 8;
7444 if (!TARGET_NO_SPACE_REGS && flag_pic)
7445 length += 8;
7449 return length;
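/* Example (illustrative): a 64-bit non-local sibcall takes the 28-byte
   plabel sequence; a 32-bit call forced onto the plabel path starts at
   32 bytes and grows by 4 for PIC, by 8 for the return-pointer
   adjustment on pre-2.0 non-sibcalls, and by 8 more when space
   registers are in use with PIC.  */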
7452 /* INSN is a function call. It may have an unconditional jump
7453 in its delay slot.
7455 CALL_DEST is the routine we are calling. */
7457 const char *
7458 output_call (rtx insn, rtx call_dest, int sibcall)
7460 int delay_insn_deleted = 0;
7461 int delay_slot_filled = 0;
7462 int seq_length = dbr_sequence_length ();
7463 tree call_decl = SYMBOL_REF_DECL (call_dest);
7464 int local_call = call_decl && targetm.binds_local_p (call_decl);
7465 rtx xoperands[2];
7467 xoperands[0] = call_dest;
7469 /* Handle the common case where we're sure that the branch will reach
7470 the beginning of the "$CODE$" subspace. This is the beginning of
7471 the current function if we are in a named section. */
7472 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7474 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7475 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7477 else
7479 if (TARGET_64BIT && !local_call)
7481 /* ??? As far as I can tell, the HP linker doesn't support the
7482 long pc-relative sequence described in the 64-bit runtime
7483 architecture. So, we use a slightly longer indirect call. */
7484 xoperands[0] = get_deferred_plabel (call_dest);
7485 xoperands[1] = gen_label_rtx ();
7487 /* If this isn't a sibcall, we put the load of %r27 into the
7488 delay slot. We can't do this in a sibcall as we don't
7489 have a second call-clobbered scratch register available. */
7490 if (seq_length != 0
7491 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7492 && !sibcall)
7494 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7495 optimize, 0, NULL);
7497 /* Now delete the delay insn. */
7498 SET_INSN_DELETED (NEXT_INSN (insn));
7499 delay_insn_deleted = 1;
7502 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7503 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7504 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7506 if (sibcall)
7508 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7509 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7510 output_asm_insn ("bve (%%r1)", xoperands);
7512 else
7514 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7515 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7516 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7517 delay_slot_filled = 1;
7520 else
7522 int indirect_call = 0;
7524 /* Emit a long call. There are several different sequences
7525 of increasing length and complexity. In most cases,
7526 they don't allow an instruction in the delay slot. */
7527 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7528 && !TARGET_LONG_PIC_SDIFF_CALL
7529 && !(TARGET_GAS && !TARGET_SOM
7530 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7531 && !TARGET_64BIT)
7532 indirect_call = 1;
7534 if (seq_length != 0
7535 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7536 && !sibcall
7537 && (!TARGET_PA_20 || indirect_call))
7539 /* A non-jump insn in the delay slot. By definition we can
7540 emit this insn before the call (and in fact before argument
7541 relocation). */
7542 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7543 NULL);
7545 /* Now delete the delay insn. */
7546 SET_INSN_DELETED (NEXT_INSN (insn));
7547 delay_insn_deleted = 1;
7550 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7552 /* This is the best sequence for making long calls in
7553 non-pic code. Unfortunately, GNU ld doesn't provide
7554 the stub needed for external calls, and GAS's support
7555 for this with the SOM linker is buggy. It is safe
7556 to use this for local calls. */
7557 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7558 if (sibcall)
7559 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7560 else
7562 if (TARGET_PA_20)
7563 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7564 xoperands);
7565 else
7566 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7568 output_asm_insn ("copy %%r31,%%r2", xoperands);
7569 delay_slot_filled = 1;
7572 else
7574 if (TARGET_LONG_PIC_SDIFF_CALL)
7576 /* The HP assembler and linker can handle relocations
7577 for the difference of two symbols. The HP assembler
7578 recognizes the sequence as a pc-relative call and
7579 the linker provides stubs when needed. */
7580 xoperands[1] = gen_label_rtx ();
7581 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7582 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7583 targetm.asm_out.internal_label (asm_out_file, "L",
7584 CODE_LABEL_NUMBER (xoperands[1]));
7585 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7587 else if (TARGET_GAS && !TARGET_SOM
7588 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7590 /* GAS currently can't generate the relocations that
7591 are needed for the SOM linker under HP-UX using this
7592 sequence. The GNU linker doesn't generate the stubs
7593 that are needed for external calls on TARGET_ELF32
7594 with this sequence. For now, we have to use a
7595 longer plabel sequence when using GAS. */
7596 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7597 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7598 xoperands);
7599 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7600 xoperands);
7602 else
7604 /* Emit a long plabel-based call sequence. This is
7605 essentially an inline implementation of $$dyncall.
7606 We don't actually try to call $$dyncall as this is
7607 as difficult as calling the function itself. */
7608 xoperands[0] = get_deferred_plabel (call_dest);
7609 xoperands[1] = gen_label_rtx ();
7611 /* Since the call is indirect, FP arguments in registers
7612 need to be copied to the general registers. Then, the
7613 argument relocation stub will copy them back. */
7614 if (TARGET_SOM)
7615 copy_fp_args (insn);
7617 if (flag_pic)
7619 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7620 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7621 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7623 else
7625 output_asm_insn ("addil LR'%0-$global$,%%r27",
7626 xoperands);
7627 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7628 xoperands);
7631 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7632 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7633 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7634 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7636 if (!sibcall && !TARGET_PA_20)
7638 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7639 if (TARGET_NO_SPACE_REGS)
7640 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7641 else
7642 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7646 if (TARGET_PA_20)
7648 if (sibcall)
7649 output_asm_insn ("bve (%%r1)", xoperands);
7650 else
7652 if (indirect_call)
7654 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7655 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7656 delay_slot_filled = 1;
7658 else
7659 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7662 else
7664 if (!TARGET_NO_SPACE_REGS && flag_pic)
7665 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7666 xoperands);
7668 if (sibcall)
7670 if (TARGET_NO_SPACE_REGS || !flag_pic)
7671 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7672 else
7673 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7675 else
7677 if (TARGET_NO_SPACE_REGS || !flag_pic)
7678 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7679 else
7680 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7682 if (indirect_call)
7683 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7684 else
7685 output_asm_insn ("copy %%r31,%%r2", xoperands);
7686 delay_slot_filled = 1;
7693 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7694 output_asm_insn ("nop", xoperands);
7696 /* We are done if there isn't a jump in the delay slot. */
7697 if (seq_length == 0
7698 || delay_insn_deleted
7699 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7700 return "";
7702 /* A sibcall should never have a branch in the delay slot. */
7703 gcc_assert (!sibcall);
7705 /* This call has an unconditional jump in its delay slot. */
7706 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7708 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7710 /* See if the return address can be adjusted. Use the containing
7711 sequence insn's address. */
7712 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7713 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7714 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7716 if (VAL_14_BITS_P (distance))
7718 xoperands[1] = gen_label_rtx ();
7719 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7720 targetm.asm_out.internal_label (asm_out_file, "L",
7721 CODE_LABEL_NUMBER (xoperands[1]));
7723 else
7724 output_asm_insn ("nop\n\tb,n %0", xoperands);
7726 else
7727 output_asm_insn ("b,n %0", xoperands);
7729 /* Delete the jump. */
7730 SET_INSN_DELETED (NEXT_INSN (insn));
7732 return "";
7735 /* Return the attribute length of the indirect call instruction INSN.
7736 The length must match the code generated by output_indirect_call.
7737 The returned length includes the delay slot. Currently, the delay
7738 slot of an indirect call sequence is not exposed and it is used by
7739 the sequence itself. */
7741 int
7742 attr_length_indirect_call (rtx insn)
7744 unsigned long distance = -1;
7745 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7747 if (INSN_ADDRESSES_SET_P ())
7749 distance = (total + insn_current_reference_address (insn));
7750 if (distance < total)
7751 distance = -1;
7754 if (TARGET_64BIT)
7755 return 12;
7757 if (TARGET_FAST_INDIRECT_CALLS
7758 || (!TARGET_PORTABLE_RUNTIME
7759 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7760 || distance < 240000)))
7761 return 8;
7763 if (flag_pic)
7764 return 24;
7766 if (TARGET_PORTABLE_RUNTIME)
7767 return 20;
7769 /* Out of reach, can use ble. */
7770 return 12;
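/* For quick reference (a summary derived from the cases above), the
   length values map onto the sequences emitted by output_indirect_call
   below:

       8   short bl/b,l call to $$dyncall (fast indirect calls, or in reach)
      12   64-bit ldd/bve,l sequence, or the non-PIC ldil/ble millicode call
      20   portable runtime ldil/ldo/blr/bv sequence
      24   long PIC pc-relative call to $$dyncall

   Keeping this mapping and the code in sync matters because, as noted
   above, the length alone selects the output sequence.  */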
7773 const char *
7774 output_indirect_call (rtx insn, rtx call_dest)
7776 rtx xoperands[1];
7778 if (TARGET_64BIT)
7780 xoperands[0] = call_dest;
7781 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7782 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7783 return "";
7786 /* First the special case for kernels, level 0 systems, etc. */
7787 if (TARGET_FAST_INDIRECT_CALLS)
7788 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7790 /* Now the normal case -- we can reach $$dyncall directly or
7791 we're sure that we can get there via a long-branch stub.
7793 No need to check target flags as the length uniquely identifies
7794 the remaining cases. */
7795 if (attr_length_indirect_call (insn) == 8)
7797 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7798 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7799 variant of the B,L instruction can't be used on the SOM target. */
7800 if (TARGET_PA_20 && !TARGET_SOM)
7801 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7802 else
7803 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7806 /* Long millicode call, but we are not generating PIC or portable runtime
7807 code. */
7808 if (attr_length_indirect_call (insn) == 12)
7809 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7811 /* Long millicode call for portable runtime. */
7812 if (attr_length_indirect_call (insn) == 20)
7813 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7815 /* We need a long PIC call to $$dyncall. */
7816 xoperands[0] = NULL_RTX;
7817 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7818 if (TARGET_SOM || !TARGET_GAS)
7820 xoperands[0] = gen_label_rtx ();
7821 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7822 targetm.asm_out.internal_label (asm_out_file, "L",
7823 CODE_LABEL_NUMBER (xoperands[0]));
7824 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7826 else
7828 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7829 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7830 xoperands);
7832 output_asm_insn ("blr %%r0,%%r2", xoperands);
7833 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7834 return "";
7837 /* Return the total length of the save and restore instructions needed for
7838 the data linkage table pointer (i.e., the PIC register) across the call
7839 instruction INSN. No-return calls do not require a save and restore.
7840 In addition, we may be able to avoid the save and restore for calls
7841 within the same translation unit. */
7843 int
7844 attr_length_save_restore_dltp (rtx insn)
7846 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7847 return 0;
7849 return 8;
7852 /* In HPUX 8.0's shared library scheme, special relocations are needed
7853 for function labels if they might be passed to a function
7854 in a shared library (because shared libraries don't live in code
7855 space), and special magic is needed to construct their address. */
7857 void
7858 hppa_encode_label (rtx sym)
7860 const char *str = XSTR (sym, 0);
7861 int len = strlen (str) + 1;
7862 char *newstr, *p;
7864 p = newstr = XALLOCAVEC (char, len + 1);
7865 *p++ = '@';
7866 strcpy (p, str);
7868 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7871 static void
7872 pa_encode_section_info (tree decl, rtx rtl, int first)
7874 int old_referenced = 0;
7876 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
7877 old_referenced
7878 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
7880 default_encode_section_info (decl, rtl, first);
7882 if (first && TEXT_SPACE_P (decl))
7884 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7885 if (TREE_CODE (decl) == FUNCTION_DECL)
7886 hppa_encode_label (XEXP (rtl, 0));
7888 else if (old_referenced)
7889 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
7892 /* This is sort of inverse to pa_encode_section_info. */
7894 static const char *
7895 pa_strip_name_encoding (const char *str)
7897 str += (*str == '@');
7898 str += (*str == '*');
7899 return str;
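/* A minimal sketch (illustrative, not part of the port) of the round
   trip between the two routines above:

     hppa_encode_label marks a function symbol:      "foo"  -> "@foo"
     pa_strip_name_encoding undoes it, and also drops
     a '*' user-label prefix when present:           "@foo" -> "foo"
                                                     "*bar" -> "bar"  */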
7902 int
7903 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7905 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7908 /* Returns 1 if OP is a function label involved in a simple addition
7909 with a constant. Used to keep certain patterns from matching
7910 during instruction combination. */
7911 int
7912 is_function_label_plus_const (rtx op)
7914 /* Strip off any CONST. */
7915 if (GET_CODE (op) == CONST)
7916 op = XEXP (op, 0);
7918 return (GET_CODE (op) == PLUS
7919 && function_label_operand (XEXP (op, 0), Pmode)
7920 && GET_CODE (XEXP (op, 1)) == CONST_INT);
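/* For example (illustrative RTL), both of these satisfy the test above:

     (plus (symbol_ref "@foo") (const_int 4))
     (const (plus (symbol_ref "@foo") (const_int 4)))

   since any outer CONST is stripped before the PLUS is checked.  */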
7923 /* Output assembly code for a thunk to FUNCTION. */
7925 static void
7926 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7927 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7928 tree function)
7930 static unsigned int current_thunk_number;
7931 int val_14 = VAL_14_BITS_P (delta);
7932 int nbytes = 0;
7933 char label[16];
7934 rtx xoperands[4];
7936 xoperands[0] = XEXP (DECL_RTL (function), 0);
7937 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7938 xoperands[2] = GEN_INT (delta);
7940 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7941 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7943 /* Output the thunk. We know that the function is in the same
7944 translation unit (i.e., the same space) as the thunk, and that
7945 thunks are output after their method. Thus, we don't need an
7946 external branch to reach the function. With SOM and GAS,
7947 functions and thunks are effectively in different sections.
7948 Thus, we can always use an IA-relative branch and the linker
7949 will add a long branch stub if necessary.
7951 However, we have to be careful when generating PIC code on the
7952 SOM port to ensure that the sequence does not transfer to an
7953 import stub for the target function as this could clobber the
7954 return value saved at SP-24. This would also apply to the
7955 32-bit linux port if the multi-space model is implemented. */
7956 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7957 && !(flag_pic && TREE_PUBLIC (function))
7958 && (TARGET_GAS || last_address < 262132))
7959 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7960 && ((targetm.have_named_sections
7961 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7962 /* The GNU 64-bit linker has rather poor stub management.
7963 So, we use a long branch from thunks that aren't in
7964 the same section as the target function. */
7965 && ((!TARGET_64BIT
7966 && (DECL_SECTION_NAME (thunk_fndecl)
7967 != DECL_SECTION_NAME (function)))
7968 || ((DECL_SECTION_NAME (thunk_fndecl)
7969 == DECL_SECTION_NAME (function))
7970 && last_address < 262132)))
7971 || (!targetm.have_named_sections && last_address < 262132))))
7973 if (!val_14)
7974 output_asm_insn ("addil L'%2,%%r26", xoperands);
7976 output_asm_insn ("b %0", xoperands);
7978 if (val_14)
7980 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7981 nbytes += 8;
7983 else
7985 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7986 nbytes += 12;
7989 else if (TARGET_64BIT)
7991 /* We only have one call-clobbered scratch register, so we can't
7992 make use of the delay slot if delta doesn't fit in 14 bits. */
7993 if (!val_14)
7995 output_asm_insn ("addil L'%2,%%r26", xoperands);
7996 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7999 output_asm_insn ("b,l .+8,%%r1", xoperands);
8001 if (TARGET_GAS)
8003 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8004 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8006 else
8008 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8009 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8012 if (val_14)
8014 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8015 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8016 nbytes += 20;
8018 else
8020 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8021 nbytes += 24;
8024 else if (TARGET_PORTABLE_RUNTIME)
8026 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8027 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8029 if (!val_14)
8030 output_asm_insn ("addil L'%2,%%r26", xoperands);
8032 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8034 if (val_14)
8036 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8037 nbytes += 16;
8039 else
8041 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8042 nbytes += 20;
8045 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8047 /* The function is accessible from outside this module. The only
8048 way to avoid an import stub between the thunk and function is to
8049 call the function directly with an indirect sequence similar to
8050 that used by $$dyncall. This is possible because $$dyncall acts
8051 as the import stub in an indirect call. */
8052 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8053 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8054 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8055 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8056 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8057 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8058 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8059 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8060 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8062 if (!val_14)
8064 output_asm_insn ("addil L'%2,%%r26", xoperands);
8065 nbytes += 4;
8068 if (TARGET_PA_20)
8070 output_asm_insn ("bve (%%r22)", xoperands);
8071 nbytes += 36;
8073 else if (TARGET_NO_SPACE_REGS)
8075 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8076 nbytes += 36;
8078 else
8080 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8081 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8082 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8083 nbytes += 44;
8086 if (val_14)
8087 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8088 else
8089 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8091 else if (flag_pic)
8093 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8095 if (TARGET_SOM || !TARGET_GAS)
8097 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8098 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8100 else
8102 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8103 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8106 if (!val_14)
8107 output_asm_insn ("addil L'%2,%%r26", xoperands);
8109 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8111 if (val_14)
8113 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8114 nbytes += 20;
8116 else
8118 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8119 nbytes += 24;
8122 else
8124 if (!val_14)
8125 output_asm_insn ("addil L'%2,%%r26", xoperands);
8127 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8128 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8130 if (val_14)
8132 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8133 nbytes += 12;
8135 else
8137 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8138 nbytes += 16;
8142 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8144 if (TARGET_SOM && TARGET_GAS)
8146 /* We're done with this subspace except possibly for some additional
8147 debug information. Forget that we are in this subspace to ensure
8148 that the next function is output in its own subspace. */
8149 in_section = NULL;
8150 cfun->machine->in_nsubspa = 2;
8153 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8155 switch_to_section (data_section);
8156 output_asm_insn (".align 4", xoperands);
8157 ASM_OUTPUT_LABEL (file, label);
8158 output_asm_insn (".word P'%0", xoperands);
8161 current_thunk_number++;
8162 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8163 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8164 last_address += nbytes;
8165 update_total_code_bytes (nbytes);
8168 /* Only direct calls to static functions are allowed to be sibling (tail)
8169 call optimized.
8171 This restriction is necessary because some linker-generated stubs will
8172 store return pointers into rp' in some cases, which might clobber a
8173 live value already in rp'.
8175 In a sibcall the current function and the target function share stack
8176 space. Thus if the path to the current function and the path to the
8177 target function save a value in rp', they save the value into the
8178 same stack slot, which has undesirable consequences.
8180 Because of the deferred binding nature of shared libraries, any function
8181 with external scope could be in a different load module and thus require
8182 rp' to be saved when calling that function. So sibcall optimizations
8183 can only be safe for static functions.
8185 Note that GCC never needs return value relocations, so we don't have to
8186 worry about static calls with return value relocations (which require
8187 saving rp').
8189 It is safe to perform a sibcall optimization when the target function
8190 will never return. */
8191 static bool
8192 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8194 if (TARGET_PORTABLE_RUNTIME)
8195 return false;
8197 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8198 single subspace mode and the call is not indirect. As far as I know,
8199 there is no operating system support for the multiple subspace mode.
8200 It might be possible to support indirect calls if we didn't use
8201 $$dyncall (see the indirect sequence generated in output_call). */
8202 if (TARGET_ELF32)
8203 return (decl != NULL_TREE);
8205 /* Sibcalls are not ok because the arg pointer register is not a fixed
8206 register. This prevents the sibcall optimization from occurring. In
8207 addition, there are problems with stub placement using GNU ld. This
8208 is because a normal sibcall branch uses a 17-bit relocation while
8209 a regular call branch uses a 22-bit relocation. As a result, more
8210 care needs to be taken in the placement of long-branch stubs. */
8211 if (TARGET_64BIT)
8212 return false;
8214 /* Sibcalls are only ok within a translation unit. */
8215 return (decl && !TREE_PUBLIC (decl));
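/* A short C illustration (hypothetical user code, not part of GCC) of
   the rule above for the common 32-bit configuration:

     static int helper (int x) { return x + 1; }
     extern int exported_fn (int x);

     int
     caller (int x)
     {
       return helper (x);        // OK: file-local, sibcall allowed
       // return exported_fn (x);   not a sibcall: TREE_PUBLIC is set
     }
*/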
8218 /* ??? Addition is not commutative on the PA due to the weird implicit
8219 space register selection rules for memory addresses. Therefore, we
8220 don't consider a + b == b + a, as this might be inside a MEM. */
8221 static bool
8222 pa_commutative_p (const_rtx x, int outer_code)
8224 return (COMMUTATIVE_P (x)
8225 && (TARGET_NO_SPACE_REGS
8226 || (outer_code != UNKNOWN && outer_code != MEM)
8227 || GET_CODE (x) != PLUS));
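/* Illustrative example: in (mem (plus (reg %r26) (reg %r25))) the
   implicit space register is selected from the base operand, so
   swapping the operands of the PLUS can change which space the access
   uses.  Outside a MEM, or with TARGET_NO_SPACE_REGS, the swap is
   harmless and PLUS is treated as commutative, as coded above.  */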
8230 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8231 use in fmpyadd instructions. */
8232 int
8233 fmpyaddoperands (rtx *operands)
8235 enum machine_mode mode = GET_MODE (operands[0]);
8237 /* Must be a floating point mode. */
8238 if (mode != SFmode && mode != DFmode)
8239 return 0;
8241 /* All modes must be the same. */
8242 if (! (mode == GET_MODE (operands[1])
8243 && mode == GET_MODE (operands[2])
8244 && mode == GET_MODE (operands[3])
8245 && mode == GET_MODE (operands[4])
8246 && mode == GET_MODE (operands[5])))
8247 return 0;
8249 /* All operands must be registers. */
8250 if (! (GET_CODE (operands[1]) == REG
8251 && GET_CODE (operands[2]) == REG
8252 && GET_CODE (operands[3]) == REG
8253 && GET_CODE (operands[4]) == REG
8254 && GET_CODE (operands[5]) == REG))
8255 return 0;
8257 /* Only 2 real operands to the addition. One of the input operands must
8258 be the same as the output operand. */
8259 if (! rtx_equal_p (operands[3], operands[4])
8260 && ! rtx_equal_p (operands[3], operands[5]))
8261 return 0;
8263 /* Inout operand of add cannot conflict with any operands from multiply. */
8264 if (rtx_equal_p (operands[3], operands[0])
8265 || rtx_equal_p (operands[3], operands[1])
8266 || rtx_equal_p (operands[3], operands[2]))
8267 return 0;
8269 /* multiply cannot feed into addition operands. */
8270 if (rtx_equal_p (operands[4], operands[0])
8271 || rtx_equal_p (operands[5], operands[0]))
8272 return 0;
8274 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8275 if (mode == SFmode
8276 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8277 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8278 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8279 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8280 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8281 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8282 return 0;
8284 /* Passed. Operands are suitable for fmpyadd. */
8285 return 1;
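/* One reading of the checks above (illustrative operand layout):

     operands[0]             multiply destination
     operands[1], operands[2] multiply sources
     operands[3]             add destination, which must also appear as
     operands[4] or operands[5], the add sources

   For example (made-up registers), pairing fmpy %fr4,%fr5 -> %fr6 with
   fadd %fr7,%fr8 -> %fr7 passes, while feeding %fr6 into the add would
   fail the conflict checks.  */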
8288 #if !defined(USE_COLLECT2)
8289 static void
8290 pa_asm_out_constructor (rtx symbol, int priority)
8292 if (!function_label_operand (symbol, VOIDmode))
8293 hppa_encode_label (symbol);
8295 #ifdef CTORS_SECTION_ASM_OP
8296 default_ctor_section_asm_out_constructor (symbol, priority);
8297 #else
8298 # ifdef TARGET_ASM_NAMED_SECTION
8299 default_named_section_asm_out_constructor (symbol, priority);
8300 # else
8301 default_stabs_asm_out_constructor (symbol, priority);
8302 # endif
8303 #endif
8306 static void
8307 pa_asm_out_destructor (rtx symbol, int priority)
8309 if (!function_label_operand (symbol, VOIDmode))
8310 hppa_encode_label (symbol);
8312 #ifdef DTORS_SECTION_ASM_OP
8313 default_dtor_section_asm_out_destructor (symbol, priority);
8314 #else
8315 # ifdef TARGET_ASM_NAMED_SECTION
8316 default_named_section_asm_out_destructor (symbol, priority);
8317 # else
8318 default_stabs_asm_out_destructor (symbol, priority);
8319 # endif
8320 #endif
8322 #endif
8324 /* This function places uninitialized global data in the bss section.
8325 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8326 function on the SOM port to prevent uninitialized global data from
8327 being placed in the data section. */
8329 void
8330 pa_asm_output_aligned_bss (FILE *stream,
8331 const char *name,
8332 unsigned HOST_WIDE_INT size,
8333 unsigned int align)
8335 switch_to_section (bss_section);
8336 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8338 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8339 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8340 #endif
8342 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8343 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8344 #endif
8346 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8347 ASM_OUTPUT_LABEL (stream, name);
8348 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8351 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8352 that doesn't allow the alignment of global common storage to be directly
8353 specified. The SOM linker aligns common storage based on the rounded
8354 value of the NUM_BYTES parameter in the .comm directive. It's not
8355 possible to use the .align directive as it doesn't affect the alignment
8356 of the label associated with a .comm directive. */
8358 void
8359 pa_asm_output_aligned_common (FILE *stream,
8360 const char *name,
8361 unsigned HOST_WIDE_INT size,
8362 unsigned int align)
8364 unsigned int max_common_align;
8366 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8367 if (align > max_common_align)
8369 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8370 "for global common data. Using %u",
8371 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8372 align = max_common_align;
8375 switch_to_section (bss_section);
8377 assemble_name (stream, name);
8378 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8379 MAX (size, align / BITS_PER_UNIT));
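/* Worked example (illustrative): a 4-byte object requesting 16-byte
   alignment (align == 128 bits) is emitted as

       sym	.comm 16

   because MAX (4, 128 / 8) == 16; the SOM linker then derives the
   alignment of the common block from this rounded size.  */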
8382 /* We can't use .comm for local common storage as the SOM linker effectively
8383 treats the symbol as universal and uses the same storage for local symbols
8384 with the same name in different object files. The .block directive
8385 reserves an uninitialized block of storage. However, it's not common
8386 storage. Fortunately, GCC never requests common storage with the same
8387 name in any given translation unit. */
8389 void
8390 pa_asm_output_aligned_local (FILE *stream,
8391 const char *name,
8392 unsigned HOST_WIDE_INT size,
8393 unsigned int align)
8395 switch_to_section (bss_section);
8396 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8398 #ifdef LOCAL_ASM_OP
8399 fprintf (stream, "%s", LOCAL_ASM_OP);
8400 assemble_name (stream, name);
8401 fprintf (stream, "\n");
8402 #endif
8404 ASM_OUTPUT_LABEL (stream, name);
8405 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8408 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8409 use in fmpysub instructions. */
8410 int
8411 fmpysuboperands (rtx *operands)
8413 enum machine_mode mode = GET_MODE (operands[0]);
8415 /* Must be a floating point mode. */
8416 if (mode != SFmode && mode != DFmode)
8417 return 0;
8419 /* All modes must be the same. */
8420 if (! (mode == GET_MODE (operands[1])
8421 && mode == GET_MODE (operands[2])
8422 && mode == GET_MODE (operands[3])
8423 && mode == GET_MODE (operands[4])
8424 && mode == GET_MODE (operands[5])))
8425 return 0;
8427 /* All operands must be registers. */
8428 if (! (GET_CODE (operands[1]) == REG
8429 && GET_CODE (operands[2]) == REG
8430 && GET_CODE (operands[3]) == REG
8431 && GET_CODE (operands[4]) == REG
8432 && GET_CODE (operands[5]) == REG))
8433 return 0;
8435 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8436 operation, so operands[4] must be the same as operands[3]. */
8437 if (! rtx_equal_p (operands[3], operands[4]))
8438 return 0;
8440 /* multiply cannot feed into subtraction. */
8441 if (rtx_equal_p (operands[5], operands[0]))
8442 return 0;
8444 /* Inout operand of sub cannot conflict with any operands from multiply. */
8445 if (rtx_equal_p (operands[3], operands[0])
8446 || rtx_equal_p (operands[3], operands[1])
8447 || rtx_equal_p (operands[3], operands[2]))
8448 return 0;
8450 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8451 if (mode == SFmode
8452 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8453 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8454 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8455 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8456 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8457 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8458 return 0;
8460 /* Passed. Operands are suitable for fmpysub. */
8461 return 1;
8464 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8465 constants for shadd instructions. */
8466 int
8467 shadd_constant_p (int val)
8469 if (val == 2 || val == 4 || val == 8)
8470 return 1;
8471 else
8472 return 0;
8475 /* Return 1 if OP is valid as a base or index register in a
8476 REG+REG address. */
8478 int
8479 borx_reg_operand (rtx op, enum machine_mode mode)
8481 if (GET_CODE (op) != REG)
8482 return 0;
8484 /* We must reject virtual registers as the only expressions that
8485 can be instantiated are REG and REG+CONST. */
8486 if (op == virtual_incoming_args_rtx
8487 || op == virtual_stack_vars_rtx
8488 || op == virtual_stack_dynamic_rtx
8489 || op == virtual_outgoing_args_rtx
8490 || op == virtual_cfa_rtx)
8491 return 0;
8493 /* While it's always safe to index off the frame pointer, it's not
8494 profitable to do so when the frame pointer is being eliminated. */
8495 if (!reload_completed
8496 && flag_omit_frame_pointer
8497 && !cfun->calls_alloca
8498 && op == frame_pointer_rtx)
8499 return 0;
8501 return register_operand (op, mode);
8504 /* Return 1 if this operand is anything other than a hard register. */
8506 int
8507 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8509 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8512 /* Return 1 if INSN branches forward. Should be using insn_addresses
8513 to avoid walking through all the insns... */
8514 static int
8515 forward_branch_p (rtx insn)
8517 rtx label = JUMP_LABEL (insn);
8519 while (insn)
8521 if (insn == label)
8522 break;
8523 else
8524 insn = NEXT_INSN (insn);
8527 return (insn == label);
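/* A sketch of the address-based test the comment above suggests
   (untested; assumes insn addresses have already been computed, as in
   the INSN_ADDRESSES uses elsewhere in this file):

     if (INSN_ADDRESSES_SET_P ())
       return (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
	       > INSN_ADDRESSES (INSN_UID (insn)));
*/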
8530 /* Return 1 if OP is an equality comparison, else return 0. */
8531 int
8532 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8534 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8537 /* Return 1 if INSN is in the delay slot of a call instruction. */
8538 int
8539 jump_in_call_delay (rtx insn)
8542 if (GET_CODE (insn) != JUMP_INSN)
8543 return 0;
8545 if (PREV_INSN (insn)
8546 && PREV_INSN (PREV_INSN (insn))
8547 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8549 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8551 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8552 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8555 else
8556 return 0;
8559 /* Output an unconditional move and branch insn. */
8561 const char *
8562 output_parallel_movb (rtx *operands, rtx insn)
8564 int length = get_attr_length (insn);
8566 /* These are the cases in which we win. */
8567 if (length == 4)
8568 return "mov%I1b,tr %1,%0,%2";
8570 /* None of the following cases win, but they don't lose either. */
8571 if (length == 8)
8573 if (dbr_sequence_length () == 0)
8575 /* Nothing in the delay slot, fake it by putting the combined
8576 insn (the copy or add) in the delay slot of a bl. */
8577 if (GET_CODE (operands[1]) == CONST_INT)
8578 return "b %2\n\tldi %1,%0";
8579 else
8580 return "b %2\n\tcopy %1,%0";
8582 else
8584 /* Something in the delay slot, but we've got a long branch. */
8585 if (GET_CODE (operands[1]) == CONST_INT)
8586 return "ldi %1,%0\n\tb %2";
8587 else
8588 return "copy %1,%0\n\tb %2";
8592 if (GET_CODE (operands[1]) == CONST_INT)
8593 output_asm_insn ("ldi %1,%0", operands);
8594 else
8595 output_asm_insn ("copy %1,%0", operands);
8596 return output_lbranch (operands[2], insn, 1);
8599 /* Output an unconditional add and branch insn. */
8601 const char *
8602 output_parallel_addb (rtx *operands, rtx insn)
8604 int length = get_attr_length (insn);
8606 /* To make life easy we want operand0 to be the shared input/output
8607 operand and operand1 to be the readonly operand. */
8608 if (operands[0] == operands[1])
8609 operands[1] = operands[2];
8611 /* These are the cases in which we win. */
8612 if (length == 4)
8613 return "add%I1b,tr %1,%0,%3";
8615 /* None of the following cases win, but they don't lose either. */
8616 if (length == 8)
8618 if (dbr_sequence_length () == 0)
8619 /* Nothing in the delay slot, fake it by putting the combined
8620 insn (the copy or add) in the delay slot of a bl. */
8621 return "b %3\n\tadd%I1 %1,%0,%0";
8622 else
8623 /* Something in the delay slot, but we've got a long branch. */
8624 return "add%I1 %1,%0,%0\n\tb %3";
8627 output_asm_insn ("add%I1 %1,%0,%0", operands);
8628 return output_lbranch (operands[3], insn, 1);
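/* For example (illustrative registers and label), when the branch
   target is in range the four-byte case above emits one combined insn
   such as

       addb,tr %r5,%r4,L$0042

   which adds %r5 into %r4 and branches; the ",tr" completer makes the
   condition always true, so the branch is unconditional.  */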
8631 /* Return nonzero if INSN (a jump insn) immediately follows a call
8632 to a named function. This is used to avoid filling the delay slot
8633 of the jump since it can usually be eliminated by modifying RP in
8634 the delay slot of the call. */
8636 int
8637 following_call (rtx insn)
8639 if (! TARGET_JUMP_IN_DELAY)
8640 return 0;
8642 /* Find the previous real insn, skipping NOTEs. */
8643 insn = PREV_INSN (insn);
8644 while (insn && GET_CODE (insn) == NOTE)
8645 insn = PREV_INSN (insn);
8647 /* Check for CALL_INSNs and millicode calls. */
8648 if (insn
8649 && ((GET_CODE (insn) == CALL_INSN
8650 && get_attr_type (insn) != TYPE_DYNCALL)
8651 || (GET_CODE (insn) == INSN
8652 && GET_CODE (PATTERN (insn)) != SEQUENCE
8653 && GET_CODE (PATTERN (insn)) != USE
8654 && GET_CODE (PATTERN (insn)) != CLOBBER
8655 && get_attr_type (insn) == TYPE_MILLI)))
8656 return 1;
8658 return 0;
8661 /* We use this hook to perform a PA specific optimization which is difficult
8662 to do in earlier passes.
8664 We want the delay slots of branches within jump tables to be filled.
8665 None of the compiler passes at the moment even has the notion that a
8666 PA jump table doesn't contain addresses, but instead contains actual
8667 instructions!
8669 Because we actually jump into the table, the addresses of each entry
8670 must stay constant in relation to the beginning of the table (which
8671 itself must stay constant relative to the instruction to jump into
8672 it). I don't believe we can guarantee earlier passes of the compiler
8673 will adhere to those rules.
8675 So, late in the compilation process we find all the jump tables, and
8676 expand them into real code -- e.g. each entry in the jump table vector
8677 will get an appropriate label followed by a jump to the final target.
8679 Reorg and the final jump pass can then optimize these branches and
8680 fill their delay slots. We end up with smaller, more efficient code.
8682 The jump instructions within the table are special; we must be able
8683 to identify them during assembly output (if the jumps don't get filled
8684 we need to emit a nop rather than nullifying the delay slot). We
8685 identify jumps in switch tables by using insns with the attribute
8686 type TYPE_BTABLE_BRANCH.
8688 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8689 insns. This serves two purposes, first it prevents jump.c from
8690 noticing that the last N entries in the table jump to the instruction
8691 immediately after the table and deleting the jumps. Second, those
8692 insns mark where we should emit .begin_brtab and .end_brtab directives
8693 when using GAS (allows for better link time optimizations). */
8695 static void
8696 pa_reorg (void)
8698 rtx insn;
8700 remove_useless_addtr_insns (1);
8702 if (pa_cpu < PROCESSOR_8000)
8703 pa_combine_instructions ();
8706 /* This is fairly cheap, so always run it if optimizing. */
8707 if (optimize > 0 && !TARGET_BIG_SWITCH)
8709 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8710 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8712 rtx pattern, tmp, location, label;
8713 unsigned int length, i;
8715 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8716 if (GET_CODE (insn) != JUMP_INSN
8717 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8718 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8719 continue;
8721 /* Emit marker for the beginning of the branch table. */
8722 emit_insn_before (gen_begin_brtab (), insn);
8724 pattern = PATTERN (insn);
8725 location = PREV_INSN (insn);
8726 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8728 for (i = 0; i < length; i++)
8730 /* Emit a label before each jump to keep jump.c from
8731 removing this code. */
8732 tmp = gen_label_rtx ();
8733 LABEL_NUSES (tmp) = 1;
8734 emit_label_after (tmp, location);
8735 location = NEXT_INSN (location);
8737 if (GET_CODE (pattern) == ADDR_VEC)
8738 label = XEXP (XVECEXP (pattern, 0, i), 0);
8739 else
8740 label = XEXP (XVECEXP (pattern, 1, i), 0);
8742 tmp = gen_short_jump (label);
8744 /* Emit the jump itself. */
8745 tmp = emit_jump_insn_after (tmp, location);
8746 JUMP_LABEL (tmp) = label;
8747 LABEL_NUSES (label)++;
8748 location = NEXT_INSN (location);
8750 /* Emit a BARRIER after the jump. */
8751 emit_barrier_after (location);
8752 location = NEXT_INSN (location);
8755 /* Emit marker for the end of the branch table. */
8756 emit_insn_before (gen_end_brtab (), location);
8757 location = NEXT_INSN (location);
8758 emit_barrier_after (location);
8760 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8761 delete_insn (insn);
8764 else
8766 /* Still need brtab marker insns. FIXME: the presence of these
8767 markers disables output of the branch table to readonly memory,
8768 and any alignment directives that might be needed. Possibly,
8769 the begin_brtab insn should be output before the label for the
8770 table. This doesn't matter at the moment since the tables are
8771 always output in the text section. */
8772 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8774 /* Find an ADDR_VEC insn. */
8775 if (GET_CODE (insn) != JUMP_INSN
8776 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8777 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8778 continue;
8780 /* Now generate markers for the beginning and end of the
8781 branch table. */
8782 emit_insn_before (gen_begin_brtab (), insn);
8783 emit_insn_after (gen_end_brtab (), insn);
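/* Illustrative sketch (made-up labels) of the transformation performed
   above: a two-entry branch table

       (addr_vec [L$10 L$20])

   is exploded into real instructions,

       begin_brtab marker
     L$100:  jump to L$10      ; one label plus short jump per entry
     L$101:  jump to L$20
       end_brtab marker

   which reorg and the final jump pass can then optimize, filling the
   delay slot of each jump.  */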
8788 /* The PA has a number of odd instructions which can perform multiple
8789 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8790 it may be profitable to combine two instructions into one instruction
8791 with two outputs. It's not profitable on PA2.0 machines because the
8792 two outputs would take two slots in the reorder buffers.
8794 This routine finds instructions which can be combined and combines
8795 them. We only support some of the potential combinations, and we
8796 only try common ways to find suitable instructions.
8798 * addb can add two registers or a register and a small integer
8799 and jump to a nearby (+-8k) location. Normally the jump to the
8800 nearby location is conditional on the result of the add, but by
8801 using the "true" condition we can make the jump unconditional.
8802 Thus addb can perform two independent operations in one insn.
8804 * movb is similar to addb in that it can perform a reg->reg
8805 or small immediate->reg copy and jump to a nearby (+-8k location).
8807 * fmpyadd and fmpysub can perform a FP multiply and either an
8808 FP add or FP sub if the operands of the multiply and add/sub are
8809 independent (there are other minor restrictions). Note both
8810 the fmpy and fadd/fsub can in theory move to better spots according
8811 to data dependencies, but for now we require the fmpy stay at a
8812 fixed location.
8814 * Many of the memory operations can perform pre & post updates
8815 of index registers. GCC's pre/post increment/decrement addressing
8816 is far too simple to take advantage of all the possibilities. This
8817 pass may not be suitable since those insns may not be independent.
8819 * comclr can compare two integer registers, or a register and a
8820 small immediate, nullify the following instruction and zero some
8821 other register. This
8821 is more difficult to use as it's harder to find an insn which
8822 will generate a comclr than finding something like an unconditional
8823 branch. (conditional moves & long branches create comclr insns).
8825 * Most arithmetic operations can conditionally skip the next
8826 instruction. They can be viewed as "perform this operation
8827 and conditionally jump to this nearby location" (where nearby
8828 is a few insns away). These are difficult to use due to the
8829 branch length restrictions. */
8831 static void
8832 pa_combine_instructions (void)
8834 rtx anchor, new;
8836 /* This can get expensive since the basic algorithm is on the
8837 order of O(n^2) (or worse). Only do it for -O2 or higher
8838 levels of optimization. */
8839 if (optimize < 2)
8840 return;
8842 /* Walk down the list of insns looking for "anchor" insns which
8843 may be combined with "floating" insns. As the name implies,
8844 "anchor" instructions don't move, while "floating" insns may
8845 move around. */
8846 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8847 new = make_insn_raw (new);
8849 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8851 enum attr_pa_combine_type anchor_attr;
8852 enum attr_pa_combine_type floater_attr;
8854 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8855 Also ignore any special USE insns. */
8856 if ((GET_CODE (anchor) != INSN
8857 && GET_CODE (anchor) != JUMP_INSN
8858 && GET_CODE (anchor) != CALL_INSN)
8859 || GET_CODE (PATTERN (anchor)) == USE
8860 || GET_CODE (PATTERN (anchor)) == CLOBBER
8861 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8862 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8863 continue;
8865 anchor_attr = get_attr_pa_combine_type (anchor);
8866 /* See if anchor is an insn suitable for combination. */
8867 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8868 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8869 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8870 && ! forward_branch_p (anchor)))
8872 rtx floater;
8874 for (floater = PREV_INSN (anchor);
8875 floater;
8876 floater = PREV_INSN (floater))
8878 if (GET_CODE (floater) == NOTE
8879 || (GET_CODE (floater) == INSN
8880 && (GET_CODE (PATTERN (floater)) == USE
8881 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8882 continue;
8884 /* Anything except a regular INSN will stop our search. */
8885 if (GET_CODE (floater) != INSN
8886 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8887 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8889 floater = NULL_RTX;
8890 break;
8893 /* See if FLOATER is suitable for combination with the
8894 anchor. */
8895 floater_attr = get_attr_pa_combine_type (floater);
8896 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8897 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8898 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8899 && floater_attr == PA_COMBINE_TYPE_FMPY))
8901 /* If ANCHOR and FLOATER can be combined, then we're
8902 done with this pass. */
8903 if (pa_can_combine_p (new, anchor, floater, 0,
8904 SET_DEST (PATTERN (floater)),
8905 XEXP (SET_SRC (PATTERN (floater)), 0),
8906 XEXP (SET_SRC (PATTERN (floater)), 1)))
8907 break;
8910 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8911 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8913 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8915 if (pa_can_combine_p (new, anchor, floater, 0,
8916 SET_DEST (PATTERN (floater)),
8917 XEXP (SET_SRC (PATTERN (floater)), 0),
8918 XEXP (SET_SRC (PATTERN (floater)), 1)))
8919 break;
8921 else
8923 if (pa_can_combine_p (new, anchor, floater, 0,
8924 SET_DEST (PATTERN (floater)),
8925 SET_SRC (PATTERN (floater)),
8926 SET_SRC (PATTERN (floater))))
8927 break;
8932 /* If we didn't find anything on the backwards scan try forwards. */
8933 if (!floater
8934 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8935 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8937 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8939 if (GET_CODE (floater) == NOTE
8940 || (GET_CODE (floater) == INSN
8941 && (GET_CODE (PATTERN (floater)) == USE
8942 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8944 continue;
8946 /* Anything except a regular INSN will stop our search. */
8947 if (GET_CODE (floater) != INSN
8948 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8949 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8951 floater = NULL_RTX;
8952 break;
8955 /* See if FLOATER is suitable for combination with the
8956 anchor. */
8957 floater_attr = get_attr_pa_combine_type (floater);
8958 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8959 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8960 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8961 && floater_attr == PA_COMBINE_TYPE_FMPY))
8963 /* If ANCHOR and FLOATER can be combined, then we're
8964 done with this pass. */
8965 if (pa_can_combine_p (new, anchor, floater, 1,
8966 SET_DEST (PATTERN (floater)),
8967 XEXP (SET_SRC (PATTERN (floater)),
8969 XEXP (SET_SRC (PATTERN (floater)),
8970 1)))
8971 break;
8976 /* FLOATER will be nonzero if we found a suitable floating
8977 insn for combination with ANCHOR. */
8978 if (floater
8979 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8980 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8982 /* Emit the new instruction and delete the old anchor. */
8983 emit_insn_before (gen_rtx_PARALLEL
8984 (VOIDmode,
8985 gen_rtvec (2, PATTERN (anchor),
8986 PATTERN (floater))),
8987 anchor);
8989 SET_INSN_DELETED (anchor);
8991 /* Emit a special USE insn for FLOATER, then delete
8992 the floating insn. */
8993 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8994 delete_insn (floater);
8996 continue;
8998 else if (floater
8999 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9001 rtx temp;
9002 /* Emit the new_jump instruction and delete the old anchor. */
9003 temp
9004 = emit_jump_insn_before (gen_rtx_PARALLEL
9005 (VOIDmode,
9006 gen_rtvec (2, PATTERN (anchor),
9007 PATTERN (floater))),
9008 anchor);
9010 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9011 SET_INSN_DELETED (anchor);
9013 /* Emit a special USE insn for FLOATER, then delete
9014 the floating insn. */
9015 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9016 delete_insn (floater);
9017 continue;
9023 static int
9024 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
9025 rtx src1, rtx src2)
9027 int insn_code_number;
9028 rtx start, end;
9030 /* Create a PARALLEL with the patterns of ANCHOR and
9031 FLOATER, try to recognize it, then test constraints
9032 for the resulting pattern.
9034 If the pattern doesn't match or the constraints
9035 aren't met keep searching for a suitable floater
9036 insn. */
9037 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
9038 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
9039 INSN_CODE (new) = -1;
9040 insn_code_number = recog_memoized (new);
9041 if (insn_code_number < 0
9042 || (extract_insn (new), ! constrain_operands (1)))
9043 return 0;
9045 if (reversed)
9047 start = anchor;
9048 end = floater;
9050 else
9052 start = floater;
9053 end = anchor;
9056 /* There's up to three operands to consider. One
9057 output and two inputs.
9059 The output must not be used between FLOATER & ANCHOR
9060 exclusive. The inputs must not be set between
9061 FLOATER and ANCHOR exclusive. */
9063 if (reg_used_between_p (dest, start, end))
9064 return 0;
9066 if (reg_set_between_p (src1, start, end))
9067 return 0;
9069 if (reg_set_between_p (src2, start, end))
9070 return 0;
9072 /* If we get here, then everything is good. */
9073 return 1;
9076 /* Return nonzero if references for INSN are delayed.
9078 Millicode insns are actually function calls with some special
9079 constraints on arguments and register usage.
9081 Millicode calls always expect their arguments in the integer argument
9082 registers, and always return their result in %r29 (ret1). They
9083 are expected to clobber their arguments, %r1, %r29, and the return
9084 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9086 This function tells reorg that the references to arguments and
9087 millicode calls do not appear to happen until after the millicode call.
9088 This allows reorg to put insns which set the argument registers into the
9089 delay slot of the millicode call -- thus they act more like traditional
9090 CALL_INSNs.
9092 Note we cannot consider side effects of the insn to be delayed because
9093 the branch and link insn will clobber the return pointer. If we happened
9094 to use the return pointer in the delay slot of the call, then we lose.
9096 get_attr_type will try to recognize the given insn, so make sure to
9097 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9098 in particular. */
9099 int
9100 insn_refs_are_delayed (rtx insn)
9102 return ((GET_CODE (insn) == INSN
9103 && GET_CODE (PATTERN (insn)) != SEQUENCE
9104 && GET_CODE (PATTERN (insn)) != USE
9105 && GET_CODE (PATTERN (insn)) != CLOBBER
9106 && get_attr_type (insn) == TYPE_MILLI));
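/* Illustrative example (made-up operands): telling reorg that the
   argument references are delayed lets it rewrite

       ldi 10,%r26
       bl $$mulI,%r31
       nop

   into

       bl $$mulI,%r31
       ldi 10,%r26		; argument set up in the delay slot
*/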
9109 /* On the HP-PA the value is found in register(s) 28(-29), unless
9110 the mode is SF or DF. Then the value is returned in fr4 (32).
9112 This must perform the same promotions as PROMOTE_MODE, else
9113 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
9115 Small structures must be returned in a PARALLEL on PA64 in order
9116 to match the HP Compiler ABI. */
9118 rtx
9119 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
9121 enum machine_mode valmode;
9123 if (AGGREGATE_TYPE_P (valtype)
9124 || TREE_CODE (valtype) == COMPLEX_TYPE
9125 || TREE_CODE (valtype) == VECTOR_TYPE)
9127 if (TARGET_64BIT)
9129 /* Aggregates with a size less than or equal to 128 bits are
9130 returned in GR 28(-29). They are left justified. The pad
9131 bits are undefined. Larger aggregates are returned in
9132 memory. */
9133 rtx loc[2];
9134 int i, offset = 0;
9135 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9137 for (i = 0; i < ub; i++)
9139 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9140 gen_rtx_REG (DImode, 28 + i),
9141 GEN_INT (offset));
9142 offset += 8;
9145 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9147 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9149 /* Aggregates 5 to 8 bytes in size are returned in general
9150 registers r28-r29 in the same manner as other non
9151 floating-point objects. The data is right-justified and
9152 zero-extended to 64 bits. This is opposite to the normal
9153 justification used on big endian targets and requires
9154 special treatment. */
9155 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9156 gen_rtx_REG (DImode, 28), const0_rtx);
9157 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9161 if ((INTEGRAL_TYPE_P (valtype)
9162 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9163 || POINTER_TYPE_P (valtype))
9164 valmode = word_mode;
9165 else
9166 valmode = TYPE_MODE (valtype);
9168 if (TREE_CODE (valtype) == REAL_TYPE
9169 && !AGGREGATE_TYPE_P (valtype)
9170 && TYPE_MODE (valtype) != TFmode
9171 && !TARGET_SOFT_FLOAT)
9172 return gen_rtx_REG (valmode, 32);
9174 return gen_rtx_REG (valmode, 28);
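/* Examples of the rules above (illustrative): an integer narrower than
   a word is promoted and returned in %r28 in word_mode; a double comes
   back in fr4 (register 32) unless TARGET_SOFT_FLOAT; and on the
   64-bit target a 16-byte struct is returned as
       (parallel [(reg:DI 28) at offset 0, (reg:DI 29) at offset 8]).  */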
9177 /* Return the location of a parameter that is passed in a register or NULL
9178 if the parameter has any component that is passed in memory.
9180 This is new code and will be pushed into the net sources after
9181 further testing.
9183 ??? We might want to restructure this so that it looks more like other
9184 ports. */
9185 rtx
9186 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9187 int named ATTRIBUTE_UNUSED)
9189 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9190 int alignment = 0;
9191 int arg_size;
9192 int fpr_reg_base;
9193 int gpr_reg_base;
9194 rtx retval;
9196 if (mode == VOIDmode)
9197 return NULL_RTX;
9199 arg_size = FUNCTION_ARG_SIZE (mode, type);
9201 /* If this arg would be passed partially or totally on the stack, then
9202 this routine should return zero. pa_arg_partial_bytes will
9203 handle arguments which are split between regs and stack slots if
9204 the ABI mandates split arguments. */
9205 if (!TARGET_64BIT)
9207 /* The 32-bit ABI does not split arguments. */
9208 if (cum->words + arg_size > max_arg_words)
9209 return NULL_RTX;
9211 else
9213 if (arg_size > 1)
9214 alignment = cum->words & 1;
9215 if (cum->words + alignment >= max_arg_words)
9216 return NULL_RTX;
9219 /* The 32bit ABIs and the 64bit ABIs are rather different,
9220 particularly in their handling of FP registers. We might
9221 be able to cleverly share code between them, but I'm not
9222 going to bother in the hope that splitting them up results
9223 in code that is more easily understood. */
9225 if (TARGET_64BIT)
9227 /* Advance the base registers to their current locations.
9229 Remember, gprs grow towards smaller register numbers while
9230 fprs grow to higher register numbers. Also remember that
9231 although FP regs are 32-bit addressable, we pretend that
9232 the registers are 64-bits wide. */
9233 gpr_reg_base = 26 - cum->words;
9234 fpr_reg_base = 32 + cum->words;
9236 /* Arguments wider than one word and small aggregates need special
9237 treatment. */
9238 if (arg_size > 1
9239 || mode == BLKmode
9240 || (type && (AGGREGATE_TYPE_P (type)
9241 || TREE_CODE (type) == COMPLEX_TYPE
9242 || TREE_CODE (type) == VECTOR_TYPE)))
9244 /* Double-extended precision (80-bit), quad-precision (128-bit)
9245 and aggregates including complex numbers are aligned on
9246 128-bit boundaries. The first eight 64-bit argument slots
9247 are associated one-to-one, with general registers r26
9248 through r19, and also with floating-point registers fr4
9249 through fr11. Arguments larger than one word are always
9250 passed in general registers.
9252 Using a PARALLEL with a word mode register results in left
9253 justified data on a big-endian target. */
9255 rtx loc[8];
9256 int i, offset = 0, ub = arg_size;
9258 /* Align the base register. */
9259 gpr_reg_base -= alignment;
9261 ub = MIN (ub, max_arg_words - cum->words - alignment);
9262 for (i = 0; i < ub; i++)
9264 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9265 gen_rtx_REG (DImode, gpr_reg_base),
9266 GEN_INT (offset));
9267 gpr_reg_base -= 1;
9268 offset += 8;
9271 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9274 else
9276 /* If the argument is larger than a word, then we know precisely
9277 which registers we must use. */
9278 if (arg_size > 1)
9280 if (cum->words)
9282 gpr_reg_base = 23;
9283 fpr_reg_base = 38;
9285 else
9287 gpr_reg_base = 25;
9288 fpr_reg_base = 34;
9291 /* Structures 5 to 8 bytes in size are passed in the general
9292 registers in the same manner as other non floating-point
9293 objects. The data is right-justified and zero-extended
9294 to 64 bits. This is opposite to the normal justification
9295 used on big endian targets and requires special treatment.
9296 We now define BLOCK_REG_PADDING to pad these objects.
9297 Aggregates, complex and vector types are passed in the same
9298 manner as structures. */
9299 if (mode == BLKmode
9300 || (type && (AGGREGATE_TYPE_P (type)
9301 || TREE_CODE (type) == COMPLEX_TYPE
9302 || TREE_CODE (type) == VECTOR_TYPE)))
9304 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9305 gen_rtx_REG (DImode, gpr_reg_base),
9306 const0_rtx);
9307 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9310 else
9312 /* We have a single word (32 bits). A simple computation
9313 will get us the register #s we need. */
9314 gpr_reg_base = 26 - cum->words;
9315 fpr_reg_base = 32 + 2 * cum->words;
9319 /* Determine if the argument needs to be passed in both general and
9320 floating point registers. */
9321 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9322 /* If we are doing soft-float with portable runtime, then there
9323 is no need to worry about FP regs. */
9324 && !TARGET_SOFT_FLOAT
9325 /* The parameter must be some kind of scalar float, else we just
9326 pass it in integer registers. */
9327 && GET_MODE_CLASS (mode) == MODE_FLOAT
9328 /* The target function must not have a prototype. */
9329 && cum->nargs_prototype <= 0
9330 /* libcalls do not need to pass items in both FP and general
9331 registers. */
9332 && type != NULL_TREE
9333 /* All this hair applies to "outgoing" args only. This includes
9334 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9335 && !cum->incoming)
9336 /* Also pass outgoing floating arguments in both registers in indirect
9337 calls with the 32 bit ABI and the HP assembler since there is no
9338 way to specify argument locations in static functions. */
9339 || (!TARGET_64BIT
9340 && !TARGET_GAS
9341 && !cum->incoming
9342 && cum->indirect
9343 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9345 retval
9346 = gen_rtx_PARALLEL
9347 (mode,
9348 gen_rtvec (2,
9349 gen_rtx_EXPR_LIST (VOIDmode,
9350 gen_rtx_REG (mode, fpr_reg_base),
9351 const0_rtx),
9352 gen_rtx_EXPR_LIST (VOIDmode,
9353 gen_rtx_REG (mode, gpr_reg_base),
9354 const0_rtx)));
9356 else
9358 /* See if we should pass this parameter in a general register. */
9359 if (TARGET_SOFT_FLOAT
9360 /* Indirect calls in the normal 32bit ABI require all arguments
9361 to be passed in general registers. */
9362 || (!TARGET_PORTABLE_RUNTIME
9363 && !TARGET_64BIT
9364 && !TARGET_ELF32
9365 && cum->indirect)
9366 /* If the parameter is not a scalar floating-point parameter,
9367 then it belongs in GPRs. */
9368 || GET_MODE_CLASS (mode) != MODE_FLOAT
9369 /* Structure with single SFmode field belongs in GPR. */
9370 || (type && AGGREGATE_TYPE_P (type)))
9371 retval = gen_rtx_REG (mode, gpr_reg_base);
9372 else
9373 retval = gen_rtx_REG (mode, fpr_reg_base);
9375 return retval;
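
/* Illustrative sketch (editorial addition, not build code): when the
   both-registers test above fires, e.g. for a DFmode argument to an
   unprototyped callee on one of the listed targets, the result is

       (parallel [(expr_list (reg:DF fpr_reg_base) (const_int 0))
                  (expr_list (reg:DF gpr_reg_base) (const_int 0))])

   so the caller materializes the value in both a floating-point and a
   general register, and the callee may read whichever one it expects.  */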

/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
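
/* Worked example (editorial addition): with six argument words already
   used (cum->words == 6) and a four-word BLKmode argument, offset stays
   0 (6 is even), 6 + 0 + 4 > 8 and 6 + 0 < 8, so the argument is split:
   the function returns (8 - 6 - 0) * 8 = 16 bytes passed in registers,
   and the remaining 16 bytes of the argument go on the stack.  */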

/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            {
              output_section_asm_op ("\t.SPACE $TEXT$\n"
                                     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
                                     "ACCESS=44,SORT=24,COMDAT");
              return;
            }
        }
      else
        {
          /* There isn't a current function, or the body of the current
             function has been completed.  So, we are changing to the
             text section to output debugging information.  Thus, we
             need to forget that we are in the text section so that
             varasm.c will call us when text_section is selected again.  */
          gcc_assert (!cfun || !cfun->machine
                      || cfun->machine->in_nsubspa == 2);
          in_section = NULL;
        }
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
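
/* Illustrative note (editorial addition): with GAS, the first text
   section switch in a one-only (COMDAT) function emits

        .SPACE $TEXT$
        .NSUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=24,COMDAT

   an ordinary function instead gets the plain .NSUBSPA $CODE$ form, and
   the HP assembler path always uses .SUBSPA $CODE$.  */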

/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}

/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HP-UX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example, constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems handling the
     difference of two symbols, which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data that is in the
     $TEXT$ space during PIC generation.  Instead, place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}

/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc
   cannot be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
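
/* Illustrative example (editorial addition): a hypothetical

       static const int primes[4] = { 2, 3, 5, 7 };

   is a read-only VAR_DECL with a constant initializer and no relocs, so
   it is placed in readonly_data_section; a one-only (COMDAT) definition
   on SOM would get som_one_only_readonly_data_section instead.  A
   read-only variable whose initializer needs a reloc, such as a pointer
   to an external symbol, falls through to data_section.  */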

static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here; functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (!FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
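
/* Illustrative example (editorial addition): for a hypothetical data
   symbol named "counter", the code above emits

        .EXPORT counter,DATA

   function symbols never reach this path.  */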

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* The SOM ABI says that objects larger than 64 bits are returned in
     memory.  The PA64 ABI says that objects larger than 128 bits are
     returned in memory.  Note that int_size_in_bytes can return -1 if
     the size of the object is variable or larger than the maximum value
     that can be expressed as a HOST_WIDE_INT.  It can also return zero
     for an empty type.  The simplest way to handle variable and empty
     types is to pass them in memory.  This avoids problems in defining
     the boundaries of argument slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
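
/* Worked example (editorial addition): a 12-byte struct is returned in
   memory on the 32-bit SOM target (12 > 8) but in registers under the
   PA64 ABI (12 <= 16); an empty struct (size 0) is returned in memory
   on both, per the variable/empty-type rule above.  */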

/* Structure to hold the declaration and name of external symbols that
   are emitted by GCC.  We generate a vector of these symbols and output
   them at the end of the file if and only if SYMBOL_REF_REFERENCED_P is
   true.  This avoids putting out names that are never really used.  */

typedef struct extern_symbol GTY(())
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol objects.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;

#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if they are actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol *p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}

/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif
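
/* Editorial note: because the import directives are deferred to
   pa_hpux_file_end, an external declaration that is never referenced,
   e.g. a hypothetical `extern int unused_var;', produces no import
   directive at all, keeping the assembler output free of unused names
   exactly as the comment above describes.  */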

/* Return true if a change from mode FROM to mode TO for a register
   in register class CLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                             enum reg_class class)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory.  SImode loads to the FP registers are not zero extended.
     On the 64-bit target, this conflicts with the definition of
     LOAD_EXTEND_OP.  Thus, we can't allow changing between modes
     with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (class))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
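
/* Illustrative example (editorial addition): an SFmode to DFmode change
   in a floating-point register class is rejected (different sizes in FP
   registers), SImode to SFmode is allowed (equal sizes), and SImode to
   DImode is rejected in any class when DImode is wider than a word.  */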

/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1
   and MODE2, for any hard reg, then this must be FALSE for correct
   output.

   We should return FALSE for QImode and HImode because these modes
   are not ok in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
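
/* Illustrative example (editorial addition): QImode and SImode are
   tieable here because both are MODE_INT, while SImode and SFmode are
   not (MODE_INT vs. MODE_FLOAT), even though they have the same size.  */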

#include "gt-pa.h"