/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "df.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
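
/* This predicate backs the define_bypass descriptions in pa.md: the
   result of OUT_INSN can be forwarded into the store data port of
   IN_INSN only when both operations move the same number of bytes,
   hence the GET_MODE_SIZE comparison above.  */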

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx, bool);
static bool hppa_rtx_costs (rtx, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
					   enum machine_mode,
					   secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
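
/* Example: -mfixed-range=fr4-fr31 keeps the compiler away from fr4
   through fr31, as in kernel code.  Several comma-separated ranges
   may be given, e.g. -mfixed-range=fr4-fr15,fr20-fr31, and if the
   ranges end up fixing every floating point register the code above
   disables the FP register set entirely via MASK_DISABLE_FPREGS.  */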

/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}

void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX_11
  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return GGC_CNEW (machine_function);
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || ldil_cint_p (ival)
	  || zdepi_cint_p (ival));
}

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff the operand OP can be used as the destination operand of
   an integer store.  This also implies the operand could be used as
   the source operand of an integer load.  Symbolic, lo_sum and indexed
   memory operands are not allowed.  We accept reloading pseudos and
   other memory operands.  */
int
integer_store_memory_operand (rtx op, enum machine_mode mode)
{
  return ((reload_in_progress
	   && REG_P (op)
	   && REGNO (op) >= FIRST_PSEUDO_REGISTER
	   && reg_renumber [REGNO (op)] < 0)
	  || (GET_CODE (op) == MEM
	      && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
	      && !symbolic_memory_operand (op, VOIDmode)
	      && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
	      && !IS_INDEX_ADDR_P (XEXP (op, 0))));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
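
/* For example, 0x12345800 is loadable with one ldil: its low 11 bits
   (mask 0x7ff) are zero and bit 31 is clear, so X above ends up zero.
   0x12345678 fails the low-bit test, and 0x80000000 fails the sign
   test because bit 31 is set while the bits above it are clear.  */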

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
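
/* A worked example of the bit trick: for x = 0x70 (binary 1110000),
   lsb_mask = 0x10 and t = ((0x70 >> 4) + 0x10) & ~0xf = 0x17 & ~0xf
   = 0x10, a power of two, so the value is accepted; 0x70 is the
   5-bit immediate 00111 deposited with a left shift of four.  */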

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
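
/* Both predicates use the same trick: adding a value's lowest set bit
   collapses its lowest run of contiguous ones into a single carry
   bit, so the sum has at most one bit set exactly when the ones form
   a single run (possibly wrapping past the top bit).  For instance,
   ior_mask_p (0x0ff0) computes 0x0ff0 + 0x0010 = 0x1000, a power of
   two, and succeeds, while ior_mask_p (0x0ff5) computes 0x0ff5 + 1
   = 0x0ff6, which has several bits set, and fails.  */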

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, mode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
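
/* In summary: the two dynamic models materialize the argument with a
   tgd_load/tld_load pattern and call __tls_get_addr (the local
   dynamic call is wrapped by emit_libcall_block so equivalent blocks
   can be shared), while the two exec models read the thread pointer
   with tp_load and add the per-symbol offset produced by tie_load or
   tle_load.  */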

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
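
/* A concrete instance of the transformation above for a MODE_INT
   reference (mask 0x3fff): for memory (X + 0x5678), 0x5678 & 0x3fff
   = 0x1678 is below the halfway point 0x2000, so we round down to
   Y = 0x4000 and emit Z = X + 0x4000 followed by memory (Z + 0x1678);
   the residual 0x1678 fits a 14-bit displacement, and nearby
   references can CSE the same Z.  */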

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* For the HPPA, REG, REG+CONST and LO_SUM addresses cost 1, while
   addresses involving symbolic constants (HIGH) cost 2 and anything
   else costs 4.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
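
/* Note that two scales are mixed above: the constant cases store
   small raw values (0 through 8), while the arithmetic cases use
   COSTS_N_INSNS, which expresses its argument in units of a single
   fast instruction (COSTS_N_INSNS (1) == 4), so e.g. a SYMBOL_REF is
   priced like one instruction and a CONST_DOUBLE like two.  */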

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     1),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.  Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
							     1),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  type = strip_array_types (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress)
		  && flag_pic)
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
	    }
	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
      else if (GET_CODE (operand1) != CONST_INT
1947 || !cint_ok_for_move (INTVAL (operand1)))
1949 rtx insn, temp;
1950 rtx op1 = operand1;
1951 HOST_WIDE_INT value = 0;
1952 HOST_WIDE_INT insv = 0;
1953 int insert = 0;
1955 if (GET_CODE (operand1) == CONST_INT)
1956 value = INTVAL (operand1);
1958 if (TARGET_64BIT
1959 && GET_CODE (operand1) == CONST_INT
1960 && HOST_BITS_PER_WIDE_INT > 32
1961 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1963 HOST_WIDE_INT nval;
1965 /* Extract the low order 32 bits of the value and sign extend.
1966 If the new value is the same as the original value, we can
1967 use the original value as-is. If the new value is
1968 different, we use it and insert the most-significant 32-bits
1969 of the original value into the final result. */
1970 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1971 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
1972 if (value != nval)
1974 #if HOST_BITS_PER_WIDE_INT > 32
1975 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1976 #endif
1977 insert = 1;
1978 value = nval;
1979 operand1 = GEN_INT (nval);
1983 if (reload_in_progress || reload_completed)
1984 temp = scratch_reg ? scratch_reg : operand0;
1985 else
1986 temp = gen_reg_rtx (mode);
1988 /* We don't directly split DImode constants on 32-bit targets
1989 because PLUS uses an 11-bit immediate and the insn sequence
1990 generated is not as efficient as the one using HIGH/LO_SUM. */
1991 if (GET_CODE (operand1) == CONST_INT
1992 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
1993 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1994 && !insert)
1996 /* Directly break constant into high and low parts. This
1997 provides better optimization opportunities because various
1998 passes recognize constants split with PLUS but not LO_SUM.
1999 We use a 14-bit signed low part except when the addition
2000 of 0x4000 to the high part might change the sign of the
2001 high part. */
2002 HOST_WIDE_INT low = value & 0x3fff;
2003 HOST_WIDE_INT high = value & ~ 0x3fff;
2005 if (low >= 0x2000)
2007 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2008 high += 0x2000;
2009 else
2010 high += 0x4000;
2013 low = value - high;
2015 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2016 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2018 else
2020 emit_insn (gen_rtx_SET (VOIDmode, temp,
2021 gen_rtx_HIGH (mode, operand1)));
2022 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2025 insn = emit_move_insn (operands[0], operands[1]);
2027 /* Now insert the most significant 32 bits of the value
2028 into the register. When we don't have a second register
2029 available, it could take up to nine instructions to load
2030 a 64-bit integer constant. Prior to reload, we force
2031 constants that would take more than three instructions
2032 to load to the constant pool. During and after reload,
2033 we have to handle all possible values. */
2034 if (insert)
2036 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2037 register and the value to be inserted is outside the
2038 range that can be loaded with three depdi instructions. */
2039 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2041 operand1 = GEN_INT (insv);
2043 emit_insn (gen_rtx_SET (VOIDmode, temp,
2044 gen_rtx_HIGH (mode, operand1)));
2045 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2046 emit_insn (gen_insv (operand0, GEN_INT (32),
2047 const0_rtx, temp));
2049 else
2051 int len = 5, pos = 27;
2053 /* Insert the bits using the depdi instruction. */
2054 while (pos >= 0)
2056 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2057 HOST_WIDE_INT sign = v5 < 0;
2059 /* Left extend the insertion. */
2060 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2061 while (pos > 0 && (insv & 1) == sign)
2063 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2064 len += 1;
2065 pos -= 1;
2068 emit_insn (gen_insv (operand0, GEN_INT (len),
2069 GEN_INT (pos), GEN_INT (v5)));
2071 len = pos > 0 && pos < 5 ? pos : 5;
2072 pos -= len;
2077 set_unique_reg_note (insn, REG_EQUAL, op1);
2079 return 1;
2082 /* Now have insn-emit do whatever it normally does. */
2083 return 0;
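
/* Editorial sketch (not from the GCC sources, never compiled): a
   standalone C program showing how the 14-bit low/high constant split
   above behaves on sample values. The 0x7fffc000/HImode sign-overflow
   special case handled above is omitted for brevity. */
#if 0
#include <stdio.h>

static void
split_high_low (long value)
{
  long low = value & 0x3fff;
  long high = value & ~0x3fff;

  /* Bias the low part into the signed 14-bit range [-0x2000, 0x1fff]. */
  if (low >= 0x2000)
    high += 0x4000;
  low = value - high;

  printf ("%#lx = %ld + %ld\n", value, high, low);
}

int
main (void)
{
  split_high_low (0x12345);   /* 0x12345 = 81920 + -7355 */
  split_high_low (0x1234);    /* 0x1234 = 0 + 4660 */
  return 0;
}
#endif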
2086 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2087 it will need a link/runtime reloc). */
2090 reloc_needed (tree exp)
2092 int reloc = 0;
2094 switch (TREE_CODE (exp))
2096 case ADDR_EXPR:
2097 return 1;
2099 case POINTER_PLUS_EXPR:
2100 case PLUS_EXPR:
2101 case MINUS_EXPR:
2102 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2103 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2104 break;
2106 CASE_CONVERT:
2107 case NON_LVALUE_EXPR:
2108 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2109 break;
2111 case CONSTRUCTOR:
2113 tree value;
2114 unsigned HOST_WIDE_INT ix;
2116 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2117 if (value)
2118 reloc |= reloc_needed (value);
2120 break;
2122 case ERROR_MARK:
2123 break;
2125 default:
2126 break;
2128 return reloc;
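
/* Editorial sketch (not from the GCC sources, never compiled): static
   initializers of the kinds the recursive walk above flags as needing
   a link/runtime reloc. */
#if 0
int x;
int *needs_reloc = &x;            /* ADDR_EXPR: needs a reloc. */
int *in_ctor[2] = { &x, 0 };      /* CONSTRUCTOR holding an ADDR_EXPR. */
int plain = 42;                   /* Plain constant: no reloc needed. */
#endif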
2131 /* Does operand (which is a symbolic_operand) live in text space?
2132 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2133 will be true. */
2136 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2138 if (GET_CODE (operand) == CONST)
2139 operand = XEXP (XEXP (operand, 0), 0);
2140 if (flag_pic)
2142 if (GET_CODE (operand) == SYMBOL_REF)
2143 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2145 else
2147 if (GET_CODE (operand) == SYMBOL_REF)
2148 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2150 return 1;
2154 /* Return the best assembler insn template
2155 for moving operands[1] into operands[0] as a fullword. */
2156 const char *
2157 singlemove_string (rtx *operands)
2159 HOST_WIDE_INT intval;
2161 if (GET_CODE (operands[0]) == MEM)
2162 return "stw %r1,%0";
2163 if (GET_CODE (operands[1]) == MEM)
2164 return "ldw %1,%0";
2165 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2167 long i;
2168 REAL_VALUE_TYPE d;
2170 gcc_assert (GET_MODE (operands[1]) == SFmode);
2172 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2173 bit pattern. */
2174 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2175 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2177 operands[1] = GEN_INT (i);
2178 /* Fall through to CONST_INT case. */
2180 if (GET_CODE (operands[1]) == CONST_INT)
2182 intval = INTVAL (operands[1]);
2184 if (VAL_14_BITS_P (intval))
2185 return "ldi %1,%0";
2186 else if ((intval & 0x7ff) == 0)
2187 return "ldil L'%1,%0";
2188 else if (zdepi_cint_p (intval))
2189 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2190 else
2191 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2193 return "copy %1,%0";
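
/* Editorial sketch (not from the GCC sources, never compiled): the
   template selection above for CONST_INT sources, with simplified
   stand-ins for the VAL_14_BITS_P and zdepi_cint_p predicates (the
   zdepi case is omitted). */
#if 0
#include <stdio.h>

static const char *
pick_template (long v)
{
  if (v >= -8192 && v <= 8191)   /* VAL_14_BITS_P: one ldi. */
    return "ldi";
  if ((v & 0x7ff) == 0)          /* Low 11 bits clear: one ldil. */
    return "ldil";
  return "ldil + ldo";           /* General two-insn sequence. */
}

int
main (void)
{
  printf ("%s\n", pick_template (42));         /* ldi */
  printf ("%s\n", pick_template (0x12345800)); /* ldil */
  printf ("%s\n", pick_template (0x12345));    /* ldil + ldo */
  return 0;
}
#endif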
2197 /* Compute position (in OP[1]) and width (in OP[2])
2198 useful for copying IMM to a register using the zdepi
2199 instructions. Store the immediate value to insert in OP[0]. */
2200 static void
2201 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2203 int lsb, len;
2205 /* Find the least significant set bit in IMM. */
2206 for (lsb = 0; lsb < 32; lsb++)
2208 if ((imm & 1) != 0)
2209 break;
2210 imm >>= 1;
2213 /* Choose variants based on *sign* of the 5-bit field. */
2214 if ((imm & 0x10) == 0)
2215 len = (lsb <= 28) ? 4 : 32 - lsb;
2216 else
2218 /* Find the width of the bitstring in IMM. */
2219 for (len = 5; len < 32; len++)
2221 if ((imm & (1 << len)) == 0)
2222 break;
2225 /* Sign extend IMM as a 5-bit value. */
2226 imm = (imm & 0xf) - 0x10;
2229 op[0] = imm;
2230 op[1] = 31 - lsb;
2231 op[2] = len;
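
/* Editorial sketch (not from the GCC sources, never compiled): a
   standalone copy of the routine above so the operand computation can
   be tried on the host. */
#if 0
#include <stdio.h>

static void
zdepwi_operands (unsigned long imm, unsigned op[3])
{
  int lsb, len;

  for (lsb = 0; lsb < 32; lsb++)      /* Least significant set bit. */
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  if ((imm & 0x10) == 0)              /* 5-bit field is non-negative. */
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      for (len = 5; len < 32; len++)  /* Width of the bitstring. */
        if ((imm & (1UL << len)) == 0)
          break;
      imm = (imm & 0xf) - 0x10;       /* Sign extend as 5-bit value. */
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}

int
main (void)
{
  unsigned op[3];

  zdepwi_operands (0x3800, op);       /* Bits 11..13 set. */
  printf ("imm=%d pos=%u len=%u\n", (int) op[0], op[1], op[2]);
  /* Prints: imm=7 pos=20 len=4 */
  return 0;
}
#endif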
2234 /* Compute position (in OP[1]) and width (in OP[2])
2235 useful for copying IMM to a register using the depdi,z
2236 instructions. Store the immediate value to insert in OP[0]. */
2237 void
2238 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2240 HOST_WIDE_INT lsb, len;
2242 /* Find the least significant set bit in IMM. */
2243 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2245 if ((imm & 1) != 0)
2246 break;
2247 imm >>= 1;
2250 /* Choose variants based on *sign* of the 5-bit field. */
2251 if ((imm & 0x10) == 0)
2252 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2253 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2254 else
2256 /* Find the width of the bitstring in IMM. */
2257 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2259 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2260 break;
2263 /* Sign extend IMM as a 5-bit value. */
2264 imm = (imm & 0xf) - 0x10;
2267 op[0] = imm;
2268 op[1] = 63 - lsb;
2269 op[2] = len;
2272 /* Output assembler code to perform a doubleword move insn
2273 with operands OPERANDS. */
2275 const char *
2276 output_move_double (rtx *operands)
2278 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2279 rtx latehalf[2];
2280 rtx addreg0 = 0, addreg1 = 0;
2282 /* First classify both operands. */
2284 if (REG_P (operands[0]))
2285 optype0 = REGOP;
2286 else if (offsettable_memref_p (operands[0]))
2287 optype0 = OFFSOP;
2288 else if (GET_CODE (operands[0]) == MEM)
2289 optype0 = MEMOP;
2290 else
2291 optype0 = RNDOP;
2293 if (REG_P (operands[1]))
2294 optype1 = REGOP;
2295 else if (CONSTANT_P (operands[1]))
2296 optype1 = CNSTOP;
2297 else if (offsettable_memref_p (operands[1]))
2298 optype1 = OFFSOP;
2299 else if (GET_CODE (operands[1]) == MEM)
2300 optype1 = MEMOP;
2301 else
2302 optype1 = RNDOP;
2304 /* Check for cases that the operand constraints are not
2305 supposed to allow. */
2306 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2308 /* Handle copies between general and floating registers. */
2310 if (optype0 == REGOP && optype1 == REGOP
2311 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2313 if (FP_REG_P (operands[0]))
2315 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2316 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2317 return "{fldds|fldd} -16(%%sp),%0";
2319 else
2321 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2322 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2323 return "{ldws|ldw} -12(%%sp),%R0";
2327 /* Handle auto decrementing and incrementing loads and stores
2328 specifically, since the structure of the function doesn't work
2329 for them without major modification. Do this better when this
2330 port is taught about the general inc/dec addressing of the PA.
2331 (This was written by tege. Chide him if it doesn't work.) */
2333 if (optype0 == MEMOP)
2335 /* We have to output the address syntax ourselves, since print_operand
2336 doesn't deal with the addresses we want to use. Fix this later. */
2338 rtx addr = XEXP (operands[0], 0);
2339 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2341 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2343 operands[0] = XEXP (addr, 0);
2344 gcc_assert (GET_CODE (operands[1]) == REG
2345 && GET_CODE (operands[0]) == REG);
2347 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2349 /* No overlap between high target register and address
2350 register. (We do this in a non-obvious way to
2351 save a register file writeback) */
2352 if (GET_CODE (addr) == POST_INC)
2353 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2354 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2356 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2358 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2360 operands[0] = XEXP (addr, 0);
2361 gcc_assert (GET_CODE (operands[1]) == REG
2362 && GET_CODE (operands[0]) == REG);
2364 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2365 /* No overlap between high target register and address
2366 register. (We do this in a non-obvious way to save a
2367 register file writeback) */
2368 if (GET_CODE (addr) == PRE_INC)
2369 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2370 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2373 if (optype1 == MEMOP)
2375 /* We have to output the address syntax ourselves, since print_operand
2376 doesn't deal with the addresses we want to use. Fix this later. */
2378 rtx addr = XEXP (operands[1], 0);
2379 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2381 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2383 operands[1] = XEXP (addr, 0);
2384 gcc_assert (GET_CODE (operands[0]) == REG
2385 && GET_CODE (operands[1]) == REG);
2387 if (!reg_overlap_mentioned_p (high_reg, addr))
2389 /* No overlap between high target register and address
2390 register. (We do this in a non-obvious way to
2391 save a register file writeback) */
2392 if (GET_CODE (addr) == POST_INC)
2393 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2394 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2396 else
2398 /* This is an undefined situation. We should load into the
2399 address register *and* update that register. Probably
2400 we don't need to handle this at all. */
2401 if (GET_CODE (addr) == POST_INC)
2402 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2403 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2406 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2408 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2410 operands[1] = XEXP (addr, 0);
2411 gcc_assert (GET_CODE (operands[0]) == REG
2412 && GET_CODE (operands[1]) == REG);
2414 if (!reg_overlap_mentioned_p (high_reg, addr))
2416 /* No overlap between high target register and address
2417 register. (We do this in a non-obvious way to
2418 save a register file writeback) */
2419 if (GET_CODE (addr) == PRE_INC)
2420 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2421 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2423 else
2425 /* This is an undefined situation. We should load into the
2426 address register *and* update that register. Probably
2427 we don't need to handle this at all. */
2428 if (GET_CODE (addr) == PRE_INC)
2429 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2430 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2433 else if (GET_CODE (addr) == PLUS
2434 && GET_CODE (XEXP (addr, 0)) == MULT)
2436 rtx xoperands[4];
2437 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2439 if (!reg_overlap_mentioned_p (high_reg, addr))
2441 xoperands[0] = high_reg;
2442 xoperands[1] = XEXP (addr, 1);
2443 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2444 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2445 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2446 xoperands);
2447 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2449 else
2451 xoperands[0] = high_reg;
2452 xoperands[1] = XEXP (addr, 1);
2453 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2454 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2455 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2456 xoperands);
2457 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2462 /* If an operand is an unoffsettable memory ref, find a register
2463 we can increment temporarily to make it refer to the second word. */
2465 if (optype0 == MEMOP)
2466 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2468 if (optype1 == MEMOP)
2469 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2471 /* Ok, we can do one word at a time.
2472 Normally we do the low-numbered word first; the order is reversed
2474 below when that would clobber a source. In either case, set up in
2475 LATEHALF the operands to use for the high-numbered word and in some
2476 cases alter the operands in OPERANDS to suit the low-numbered word. */
2478 if (optype0 == REGOP)
2479 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2480 else if (optype0 == OFFSOP)
2481 latehalf[0] = adjust_address (operands[0], SImode, 4);
2482 else
2483 latehalf[0] = operands[0];
2485 if (optype1 == REGOP)
2486 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2487 else if (optype1 == OFFSOP)
2488 latehalf[1] = adjust_address (operands[1], SImode, 4);
2489 else if (optype1 == CNSTOP)
2490 split_double (operands[1], &operands[1], &latehalf[1]);
2491 else
2492 latehalf[1] = operands[1];
2494 /* If the first move would clobber the source of the second one,
2495 do them in the other order.
2497 This can happen in two cases:
2499 mem -> register where the first half of the destination register
2500 is the same register used in the memory's address. Reload
2501 can create such insns.
2503 mem in this case will be either register indirect or register
2504 indirect plus a valid offset.
2506 register -> register move where REGNO(dst) == REGNO(src + 1)
2507 someone (Tim/Tege?) claimed this can happen for parameter loads.
2509 Handle mem -> register case first. */
2510 if (optype0 == REGOP
2511 && (optype1 == MEMOP || optype1 == OFFSOP)
2512 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2513 operands[1], 0))
2515 /* Do the late half first. */
2516 if (addreg1)
2517 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2518 output_asm_insn (singlemove_string (latehalf), latehalf);
2520 /* Then clobber. */
2521 if (addreg1)
2522 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2523 return singlemove_string (operands);
2526 /* Now handle register -> register case. */
2527 if (optype0 == REGOP && optype1 == REGOP
2528 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2530 output_asm_insn (singlemove_string (latehalf), latehalf);
2531 return singlemove_string (operands);
2534 /* Normal case: do the two words, low-numbered first. */
2536 output_asm_insn (singlemove_string (operands), operands);
2538 /* Make any unoffsettable addresses point at high-numbered word. */
2539 if (addreg0)
2540 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2541 if (addreg1)
2542 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2544 /* Do that word. */
2545 output_asm_insn (singlemove_string (latehalf), latehalf);
2547 /* Undo the adds we just did. */
2548 if (addreg0)
2549 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2550 if (addreg1)
2551 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2553 return "";
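
/* Editorial sketch (not from the GCC sources, never compiled): why the
   overlapping register->register case above emits the high-numbered
   word first. */
#if 0
#include <stdio.h>

int
main (void)
{
  int regs[8] = { 0, 0, 0, 0, 0x1111, 0x2222, 0, 0 };

  /* Move the double in r4/r5 to r5/r6: REGNO (dst) == REGNO (src) + 1.
     Doing the low word first would overwrite r5 before it is read. */
  regs[6] = regs[5];   /* Late (high-numbered) half first... */
  regs[5] = regs[4];   /* ...then the low half. */

  printf ("r5=%#x r6=%#x\n", regs[5], regs[6]);   /* r5=0x1111 r6=0x2222 */
  return 0;
}
#endif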
2556 const char *
2557 output_fp_move_double (rtx *operands)
2559 if (FP_REG_P (operands[0]))
2561 if (FP_REG_P (operands[1])
2562 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2563 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2564 else
2565 output_asm_insn ("fldd%F1 %1,%0", operands);
2567 else if (FP_REG_P (operands[1]))
2569 output_asm_insn ("fstd%F0 %1,%0", operands);
2571 else
2573 rtx xoperands[2];
2575 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2577 /* This is a pain. You have to be prepared to deal with an
2578 arbitrary address here including pre/post increment/decrement.
2580 So avoid this in the MD. */
2581 gcc_assert (GET_CODE (operands[0]) == REG);
2583 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2584 xoperands[0] = operands[0];
2585 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2587 return "";
2590 /* Return a REG that occurs in ADDR with coefficient 1.
2591 ADDR can be effectively incremented by incrementing REG. */
2593 static rtx
2594 find_addr_reg (rtx addr)
2596 while (GET_CODE (addr) == PLUS)
2598 if (GET_CODE (XEXP (addr, 0)) == REG)
2599 addr = XEXP (addr, 0);
2600 else if (GET_CODE (XEXP (addr, 1)) == REG)
2601 addr = XEXP (addr, 1);
2602 else if (CONSTANT_P (XEXP (addr, 0)))
2603 addr = XEXP (addr, 1);
2604 else if (CONSTANT_P (XEXP (addr, 1)))
2605 addr = XEXP (addr, 0);
2606 else
2607 gcc_unreachable ();
2609 gcc_assert (GET_CODE (addr) == REG);
2610 return addr;
2613 /* Emit code to perform a block move.
2615 OPERANDS[0] is the destination pointer as a REG, clobbered.
2616 OPERANDS[1] is the source pointer as a REG, clobbered.
2617 OPERANDS[2] is a register for temporary storage.
2618 OPERANDS[3] is a register for temporary storage.
2619 OPERANDS[4] is the size as a CONST_INT
2620 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2621 OPERANDS[6] is another temporary register. */
2623 const char *
2624 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2626 int align = INTVAL (operands[5]);
2627 unsigned long n_bytes = INTVAL (operands[4]);
2629 /* We can't move more than a word at a time because the PA
2630 has no integer move insns wider than a word. (Could use fp mem ops?) */
2631 if (align > (TARGET_64BIT ? 8 : 4))
2632 align = (TARGET_64BIT ? 8 : 4);
2634 /* Note that we know each loop below will execute at least twice
2635 (else we would have open-coded the copy). */
2636 switch (align)
2638 case 8:
2639 /* Pre-adjust the loop counter. */
2640 operands[4] = GEN_INT (n_bytes - 16);
2641 output_asm_insn ("ldi %4,%2", operands);
2643 /* Copying loop. */
2644 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2645 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2646 output_asm_insn ("std,ma %3,8(%0)", operands);
2647 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2648 output_asm_insn ("std,ma %6,8(%0)", operands);
2650 /* Handle the residual. There could be up to 15 bytes of
2651 residual to copy! */
2652 if (n_bytes % 16 != 0)
2654 operands[4] = GEN_INT (n_bytes % 8);
2655 if (n_bytes % 16 >= 8)
2656 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2657 if (n_bytes % 8 != 0)
2658 output_asm_insn ("ldd 0(%1),%6", operands);
2659 if (n_bytes % 16 >= 8)
2660 output_asm_insn ("std,ma %3,8(%0)", operands);
2661 if (n_bytes % 8 != 0)
2662 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2664 return "";
2666 case 4:
2667 /* Pre-adjust the loop counter. */
2668 operands[4] = GEN_INT (n_bytes - 8);
2669 output_asm_insn ("ldi %4,%2", operands);
2671 /* Copying loop. */
2672 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2673 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2674 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2675 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2676 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2678 /* Handle the residual. There could be up to 7 bytes of
2679 residual to copy! */
2680 if (n_bytes % 8 != 0)
2682 operands[4] = GEN_INT (n_bytes % 4);
2683 if (n_bytes % 8 >= 4)
2684 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2685 if (n_bytes % 4 != 0)
2686 output_asm_insn ("ldw 0(%1),%6", operands);
2687 if (n_bytes % 8 >= 4)
2688 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2689 if (n_bytes % 4 != 0)
2690 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2692 return "";
2694 case 2:
2695 /* Pre-adjust the loop counter. */
2696 operands[4] = GEN_INT (n_bytes - 4);
2697 output_asm_insn ("ldi %4,%2", operands);
2699 /* Copying loop. */
2700 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2701 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2702 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2703 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2704 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2706 /* Handle the residual. */
2707 if (n_bytes % 4 != 0)
2709 if (n_bytes % 4 >= 2)
2710 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2711 if (n_bytes % 2 != 0)
2712 output_asm_insn ("ldb 0(%1),%6", operands);
2713 if (n_bytes % 4 >= 2)
2714 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2715 if (n_bytes % 2 != 0)
2716 output_asm_insn ("stb %6,0(%0)", operands);
2718 return "";
2720 case 1:
2721 /* Pre-adjust the loop counter. */
2722 operands[4] = GEN_INT (n_bytes - 2);
2723 output_asm_insn ("ldi %4,%2", operands);
2725 /* Copying loop. */
2726 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2727 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2728 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2729 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2730 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2732 /* Handle the residual. */
2733 if (n_bytes % 2 != 0)
2735 output_asm_insn ("ldb 0(%1),%3", operands);
2736 output_asm_insn ("stb %3,0(%0)", operands);
2738 return "";
2740 default:
2741 gcc_unreachable ();
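
/* Editorial sketch (not from the GCC sources, never compiled): the
   shape of the align==4 copy loop emitted above, written as host C.
   The main loop is unrolled by two, matching the ldw/stw pairs in the
   assembly. */
#if 0
#include <stdio.h>
#include <string.h>

static void
word_copy (unsigned char *dst, const unsigned char *src, unsigned long n)
{
  unsigned long i = 0;

  for (; i + 8 <= n; i += 8)        /* Two words per iteration. */
    {
      memcpy (dst + i, src + i, 4);
      memcpy (dst + i + 4, src + i + 4, 4);
    }
  if (n - i >= 4)                   /* One residual word. */
    {
      memcpy (dst + i, src + i, 4);
      i += 4;
    }
  for (; i < n; i++)                /* Residual bytes ({stbys,e} in asm). */
    dst[i] = src[i];
}

int
main (void)
{
  unsigned char a[11] = "0123456789", b[11] = { 0 };

  word_copy (b, a, 11);
  printf ("%s\n", b);               /* Prints: 0123456789 */
  return 0;
}
#endif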
2745 /* Count the number of insns necessary to handle this block move.
2747 Basic structure is the same as output_block_move, except that we
2748 count insns rather than emit them. */
2750 static int
2751 compute_movmem_length (rtx insn)
2753 rtx pat = PATTERN (insn);
2754 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2755 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2756 unsigned int n_insns = 0;
2758 /* We can't move more than a word at a time because the PA
2759 has no integer move insns wider than a word. (Could use fp mem ops?) */
2760 if (align > (TARGET_64BIT ? 8 : 4))
2761 align = (TARGET_64BIT ? 8 : 4);
2763 /* The basic copying loop. */
2764 n_insns = 6;
2766 /* Residuals. */
2767 if (n_bytes % (2 * align) != 0)
2769 if ((n_bytes % (2 * align)) >= align)
2770 n_insns += 2;
2772 if ((n_bytes % align) != 0)
2773 n_insns += 2;
2776 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2777 return n_insns * 4;
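
/* Editorial sketch (not from the GCC sources, never compiled): the
   length computation above, worked for a 22-byte copy at 4-byte
   alignment. */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long n_bytes = 22;
  unsigned int align = 4;
  unsigned int n_insns = 6;                  /* The basic copying loop. */

  if (n_bytes % (2 * align) != 0)
    {
      if (n_bytes % (2 * align) >= align)    /* 22 % 8 == 6 >= 4 */
        n_insns += 2;
      if (n_bytes % align != 0)              /* 22 % 4 == 2 */
        n_insns += 2;
    }

  printf ("%u insns, %u bytes\n", n_insns, n_insns * 4);
  /* Prints: 10 insns, 40 bytes */
  return 0;
}
#endif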
2780 /* Emit code to perform a block clear.
2782 OPERANDS[0] is the destination pointer as a REG, clobbered.
2783 OPERANDS[1] is a register for temporary storage.
2784 OPERANDS[2] is the size as a CONST_INT
2785 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2787 const char *
2788 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2790 int align = INTVAL (operands[3]);
2791 unsigned long n_bytes = INTVAL (operands[2]);
2793 /* We can't clear more than a word at a time because the PA
2794 has no integer move insns wider than a word. */
2795 if (align > (TARGET_64BIT ? 8 : 4))
2796 align = (TARGET_64BIT ? 8 : 4);
2798 /* Note that we know each loop below will execute at least twice
2799 (else we would have open-coded the copy). */
2800 switch (align)
2802 case 8:
2803 /* Pre-adjust the loop counter. */
2804 operands[2] = GEN_INT (n_bytes - 16);
2805 output_asm_insn ("ldi %2,%1", operands);
2807 /* Loop. */
2808 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2809 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2810 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2812 /* Handle the residual. There could be up to 15 bytes of
2813 residual to clear! */
2814 if (n_bytes % 16 != 0)
2816 operands[2] = GEN_INT (n_bytes % 8);
2817 if (n_bytes % 16 >= 8)
2818 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2819 if (n_bytes % 8 != 0)
2820 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2822 return "";
2824 case 4:
2825 /* Pre-adjust the loop counter. */
2826 operands[2] = GEN_INT (n_bytes - 8);
2827 output_asm_insn ("ldi %2,%1", operands);
2829 /* Loop. */
2830 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2831 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2832 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2834 /* Handle the residual. There could be up to 7 bytes of
2835 residual to clear! */
2836 if (n_bytes % 8 != 0)
2838 operands[2] = GEN_INT (n_bytes % 4);
2839 if (n_bytes % 8 >= 4)
2840 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2841 if (n_bytes % 4 != 0)
2842 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2844 return "";
2846 case 2:
2847 /* Pre-adjust the loop counter. */
2848 operands[2] = GEN_INT (n_bytes - 4);
2849 output_asm_insn ("ldi %2,%1", operands);
2851 /* Loop. */
2852 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2853 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2854 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2856 /* Handle the residual. */
2857 if (n_bytes % 4 != 0)
2859 if (n_bytes % 4 >= 2)
2860 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2861 if (n_bytes % 2 != 0)
2862 output_asm_insn ("stb %%r0,0(%0)", operands);
2864 return "";
2866 case 1:
2867 /* Pre-adjust the loop counter. */
2868 operands[2] = GEN_INT (n_bytes - 2);
2869 output_asm_insn ("ldi %2,%1", operands);
2871 /* Loop. */
2872 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2873 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2874 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2876 /* Handle the residual. */
2877 if (n_bytes % 2 != 0)
2878 output_asm_insn ("stb %%r0,0(%0)", operands);
2880 return "";
2882 default:
2883 gcc_unreachable ();
2887 /* Count the number of insns necessary to handle this block clear.
2889 Basic structure is the same as output_block_clear, except that we
2890 count insns rather than emit them. */
2892 static int
2893 compute_clrmem_length (rtx insn)
2895 rtx pat = PATTERN (insn);
2896 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2897 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2898 unsigned int n_insns = 0;
2900 /* We can't clear more than a word at a time because the PA
2901 has no integer move insns wider than a word. */
2902 if (align > (TARGET_64BIT ? 8 : 4))
2903 align = (TARGET_64BIT ? 8 : 4);
2905 /* The basic loop. */
2906 n_insns = 4;
2908 /* Residuals. */
2909 if (n_bytes % (2 * align) != 0)
2911 if ((n_bytes % (2 * align)) >= align)
2912 n_insns++;
2914 if ((n_bytes % align) != 0)
2915 n_insns++;
2918 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2919 return n_insns * 4;
2923 const char *
2924 output_and (rtx *operands)
2926 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2928 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2929 int ls0, ls1, ms0, p, len;
2931 for (ls0 = 0; ls0 < 32; ls0++)
2932 if ((mask & (1 << ls0)) == 0)
2933 break;
2935 for (ls1 = ls0; ls1 < 32; ls1++)
2936 if ((mask & (1 << ls1)) != 0)
2937 break;
2939 for (ms0 = ls1; ms0 < 32; ms0++)
2940 if ((mask & (1 << ms0)) == 0)
2941 break;
2943 gcc_assert (ms0 == 32);
2945 if (ls1 == 32)
2947 len = ls0;
2949 gcc_assert (len);
2951 operands[2] = GEN_INT (len);
2952 return "{extru|extrw,u} %1,31,%2,%0";
2954 else
2956 /* We could use this `depi' for the case above as well, but `depi'
2957 requires one more register file access than an `extru'. */
2959 p = 31 - ls0;
2960 len = ls1 - ls0;
2962 operands[2] = GEN_INT (p);
2963 operands[3] = GEN_INT (len);
2964 return "{depi|depwi} 0,%2,%3,%0";
2967 else
2968 return "and %1,%2,%0";
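
/* Editorial sketch (not from the GCC sources, never compiled): the
   mask analysis above, run on two sample masks to show the
   extrw,u/depwi choice. */
#if 0
#include <stdio.h>

static void
analyze (unsigned long mask)
{
  int ls0, ls1;

  for (ls0 = 0; ls0 < 32; ls0++)      /* First zero bit. */
    if ((mask & (1UL << ls0)) == 0)
      break;
  for (ls1 = ls0; ls1 < 32; ls1++)    /* First one bit after the zeros. */
    if ((mask & (1UL << ls1)) != 0)
      break;

  if (ls1 == 32)                      /* Mask is 2**ls0 - 1: extract. */
    printf ("mask %#lx: extrw,u len=%d\n", mask, ls0);
  else                                /* One hole of zeros: deposit. */
    printf ("mask %#lx: depwi 0 pos=%d len=%d\n",
            mask, 31 - ls0, ls1 - ls0);
}

int
main (void)
{
  analyze (0xff);           /* extrw,u len=8 */
  analyze (0xffff000f);     /* depwi 0 pos=27 len=12 */
  return 0;
}
#endif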
2971 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2972 storing the result in operands[0]. */
2973 const char *
2974 output_64bit_and (rtx *operands)
2976 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2978 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2979 int ls0, ls1, ms0, p, len;
2981 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2982 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2983 break;
2985 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2986 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2987 break;
2989 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2990 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2991 break;
2993 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2995 if (ls1 == HOST_BITS_PER_WIDE_INT)
2997 len = ls0;
2999 gcc_assert (len);
3001 operands[2] = GEN_INT (len);
3002 return "extrd,u %1,63,%2,%0";
3004 else
3006 /* We could use this `depdi' for the case above as well, but `depdi'
3007 requires one more register file access than an `extrd'. */
3009 p = 63 - ls0;
3010 len = ls1 - ls0;
3012 operands[2] = GEN_INT (p);
3013 operands[3] = GEN_INT (len);
3014 return "depdi 0,%2,%3,%0";
3017 else
3018 return "and %1,%2,%0";
3021 const char *
3022 output_ior (rtx *operands)
3024 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3025 int bs0, bs1, p, len;
3027 if (INTVAL (operands[2]) == 0)
3028 return "copy %1,%0";
3030 for (bs0 = 0; bs0 < 32; bs0++)
3031 if ((mask & (1 << bs0)) != 0)
3032 break;
3034 for (bs1 = bs0; bs1 < 32; bs1++)
3035 if ((mask & (1 << bs1)) == 0)
3036 break;
3038 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3040 p = 31 - bs0;
3041 len = bs1 - bs0;
3043 operands[2] = GEN_INT (p);
3044 operands[3] = GEN_INT (len);
3045 return "{depi|depwi} -1,%2,%3,%0";
3048 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3049 storing the result in operands[0]. */
3050 const char *
3051 output_64bit_ior (rtx *operands)
3053 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3054 int bs0, bs1, p, len;
3056 if (INTVAL (operands[2]) == 0)
3057 return "copy %1,%0";
3059 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3060 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3061 break;
3063 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3064 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3065 break;
3067 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3068 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3070 p = 63 - bs0;
3071 len = bs1 - bs0;
3073 operands[2] = GEN_INT (p);
3074 operands[3] = GEN_INT (len);
3075 return "depdi -1,%2,%3,%0";
3078 /* Target hook for assembling integer objects. This code handles
3079 aligned SI and DI integers specially since function references
3080 must be preceded by P%. */
3082 static bool
3083 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3085 if (size == UNITS_PER_WORD
3086 && aligned_p
3087 && function_label_operand (x, VOIDmode))
3089 fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3090 output_addr_const (asm_out_file, x);
3091 fputc ('\n', asm_out_file);
3092 return true;
3094 return default_assemble_integer (x, size, aligned_p);
3097 /* Output an ascii string. */
3098 void
3099 output_ascii (FILE *file, const char *p, int size)
3101 int i;
3102 int chars_output;
3103 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3105 /* The HP assembler can only take strings of 256 characters at one
3106 time. This is a limitation on input line length, *not* the
3107 length of the string. Sigh. Even worse, it seems that the
3108 restriction is in number of input characters (see \xnn &
3109 \whatever). So we have to do this very carefully. */
3111 fputs ("\t.STRING \"", file);
3113 chars_output = 0;
3114 for (i = 0; i < size; i += 4)
3116 int co = 0;
3117 int io = 0;
3118 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3120 register unsigned int c = (unsigned char) p[i + io];
3122 if (c == '\"' || c == '\\')
3123 partial_output[co++] = '\\';
3124 if (c >= ' ' && c < 0177)
3125 partial_output[co++] = c;
3126 else
3128 unsigned int hexd;
3129 partial_output[co++] = '\\';
3130 partial_output[co++] = 'x';
3131 hexd = c / 16 - 0 + '0';
3132 if (hexd > '9')
3133 hexd -= '9' - 'a' + 1;
3134 partial_output[co++] = hexd;
3135 hexd = c % 16 - 0 + '0';
3136 if (hexd > '9')
3137 hexd -= '9' - 'a' + 1;
3138 partial_output[co++] = hexd;
3141 if (chars_output + co > 243)
3143 fputs ("\"\n\t.STRING \"", file);
3144 chars_output = 0;
3146 fwrite (partial_output, 1, (size_t) co, file);
3147 chars_output += co;
3148 co = 0;
3150 fputs ("\"\n", file);
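
/* Editorial sketch (not from the GCC sources, never compiled): the
   \xNN escaping used above, applied to one non-printable byte. */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned int c = 0x9b;        /* A byte outside the printable range. */
  unsigned int hexd;

  putchar ('\\');
  putchar ('x');
  hexd = c / 16 + '0';          /* High nibble to a hex digit... */
  if (hexd > '9')
    hexd -= '9' - 'a' + 1;      /* ...sliding a-f past 9, as above. */
  putchar (hexd);
  hexd = c % 16 + '0';          /* Low nibble likewise. */
  if (hexd > '9')
    hexd -= '9' - 'a' + 1;
  putchar (hexd);
  putchar ('\n');               /* Prints: \x9b */
  return 0;
}
#endif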
3153 /* Try to rewrite floating point comparisons & branches to avoid
3154 useless add,tr insns.
3156 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3157 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3158 first attempt to remove useless add,tr insns. It is zero
3159 for the second pass as reorg sometimes leaves bogus REG_DEAD
3160 notes lying around.
3162 When CHECK_NOTES is zero we can only eliminate add,tr insns
3163 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3164 instructions. */
3165 static void
3166 remove_useless_addtr_insns (int check_notes)
3168 rtx insn;
3169 static int pass = 0;
3171 /* This is fairly cheap, so always run it when optimizing. */
3172 if (optimize > 0)
3174 int fcmp_count = 0;
3175 int fbranch_count = 0;
3177 /* Walk all the insns in this function looking for fcmp & fbranch
3178 instructions. Keep track of how many of each we find. */
3179 for (insn = get_insns (); insn; insn = next_insn (insn))
3181 rtx tmp;
3183 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3184 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3185 continue;
3187 tmp = PATTERN (insn);
3189 /* It must be a set. */
3190 if (GET_CODE (tmp) != SET)
3191 continue;
3193 /* If the destination is CCFP, then we've found an fcmp insn. */
3194 tmp = SET_DEST (tmp);
3195 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3197 fcmp_count++;
3198 continue;
3201 tmp = PATTERN (insn);
3202 /* If this is an fbranch instruction, bump the fbranch counter. */
3203 if (GET_CODE (tmp) == SET
3204 && SET_DEST (tmp) == pc_rtx
3205 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3206 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3207 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3208 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3210 fbranch_count++;
3211 continue;
3216 /* Find all floating point compare + branch insns. If possible,
3217 reverse the comparison & the branch to avoid add,tr insns. */
3218 for (insn = get_insns (); insn; insn = next_insn (insn))
3220 rtx tmp, next;
3222 /* Ignore anything that isn't an INSN. */
3223 if (GET_CODE (insn) != INSN)
3224 continue;
3226 tmp = PATTERN (insn);
3228 /* It must be a set. */
3229 if (GET_CODE (tmp) != SET)
3230 continue;
3232 /* The destination must be CCFP, which is register zero. */
3233 tmp = SET_DEST (tmp);
3234 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3235 continue;
3237 /* INSN should be a set of CCFP.
3239 See if the result of this insn is used in a reversed FP
3240 conditional branch. If so, reverse our condition and
3241 the branch. Doing so avoids useless add,tr insns. */
3242 next = next_insn (insn);
3243 while (next)
3245 /* Jumps, calls and labels stop our search. */
3246 if (GET_CODE (next) == JUMP_INSN
3247 || GET_CODE (next) == CALL_INSN
3248 || GET_CODE (next) == CODE_LABEL)
3249 break;
3251 /* As does another fcmp insn. */
3252 if (GET_CODE (next) == INSN
3253 && GET_CODE (PATTERN (next)) == SET
3254 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3255 && REGNO (SET_DEST (PATTERN (next))) == 0)
3256 break;
3258 next = next_insn (next);
3261 /* Is NEXT_INSN a branch? */
3262 if (next
3263 && GET_CODE (next) == JUMP_INSN)
3265 rtx pattern = PATTERN (next);
3267 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3268 and CCFP dies, then reverse our conditional and the branch
3269 to avoid the add,tr. */
3270 if (GET_CODE (pattern) == SET
3271 && SET_DEST (pattern) == pc_rtx
3272 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3273 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3274 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3275 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3276 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3277 && (fcmp_count == fbranch_count
3278 || (check_notes
3279 && find_regno_note (next, REG_DEAD, 0))))
3281 /* Reverse the branch. */
3282 tmp = XEXP (SET_SRC (pattern), 1);
3283 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3284 XEXP (SET_SRC (pattern), 2) = tmp;
3285 INSN_CODE (next) = -1;
3287 /* Reverse our condition. */
3288 tmp = PATTERN (insn);
3289 PUT_CODE (XEXP (tmp, 1),
3290 (reverse_condition_maybe_unordered
3291 (GET_CODE (XEXP (tmp, 1)))));
3297 pass = !pass;
3301 /* You may have trouble believing this, but this is the 32 bit HP-PA
3302 stack layout. Wow.
3304 Offset Contents
3306 Variable arguments (optional; any number may be allocated)
3308 SP-(4*(N+9)) arg word N
3310 SP-56 arg word 5
3311 SP-52 arg word 4
3313 Fixed arguments (must be allocated; may remain unused)
3315 SP-48 arg word 3
3316 SP-44 arg word 2
3317 SP-40 arg word 1
3318 SP-36 arg word 0
3320 Frame Marker
3322 SP-32 External Data Pointer (DP)
3323 SP-28 External sr4
3324 SP-24 External/stub RP (RP')
3325 SP-20 Current RP
3326 SP-16 Static Link
3327 SP-12 Clean up
3328 SP-8 Calling Stub RP (RP'')
3329 SP-4 Previous SP
3331 Top of Frame
3333 SP-0 Stack Pointer (points to next available address)
3337 /* This function saves registers as follows. Registers marked with ' are
3338 this function's registers (as opposed to the previous function's).
3339 If a frame pointer isn't needed, r3 is saved as a general register;
3340 the space for the frame pointer is still allocated, though, to keep
3341 things simple.
3344 Top of Frame
3346 SP (FP') Previous FP
3347 SP + 4 Alignment filler (sigh)
3348 SP + 8 Space for locals reserved here.
3352 SP + n All call-saved registers used.
3356 SP + o All call-saved fp registers used.
3360 SP + p (SP') points to next available address.
3364 /* Global variables set by output_function_prologue(). */
3365 /* Size of frame. Need to know this to emit return insns from
3366 leaf procedures. */
3367 static HOST_WIDE_INT actual_fsize, local_fsize;
3368 static int save_fregs;
3370 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3371 Handle case where DISP > 8k by using the add_high_const patterns.
3373 Note in DISP > 8k case, we will leave the high part of the address
3374 in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
3376 static void
3377 store_reg (int reg, HOST_WIDE_INT disp, int base)
3379 rtx insn, dest, src, basereg;
3381 src = gen_rtx_REG (word_mode, reg);
3382 basereg = gen_rtx_REG (Pmode, base);
3383 if (VAL_14_BITS_P (disp))
3385 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3386 insn = emit_move_insn (dest, src);
3388 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3390 rtx delta = GEN_INT (disp);
3391 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3393 emit_move_insn (tmpreg, delta);
3394 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3395 if (DO_FRAME_NOTES)
3397 REG_NOTES (insn)
3398 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3399 gen_rtx_SET (VOIDmode, tmpreg,
3400 gen_rtx_PLUS (Pmode, basereg, delta)),
3401 REG_NOTES (insn));
3402 RTX_FRAME_RELATED_P (insn) = 1;
3404 dest = gen_rtx_MEM (word_mode, tmpreg);
3405 insn = emit_move_insn (dest, src);
3407 else
3409 rtx delta = GEN_INT (disp);
3410 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3411 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3413 emit_move_insn (tmpreg, high);
3414 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3415 insn = emit_move_insn (dest, src);
3416 if (DO_FRAME_NOTES)
3418 REG_NOTES (insn)
3419 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3420 gen_rtx_SET (VOIDmode,
3421 gen_rtx_MEM (word_mode,
3422 gen_rtx_PLUS (word_mode, basereg,
3423 delta)),
3424 src),
3425 REG_NOTES (insn));
3429 if (DO_FRAME_NOTES)
3430 RTX_FRAME_RELATED_P (insn) = 1;
3433 /* Emit RTL to store REG at the memory location specified by BASE and then
3434 add MOD to BASE. MOD must be <= 8k. */
3436 static void
3437 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3439 rtx insn, basereg, srcreg, delta;
3441 gcc_assert (VAL_14_BITS_P (mod));
3443 basereg = gen_rtx_REG (Pmode, base);
3444 srcreg = gen_rtx_REG (word_mode, reg);
3445 delta = GEN_INT (mod);
3447 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3448 if (DO_FRAME_NOTES)
3450 RTX_FRAME_RELATED_P (insn) = 1;
3452 /* RTX_FRAME_RELATED_P must be set on each frame related set
3453 in a parallel with more than one element. */
3454 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3455 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3459 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3460 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3461 whether to add a frame note or not.
3463 In the DISP > 8k case, we leave the high part of the address in %r1.
3464 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3466 static void
3467 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3469 rtx insn;
3471 if (VAL_14_BITS_P (disp))
3473 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3474 plus_constant (gen_rtx_REG (Pmode, base), disp));
3476 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3478 rtx basereg = gen_rtx_REG (Pmode, base);
3479 rtx delta = GEN_INT (disp);
3480 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3482 emit_move_insn (tmpreg, delta);
3483 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3484 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3485 if (DO_FRAME_NOTES)
3486 REG_NOTES (insn)
3487 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3488 gen_rtx_SET (VOIDmode, tmpreg,
3489 gen_rtx_PLUS (Pmode, basereg, delta)),
3490 REG_NOTES (insn));
3492 else
3494 rtx basereg = gen_rtx_REG (Pmode, base);
3495 rtx delta = GEN_INT (disp);
3496 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3498 emit_move_insn (tmpreg,
3499 gen_rtx_PLUS (Pmode, basereg,
3500 gen_rtx_HIGH (Pmode, delta)));
3501 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3502 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3505 if (DO_FRAME_NOTES && note)
3506 RTX_FRAME_RELATED_P (insn) = 1;
3509 HOST_WIDE_INT
3510 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3512 int freg_saved = 0;
3513 int i, j;
3515 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3516 be consistent with the rounding and size calculation done here.
3517 Change them at the same time. */
3519 /* We do our own stack alignment. First, round the size of the
3520 stack locals up to a word boundary. */
3521 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3523 /* Space for previous frame pointer + filler. If any frame is
3524 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3525 waste some space here for the sake of HP compatibility. The
3526 first slot is only used when the frame pointer is needed. */
3527 if (size || frame_pointer_needed)
3528 size += STARTING_FRAME_OFFSET;
3530 /* If the current function calls __builtin_eh_return, then we need
3531 to allocate stack space for registers that will hold data for
3532 the exception handler. */
3533 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3535 unsigned int i;
3537 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3538 continue;
3539 size += i * UNITS_PER_WORD;
3542 /* Account for space used by the callee general register saves. */
3543 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3544 if (df_regs_ever_live_p (i))
3545 size += UNITS_PER_WORD;
3547 /* Account for space used by the callee floating point register saves. */
3548 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3549 if (df_regs_ever_live_p (i)
3550 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3552 freg_saved = 1;
3554 /* We always save both halves of the FP register, so always
3555 increment the frame size by 8 bytes. */
3556 size += 8;
3559 /* If any of the floating registers are saved, account for the
3560 alignment needed for the floating point register save block. */
3561 if (freg_saved)
3563 size = (size + 7) & ~7;
3564 if (fregs_live)
3565 *fregs_live = 1;
3568 /* The various ABIs include space for the outgoing parameters in the
3569 size of the current function's stack frame. We don't need to align
3570 for the outgoing arguments as their alignment is set by the final
3571 rounding for the frame as a whole. */
3572 size += crtl->outgoing_args_size;
3574 /* Allocate space for the fixed frame marker. This space must be
3575 allocated for any function that makes calls or allocates
3576 stack space. */
3577 if (!current_function_is_leaf || size)
3578 size += TARGET_64BIT ? 48 : 32;
3580 /* Finally, round to the preferred stack boundary. */
3581 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3582 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
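
/* Editorial sketch (not from the GCC sources, never compiled): the
   rounding above worked for 100 bytes of locals on the 32-bit ABI,
   assuming word size 4, STARTING_FRAME_OFFSET 8, a 32-byte frame
   marker, and a 64-byte preferred stack boundary. */
#if 0
#include <stdio.h>

int
main (void)
{
  long size = 100;

  size = (size + 4 - 1) & ~(4 - 1);      /* Word-align locals: 100. */
  size += 8;                             /* Frame pointer + filler. */
  size += 32;                            /* Fixed frame marker. */
  size = (size + 64 - 1) & ~(64 - 1);    /* Final rounding: 192. */

  printf ("frame = %ld bytes\n", size);  /* Prints: frame = 192 bytes */
  return 0;
}
#endif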
3585 /* Generate the assembly code for function entry. FILE is a stdio
3586 stream to output the code to. SIZE is an int: how many units of
3587 temporary storage to allocate.
3589 Refer to the array `regs_ever_live' to determine which registers to
3590 save; `regs_ever_live[I]' is nonzero if register number I is ever
3591 used in the function. This function is responsible for knowing
3592 which registers should not be saved even if used. */
3594 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3595 of memory. If any fpu reg is used in the function, we allocate
3596 such a block here, at the bottom of the frame, just in case it's needed.
3598 If this function is a leaf procedure, then we may choose not
3599 to do a "save" insn. The decision about whether or not
3600 to do this is made in regclass.c. */
3602 static void
3603 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3605 /* The function's label and associated .PROC must never be
3606 separated and must be output *after* any profiling declarations
3607 to avoid changing spaces/subspaces within a procedure. */
3608 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3609 fputs ("\t.PROC\n", file);
3611 /* hppa_expand_prologue does the dirty work now. We just need
3612 to output the assembler directives which denote the start
3613 of a function. */
3614 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3615 if (current_function_is_leaf)
3616 fputs (",NO_CALLS", file);
3617 else
3618 fputs (",CALLS", file);
3619 if (rp_saved)
3620 fputs (",SAVE_RP", file);
3622 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3623 at the beginning of the frame and that it is used as the frame
3624 pointer for the frame. We do this because our current frame
3625 layout doesn't conform to that specified in the HP runtime
3626 documentation and we need a way to indicate to programs such as
3627 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3628 isn't used by HP compilers but is supported by the assembler.
3629 However, SAVE_SP is supposed to indicate that the previous stack
3630 pointer has been saved in the frame marker. */
3631 if (frame_pointer_needed)
3632 fputs (",SAVE_SP", file);
3634 /* Pass on information about the number of callee register saves
3635 performed in the prologue.
3637 The compiler is supposed to pass the highest register number
3638 saved, the assembler then has to adjust that number before
3639 entering it into the unwind descriptor (to account for any
3640 caller saved registers with lower register numbers than the
3641 first callee saved register). */
3642 if (gr_saved)
3643 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3645 if (fr_saved)
3646 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3648 fputs ("\n\t.ENTRY\n", file);
3650 remove_useless_addtr_insns (0);
3653 void
3654 hppa_expand_prologue (void)
3656 int merge_sp_adjust_with_store = 0;
3657 HOST_WIDE_INT size = get_frame_size ();
3658 HOST_WIDE_INT offset;
3659 int i;
3660 rtx insn, tmpreg;
3662 gr_saved = 0;
3663 fr_saved = 0;
3664 save_fregs = 0;
3666 /* Compute total size for frame pointer, filler, locals and rounding to
3667 the next word boundary. Similar code appears in compute_frame_size
3668 and must be changed in tandem with this code. */
3669 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3670 if (local_fsize || frame_pointer_needed)
3671 local_fsize += STARTING_FRAME_OFFSET;
3673 actual_fsize = compute_frame_size (size, &save_fregs);
3675 /* Compute a few things we will use often. */
3676 tmpreg = gen_rtx_REG (word_mode, 1);
3678 /* Save RP first. The calling conventions manual states RP will
3679 always be stored into the caller's frame at sp - 20 or sp - 16
3680 depending on which ABI is in use. */
3681 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3683 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3684 rp_saved = true;
3686 else
3687 rp_saved = false;
3689 /* Allocate the local frame and set up the frame pointer if needed. */
3690 if (actual_fsize != 0)
3692 if (frame_pointer_needed)
3694 /* Copy the old frame pointer temporarily into %r1. Set up the
3695 new stack pointer, then store away the saved old frame pointer
3696 into the stack at sp and at the same time update the stack
3697 pointer by actual_fsize bytes. Two versions, first
3698 handles small (<8k) frames. The second handles large (>=8k)
3699 frames. */
3700 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3701 if (DO_FRAME_NOTES)
3702 RTX_FRAME_RELATED_P (insn) = 1;
3704 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3705 if (DO_FRAME_NOTES)
3706 RTX_FRAME_RELATED_P (insn) = 1;
3708 if (VAL_14_BITS_P (actual_fsize))
3709 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3710 else
3712 /* It is incorrect to store the saved frame pointer at *sp,
3713 then increment sp (writes beyond the current stack boundary).
3715 So instead use stwm to store at *sp and post-increment the
3716 stack pointer as an atomic operation. Then increment sp to
3717 finish allocating the new frame. */
3718 HOST_WIDE_INT adjust1 = 8192 - 64;
3719 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3721 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3722 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3723 adjust2, 1);
3726 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3727 we need to store the previous stack pointer (frame pointer)
3728 into the frame marker on targets that use the HP unwind
3729 library. This allows the HP unwind library to be used to
3730 unwind GCC frames. However, we are not fully compatible
3731 with the HP library because our frame layout differs from
3732 that specified in the HP runtime specification.
3734 We don't want a frame note on this instruction as the frame
3735 marker moves during dynamic stack allocation.
3737 This instruction also serves as a blockage to prevent
3738 register spills from being scheduled before the stack
3739 pointer is raised. This is necessary as we store
3740 registers using the frame pointer as a base register,
3741 and the frame pointer is set before sp is raised. */
3742 if (TARGET_HPUX_UNWIND_LIBRARY)
3744 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3745 GEN_INT (TARGET_64BIT ? -8 : -4));
3747 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3748 frame_pointer_rtx);
3750 else
3751 emit_insn (gen_blockage ());
3753 /* No frame pointer needed. */
3754 else
3756 /* In some cases we can perform the first callee register save
3757 and allocate the stack frame at the same time. If so, just
3758 make a note of it and defer allocating the frame until saving
3759 the callee registers. */
3760 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3761 merge_sp_adjust_with_store = 1;
3762 /* Cannot optimize. Adjust the stack frame by actual_fsize
3763 bytes. */
3764 else
3765 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3766 actual_fsize, 1);
3770 /* Normal register save.
3772 Do not save the frame pointer in the frame_pointer_needed case. It
3773 was done earlier. */
3774 if (frame_pointer_needed)
3776 offset = local_fsize;
3778 /* Saving the EH return data registers in the frame is the simplest
3779 way to get the frame unwind information emitted. We put them
3780 just before the general registers. */
3781 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3783 unsigned int i, regno;
3785 for (i = 0; ; ++i)
3787 regno = EH_RETURN_DATA_REGNO (i);
3788 if (regno == INVALID_REGNUM)
3789 break;
3791 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3792 offset += UNITS_PER_WORD;
3796 for (i = 18; i >= 4; i--)
3797 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3799 store_reg (i, offset, FRAME_POINTER_REGNUM);
3800 offset += UNITS_PER_WORD;
3801 gr_saved++;
3803 /* Account for %r3 which is saved in a special place. */
3804 gr_saved++;
3806 /* No frame pointer needed. */
3807 else
3809 offset = local_fsize - actual_fsize;
3811 /* Saving the EH return data registers in the frame is the simplest
3812 way to get the frame unwind information emitted. */
3813 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3815 unsigned int i, regno;
3817 for (i = 0; ; ++i)
3819 regno = EH_RETURN_DATA_REGNO (i);
3820 if (regno == INVALID_REGNUM)
3821 break;
3823 /* If merge_sp_adjust_with_store is nonzero, then we can
3824 optimize the first save. */
3825 if (merge_sp_adjust_with_store)
3827 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3828 merge_sp_adjust_with_store = 0;
3830 else
3831 store_reg (regno, offset, STACK_POINTER_REGNUM);
3832 offset += UNITS_PER_WORD;
3836 for (i = 18; i >= 3; i--)
3837 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3839 /* If merge_sp_adjust_with_store is nonzero, then we can
3840 optimize the first GR save. */
3841 if (merge_sp_adjust_with_store)
3843 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3844 merge_sp_adjust_with_store = 0;
3846 else
3847 store_reg (i, offset, STACK_POINTER_REGNUM);
3848 offset += UNITS_PER_WORD;
3849 gr_saved++;
3852 /* If we wanted to merge the SP adjustment with a GR save, but we never
3853 did any GR saves, then just emit the adjustment here. */
3854 if (merge_sp_adjust_with_store)
3855 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3856 actual_fsize, 1);
3859 /* The hppa calling conventions say that %r19, the pic offset
3860 register, is saved at sp - 32 (in this function's frame)
3861 when generating PIC code. FIXME: What is the correct thing
3862 to do for functions which make no calls and allocate no
3863 frame? Do we need to allocate a frame, or can we just omit
3864 the save? For now we'll just omit the save.
3866 We don't want a note on this insn as the frame marker can
3867 move if there is a dynamic stack allocation. */
3868 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3870 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3872 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3876 /* Align pointer properly (doubleword boundary). */
3877 offset = (offset + 7) & ~7;
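/* For example, an offset of 20 becomes (20 + 7) & ~7 = 24, the next
   doubleword boundary.  */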
3879 /* Floating point register store. */
3880 if (save_fregs)
3882 rtx base;
3884 /* First get the frame or stack pointer to the start of the FP register
3885 save area. */
3886 if (frame_pointer_needed)
3888 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3889 base = frame_pointer_rtx;
3891 else
3893 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3894 base = stack_pointer_rtx;
3897 /* Now actually save the FP registers. */
3898 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3900 if (df_regs_ever_live_p (i)
3901 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3903 rtx addr, insn, reg;
3904 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3905 reg = gen_rtx_REG (DFmode, i);
3906 insn = emit_move_insn (addr, reg);
3907 if (DO_FRAME_NOTES)
3909 RTX_FRAME_RELATED_P (insn) = 1;
3910 if (TARGET_64BIT)
3912 rtx mem = gen_rtx_MEM (DFmode,
3913 plus_constant (base, offset));
3914 REG_NOTES (insn)
3915 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3916 gen_rtx_SET (VOIDmode, mem, reg),
3917 REG_NOTES (insn));
3919 else
3921 rtx meml = gen_rtx_MEM (SFmode,
3922 plus_constant (base, offset));
3923 rtx memr = gen_rtx_MEM (SFmode,
3924 plus_constant (base, offset + 4));
3925 rtx regl = gen_rtx_REG (SFmode, i);
3926 rtx regr = gen_rtx_REG (SFmode, i + 1);
3927 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3928 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3929 rtvec vec;
3931 RTX_FRAME_RELATED_P (setl) = 1;
3932 RTX_FRAME_RELATED_P (setr) = 1;
3933 vec = gen_rtvec (2, setl, setr);
3934 REG_NOTES (insn)
3935 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3936 gen_rtx_SEQUENCE (VOIDmode, vec),
3937 REG_NOTES (insn));
3940 offset += GET_MODE_SIZE (DFmode);
3941 fr_saved++;
3947 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3948 Handle the case where DISP > 8k by using the add_high_const patterns. */
3950 static void
3951 load_reg (int reg, HOST_WIDE_INT disp, int base)
3953 rtx dest = gen_rtx_REG (word_mode, reg);
3954 rtx basereg = gen_rtx_REG (Pmode, base);
3955 rtx src;
3957 if (VAL_14_BITS_P (disp))
3958 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3959 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3961 rtx delta = GEN_INT (disp);
3962 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3964 emit_move_insn (tmpreg, delta);
3965 if (TARGET_DISABLE_INDEXING)
3967 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3968 src = gen_rtx_MEM (word_mode, tmpreg);
3970 else
3971 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3973 else
3975 rtx delta = GEN_INT (disp);
3976 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3977 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3979 emit_move_insn (tmpreg, high);
3980 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3983 emit_move_insn (dest, src);
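/* A rough sketch of the cases above for the 32-bit runtime, loading
   %r4 from %r30 + DISP (the mnemonics are illustrative only):

   DISP = 100 (fits in 14 bits):  ldw 100(%r30),%r4
   DISP = 0x3000:                 addil L'0x3000,%r30
                                  ldw R'0x3000(%r1),%r4

   The 64-bit-only branch instead loads the full displacement into
   %r1 and addresses the memory through it.  */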
3986 /* Update the total code bytes output to the text section. */
3988 static void
3989 update_total_code_bytes (int nbytes)
3991 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3992 && !IN_NAMED_SECTION_P (cfun->decl))
3994 if (INSN_ADDRESSES_SET_P ())
3996 unsigned long old_total = total_code_bytes;
3998 total_code_bytes += nbytes;
4000 /* Be prepared to handle overflows. */
4001 if (old_total > total_code_bytes)
4002 total_code_bytes = -1;
4004 else
4005 total_code_bytes = -1;
4009 /* This function generates the assembly code for function exit.
4010 Args are as for output_function_prologue ().
4012 The function epilogue should not depend on the current stack
4013 pointer! It should use the frame pointer only. This is mandatory
4014 because of alloca; we also take advantage of it to omit stack
4015 adjustments before returning. */
4017 static void
4018 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4020 rtx insn = get_last_insn ();
4022 last_address = 0;
4024 /* hppa_expand_epilogue does the dirty work now. We just need
4025 to output the assembler directives which denote the end
4026 of a function.
4028 To make debuggers happy, emit a nop if the epilogue was completely
4029 eliminated due to a volatile call as the last insn in the
4030 current function. That way the return address (in %r2) will
4031 always point to a valid instruction in the current function. */
4033 /* Get the last real insn. */
4034 if (GET_CODE (insn) == NOTE)
4035 insn = prev_real_insn (insn);
4037 /* If it is a sequence, then look inside. */
4038 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4039 insn = XVECEXP (PATTERN (insn), 0, 0);
4041 /* If insn is a CALL_INSN, then it must be a call to a volatile
4042 function (otherwise there would be epilogue insns). */
4043 if (insn && GET_CODE (insn) == CALL_INSN)
4045 fputs ("\tnop\n", file);
4046 last_address += 4;
4049 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4051 if (TARGET_SOM && TARGET_GAS)
4053 /* We're done with this subspace except possibly for some additional
4054 debug information. Forget that we are in this subspace to ensure
4055 that the next function is output in its own subspace. */
4056 in_section = NULL;
4057 cfun->machine->in_nsubspa = 2;
4060 if (INSN_ADDRESSES_SET_P ())
4062 insn = get_last_nonnote_insn ();
4063 last_address += INSN_ADDRESSES (INSN_UID (insn));
4064 if (INSN_P (insn))
4065 last_address += insn_default_length (insn);
4066 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4067 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
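/* E.g., with a 4-byte FUNCTION_BOUNDARY, a last_address of 0x1231
   rounds up to (0x1231 + 3) & ~3 = 0x1234.  */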
4070 /* Finally, update the total number of code bytes output so far. */
4071 update_total_code_bytes (last_address);
4074 void
4075 hppa_expand_epilogue (void)
4077 rtx tmpreg;
4078 HOST_WIDE_INT offset;
4079 HOST_WIDE_INT ret_off = 0;
4080 int i;
4081 int merge_sp_adjust_with_load = 0;
4083 /* We will use this often. */
4084 tmpreg = gen_rtx_REG (word_mode, 1);
4086 /* Try to restore RP early to avoid load/use interlocks when
4087 RP gets used in the return (bv) instruction. This appears to still
4088 be necessary even when we schedule the prologue and epilogue. */
4089 if (rp_saved)
4091 ret_off = TARGET_64BIT ? -16 : -20;
4092 if (frame_pointer_needed)
4094 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4095 ret_off = 0;
4097 else
4099 /* No frame pointer, and stack is smaller than 8k. */
4100 if (VAL_14_BITS_P (ret_off - actual_fsize))
4102 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4103 ret_off = 0;
4108 /* General register restores. */
4109 if (frame_pointer_needed)
4111 offset = local_fsize;
4113 /* If the current function calls __builtin_eh_return, then we need
4114 to restore the saved EH data registers. */
4115 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4117 unsigned int i, regno;
4119 for (i = 0; ; ++i)
4121 regno = EH_RETURN_DATA_REGNO (i);
4122 if (regno == INVALID_REGNUM)
4123 break;
4125 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4126 offset += UNITS_PER_WORD;
4130 for (i = 18; i >= 4; i--)
4131 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4133 load_reg (i, offset, FRAME_POINTER_REGNUM);
4134 offset += UNITS_PER_WORD;
4137 else
4139 offset = local_fsize - actual_fsize;
4141 /* If the current function calls __builtin_eh_return, then we need
4142 to restore the saved EH data registers. */
4143 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4145 unsigned int i, regno;
4147 for (i = 0; ; ++i)
4149 regno = EH_RETURN_DATA_REGNO (i);
4150 if (regno == INVALID_REGNUM)
4151 break;
4153 /* Only for the first load.
4154 merge_sp_adjust_with_load holds the register load
4155 with which we will merge the sp adjustment. */
4156 if (merge_sp_adjust_with_load == 0
4157 && local_fsize == 0
4158 && VAL_14_BITS_P (-actual_fsize))
4159 merge_sp_adjust_with_load = regno;
4160 else
4161 load_reg (regno, offset, STACK_POINTER_REGNUM);
4162 offset += UNITS_PER_WORD;
4166 for (i = 18; i >= 3; i--)
4168 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4170 /* Only for the first load.
4171 merge_sp_adjust_with_load holds the register load
4172 with which we will merge the sp adjustment. */
4173 if (merge_sp_adjust_with_load == 0
4174 && local_fsize == 0
4175 && VAL_14_BITS_P (-actual_fsize))
4176 merge_sp_adjust_with_load = i;
4177 else
4178 load_reg (i, offset, STACK_POINTER_REGNUM);
4179 offset += UNITS_PER_WORD;
4184 /* Align pointer properly (doubleword boundary). */
4185 offset = (offset + 7) & ~7;
4187 /* FP register restores. */
4188 if (save_fregs)
4190 /* Adjust the register to index off of. */
4191 if (frame_pointer_needed)
4192 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4193 else
4194 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4196 /* Actually do the restores now. */
4197 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4198 if (df_regs_ever_live_p (i)
4199 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4201 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4202 rtx dest = gen_rtx_REG (DFmode, i);
4203 emit_move_insn (dest, src);
4207 /* Emit a blockage insn here to keep these insns from being moved to
4208 an earlier spot in the epilogue, or into the main instruction stream.
4210 This is necessary as we must not cut the stack back before all the
4211 restores are finished. */
4212 emit_insn (gen_blockage ());
4214 /* Reset stack pointer (and possibly frame pointer). The stack
4215 pointer is initially set to fp + 64 to avoid a race condition. */
4216 if (frame_pointer_needed)
4218 rtx delta = GEN_INT (-64);
4220 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4221 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4223 /* If we were deferring a callee register restore, do it now. */
4224 else if (merge_sp_adjust_with_load)
4226 rtx delta = GEN_INT (-actual_fsize);
4227 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4229 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4231 else if (actual_fsize != 0)
4232 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4233 - actual_fsize, 0);
4235 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4236 frame greater than 8k), do so now. */
4237 if (ret_off != 0)
4238 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4240 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4242 rtx sa = EH_RETURN_STACKADJ_RTX;
4244 emit_insn (gen_blockage ());
4245 emit_insn (TARGET_64BIT
4246 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4247 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4251 rtx
4252 hppa_pic_save_rtx (void)
4254 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4257 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4258 #define NO_DEFERRED_PROFILE_COUNTERS 0
4259 #endif
4262 /* Vector of funcdef numbers. */
4263 static VEC(int,heap) *funcdef_nos;
4265 /* Output deferred profile counters. */
4266 static void
4267 output_deferred_profile_counters (void)
4269 unsigned int i;
4270 int align, n;
4272 if (VEC_empty (int, funcdef_nos))
4273 return;
4275 switch_to_section (data_section);
4276 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4277 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4279 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4281 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4282 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4285 VEC_free (int, heap, funcdef_nos);
4288 void
4289 hppa_profile_hook (int label_no)
4291 /* We use SImode for the address of the function in both 32 and
4292 64-bit code to avoid having to provide DImode versions of the
4293 lcla2 and load_offset_label_address insn patterns. */
4294 rtx reg = gen_reg_rtx (SImode);
4295 rtx label_rtx = gen_label_rtx ();
4296 rtx begin_label_rtx, call_insn;
4297 char begin_label_name[16];
4299 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4300 label_no);
4301 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4303 if (TARGET_64BIT)
4304 emit_move_insn (arg_pointer_rtx,
4305 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4306 GEN_INT (64)));
4308 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4310 /* The address of the function is loaded into %r25 with an instruction-
4311 relative sequence that avoids the use of relocations. The sequence
4312 is split so that the load_offset_label_address instruction can
4313 occupy the delay slot of the call to _mcount. */
4314 if (TARGET_PA_20)
4315 emit_insn (gen_lcla2 (reg, label_rtx));
4316 else
4317 emit_insn (gen_lcla1 (reg, label_rtx));
4319 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4320 reg, begin_label_rtx, label_rtx));
4322 #if !NO_DEFERRED_PROFILE_COUNTERS
4324 rtx count_label_rtx, addr, r24;
4325 char count_label_name[16];
4327 VEC_safe_push (int, heap, funcdef_nos, label_no);
4328 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4329 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4331 addr = force_reg (Pmode, count_label_rtx);
4332 r24 = gen_rtx_REG (Pmode, 24);
4333 emit_move_insn (r24, addr);
4335 call_insn =
4336 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4337 gen_rtx_SYMBOL_REF (Pmode,
4338 "_mcount")),
4339 GEN_INT (TARGET_64BIT ? 24 : 12)));
4341 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4343 #else
4345 call_insn =
4346 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4347 gen_rtx_SYMBOL_REF (Pmode,
4348 "_mcount")),
4349 GEN_INT (TARGET_64BIT ? 16 : 8)));
4351 #endif
4353 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4354 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4356 /* Indicate the _mcount call cannot throw, nor will it execute a
4357 non-local goto. */
4358 REG_NOTES (call_insn)
4359 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4362 /* Fetch the return address for the frame COUNT steps up from
4363 the current frame, after the prologue. FRAMEADDR is the
4364 frame pointer of the COUNT frame.
4366 We want to ignore any export stub remnants here. To handle this,
4367 we examine the code at the return address, and if it is an export
4368 stub, we return a memory rtx for the stub return address stored
4369 at frame-24.
4371 The value returned is used in two different ways:
4373 1. To find a function's caller.
4375 2. To change the return address for a function.
4377 This function handles most instances of case 1; however, it will
4378 fail if there are two levels of stubs to execute on the return
4379 path. The only way I believe that can happen is if the return value
4380 needs a parameter relocation, which never happens for C code.
4382 This function handles most instances of case 2; however, it will
4383 fail if we did not originally have stub code on the return path
4384 but will need stub code on the new return path. This can happen if
4385 the caller & callee are both in the main program, but the new
4386 return location is in a shared library. */
4388 rtx
4389 return_addr_rtx (int count, rtx frameaddr)
4391 rtx label;
4392 rtx rp;
4393 rtx saved_rp;
4394 rtx ins;
4396 if (count != 0)
4397 return NULL_RTX;
4399 rp = get_hard_reg_initial_val (Pmode, 2);
4401 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4402 return rp;
4404 saved_rp = gen_reg_rtx (Pmode);
4405 emit_move_insn (saved_rp, rp);
4407 /* Get pointer to the instruction stream. We have to mask out the
4408 privilege level from the two low order bits of the return address
4409 pointer here so that ins will point to the start of the first
4410 instruction that would have been executed if we returned. */
4411 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4412 label = gen_label_rtx ();
4414 /* Check the instruction stream at the normal return address for the
4415 export stub:
4417 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4418 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4419 0x00011820 | stub+16: mtsp r1,sr0
4420 0xe0400002 | stub+20: be,n 0(sr0,rp)
4422 If it is an export stub, then our return address is really in
4423 -24[frameaddr]. */
4425 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4426 NULL_RTX, SImode, 1);
4427 emit_jump_insn (gen_bne (label));
4429 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4430 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4431 emit_jump_insn (gen_bne (label));
4433 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4434 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4435 emit_jump_insn (gen_bne (label));
4437 /* 0xe0400002 must be specified as -532676606 so that it won't be
4438 rejected as an invalid immediate operand on 64-bit hosts. */
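/* (Worked out: 0xe0400002 is 3762290690 when read as unsigned, and
   3762290690 - 2^32 = -532676606 as a signed 32-bit value.)  */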
4439 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4440 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
4442 /* If there is no export stub then just use the value saved from
4443 the return pointer register. */
4445 emit_jump_insn (gen_bne (label));
4447 /* Here we know that our return address points to an export
4448 stub. We don't want to return the address of the export stub,
4449 but rather the return address of the export stub. That return
4450 address is stored at -24[frameaddr]. */
4452 emit_move_insn (saved_rp,
4453 gen_rtx_MEM (Pmode,
4454 memory_address (Pmode,
4455 plus_constant (frameaddr,
4456 -24))));
4458 emit_label (label);
4459 return saved_rp;
4462 void
4463 emit_bcond_fp (enum rtx_code code, rtx operand0)
4465 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4466 gen_rtx_IF_THEN_ELSE (VOIDmode,
4467 gen_rtx_fmt_ee (code,
4468 VOIDmode,
4469 gen_rtx_REG (CCFPmode, 0),
4470 const0_rtx),
4471 gen_rtx_LABEL_REF (VOIDmode, operand0),
4472 pc_rtx)));
4476 rtx
4477 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4479 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4480 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4483 /* Adjust the cost of a scheduling dependency. Return the new cost of
4484 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4486 static int
4487 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4489 enum attr_type attr_type;
4491 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4492 true dependencies, as they are described with bypasses now. */
4493 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4494 return cost;
4496 if (! recog_memoized (insn))
4497 return 0;
4499 attr_type = get_attr_type (insn);
4501 switch (REG_NOTE_KIND (link))
4503 case REG_DEP_ANTI:
4504 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4505 cycles later. */
4507 if (attr_type == TYPE_FPLOAD)
4509 rtx pat = PATTERN (insn);
4510 rtx dep_pat = PATTERN (dep_insn);
4511 if (GET_CODE (pat) == PARALLEL)
4513 /* This happens for the fldXs,mb patterns. */
4514 pat = XVECEXP (pat, 0, 0);
4516 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4517 /* If this happens, we have to extend this to schedule
4518 optimally. Return 0 for now. */
4519 return 0;
4521 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4523 if (! recog_memoized (dep_insn))
4524 return 0;
4525 switch (get_attr_type (dep_insn))
4527 case TYPE_FPALU:
4528 case TYPE_FPMULSGL:
4529 case TYPE_FPMULDBL:
4530 case TYPE_FPDIVSGL:
4531 case TYPE_FPDIVDBL:
4532 case TYPE_FPSQRTSGL:
4533 case TYPE_FPSQRTDBL:
4534 /* A fpload can't be issued until one cycle before a
4535 preceding arithmetic operation has finished if
4536 the target of the fpload is any of the sources
4537 (or destination) of the arithmetic operation. */
4538 return insn_default_latency (dep_insn) - 1;
4540 default:
4541 return 0;
4545 else if (attr_type == TYPE_FPALU)
4547 rtx pat = PATTERN (insn);
4548 rtx dep_pat = PATTERN (dep_insn);
4549 if (GET_CODE (pat) == PARALLEL)
4551 /* This happens for the fldXs,mb patterns. */
4552 pat = XVECEXP (pat, 0, 0);
4554 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4555 /* If this happens, we have to extend this to schedule
4556 optimally. Return 0 for now. */
4557 return 0;
4559 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4561 if (! recog_memoized (dep_insn))
4562 return 0;
4563 switch (get_attr_type (dep_insn))
4565 case TYPE_FPDIVSGL:
4566 case TYPE_FPDIVDBL:
4567 case TYPE_FPSQRTSGL:
4568 case TYPE_FPSQRTDBL:
4569 /* An ALU flop can't be issued until two cycles before a
4570 preceding divide or sqrt operation has finished if
4571 the target of the ALU flop is any of the sources
4572 (or destination) of the divide or sqrt operation. */
4573 return insn_default_latency (dep_insn) - 2;
4575 default:
4576 return 0;
4581 /* For other anti dependencies, the cost is 0. */
4582 return 0;
4584 case REG_DEP_OUTPUT:
4585 /* Output dependency; DEP_INSN writes a register that INSN writes some
4586 cycles later. */
4587 if (attr_type == TYPE_FPLOAD)
4589 rtx pat = PATTERN (insn);
4590 rtx dep_pat = PATTERN (dep_insn);
4591 if (GET_CODE (pat) == PARALLEL)
4593 /* This happens for the fldXs,mb patterns. */
4594 pat = XVECEXP (pat, 0, 0);
4596 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4597 /* If this happens, we have to extend this to schedule
4598 optimally. Return 0 for now. */
4599 return 0;
4601 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4603 if (! recog_memoized (dep_insn))
4604 return 0;
4605 switch (get_attr_type (dep_insn))
4607 case TYPE_FPALU:
4608 case TYPE_FPMULSGL:
4609 case TYPE_FPMULDBL:
4610 case TYPE_FPDIVSGL:
4611 case TYPE_FPDIVDBL:
4612 case TYPE_FPSQRTSGL:
4613 case TYPE_FPSQRTDBL:
4614 /* A fpload can't be issued until one cycle before a
4615 preceding arithmetic operation has finished if
4616 the target of the fpload is the destination of the
4617 arithmetic operation.
4619 Exception: For PA7100LC, PA7200 and PA7300, the cost
4620 is 3 cycles, unless they bundle together. We also
4621 pay the penalty if the second insn is a fpload. */
4622 return insn_default_latency (dep_insn) - 1;
4624 default:
4625 return 0;
4629 else if (attr_type == TYPE_FPALU)
4631 rtx pat = PATTERN (insn);
4632 rtx dep_pat = PATTERN (dep_insn);
4633 if (GET_CODE (pat) == PARALLEL)
4635 /* This happens for the fldXs,mb patterns. */
4636 pat = XVECEXP (pat, 0, 0);
4638 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4639 /* If this happens, we have to extend this to schedule
4640 optimally. Return 0 for now. */
4641 return 0;
4643 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4645 if (! recog_memoized (dep_insn))
4646 return 0;
4647 switch (get_attr_type (dep_insn))
4649 case TYPE_FPDIVSGL:
4650 case TYPE_FPDIVDBL:
4651 case TYPE_FPSQRTSGL:
4652 case TYPE_FPSQRTDBL:
4653 /* An ALU flop can't be issued until two cycles before a
4654 preceding divide or sqrt operation has finished if
4655 the target of the ALU flop is also the target of
4656 the divide or sqrt operation. */
4657 return insn_default_latency (dep_insn) - 2;
4659 default:
4660 return 0;
4665 /* For other output dependencies, the cost is 0. */
4666 return 0;
4668 default:
4669 gcc_unreachable ();
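/* For instance, in the REG_DEP_ANTI case above: if INSN is an fpload
   whose target register is a source of a dependent fpmul (DEP_INSN)
   with a default latency of 3 cycles, the adjusted cost is
   3 - 1 = 2.  (Illustrative numbers; the real latencies come from the
   scheduling description for the selected CPU.)  */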
4673 /* Adjust scheduling priorities. We use this to try to keep addil
4674 and the next use of %r1 close together. */
4675 static int
4676 pa_adjust_priority (rtx insn, int priority)
4678 rtx set = single_set (insn);
4679 rtx src, dest;
4680 if (set)
4682 src = SET_SRC (set);
4683 dest = SET_DEST (set);
4684 if (GET_CODE (src) == LO_SUM
4685 && symbolic_operand (XEXP (src, 1), VOIDmode)
4686 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4687 priority >>= 3;
4689 else if (GET_CODE (src) == MEM
4690 && GET_CODE (XEXP (src, 0)) == LO_SUM
4691 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4692 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4693 priority >>= 1;
4695 else if (GET_CODE (dest) == MEM
4696 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4697 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4698 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4699 priority >>= 3;
4701 return priority;
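/* E.g., an insn that sets a register from a LO_SUM of a writable
   symbolic operand and has priority 40 is demoted to 40 >> 3 = 5,
   keeping it close to the addil that computes its %r1 input.  */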
4704 /* The 700 can only issue a single insn at a time.
4705 The 7XXX processors can issue two insns at a time.
4706 The 8000 can issue 4 insns at a time. */
4707 static int
4708 pa_issue_rate (void)
4710 switch (pa_cpu)
4712 case PROCESSOR_700: return 1;
4713 case PROCESSOR_7100: return 2;
4714 case PROCESSOR_7100LC: return 2;
4715 case PROCESSOR_7200: return 2;
4716 case PROCESSOR_7300: return 2;
4717 case PROCESSOR_8000: return 4;
4719 default:
4720 gcc_unreachable ();
4726 /* Return any length adjustment needed by INSN which already has its length
4727 computed as LENGTH. Return zero if no adjustment is necessary.
4729 For the PA: function calls, millicode calls, and backwards short
4730 conditional branches with unfilled delay slots need an adjustment by +1
4731 (to account for the NOP which will be inserted into the instruction stream).
4733 Also compute the length of an inline block move here as it is too
4734 complicated to express as a length attribute in pa.md. */
4735 int
4736 pa_adjust_insn_length (rtx insn, int length)
4738 rtx pat = PATTERN (insn);
4740 /* Jumps inside switch tables which have unfilled delay slots need
4741 adjustment. */
4742 if (GET_CODE (insn) == JUMP_INSN
4743 && GET_CODE (pat) == PARALLEL
4744 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4745 return 4;
4746 /* Millicode insn with an unfilled delay slot. */
4747 else if (GET_CODE (insn) == INSN
4748 && GET_CODE (pat) != SEQUENCE
4749 && GET_CODE (pat) != USE
4750 && GET_CODE (pat) != CLOBBER
4751 && get_attr_type (insn) == TYPE_MILLI)
4752 return 4;
4753 /* Block move pattern. */
4754 else if (GET_CODE (insn) == INSN
4755 && GET_CODE (pat) == PARALLEL
4756 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4757 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4758 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4759 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4760 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4761 return compute_movmem_length (insn) - 4;
4762 /* Block clear pattern. */
4763 else if (GET_CODE (insn) == INSN
4764 && GET_CODE (pat) == PARALLEL
4765 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4766 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4767 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4768 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4769 return compute_clrmem_length (insn) - 4;
4770 /* Conditional branch with an unfilled delay slot. */
4771 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4773 /* Adjust a short backwards conditional with an unfilled delay slot. */
4774 if (GET_CODE (pat) == SET
4775 && length == 4
4776 && ! forward_branch_p (insn))
4777 return 4;
4778 else if (GET_CODE (pat) == PARALLEL
4779 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4780 && length == 4)
4781 return 4;
4782 /* Adjust dbra insn with short backwards conditional branch with
4783 unfilled delay slot -- only for the case where the counter is in a
4784 general register. */
4785 else if (GET_CODE (pat) == PARALLEL
4786 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4787 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4788 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4789 && length == 4
4790 && ! forward_branch_p (insn))
4791 return 4;
4792 else
4793 return 0;
4795 return 0;
4798 /* Print operand X (an rtx) in assembler syntax to file FILE.
4799 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4800 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4802 void
4803 print_operand (FILE *file, rtx x, int code)
4805 switch (code)
4807 case '#':
4808 /* Output a 'nop' if there's nothing for the delay slot. */
4809 if (dbr_sequence_length () == 0)
4810 fputs ("\n\tnop", file);
4811 return;
4812 case '*':
4813 /* Output a nullification completer if there's nothing for the
4814 delay slot or nullification is requested. */
4815 if (dbr_sequence_length () == 0 ||
4816 (final_sequence &&
4817 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4818 fputs (",n", file);
4819 return;
4820 case 'R':
4821 /* Print out the second register name of a register pair.
4822 I.e., R (6) => 7. */
4823 fputs (reg_names[REGNO (x) + 1], file);
4824 return;
4825 case 'r':
4826 /* A register or zero. */
4827 if (x == const0_rtx
4828 || (x == CONST0_RTX (DFmode))
4829 || (x == CONST0_RTX (SFmode)))
4831 fputs ("%r0", file);
4832 return;
4834 else
4835 break;
4836 case 'f':
4837 /* A register or zero (floating point). */
4838 if (x == const0_rtx
4839 || (x == CONST0_RTX (DFmode))
4840 || (x == CONST0_RTX (SFmode)))
4842 fputs ("%fr0", file);
4843 return;
4845 else
4846 break;
4847 case 'A':
4849 rtx xoperands[2];
4851 xoperands[0] = XEXP (XEXP (x, 0), 0);
4852 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4853 output_global_address (file, xoperands[1], 0);
4854 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4855 return;
4858 case 'C': /* Plain (C)ondition */
4859 case 'X':
4860 switch (GET_CODE (x))
4862 case EQ:
4863 fputs ("=", file); break;
4864 case NE:
4865 fputs ("<>", file); break;
4866 case GT:
4867 fputs (">", file); break;
4868 case GE:
4869 fputs (">=", file); break;
4870 case GEU:
4871 fputs (">>=", file); break;
4872 case GTU:
4873 fputs (">>", file); break;
4874 case LT:
4875 fputs ("<", file); break;
4876 case LE:
4877 fputs ("<=", file); break;
4878 case LEU:
4879 fputs ("<<=", file); break;
4880 case LTU:
4881 fputs ("<<", file); break;
4882 default:
4883 gcc_unreachable ();
4885 return;
4886 case 'N': /* Condition, (N)egated */
4887 switch (GET_CODE (x))
4889 case EQ:
4890 fputs ("<>", file); break;
4891 case NE:
4892 fputs ("=", file); break;
4893 case GT:
4894 fputs ("<=", file); break;
4895 case GE:
4896 fputs ("<", file); break;
4897 case GEU:
4898 fputs ("<<", file); break;
4899 case GTU:
4900 fputs ("<<=", file); break;
4901 case LT:
4902 fputs (">=", file); break;
4903 case LE:
4904 fputs (">", file); break;
4905 case LEU:
4906 fputs (">>", file); break;
4907 case LTU:
4908 fputs (">>=", file); break;
4909 default:
4910 gcc_unreachable ();
4912 return;
4913 /* For floating point comparisons. Note that the output
4914 predicates are the complement of the desired mode. The
4915 conditions for GT, GE, LT, LE and LTGT cause an invalid
4916 operation exception if the result is unordered and this
4917 exception is enabled in the floating-point status register. */
4918 case 'Y':
4919 switch (GET_CODE (x))
4921 case EQ:
4922 fputs ("!=", file); break;
4923 case NE:
4924 fputs ("=", file); break;
4925 case GT:
4926 fputs ("!>", file); break;
4927 case GE:
4928 fputs ("!>=", file); break;
4929 case LT:
4930 fputs ("!<", file); break;
4931 case LE:
4932 fputs ("!<=", file); break;
4933 case LTGT:
4934 fputs ("!<>", file); break;
4935 case UNLE:
4936 fputs ("!?<=", file); break;
4937 case UNLT:
4938 fputs ("!?<", file); break;
4939 case UNGE:
4940 fputs ("!?>=", file); break;
4941 case UNGT:
4942 fputs ("!?>", file); break;
4943 case UNEQ:
4944 fputs ("!?=", file); break;
4945 case UNORDERED:
4946 fputs ("!?", file); break;
4947 case ORDERED:
4948 fputs ("?", file); break;
4949 default:
4950 gcc_unreachable ();
4952 return;
4953 case 'S': /* Condition, operands are (S)wapped. */
4954 switch (GET_CODE (x))
4956 case EQ:
4957 fputs ("=", file); break;
4958 case NE:
4959 fputs ("<>", file); break;
4960 case GT:
4961 fputs ("<", file); break;
4962 case GE:
4963 fputs ("<=", file); break;
4964 case GEU:
4965 fputs ("<<=", file); break;
4966 case GTU:
4967 fputs ("<<", file); break;
4968 case LT:
4969 fputs (">", file); break;
4970 case LE:
4971 fputs (">=", file); break;
4972 case LEU:
4973 fputs (">>=", file); break;
4974 case LTU:
4975 fputs (">>", file); break;
4976 default:
4977 gcc_unreachable ();
4979 return;
4980 case 'B': /* Condition, (B)oth swapped and negated. */
4981 switch (GET_CODE (x))
4983 case EQ:
4984 fputs ("<>", file); break;
4985 case NE:
4986 fputs ("=", file); break;
4987 case GT:
4988 fputs (">=", file); break;
4989 case GE:
4990 fputs (">", file); break;
4991 case GEU:
4992 fputs (">>", file); break;
4993 case GTU:
4994 fputs (">>=", file); break;
4995 case LT:
4996 fputs ("<=", file); break;
4997 case LE:
4998 fputs ("<", file); break;
4999 case LEU:
5000 fputs ("<<", file); break;
5001 case LTU:
5002 fputs ("<<=", file); break;
5003 default:
5004 gcc_unreachable ();
5006 return;
5007 case 'k':
5008 gcc_assert (GET_CODE (x) == CONST_INT);
5009 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5010 return;
5011 case 'Q':
5012 gcc_assert (GET_CODE (x) == CONST_INT);
5013 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5014 return;
5015 case 'L':
5016 gcc_assert (GET_CODE (x) == CONST_INT);
5017 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5018 return;
5019 case 'O':
5020 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5021 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5022 return;
5023 case 'p':
5024 gcc_assert (GET_CODE (x) == CONST_INT);
5025 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5026 return;
5027 case 'P':
5028 gcc_assert (GET_CODE (x) == CONST_INT);
5029 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5030 return;
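/* Worked examples for the CONST_INT codes above (operand values are
   illustrative): 'k' prints ~5 = -6; 'Q' prints 64 - (16 & 63) = 48;
   'L' prints 32 - (8 & 31) = 24; 'O' prints exact_log2 (64) = 6;
   'p' prints 63 - (16 & 63) = 47; 'P' prints 31 - (8 & 31) = 23.  */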
5031 case 'I':
5032 if (GET_CODE (x) == CONST_INT)
5033 fputs ("i", file);
5034 return;
5035 case 'M':
5036 case 'F':
5037 switch (GET_CODE (XEXP (x, 0)))
5039 case PRE_DEC:
5040 case PRE_INC:
5041 if (ASSEMBLER_DIALECT == 0)
5042 fputs ("s,mb", file);
5043 else
5044 fputs (",mb", file);
5045 break;
5046 case POST_DEC:
5047 case POST_INC:
5048 if (ASSEMBLER_DIALECT == 0)
5049 fputs ("s,ma", file);
5050 else
5051 fputs (",ma", file);
5052 break;
5053 case PLUS:
5054 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5055 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5057 if (ASSEMBLER_DIALECT == 0)
5058 fputs ("x", file);
5060 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5061 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5063 if (ASSEMBLER_DIALECT == 0)
5064 fputs ("x,s", file);
5065 else
5066 fputs (",s", file);
5068 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5069 fputs ("s", file);
5070 break;
5071 default:
5072 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5073 fputs ("s", file);
5074 break;
5076 return;
5077 case 'G':
5078 output_global_address (file, x, 0);
5079 return;
5080 case 'H':
5081 output_global_address (file, x, 1);
5082 return;
5083 case 0: /* Don't do anything special */
5084 break;
5085 case 'Z':
5087 unsigned op[3];
5088 compute_zdepwi_operands (INTVAL (x), op);
5089 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5090 return;
5092 case 'z':
5094 unsigned op[3];
5095 compute_zdepdi_operands (INTVAL (x), op);
5096 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5097 return;
5099 case 'c':
5100 /* We can get here from a .vtable_inherit due to our
5101 CONSTANT_ADDRESS_P rejecting perfectly good constant
5102 addresses. */
5103 break;
5104 default:
5105 gcc_unreachable ();
5107 if (GET_CODE (x) == REG)
5109 fputs (reg_names [REGNO (x)], file);
5110 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5112 fputs ("R", file);
5113 return;
5115 if (FP_REG_P (x)
5116 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5117 && (REGNO (x) & 1) == 0)
5118 fputs ("L", file);
5120 else if (GET_CODE (x) == MEM)
5122 int size = GET_MODE_SIZE (GET_MODE (x));
5123 rtx base = NULL_RTX;
5124 switch (GET_CODE (XEXP (x, 0)))
5126 case PRE_DEC:
5127 case POST_DEC:
5128 base = XEXP (XEXP (x, 0), 0);
5129 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5130 break;
5131 case PRE_INC:
5132 case POST_INC:
5133 base = XEXP (XEXP (x, 0), 0);
5134 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5135 break;
5136 case PLUS:
5137 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5138 fprintf (file, "%s(%s)",
5139 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5140 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5141 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5142 fprintf (file, "%s(%s)",
5143 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5144 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5145 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5146 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5148 /* Because the REG_POINTER flag can get lost during reload,
5149 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5150 index and base registers in the combined move patterns. */
5151 rtx base = XEXP (XEXP (x, 0), 1);
5152 rtx index = XEXP (XEXP (x, 0), 0);
5154 fprintf (file, "%s(%s)",
5155 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5157 else
5158 output_address (XEXP (x, 0));
5159 break;
5160 default:
5161 output_address (XEXP (x, 0));
5162 break;
5165 else
5166 output_addr_const (file, x);
5169 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5171 void
5172 output_global_address (FILE *file, rtx x, int round_constant)
5175 /* Imagine (high (const (plus ...))). */
5176 if (GET_CODE (x) == HIGH)
5177 x = XEXP (x, 0);
5179 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5180 output_addr_const (file, x);
5181 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5183 output_addr_const (file, x);
5184 fputs ("-$global$", file);
5186 else if (GET_CODE (x) == CONST)
5188 const char *sep = "";
5189 int offset = 0; /* assembler wants -$global$ at end */
5190 rtx base = NULL_RTX;
5192 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5194 case SYMBOL_REF:
5195 base = XEXP (XEXP (x, 0), 0);
5196 output_addr_const (file, base);
5197 break;
5198 case CONST_INT:
5199 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5200 break;
5201 default:
5202 gcc_unreachable ();
5205 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5207 case SYMBOL_REF:
5208 base = XEXP (XEXP (x, 0), 1);
5209 output_addr_const (file, base);
5210 break;
5211 case CONST_INT:
5212 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5213 break;
5214 default:
5215 gcc_unreachable ();
5218 /* How bogus. The compiler is apparently responsible for
5219 rounding the constant if it uses an LR field selector.
5221 The linker and/or assembler seem a better place since
5222 they have to do this kind of thing already.
5224 If we fail to do this, HP's optimizing linker may eliminate
5225 an addil, but not update the ldw/stw/ldo instruction that
5226 uses the result of the addil. */
5227 if (round_constant)
5228 offset = ((offset + 0x1000) & ~0x1fff);
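/* E.g., an offset of 0x1234 becomes (0x1234 + 0x1000) & ~0x1fff =
   0x2000; i.e., offsets are rounded to the nearest 8k (0x2000)
   boundary.  */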
5230 switch (GET_CODE (XEXP (x, 0)))
5232 case PLUS:
5233 if (offset < 0)
5235 offset = -offset;
5236 sep = "-";
5238 else
5239 sep = "+";
5240 break;
5242 case MINUS:
5243 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5244 sep = "-";
5245 break;
5247 default:
5248 gcc_unreachable ();
5251 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5252 fputs ("-$global$", file);
5253 if (offset)
5254 fprintf (file, "%s%d", sep, offset);
5256 else
5257 output_addr_const (file, x);
5260 /* Output boilerplate text to appear at the beginning of the file.
5261 There are several possible versions. */
5262 #define aputs(x) fputs(x, asm_out_file)
5263 static inline void
5264 pa_file_start_level (void)
5266 if (TARGET_64BIT)
5267 aputs ("\t.LEVEL 2.0w\n");
5268 else if (TARGET_PA_20)
5269 aputs ("\t.LEVEL 2.0\n");
5270 else if (TARGET_PA_11)
5271 aputs ("\t.LEVEL 1.1\n");
5272 else
5273 aputs ("\t.LEVEL 1.0\n");
5276 static inline void
5277 pa_file_start_space (int sortspace)
5279 aputs ("\t.SPACE $PRIVATE$");
5280 if (sortspace)
5281 aputs (",SORT=16");
5282 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5283 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5284 "\n\t.SPACE $TEXT$");
5285 if (sortspace)
5286 aputs (",SORT=8");
5287 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5288 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5291 static inline void
5292 pa_file_start_file (int want_version)
5294 if (write_symbols != NO_DEBUG)
5296 output_file_directive (asm_out_file, main_input_filename);
5297 if (want_version)
5298 aputs ("\t.version\t\"01.01\"\n");
5302 static inline void
5303 pa_file_start_mcount (const char *aswhat)
5305 if (profile_flag)
5306 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5309 static void
5310 pa_elf_file_start (void)
5312 pa_file_start_level ();
5313 pa_file_start_mcount ("ENTRY");
5314 pa_file_start_file (0);
5317 static void
5318 pa_som_file_start (void)
5320 pa_file_start_level ();
5321 pa_file_start_space (0);
5322 aputs ("\t.IMPORT $global$,DATA\n"
5323 "\t.IMPORT $$dyncall,MILLICODE\n");
5324 pa_file_start_mcount ("CODE");
5325 pa_file_start_file (0);
5328 static void
5329 pa_linux_file_start (void)
5331 pa_file_start_file (1);
5332 pa_file_start_level ();
5333 pa_file_start_mcount ("CODE");
5336 static void
5337 pa_hpux64_gas_file_start (void)
5339 pa_file_start_level ();
5340 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5341 if (profile_flag)
5342 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5343 #endif
5344 pa_file_start_file (1);
5347 static void
5348 pa_hpux64_hpas_file_start (void)
5350 pa_file_start_level ();
5351 pa_file_start_space (1);
5352 pa_file_start_mcount ("CODE");
5353 pa_file_start_file (0);
5355 #undef aputs
5357 /* Search the deferred plabel list for SYMBOL and return its internal
5358 label. If an entry for SYMBOL is not found, a new entry is created. */
5360 static rtx
5361 get_deferred_plabel (rtx symbol)
5363 const char *fname = XSTR (symbol, 0);
5364 size_t i;
5366 /* See if we have already put this function on the list of deferred
5367 plabels. This list is generally small, so a linear search is not
5368 too ugly. If it proves too slow, replace it with something faster. */
5369 for (i = 0; i < n_deferred_plabels; i++)
5370 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5371 break;
5373 /* If the deferred plabel list is empty, or this entry was not found
5374 on the list, create a new entry on the list. */
5375 if (deferred_plabels == NULL || i == n_deferred_plabels)
5377 tree id;
5379 if (deferred_plabels == 0)
5380 deferred_plabels = (struct deferred_plabel *)
5381 ggc_alloc (sizeof (struct deferred_plabel));
5382 else
5383 deferred_plabels = (struct deferred_plabel *)
5384 ggc_realloc (deferred_plabels,
5385 ((n_deferred_plabels + 1)
5386 * sizeof (struct deferred_plabel)));
5388 i = n_deferred_plabels++;
5389 deferred_plabels[i].internal_label = gen_label_rtx ();
5390 deferred_plabels[i].symbol = symbol;
5392 /* Gross. We have just implicitly taken the address of this
5393 function. Mark it in the same manner as assemble_name. */
5394 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5395 if (id)
5396 mark_referenced (id);
5399 return deferred_plabels[i].internal_label;
5402 static void
5403 output_deferred_plabels (void)
5405 size_t i;
5407 /* If we have some deferred plabels, then we need to switch into the
5408 data or readonly data section, and align it to a 4-byte boundary
5409 before outputting the deferred plabels. */
5410 if (n_deferred_plabels)
5412 switch_to_section (flag_pic ? data_section : readonly_data_section);
5413 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5416 /* Now output the deferred plabels. */
5417 for (i = 0; i < n_deferred_plabels; i++)
5419 targetm.asm_out.internal_label (asm_out_file, "L",
5420 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5421 assemble_integer (deferred_plabels[i].symbol,
5422 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5426 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5427 /* Initialize optabs to point to HPUX long double emulation routines. */
5428 static void
5429 pa_hpux_init_libfuncs (void)
5431 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5432 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5433 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5434 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5435 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5436 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5437 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5438 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5439 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5441 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5442 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5443 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5444 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5445 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5446 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5447 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5449 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5450 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5451 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5452 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5454 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5455 ? "__U_Qfcnvfxt_quad_to_sgl"
5456 : "_U_Qfcnvfxt_quad_to_sgl");
5457 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5458 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5459 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5461 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5462 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5463 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5464 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5466 #endif
5468 /* HP's millicode routines mean something special to the assembler.
5469 Keep track of which ones we have used. */
5471 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5472 static void import_milli (enum millicodes);
5473 static char imported[(int) end1000];
5474 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5475 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5476 #define MILLI_START 10
5478 static void
5479 import_milli (enum millicodes code)
5481 char str[sizeof (import_string)];
5483 if (!imported[(int) code])
5485 imported[(int) code] = 1;
5486 strcpy (str, import_string);
5487 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5488 output_asm_insn (str, 0);
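/* For example, import_milli (mulI) copies "mulI" over the "...."
   placeholder (MILLI_START is 10, the length of ".IMPORT $$") and
   emits

   .IMPORT $$mulI,MILLICODE

   at most once per output file.  */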
5492 /* The register constraints have put the operands and return value in
5493 the proper registers. */
5495 const char *
5496 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5498 import_milli (mulI);
5499 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5502 /* Emit the rtl for doing a division by a constant. */
5504 /* Do magic division millicodes exist for this value? */
5505 const int magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
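/* That is, the table records that magic divide millicodes
   ($$divI_3, $$divU_3, and so on) exist for the divisors
   3, 5, 6, 7, 9, 10, 12, 14 and 15.  */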
5507 /* We'll use an array to keep track of the magic millicodes and
5508 whether or not we've used them already. [n][0] is signed, [n][1] is
5509 unsigned. */
5511 static int div_milli[16][2];
5513 int
5514 emit_hpdiv_const (rtx *operands, int unsignedp)
5516 if (GET_CODE (operands[2]) == CONST_INT
5517 && INTVAL (operands[2]) > 0
5518 && INTVAL (operands[2]) < 16
5519 && magic_milli[INTVAL (operands[2])])
5521 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5523 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5524 emit
5525 (gen_rtx_PARALLEL
5526 (VOIDmode,
5527 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5528 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5529 SImode,
5530 gen_rtx_REG (SImode, 26),
5531 operands[2])),
5532 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5533 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5534 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5535 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5536 gen_rtx_CLOBBER (VOIDmode, ret))));
5537 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5538 return 1;
5540 return 0;
5543 const char *
5544 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5546 int divisor;
5548 /* If the divisor is a constant, try to use one of the special
5549 opcodes. */
5550 if (GET_CODE (operands[0]) == CONST_INT)
5552 static char buf[100];
5553 divisor = INTVAL (operands[0]);
5554 if (!div_milli[divisor][unsignedp])
5556 div_milli[divisor][unsignedp] = 1;
5557 if (unsignedp)
5558 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5559 else
5560 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5562 if (unsignedp)
5564 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5565 INTVAL (operands[0]));
5566 return output_millicode_call (insn,
5567 gen_rtx_SYMBOL_REF (SImode, buf));
5569 else
5571 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5572 INTVAL (operands[0]));
5573 return output_millicode_call (insn,
5574 gen_rtx_SYMBOL_REF (SImode, buf));
5577 /* Divisor isn't a special constant. */
5578 else
5580 if (unsignedp)
5582 import_milli (divU);
5583 return output_millicode_call (insn,
5584 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5586 else
5588 import_milli (divI);
5589 return output_millicode_call (insn,
5590 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5595 /* Output a $$rem millicode to do mod. */
5597 const char *
5598 output_mod_insn (int unsignedp, rtx insn)
5600 if (unsignedp)
5602 import_milli (remU);
5603 return output_millicode_call (insn,
5604 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5606 else
5608 import_milli (remI);
5609 return output_millicode_call (insn,
5610 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5614 void
5615 output_arg_descriptor (rtx call_insn)
5617 const char *arg_regs[4];
5618 enum machine_mode arg_mode;
5619 rtx link;
5620 int i, output_flag = 0;
5621 int regno;
5623 /* We neither need nor want argument location descriptors for the
5624 64-bit runtime environment or the ELF32 environment. */
5625 if (TARGET_64BIT || TARGET_ELF32)
5626 return;
5628 for (i = 0; i < 4; i++)
5629 arg_regs[i] = 0;
5631 /* Specify explicitly that no argument relocations should take place
5632 if using the portable runtime calling conventions. */
5633 if (TARGET_PORTABLE_RUNTIME)
5635 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5636 asm_out_file);
5637 return;
5640 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5641 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5642 link; link = XEXP (link, 1))
5644 rtx use = XEXP (link, 0);
5646 if (! (GET_CODE (use) == USE
5647 && GET_CODE (XEXP (use, 0)) == REG
5648 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5649 continue;
5651 arg_mode = GET_MODE (XEXP (use, 0));
5652 regno = REGNO (XEXP (use, 0));
5653 if (regno >= 23 && regno <= 26)
5655 arg_regs[26 - regno] = "GR";
5656 if (arg_mode == DImode)
5657 arg_regs[25 - regno] = "GR";
5659 else if (regno >= 32 && regno <= 39)
5661 if (arg_mode == SFmode)
5662 arg_regs[(regno - 32) / 2] = "FR";
5663 else
5665 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5666 arg_regs[(regno - 34) / 2] = "FR";
5667 arg_regs[(regno - 34) / 2 + 1] = "FU";
5668 #else
5669 arg_regs[(regno - 34) / 2] = "FU";
5670 arg_regs[(regno - 34) / 2 + 1] = "FR";
5671 #endif
5675 fputs ("\t.CALL ", asm_out_file);
5676 for (i = 0; i < 4; i++)
5678 if (arg_regs[i])
5680 if (output_flag++)
5681 fputc (',', asm_out_file);
5682 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5685 fputc ('\n', asm_out_file);
5688 static enum reg_class
5689 pa_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
5690 enum machine_mode mode, secondary_reload_info *sri)
5692 int is_symbolic, regno;
5694 /* Handle the easy stuff first. */
5695 if (rclass == R1_REGS)
5696 return NO_REGS;
5698 if (REG_P (x))
5700 regno = REGNO (x);
5701 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5702 return NO_REGS;
5704 else
5705 regno = -1;
5707 /* If we have something like (mem (mem (...))), we can safely assume the
5708 inner MEM will end up in a general register after reloading, so there's
5709 no need for a secondary reload. */
5710 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5711 return NO_REGS;
5713 /* Trying to load a constant into a FP register during PIC code
5714 generation requires %r1 as a scratch register. */
5715 if (flag_pic
5716 && (mode == SImode || mode == DImode)
5717 && FP_REG_CLASS_P (rclass)
5718 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5720 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5721 : CODE_FOR_reload_indi_r1);
5722 return NO_REGS;
5725 /* Profiling showed that the PA port spends about 1.3% of its compilation
5726 time in true_regnum from calls inside pa_secondary_reload_class. */
5727 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5728 regno = true_regnum (x);
5730 /* In order to allow 14-bit displacements in integer loads and stores,
5731 we need to prevent reload from generating out of range integer mode
5732 loads and stores to the floating point registers. Previously, we
5733 used to call for a secondary reload and have emit_move_sequence()
5734 fix the instruction sequence. However, reload occasionally wouldn't
5735 generate the reload and we would end up with an invalid REG+D memory
5736 address. So, now we use an intermediate general register for most
5737 memory loads and stores. */
5738 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5739 && GET_MODE_CLASS (mode) == MODE_INT
5740 && FP_REG_CLASS_P (rclass))
5742 /* Reload passes (mem:SI (reg/f:DI 30 %r30)) when it wants to check
5743 the secondary reload needed for a pseudo. It never passes a
5744 REG+D address. */
5745 if (GET_CODE (x) == MEM)
5747 x = XEXP (x, 0);
5749 /* We don't need an intermediate for indexed and LO_SUM DLT
5750 memory addresses. When INT14_OK_STRICT is true, it might
5751 appear that we could directly allow register indirect
5752 memory addresses. However, this doesn't work because we
5753 don't support SUBREGs in floating-point register copies
5754 and reload doesn't tell us when it's going to use a SUBREG. */
5755 if (IS_INDEX_ADDR_P (x)
5756 || IS_LO_SUM_DLT_ADDR_P (x))
5757 return NO_REGS;
5759 /* Otherwise, we need an intermediate general register. */
5760 return GENERAL_REGS;
5763 /* Request a secondary reload with a general scratch register
5764 for everything else. ??? Could symbolic operands be handled
5765 directly when generating non-pic PA 2.0 code? */
5766 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5767 return NO_REGS;
5770 /* We need a secondary register (GPR) for copies between the SAR
5771 and anything other than a general register. */
5772 if (rclass == SHIFT_REGS && (regno <= 0 || regno >= 32))
5774 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5775 return NO_REGS;
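   /* As a sketch (not a template taken from pa.md): moving %sar to
      memory has to bounce through a GPR, roughly "mfctl %sar,%r1"
      followed by a store of %r1; the reload_in/reload_out expanders
      selected above supply that scratch register.  */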
5778 /* A SAR<->FP register copy requires a secondary register (GPR) as
5779 well as secondary memory. */
5780 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5781 && (REGNO_REG_CLASS (regno) == SHIFT_REGS
5782 && FP_REG_CLASS_P (rclass)))
5784 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5785 return NO_REGS;
5788 /* Secondary reloads of symbolic operands require %r1 as a scratch
5789 register when we're generating PIC code and when the operand isn't
5790 readonly. */
5791 if (GET_CODE (x) == HIGH)
5792 x = XEXP (x, 0);
5794 /* Profiling has shown GCC spends about 2.6% of its compilation
5795 time in symbolic_operand from calls inside pa_secondary_reload_class.
5796 So, we use an inline copy to avoid useless work. */
5797 switch (GET_CODE (x))
5799 rtx op;
5801 case SYMBOL_REF:
5802 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5803 break;
5804 case LABEL_REF:
5805 is_symbolic = 1;
5806 break;
5807 case CONST:
5808 op = XEXP (x, 0);
5809 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5810 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5811 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5812 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5813 break;
5814 default:
5815 is_symbolic = 0;
5816 break;
5819 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5821 gcc_assert (mode == SImode || mode == DImode);
5822 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5823 : CODE_FOR_reload_indi_r1);
5826 return NO_REGS;
5829 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
5830 is only marked as live on entry by df-scan when it is a fixed
5831 register. It isn't a fixed register in the 64-bit runtime,
5832 so we need to mark it here. */
5834 static void
5835 pa_extra_live_on_entry (bitmap regs)
5837 if (TARGET_64BIT)
5838 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
5841 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
5842 to prevent it from being deleted. */
5844 rtx
5845 pa_eh_return_handler_rtx (void)
5847 rtx tmp;
5849 tmp = gen_rtx_PLUS (word_mode, frame_pointer_rtx,
5850 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
5851 tmp = gen_rtx_MEM (word_mode, tmp);
5852 MEM_VOLATILE_P (tmp) = 1;
5853 return tmp;
5856 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5857 by invisible reference. As a GCC extension, we also pass anything
5858 with a zero or variable size by reference.
5860 The 64-bit runtime does not describe passing any types by invisible
5861 reference. The internals of GCC can't currently handle passing
5862 empty structures, or zero or variable length arrays, when they are
5863 not passed entirely on the stack or by reference. Thus, as a GCC
5864 extension, we pass these types by reference. The HP compiler doesn't
5865 support these types, so hopefully there shouldn't be any compatibility
5866 issues. This may have to be revisited when HP releases a C99 compiler
5867 or updates the ABI. */
5869 static bool
5870 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5871 enum machine_mode mode, const_tree type,
5872 bool named ATTRIBUTE_UNUSED)
5874 HOST_WIDE_INT size;
5876 if (type)
5877 size = int_size_in_bytes (type);
5878 else
5879 size = GET_MODE_SIZE (mode);
5881 if (TARGET_64BIT)
5882 return size <= 0;
5883 else
5884 return size <= 0 || size > 8;
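   /* Concretely: a 16-byte struct is passed by invisible reference in
      the 32-bit runtime (size > 8) but by value in the 64-bit runtime,
      where only zero and variable sized objects go by reference.  */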
5887 enum direction
5888 function_arg_padding (enum machine_mode mode, const_tree type)
5890 if (mode == BLKmode
5891 || (TARGET_64BIT
5892 && type
5893 && (AGGREGATE_TYPE_P (type)
5894 || TREE_CODE (type) == COMPLEX_TYPE
5895 || TREE_CODE (type) == VECTOR_TYPE)))
5897 /* Return none if justification is not required. */
5898 if (type
5899 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5900 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5901 return none;
5903 /* The directions set here are ignored when a BLKmode argument larger
5904 than a word is placed in a register. Different code is used for
5905 the stack and registers. This makes it difficult to have a
5906 consistent data representation for both the stack and registers.
5907 For both runtimes, the justification and padding for arguments on
5908 the stack and in registers should be identical. */
5909 if (TARGET_64BIT)
5910 /* The 64-bit runtime specifies left justification for aggregates. */
5911 return upward;
5912 else
5913 /* The 32-bit runtime architecture specifies right justification.
5914 When the argument is passed on the stack, the argument is padded
5915 with garbage on the left. The HP compiler pads with zeros. */
5916 return downward;
5919 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5920 return downward;
5921 else
5922 return none;
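   /* Example: a short passed on the 32-bit stack is right justified --
      padded with garbage on the left, so the value sits in the
      low-order bytes of its word -- whereas a BLKmode aggregate in the
      64-bit runtime is left justified, as described above.  */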
5926 /* Do what is necessary for `va_start'. We look at the current function
5927 to determine if stdargs or varargs is used and fill in an initial
5928 va_list. A pointer to this constructor is returned. */
5930 static rtx
5931 hppa_builtin_saveregs (void)
5933 rtx offset, dest;
5934 tree fntype = TREE_TYPE (current_function_decl);
5935 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5936 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5937 != void_type_node)))
5938 ? UNITS_PER_WORD : 0);
5940 if (argadj)
5941 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
5942 else
5943 offset = crtl->args.arg_offset_rtx;
5945 if (TARGET_64BIT)
5947 int i, off;
5949 /* Adjust for varargs/stdarg differences. */
5950 if (argadj)
5951 offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
5952 else
5953 offset = crtl->args.arg_offset_rtx;
5955 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5956 from the incoming arg pointer and growing to larger addresses. */
5957 for (i = 26, off = -64; i >= 19; i--, off += 8)
5958 emit_move_insn (gen_rtx_MEM (word_mode,
5959 plus_constant (arg_pointer_rtx, off)),
5960 gen_rtx_REG (word_mode, i));
5962 /* The incoming args pointer points just beyond the flushback area;
5963 normally this is not a serious concern. However, when we are doing
5964 varargs/stdargs we want to make the arg pointer point to the start
5965 of the incoming argument area. */
5966 emit_move_insn (virtual_incoming_args_rtx,
5967 plus_constant (arg_pointer_rtx, -64));
5969 /* Now return a pointer to the first anonymous argument. */
5970 return copy_to_reg (expand_binop (Pmode, add_optab,
5971 virtual_incoming_args_rtx,
5972 offset, 0, 0, OPTAB_LIB_WIDEN));
5975 /* Store general registers on the stack. */
5976 dest = gen_rtx_MEM (BLKmode,
5977 plus_constant (crtl->args.internal_arg_pointer,
5978 -16));
5979 set_mem_alias_set (dest, get_varargs_alias_set ());
5980 set_mem_align (dest, BITS_PER_WORD);
5981 move_block_from_reg (23, dest, 4);
5983 /* move_block_from_reg will emit code to store the argument registers
5984 individually as scalar stores.
5986 However, other insns may later load from the same addresses for
5987 a structure load (passing a struct to a varargs routine).
5989 The alias code assumes that such aliasing can never happen, so we
5990 have to keep memory referencing insns from moving up beyond the
5991 last argument register store. So we emit a blockage insn here. */
5992 emit_insn (gen_blockage ());
5994 return copy_to_reg (expand_binop (Pmode, add_optab,
5995 crtl->args.internal_arg_pointer,
5996 offset, 0, 0, OPTAB_LIB_WIDEN));
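   /* Descriptive note for the 32-bit case above: move_block_from_reg
      stores the four argument registers %r23..%r26 into the 16-byte
      block just below the internal arg pointer, so the anonymous
      arguments can later be read back from one contiguous stack
      area.  */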
5999 static void
6000 hppa_va_start (tree valist, rtx nextarg)
6002 nextarg = expand_builtin_saveregs ();
6003 std_expand_builtin_va_start (valist, nextarg);
6006 static tree
6007 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6008 gimple_seq *post_p)
6010 if (TARGET_64BIT)
6012 /* Args grow upward. We can use the generic routines. */
6013 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6015 else /* !TARGET_64BIT */
6017 tree ptr = build_pointer_type (type);
6018 tree valist_type;
6019 tree t, u;
6020 unsigned int size, ofs;
6021 bool indirect;
6023 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6024 if (indirect)
6026 type = ptr;
6027 ptr = build_pointer_type (type);
6029 size = int_size_in_bytes (type);
6030 valist_type = TREE_TYPE (valist);
6032 /* Args grow down. Not handled by generic routines. */
6034 u = fold_convert (sizetype, size_in_bytes (type));
6035 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6036 t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);
6038 /* Copied from va-pa.h, but we probably don't need to align to
6039 word size, since we generate and preserve that invariant. */
6040 u = size_int (size > 4 ? -8 : -4);
6041 t = fold_convert (sizetype, t);
6042 t = build2 (BIT_AND_EXPR, sizetype, t, u);
6043 t = fold_convert (valist_type, t);
6045 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6047 ofs = (8 - size) % 4;
6048 if (ofs != 0)
6050 u = size_int (ofs);
6051 t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
6054 t = fold_convert (ptr, t);
6055 t = build_va_arg_indirect_ref (t);
6057 if (indirect)
6058 t = build_va_arg_indirect_ref (t);
6060 return t;
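   /* Worked example of the arithmetic above (32-bit runtime): for a
      2-byte short, valist is first decremented by 2, rounded down to a
      4-byte boundary, then ofs = (8 - 2) % 4 = 2 is added back so the
      value is read from the low-order end of its word, matching the
      downward padding used when the argument was stored.  */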
6064 /* True if MODE is valid for the target. By "valid", we mean able to
6065 be manipulated in non-trivial ways. In particular, this means all
6066 the arithmetic is supported.
6068 Currently, TImode is not valid as the HP 64-bit runtime documentation
6069 doesn't specify the alignment and calling conventions for this type.
6070 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6071 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6073 static bool
6074 pa_scalar_mode_supported_p (enum machine_mode mode)
6076 int precision = GET_MODE_PRECISION (mode);
6078 switch (GET_MODE_CLASS (mode))
6080 case MODE_PARTIAL_INT:
6081 case MODE_INT:
6082 if (precision == CHAR_TYPE_SIZE)
6083 return true;
6084 if (precision == SHORT_TYPE_SIZE)
6085 return true;
6086 if (precision == INT_TYPE_SIZE)
6087 return true;
6088 if (precision == LONG_TYPE_SIZE)
6089 return true;
6090 if (precision == LONG_LONG_TYPE_SIZE)
6091 return true;
6092 return false;
6094 case MODE_FLOAT:
6095 if (precision == FLOAT_TYPE_SIZE)
6096 return true;
6097 if (precision == DOUBLE_TYPE_SIZE)
6098 return true;
6099 if (precision == LONG_DOUBLE_TYPE_SIZE)
6100 return true;
6101 return false;
6103 case MODE_DECIMAL_FLOAT:
6104 return false;
6106 default:
6107 gcc_unreachable ();
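   /* For example, on the 32-bit runtime LONG_LONG_TYPE_SIZE is 64, so
      DImode (precision 64) is accepted above, while TImode (precision
      128) matches no C type size and is rejected, as the header
      comment explains.  */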
6111 /* This routine handles all the normal conditional branch sequences we
6112 might need to generate. It handles compare immediate vs compare
6113 register, nullification of delay slots, varying length branches,
6114 negated branches, and all combinations of the above. It returns the
6115 output appropriate to emit the branch corresponding to all given
6116 parameters. */
6118 const char *
6119 output_cbranch (rtx *operands, int negated, rtx insn)
6121 static char buf[100];
6122 int useskip = 0;
6123 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6124 int length = get_attr_length (insn);
6125 int xdelay;
6127 /* A conditional branch to the following instruction (e.g. the delay slot)
6128 is asking for a disaster. This can happen when not optimizing and
6129 when jump optimization fails.
6131 While it is usually safe to emit nothing, this can fail if the
6132 preceding instruction is a nullified branch with an empty delay
6133 slot and the same branch target as this branch. We could check
6134 for this but jump optimization should eliminate nop jumps. It
6135 is always safe to emit a nop. */
6136 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6137 return "nop";
6139 /* The doubleword form of the cmpib instruction doesn't have the LEU
6140 and GTU conditions while the cmpb instruction does. Since we accept
6141 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6142 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6143 operands[2] = gen_rtx_REG (DImode, 0);
6144 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6145 operands[1] = gen_rtx_REG (DImode, 0);
6147 /* If this is a long branch with its delay slot unfilled, set `nullify'
6148 as it can nullify the delay slot and save a nop. */
6149 if (length == 8 && dbr_sequence_length () == 0)
6150 nullify = 1;
6152 /* If this is a short forward conditional branch which did not get
6153 its delay slot filled, the delay slot can still be nullified. */
6154 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6155 nullify = forward_branch_p (insn);
6157 /* A forward branch over a single nullified insn can be done with a
6158 comclr instruction. This avoids a single cycle penalty due to
6159 mis-predicted branch if we fall through (branch not taken). */
6160 if (length == 4
6161 && next_real_insn (insn) != 0
6162 && get_attr_length (next_real_insn (insn)) == 4
6163 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6164 && nullify)
6165 useskip = 1;
6167 switch (length)
6169 /* All short conditional branches except backwards with an unfilled
6170 delay slot. */
6171 case 4:
6172 if (useskip)
6173 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6174 else
6175 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6176 if (GET_MODE (operands[1]) == DImode)
6177 strcat (buf, "*");
6178 if (negated)
6179 strcat (buf, "%B3");
6180 else
6181 strcat (buf, "%S3");
6182 if (useskip)
6183 strcat (buf, " %2,%r1,%%r0");
6184 else if (nullify)
6185 strcat (buf, ",n %2,%r1,%0");
6186 else
6187 strcat (buf, " %2,%r1,%0");
6188 break;
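      /* Illustration: with nullify set and negated clear, the template
	 built above comes out as "cmpb,<,n %2,%r1,%0" -- i.e. something
	 like "cmpb,<,n %r25,%r26,L$0002" after operand substitution on
	 PA 2.0; the {com...|cmp...} braces pick the PA 1.x or PA 2.0
	 mnemonic when the insn is output.  */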
6190 /* All long conditionals. Note a short backward branch with an
6191 unfilled delay slot is treated just like a long backward branch
6192 with an unfilled delay slot. */
6193 case 8:
6194 /* Handle weird backwards branch with a filled delay slot
6195 which is nullified. */
6196 if (dbr_sequence_length () != 0
6197 && ! forward_branch_p (insn)
6198 && nullify)
6200 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6201 if (GET_MODE (operands[1]) == DImode)
6202 strcat (buf, "*");
6203 if (negated)
6204 strcat (buf, "%S3");
6205 else
6206 strcat (buf, "%B3");
6207 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6209 /* Handle short backwards branch with an unfilled delay slot.
6210 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6211 taken and untaken branches. */
6212 else if (dbr_sequence_length () == 0
6213 && ! forward_branch_p (insn)
6214 && INSN_ADDRESSES_SET_P ()
6215 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6216 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6218 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6219 if (GET_MODE (operands[1]) == DImode)
6220 strcat (buf, "*");
6221 if (negated)
6222 strcat (buf, "%B3 %2,%r1,%0%#");
6223 else
6224 strcat (buf, "%S3 %2,%r1,%0%#");
6226 else
6228 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6229 if (GET_MODE (operands[1]) == DImode)
6230 strcat (buf, "*");
6231 if (negated)
6232 strcat (buf, "%S3");
6233 else
6234 strcat (buf, "%B3");
6235 if (nullify)
6236 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6237 else
6238 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6240 break;
6242 default:
6243 /* The reversed conditional branch must branch over one additional
6244 instruction if the delay slot is filled and needs to be extracted
6245 by output_lbranch. If the delay slot is empty or this is a
6246 nullified forward branch, the instruction after the reversed
6247 condition branch must be nullified. */
6248 if (dbr_sequence_length () == 0
6249 || (nullify && forward_branch_p (insn)))
6251 nullify = 1;
6252 xdelay = 0;
6253 operands[4] = GEN_INT (length);
6255 else
6257 xdelay = 1;
6258 operands[4] = GEN_INT (length + 4);
6261 /* Create a reversed conditional branch which branches around
6262 the following insns. */
6263 if (GET_MODE (operands[1]) != DImode)
6265 if (nullify)
6267 if (negated)
6268 strcpy (buf,
6269 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6270 else
6271 strcpy (buf,
6272 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6274 else
6276 if (negated)
6277 strcpy (buf,
6278 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6279 else
6280 strcpy (buf,
6281 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6284 else
6286 if (nullify)
6288 if (negated)
6289 strcpy (buf,
6290 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6291 else
6292 strcpy (buf,
6293 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6295 else
6297 if (negated)
6298 strcpy (buf,
6299 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6300 else
6301 strcpy (buf,
6302 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6306 output_asm_insn (buf, operands);
6307 return output_lbranch (operands[0], insn, xdelay);
6309 return buf;
6312 /* This routine handles output of long unconditional branches that
6313 exceed the maximum range of a simple branch instruction. Since
6314 we don't have a register available for the branch, we save register
6315 %r1 in the frame marker, load the branch destination DEST into %r1,
6316 execute the branch, and restore %r1 in the delay slot of the branch.
6318 Since long branches may have an insn in the delay slot and the
6319 delay slot is used to restore %r1, we in general need to extract
6320 this insn and execute it before the branch. However, to facilitate
6321 use of this function by conditional branches, we also provide an
6322 option to not extract the delay insn so that it will be emitted
6323 after the long branch. So, if there is an insn in the delay slot,
6324 it is extracted if XDELAY is nonzero.
6326 The lengths of the various long-branch sequences are 20, 16 and 24
6327 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6329 const char *
6330 output_lbranch (rtx dest, rtx insn, int xdelay)
6332 rtx xoperands[2];
6334 xoperands[0] = dest;
6336 /* First, free up the delay slot. */
6337 if (xdelay && dbr_sequence_length () != 0)
6339 /* We can't handle a jump in the delay slot. */
6340 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6342 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6343 optimize, 0, NULL);
6345 /* Now delete the delay insn. */
6346 SET_INSN_DELETED (NEXT_INSN (insn));
6349 /* Output an insn to save %r1. The runtime documentation doesn't
6350 specify whether the "Clean Up" slot in the caller's frame can
6351 be clobbered by the callee. It isn't copied by HP's builtin
6352 alloca, so this suggests that it can be clobbered if necessary.
6353 The "Static Link" location is copied by HP builtin alloca, so
6354 we avoid using it. Using the cleanup slot might be a problem
6355 if we have to interoperate with languages that pass cleanup
6356 information. However, it should be possible to handle these
6357 situations with GCC's asm feature.
6359 The "Current RP" slot is reserved for the called procedure, so
6360 we try to use it when we don't have a frame of our own. It's
6361 rather unlikely that we won't have a frame when we need to emit
6362 a very long branch.
6364 Really the way to go long term is a register scavenger; go to
6365 the target of the jump and find a register which we can use
6366 as a scratch to hold the value in %r1. Then, we wouldn't have
6367 to free up the delay slot or clobber a slot that may be needed
6368 for other purposes. */
6369 if (TARGET_64BIT)
6371 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6372 /* Use the return pointer slot in the frame marker. */
6373 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6374 else
6375 /* Use the slot at -40 in the frame marker since HP builtin
6376 alloca doesn't copy it. */
6377 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6379 else
6381 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6382 /* Use the return pointer slot in the frame marker. */
6383 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6384 else
6385 /* Use the "Clean Up" slot in the frame marker. In GCC,
6386 the only other use of this location is for copying a
6387 floating point double argument from a floating-point
6388 register to two general registers. The copy is done
6389 as an "atomic" operation when outputting a call, so it
6390 won't interfere with our using the location here. */
6391 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6394 if (TARGET_PORTABLE_RUNTIME)
6396 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6397 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6398 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6400 else if (flag_pic)
6402 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6403 if (TARGET_SOM || !TARGET_GAS)
6405 xoperands[1] = gen_label_rtx ();
6406 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6407 targetm.asm_out.internal_label (asm_out_file, "L",
6408 CODE_LABEL_NUMBER (xoperands[1]));
6409 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6411 else
6413 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6414 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6416 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6418 else
6419 /* Now output a very long branch to the original target. */
6420 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6422 /* Now restore the value of %r1 in the delay slot. */
6423 if (TARGET_64BIT)
6425 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6426 return "ldd -16(%%r30),%%r1";
6427 else
6428 return "ldd -40(%%r30),%%r1";
6430 else
6432 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6433 return "ldw -20(%%r30),%%r1";
6434 else
6435 return "ldw -12(%%r30),%%r1";
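   /* Putting the cases above together, the non-PIC long-branch
      sequence for a function with a frame is approximately:

	   stw %r1,-12(%r30)
	   ldil L'target,%r1
	   be R'target(%sr4,%r1)
	   ldw -12(%r30),%r1

      i.e. the 16-byte case mentioned in the header comment, with the
      restore of %r1 sitting in the delay slot of the branch.  */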
6439 /* This routine handles all the branch-on-bit conditional branch sequences we
6440 might need to generate. It handles nullification of delay slots,
6441 varying length branches, negated branches and all combinations of the
6442 above. It returns the appropriate output template to emit the branch. */
6444 const char *
6445 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6447 static char buf[100];
6448 int useskip = 0;
6449 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6450 int length = get_attr_length (insn);
6451 int xdelay;
6453 /* A conditional branch to the following instruction (e.g. the delay slot) is
6454 asking for a disaster. I do not think this can happen as this pattern
6455 is only used when optimizing; jump optimization should eliminate the
6456 jump. But be prepared just in case. */
6458 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6459 return "nop";
6461 /* If this is a long branch with its delay slot unfilled, set `nullify'
6462 as it can nullify the delay slot and save a nop. */
6463 if (length == 8 && dbr_sequence_length () == 0)
6464 nullify = 1;
6466 /* If this is a short forward conditional branch which did not get
6467 its delay slot filled, the delay slot can still be nullified. */
6468 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6469 nullify = forward_branch_p (insn);
6471 /* A forward branch over a single nullified insn can be done with an
6472 extrs instruction. This avoids a single cycle penalty due to
6473 mis-predicted branch if we fall through (branch not taken). */
6475 if (length == 4
6476 && next_real_insn (insn) != 0
6477 && get_attr_length (next_real_insn (insn)) == 4
6478 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6479 && nullify)
6480 useskip = 1;
6482 switch (length)
6485 /* All short conditional branches except backwards with an unfilled
6486 delay slot. */
6487 case 4:
6488 if (useskip)
6489 strcpy (buf, "{extrs,|extrw,s,}");
6490 else
6491 strcpy (buf, "bb,");
6492 if (useskip && GET_MODE (operands[0]) == DImode)
6493 strcpy (buf, "extrd,s,*");
6494 else if (GET_MODE (operands[0]) == DImode)
6495 strcpy (buf, "bb,*");
6496 if ((which == 0 && negated)
6497 || (which == 1 && ! negated))
6498 strcat (buf, ">=");
6499 else
6500 strcat (buf, "<");
6501 if (useskip)
6502 strcat (buf, " %0,%1,1,%%r0");
6503 else if (nullify && negated)
6504 strcat (buf, ",n %0,%1,%3");
6505 else if (nullify && ! negated)
6506 strcat (buf, ",n %0,%1,%2");
6507 else if (! nullify && negated)
6508 strcat (buf, " %0,%1,%3");
6509 else if (! nullify && ! negated)
6510 strcat (buf, " %0,%1,%2");
6511 break;
6513 /* All long conditionals. Note a short backward branch with an
6514 unfilled delay slot is treated just like a long backward branch
6515 with an unfilled delay slot. */
6516 case 8:
6517 /* Handle weird backwards branch with a filled delay slot
6518 which is nullified. */
6519 if (dbr_sequence_length () != 0
6520 && ! forward_branch_p (insn)
6521 && nullify)
6523 strcpy (buf, "bb,");
6524 if (GET_MODE (operands[0]) == DImode)
6525 strcat (buf, "*");
6526 if ((which == 0 && negated)
6527 || (which == 1 && ! negated))
6528 strcat (buf, "<");
6529 else
6530 strcat (buf, ">=");
6531 if (negated)
6532 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6533 else
6534 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6536 /* Handle short backwards branch with an unfilled delay slot.
6537 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6538 taken and untaken branches. */
6539 else if (dbr_sequence_length () == 0
6540 && ! forward_branch_p (insn)
6541 && INSN_ADDRESSES_SET_P ()
6542 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6543 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6545 strcpy (buf, "bb,");
6546 if (GET_MODE (operands[0]) == DImode)
6547 strcat (buf, "*");
6548 if ((which == 0 && negated)
6549 || (which == 1 && ! negated))
6550 strcat (buf, ">=");
6551 else
6552 strcat (buf, "<");
6553 if (negated)
6554 strcat (buf, " %0,%1,%3%#");
6555 else
6556 strcat (buf, " %0,%1,%2%#");
6558 else
6560 if (GET_MODE (operands[0]) == DImode)
6561 strcpy (buf, "extrd,s,*");
6562 else
6563 strcpy (buf, "{extrs,|extrw,s,}");
6564 if ((which == 0 && negated)
6565 || (which == 1 && ! negated))
6566 strcat (buf, "<");
6567 else
6568 strcat (buf, ">=");
6569 if (nullify && negated)
6570 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6571 else if (nullify && ! negated)
6572 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6573 else if (negated)
6574 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6575 else
6576 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6578 break;
6580 default:
6581 /* The reversed conditional branch must branch over one additional
6582 instruction if the delay slot is filled and needs to be extracted
6583 by output_lbranch. If the delay slot is empty or this is a
6584 nullified forward branch, the instruction after the reversed
6585 condition branch must be nullified. */
6586 if (dbr_sequence_length () == 0
6587 || (nullify && forward_branch_p (insn)))
6589 nullify = 1;
6590 xdelay = 0;
6591 operands[4] = GEN_INT (length);
6593 else
6595 xdelay = 1;
6596 operands[4] = GEN_INT (length + 4);
6599 if (GET_MODE (operands[0]) == DImode)
6600 strcpy (buf, "bb,*");
6601 else
6602 strcpy (buf, "bb,");
6603 if ((which == 0 && negated)
6604 || (which == 1 && !negated))
6605 strcat (buf, "<");
6606 else
6607 strcat (buf, ">=");
6608 if (nullify)
6609 strcat (buf, ",n %0,%1,.+%4");
6610 else
6611 strcat (buf, " %0,%1,.+%4");
6612 output_asm_insn (buf, operands);
6613 return output_lbranch (negated ? operands[3] : operands[2],
6614 insn, xdelay);
6616 return buf;
6619 /* This routine handles all the branch-on-variable-bit conditional branch
6620 sequences we might need to generate. It handles nullification of delay
6621 slots, varying length branches, negated branches and all combinations
6622 of the above. It returns the appropriate output template to emit the
6623 branch. */
6625 const char *
6626 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6628 static char buf[100];
6629 int useskip = 0;
6630 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6631 int length = get_attr_length (insn);
6632 int xdelay;
6634 /* A conditional branch to the following instruction (e.g. the delay slot) is
6635 asking for a disaster. I do not think this can happen as this pattern
6636 is only used when optimizing; jump optimization should eliminate the
6637 jump. But be prepared just in case. */
6639 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6640 return "nop";
6642 /* If this is a long branch with its delay slot unfilled, set `nullify'
6643 as it can nullify the delay slot and save a nop. */
6644 if (length == 8 && dbr_sequence_length () == 0)
6645 nullify = 1;
6647 /* If this is a short forward conditional branch which did not get
6648 its delay slot filled, the delay slot can still be nullified. */
6649 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6650 nullify = forward_branch_p (insn);
6652 /* A forward branch over a single nullified insn can be done with an
6653 extrs instruction. This avoids a single cycle penalty due to
6654 mis-predicted branch if we fall through (branch not taken). */
6656 if (length == 4
6657 && next_real_insn (insn) != 0
6658 && get_attr_length (next_real_insn (insn)) == 4
6659 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6660 && nullify)
6661 useskip = 1;
6663 switch (length)
6666 /* All short conditional branches except backwards with an unfilled
6667 delay slot. */
6668 case 4:
6669 if (useskip)
6670 strcpy (buf, "{vextrs,|extrw,s,}");
6671 else
6672 strcpy (buf, "{bvb,|bb,}");
6673 if (useskip && GET_MODE (operands[0]) == DImode)
6674 strcpy (buf, "extrd,s,*");
6675 else if (GET_MODE (operands[0]) == DImode)
6676 strcpy (buf, "bb,*");
6677 if ((which == 0 && negated)
6678 || (which == 1 && ! negated))
6679 strcat (buf, ">=");
6680 else
6681 strcat (buf, "<");
6682 if (useskip)
6683 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6684 else if (nullify && negated)
6685 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6686 else if (nullify && ! negated)
6687 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6688 else if (! nullify && negated)
6689 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
6690 else if (! nullify && ! negated)
6691 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6692 break;
6694 /* All long conditionals. Note a short backward branch with an
6695 unfilled delay slot is treated just like a long backward branch
6696 with an unfilled delay slot. */
6697 case 8:
6698 /* Handle weird backwards branch with a filled delay slot
6699 which is nullified. */
6700 if (dbr_sequence_length () != 0
6701 && ! forward_branch_p (insn)
6702 && nullify)
6704 strcpy (buf, "{bvb,|bb,}");
6705 if (GET_MODE (operands[0]) == DImode)
6706 strcat (buf, "*");
6707 if ((which == 0 && negated)
6708 || (which == 1 && ! negated))
6709 strcat (buf, "<");
6710 else
6711 strcat (buf, ">=");
6712 if (negated)
6713 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6714 else
6715 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6717 /* Handle short backwards branch with an unfilled delay slot.
6718 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6719 taken and untaken branches. */
6720 else if (dbr_sequence_length () == 0
6721 && ! forward_branch_p (insn)
6722 && INSN_ADDRESSES_SET_P ()
6723 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6724 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6726 strcpy (buf, "{bvb,|bb,}");
6727 if (GET_MODE (operands[0]) == DImode)
6728 strcat (buf, "*");
6729 if ((which == 0 && negated)
6730 || (which == 1 && ! negated))
6731 strcat (buf, ">=");
6732 else
6733 strcat (buf, "<");
6734 if (negated)
6735 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6736 else
6737 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6739 else
6741 strcpy (buf, "{vextrs,|extrw,s,}");
6742 if (GET_MODE (operands[0]) == DImode)
6743 strcpy (buf, "extrd,s,*");
6744 if ((which == 0 && negated)
6745 || (which == 1 && ! negated))
6746 strcat (buf, "<");
6747 else
6748 strcat (buf, ">=");
6749 if (nullify && negated)
6750 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6751 else if (nullify && ! negated)
6752 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6753 else if (negated)
6754 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6755 else
6756 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6758 break;
6760 default:
6761 /* The reversed conditional branch must branch over one additional
6762 instruction if the delay slot is filled and needs to be extracted
6763 by output_lbranch. If the delay slot is empty or this is a
6764 nullified forward branch, the instruction after the reversed
6765 condition branch must be nullified. */
6766 if (dbr_sequence_length () == 0
6767 || (nullify && forward_branch_p (insn)))
6769 nullify = 1;
6770 xdelay = 0;
6771 operands[4] = GEN_INT (length);
6773 else
6775 xdelay = 1;
6776 operands[4] = GEN_INT (length + 4);
6779 if (GET_MODE (operands[0]) == DImode)
6780 strcpy (buf, "bb,*");
6781 else
6782 strcpy (buf, "{bvb,|bb,}");
6783 if ((which == 0 && negated)
6784 || (which == 1 && !negated))
6785 strcat (buf, "<");
6786 else
6787 strcat (buf, ">=");
6788 if (nullify)
6789 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6790 else
6791 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6792 output_asm_insn (buf, operands);
6793 return output_lbranch (negated ? operands[3] : operands[2],
6794 insn, xdelay);
6796 return buf;
6799 /* Return the output template for emitting a dbra type insn.
6801 Note it may perform some output operations on its own before
6802 returning the final output string. */
6803 const char *
6804 output_dbra (rtx *operands, rtx insn, int which_alternative)
6806 int length = get_attr_length (insn);
6808 /* A conditional branch to the following instruction (e.g. the delay slot) is
6809 asking for a disaster. Be prepared! */
6811 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6813 if (which_alternative == 0)
6814 return "ldo %1(%0),%0";
6815 else if (which_alternative == 1)
6817 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6818 output_asm_insn ("ldw -16(%%r30),%4", operands);
6819 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6820 return "{fldws|fldw} -16(%%r30),%0";
6822 else
6824 output_asm_insn ("ldw %0,%4", operands);
6825 return "ldo %1(%4),%4\n\tstw %4,%0";
6829 if (which_alternative == 0)
6831 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6832 int xdelay;
6834 /* If this is a long branch with its delay slot unfilled, set `nullify'
6835 as it can nullify the delay slot and save a nop. */
6836 if (length == 8 && dbr_sequence_length () == 0)
6837 nullify = 1;
6839 /* If this is a short forward conditional branch which did not get
6840 its delay slot filled, the delay slot can still be nullified. */
6841 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6842 nullify = forward_branch_p (insn);
6844 switch (length)
6846 case 4:
6847 if (nullify)
6848 return "addib,%C2,n %1,%0,%3";
6849 else
6850 return "addib,%C2 %1,%0,%3";
6852 case 8:
6853 /* Handle weird backwards branch with a filled delay slot
6854 which is nullified. */
6855 if (dbr_sequence_length () != 0
6856 && ! forward_branch_p (insn)
6857 && nullify)
6858 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6859 /* Handle short backwards branch with an unfilled delay slot.
6860 Using a addb;nop rather than addi;bl saves 1 cycle for both
6861 taken and untaken branches. */
6862 else if (dbr_sequence_length () == 0
6863 && ! forward_branch_p (insn)
6864 && INSN_ADDRESSES_SET_P ()
6865 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6866 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6867 return "addib,%C2 %1,%0,%3%#";
6869 /* Handle normal cases. */
6870 if (nullify)
6871 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6872 else
6873 return "addi,%N2 %1,%0,%0\n\tb %3";
6875 default:
6876 /* The reversed conditional branch must branch over one additional
6877 instruction if the delay slot is filled and needs to be extracted
6878 by output_lbranch. If the delay slot is empty or this is a
6879 nullified forward branch, the instruction after the reversed
6880 condition branch must be nullified. */
6881 if (dbr_sequence_length () == 0
6882 || (nullify && forward_branch_p (insn)))
6884 nullify = 1;
6885 xdelay = 0;
6886 operands[4] = GEN_INT (length);
6888 else
6890 xdelay = 1;
6891 operands[4] = GEN_INT (length + 4);
6894 if (nullify)
6895 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6896 else
6897 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6899 return output_lbranch (operands[3], insn, xdelay);
6903 /* Deal with gross reload from FP register case. */
6904 else if (which_alternative == 1)
6906 /* Move loop counter from FP register to MEM then into a GR,
6907 increment the GR, store the GR into MEM, and finally reload
6908 the FP register from MEM from within the branch's delay slot. */
6909 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6910 operands);
6911 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6912 if (length == 24)
6913 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6914 else if (length == 28)
6915 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6916 else
6918 operands[5] = GEN_INT (length - 16);
6919 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6920 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6921 return output_lbranch (operands[3], insn, 0);
6924 /* Deal with gross reload from memory case. */
6925 else
6927 /* Reload loop counter from memory, the store back to memory
6928 happens in the branch's delay slot. */
6929 output_asm_insn ("ldw %0,%4", operands);
6930 if (length == 12)
6931 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6932 else if (length == 16)
6933 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6934 else
6936 operands[5] = GEN_INT (length - 4);
6937 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6938 return output_lbranch (operands[3], insn, 0);
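   /* In the common first alternative this yields a classic
      counted-loop branch such as "addib,> -1,%r3,L$0005" -- add the
      increment to the counter and branch while the condition holds
      (operands shown here are illustrative).  */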
6943 /* Return the output template for emitting a movb type insn.
6945 Note it may perform some output operations on its own before
6946 returning the final output string. */
6947 const char *
6948 output_movb (rtx *operands, rtx insn, int which_alternative,
6949 int reverse_comparison)
6951 int length = get_attr_length (insn);
6953 /* A conditional branch to the following instruction (e.g. the delay slot) is
6954 asking for a disaster. Be prepared! */
6956 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6958 if (which_alternative == 0)
6959 return "copy %1,%0";
6960 else if (which_alternative == 1)
6962 output_asm_insn ("stw %1,-16(%%r30)", operands);
6963 return "{fldws|fldw} -16(%%r30),%0";
6965 else if (which_alternative == 2)
6966 return "stw %1,%0";
6967 else
6968 return "mtsar %r1";
6971 /* Support the second variant. */
6972 if (reverse_comparison)
6973 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6975 if (which_alternative == 0)
6977 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6978 int xdelay;
6980 /* If this is a long branch with its delay slot unfilled, set `nullify'
6981 as it can nullify the delay slot and save a nop. */
6982 if (length == 8 && dbr_sequence_length () == 0)
6983 nullify = 1;
6985 /* If this is a short forward conditional branch which did not get
6986 its delay slot filled, the delay slot can still be nullified. */
6987 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6988 nullify = forward_branch_p (insn);
6990 switch (length)
6992 case 4:
6993 if (nullify)
6994 return "movb,%C2,n %1,%0,%3";
6995 else
6996 return "movb,%C2 %1,%0,%3";
6998 case 8:
6999 /* Handle weird backwards branch with a filled delay slot
7000 which is nullified. */
7001 if (dbr_sequence_length () != 0
7002 && ! forward_branch_p (insn)
7003 && nullify)
7004 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7006 /* Handle short backwards branch with an unfilled delay slot.
7007 Using a movb;nop rather than or;bl saves 1 cycle for both
7008 taken and untaken branches. */
7009 else if (dbr_sequence_length () == 0
7010 && ! forward_branch_p (insn)
7011 && INSN_ADDRESSES_SET_P ()
7012 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7013 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7014 return "movb,%C2 %1,%0,%3%#";
7015 /* Handle normal cases. */
7016 if (nullify)
7017 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7018 else
7019 return "or,%N2 %1,%%r0,%0\n\tb %3";
7021 default:
7022 /* The reversed conditional branch must branch over one additional
7023 instruction if the delay slot is filled and needs to be extracted
7024 by output_lbranch. If the delay slot is empty or this is a
7025 nullified forward branch, the instruction after the reversed
7026 condition branch must be nullified. */
7027 if (dbr_sequence_length () == 0
7028 || (nullify && forward_branch_p (insn)))
7030 nullify = 1;
7031 xdelay = 0;
7032 operands[4] = GEN_INT (length);
7034 else
7036 xdelay = 1;
7037 operands[4] = GEN_INT (length + 4);
7040 if (nullify)
7041 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7042 else
7043 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7045 return output_lbranch (operands[3], insn, xdelay);
7048 /* Deal with gross reload for FP destination register case. */
7049 else if (which_alternative == 1)
7051 /* Move source register to MEM, perform the branch test, then
7052 finally load the FP register from MEM from within the branch's
7053 delay slot. */
7054 output_asm_insn ("stw %1,-16(%%r30)", operands);
7055 if (length == 12)
7056 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7057 else if (length == 16)
7058 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7059 else
7061 operands[4] = GEN_INT (length - 4);
7062 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7063 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7064 return output_lbranch (operands[3], insn, 0);
7067 /* Deal with gross reload from memory case. */
7068 else if (which_alternative == 2)
7070 /* Reload loop counter from memory, the store back to memory
7071 happens in the branch's delay slot. */
7072 if (length == 8)
7073 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7074 else if (length == 12)
7075 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7076 else
7078 operands[4] = GEN_INT (length);
7079 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7080 operands);
7081 return output_lbranch (operands[3], insn, 0);
7084 /* Handle SAR as a destination. */
7085 else
7087 if (length == 8)
7088 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7089 else if (length == 12)
7090 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7091 else
7093 operands[4] = GEN_INT (length);
7094 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7095 operands);
7096 return output_lbranch (operands[3], insn, 0);
7101 /* Copy any FP arguments in INSN into integer registers. */
7102 static void
7103 copy_fp_args (rtx insn)
7105 rtx link;
7106 rtx xoperands[2];
7108 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7110 int arg_mode, regno;
7111 rtx use = XEXP (link, 0);
7113 if (! (GET_CODE (use) == USE
7114 && GET_CODE (XEXP (use, 0)) == REG
7115 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7116 continue;
7118 arg_mode = GET_MODE (XEXP (use, 0));
7119 regno = REGNO (XEXP (use, 0));
7121 /* Is it a floating point register? */
7122 if (regno >= 32 && regno <= 39)
7124 /* Copy the FP register into an integer register via memory. */
7125 if (arg_mode == SFmode)
7127 xoperands[0] = XEXP (use, 0);
7128 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7129 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7130 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7132 else
7134 xoperands[0] = XEXP (use, 0);
7135 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7136 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7137 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7138 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7144 /* Compute length of the FP argument copy sequence for INSN. */
7145 static int
7146 length_fp_args (rtx insn)
7148 int length = 0;
7149 rtx link;
7151 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7153 int arg_mode, regno;
7154 rtx use = XEXP (link, 0);
7156 if (! (GET_CODE (use) == USE
7157 && GET_CODE (XEXP (use, 0)) == REG
7158 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7159 continue;
7161 arg_mode = GET_MODE (XEXP (use, 0));
7162 regno = REGNO (XEXP (use, 0));
7164 /* Is it a floating point register? */
7165 if (regno >= 32 && regno <= 39)
7167 if (arg_mode == SFmode)
7168 length += 8;
7169 else
7170 length += 12;
7174 return length;
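   /* These lengths mirror copy_fp_args above: an SFmode argument needs
      two insns (fstw + ldw, 8 bytes) and anything else three
      (fstd + two ldws, 12 bytes).  */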
7177 /* Return the attribute length for the millicode call instruction INSN.
7178 The length must match the code generated by output_millicode_call.
7179 We include the delay slot in the returned length as it is better to
7180 overestimate the length than to underestimate it. */
7182 int
7183 attr_length_millicode_call (rtx insn)
7185 unsigned long distance = -1;
7186 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7188 if (INSN_ADDRESSES_SET_P ())
7190 distance = (total + insn_current_reference_address (insn));
7191 if (distance < total)
7192 distance = -1;
7195 if (TARGET_64BIT)
7197 if (!TARGET_LONG_CALLS && distance < 7600000)
7198 return 8;
7200 return 20;
7202 else if (TARGET_PORTABLE_RUNTIME)
7203 return 24;
7204 else
7206 if (!TARGET_LONG_CALLS && distance < 240000)
7207 return 8;
7209 if (TARGET_LONG_ABS_CALL && !flag_pic)
7210 return 12;
7212 return 24;
7216 /* INSN is a function call. It may have an unconditional jump
7217 in its delay slot.
7219 CALL_DEST is the routine we are calling. */
7221 const char *
7222 output_millicode_call (rtx insn, rtx call_dest)
7224 int attr_length = get_attr_length (insn);
7225 int seq_length = dbr_sequence_length ();
7226 int distance;
7227 rtx seq_insn;
7228 rtx xoperands[3];
7230 xoperands[0] = call_dest;
7231 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7233 /* Handle the common case where we are sure that the branch will
7234 reach the beginning of the $CODE$ subspace. The within-reach
7235 form of the $$sh_func_adrs call has a length of 28. Because
7236 it has an attribute type of multi, it never has a nonzero
7237 sequence length. The length of the $$sh_func_adrs call is the
7238 same as certain out-of-reach PIC calls to other routines. */
7239 if (!TARGET_LONG_CALLS
7240 && ((seq_length == 0
7241 && (attr_length == 12
7242 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7243 || (seq_length != 0 && attr_length == 8)))
7245 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7247 else
7249 if (TARGET_64BIT)
7251 /* It might seem that one insn could be saved by accessing
7252 the millicode function using the linkage table. However,
7253 this doesn't work in shared libraries and other dynamically
7254 loaded objects. Using a pc-relative sequence also avoids
7255 problems related to the implicit use of the gp register. */
7256 output_asm_insn ("b,l .+8,%%r1", xoperands);
7258 if (TARGET_GAS)
7260 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7261 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7263 else
7265 xoperands[1] = gen_label_rtx ();
7266 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7267 targetm.asm_out.internal_label (asm_out_file, "L",
7268 CODE_LABEL_NUMBER (xoperands[1]));
7269 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7272 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7274 else if (TARGET_PORTABLE_RUNTIME)
7276 /* Pure portable runtime doesn't allow be/ble; we also don't
7277 have PIC support in the assembler/linker, so this sequence
7278 is needed. */
7280 /* Get the address of our target into %r1. */
7281 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7282 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7284 /* Get our return address into %r31. */
7285 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7286 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7288 /* Jump to our target address in %r1. */
7289 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7291 else if (!flag_pic)
7293 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7294 if (TARGET_PA_20)
7295 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7296 else
7297 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7299 else
7301 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7302 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7304 if (TARGET_SOM || !TARGET_GAS)
7306 /* The HP assembler can generate relocations for the
7307 difference of two symbols. GAS can do this for a
7308 millicode symbol but not an arbitrary external
7309 symbol when generating SOM output. */
7310 xoperands[1] = gen_label_rtx ();
7311 targetm.asm_out.internal_label (asm_out_file, "L",
7312 CODE_LABEL_NUMBER (xoperands[1]));
7313 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7314 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7316 else
7318 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7319 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7320 xoperands);
7323 /* Jump to our target address in %r1. */
7324 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7328 if (seq_length == 0)
7329 output_asm_insn ("nop", xoperands);
7331 /* We are done if there isn't a jump in the delay slot. */
7332 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7333 return "";
7335 /* This call has an unconditional jump in its delay slot. */
7336 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7338 /* See if the return address can be adjusted. Use the containing
7339 sequence insn's address. */
7340 if (INSN_ADDRESSES_SET_P ())
7342 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7343 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7344 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7346 if (VAL_14_BITS_P (distance))
7348 xoperands[1] = gen_label_rtx ();
7349 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7350 targetm.asm_out.internal_label (asm_out_file, "L",
7351 CODE_LABEL_NUMBER (xoperands[1]));
7353 else
7354 /* ??? This branch may not reach its target. */
7355 output_asm_insn ("nop\n\tb,n %0", xoperands);
7357 else
7358 /* ??? This branch may not reach its target. */
7359 output_asm_insn ("nop\n\tb,n %0", xoperands);
7361 /* Delete the jump. */
7362 SET_INSN_DELETED (NEXT_INSN (insn));
7364 return "";
7367 /* Return the attribute length of the call instruction INSN. The SIBCALL
7368 flag indicates whether INSN is a regular call or a sibling call. The
7369 length returned must be longer than the code actually generated by
7370 output_call. Since branch shortening is done before delay branch
7371 sequencing, there is no way to determine whether or not the delay
7372 slot will be filled during branch shortening. Even when the delay
7373 slot is filled, we may have to add a nop if the delay slot contains
7374 a branch that can't reach its target. Thus, we always have to include
7375 the delay slot in the length estimate. This used to be done in
7376 pa_adjust_insn_length but we do it here now as some sequences always
7377 fill the delay slot and we can save four bytes in the estimate for
7378 these sequences. */
7380 int
7381 attr_length_call (rtx insn, int sibcall)
7383 int local_call;
7384 rtx call, call_dest;
7385 tree call_decl;
7386 int length = 0;
7387 rtx pat = PATTERN (insn);
7388 unsigned long distance = -1;
7390 gcc_assert (GET_CODE (insn) == CALL_INSN);
7392 if (INSN_ADDRESSES_SET_P ())
7394 unsigned long total;
7396 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7397 distance = (total + insn_current_reference_address (insn));
7398 if (distance < total)
7399 distance = -1;
7402 gcc_assert (GET_CODE (pat) == PARALLEL);
7404 /* Get the call rtx. */
7405 call = XVECEXP (pat, 0, 0);
7406 if (GET_CODE (call) == SET)
7407 call = SET_SRC (call);
7409 gcc_assert (GET_CODE (call) == CALL);
7411 /* Determine if this is a local call. */
7412 call_dest = XEXP (XEXP (call, 0), 0);
7413 call_decl = SYMBOL_REF_DECL (call_dest);
7414 local_call = call_decl && targetm.binds_local_p (call_decl);
7416 /* pc-relative branch. */
7417 if (!TARGET_LONG_CALLS
7418 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7419 || distance < 240000))
7420 length += 8;
7422 /* 64-bit plabel sequence. */
7423 else if (TARGET_64BIT && !local_call)
7424 length += sibcall ? 28 : 24;
7426 /* non-pic long absolute branch sequence. */
7427 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7428 length += 12;
7430 /* long pc-relative branch sequence. */
7431 else if (TARGET_LONG_PIC_SDIFF_CALL
7432 || (TARGET_GAS && !TARGET_SOM
7433 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7435 length += 20;
7437 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && flag_pic)
7438 length += 8;
7441 /* 32-bit plabel sequence. */
7442 else
7444 length += 32;
7446 if (TARGET_SOM)
7447 length += length_fp_args (insn);
7449 if (flag_pic)
7450 length += 4;
7452 if (!TARGET_PA_20)
7454 if (!sibcall)
7455 length += 8;
7457 if (!TARGET_NO_SPACE_REGS && flag_pic)
7458 length += 8;
7462 return length;
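   /* Worked example: a 32-bit PIC plabel call on TARGET_SOM without
      PA 2.0 and with space registers enabled sums to 32 (base
      sequence) + length_fp_args (insn) + 4 (PIC register load)
      + 8 (return address sequence, non-sibcall) + 8 (space register
      save/restore) bytes, which is why this estimate must cover the
      worst-case sequence emitted by output_call.  */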
7465 /* INSN is a function call. It may have an unconditional jump
7466 in its delay slot.
7468 CALL_DEST is the routine we are calling. */
7470 const char *
7471 output_call (rtx insn, rtx call_dest, int sibcall)
7473 int delay_insn_deleted = 0;
7474 int delay_slot_filled = 0;
7475 int seq_length = dbr_sequence_length ();
7476 tree call_decl = SYMBOL_REF_DECL (call_dest);
7477 int local_call = call_decl && targetm.binds_local_p (call_decl);
7478 rtx xoperands[2];
7480 xoperands[0] = call_dest;
7482 /* Handle the common case where we're sure that the branch will reach
7483 the beginning of the "$CODE$" subspace. This is the beginning of
7484 the current function if we are in a named section. */
7485 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7487 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7488 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7490 else
7492 if (TARGET_64BIT && !local_call)
7494 /* ??? As far as I can tell, the HP linker doesn't support the
7495 long pc-relative sequence described in the 64-bit runtime
7496 architecture. So, we use a slightly longer indirect call. */
7497 xoperands[0] = get_deferred_plabel (call_dest);
7498 xoperands[1] = gen_label_rtx ();
7500 /* If this isn't a sibcall, we put the load of %r27 into the
7501 delay slot. We can't do this in a sibcall as we don't
7502 have a second call-clobbered scratch register available. */
7503 if (seq_length != 0
7504 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7505 && !sibcall)
7507 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7508 optimize, 0, NULL);
7510 /* Now delete the delay insn. */
7511 SET_INSN_DELETED (NEXT_INSN (insn));
7512 delay_insn_deleted = 1;
7515 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7516 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7517 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7519 if (sibcall)
7521 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7522 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7523 output_asm_insn ("bve (%%r1)", xoperands);
7525 else
7527 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7528 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7529 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7530 delay_slot_filled = 1;
7533 else
7535 int indirect_call = 0;
7537 /* Emit a long call. There are several different sequences
7538 of increasing length and complexity. In most cases,
7539 they don't allow an instruction in the delay slot. */
7540 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7541 && !TARGET_LONG_PIC_SDIFF_CALL
7542 && !(TARGET_GAS && !TARGET_SOM
7543 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7544 && !TARGET_64BIT)
7545 indirect_call = 1;
7547 if (seq_length != 0
7548 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7549 && !sibcall
7550 && (!TARGET_PA_20 || indirect_call))
7552 /* A non-jump insn in the delay slot. By definition we can
7553 emit this insn before the call (and in fact before argument
7554 relocation). */
7555 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7556 NULL);
7558 /* Now delete the delay insn. */
7559 SET_INSN_DELETED (NEXT_INSN (insn));
7560 delay_insn_deleted = 1;
7563 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7565 /* This is the best sequence for making long calls in
7566 non-pic code. Unfortunately, GNU ld doesn't provide
7567 the stub needed for external calls, and GAS's support
7568 for this with the SOM linker is buggy. It is safe
7569 to use this for local calls. */
7570 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7571 if (sibcall)
7572 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7573 else
7575 if (TARGET_PA_20)
7576 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7577 xoperands);
7578 else
7579 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7581 output_asm_insn ("copy %%r31,%%r2", xoperands);
7582 delay_slot_filled = 1;
7585 else
7587 if (TARGET_LONG_PIC_SDIFF_CALL)
7589 /* The HP assembler and linker can handle relocations
7590 for the difference of two symbols. The HP assembler
7591 recognizes the sequence as a pc-relative call and
7592 the linker provides stubs when needed. */
7593 xoperands[1] = gen_label_rtx ();
7594 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7595 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7596 targetm.asm_out.internal_label (asm_out_file, "L",
7597 CODE_LABEL_NUMBER (xoperands[1]));
7598 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7600 else if (TARGET_GAS && !TARGET_SOM
7601 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7603 /* GAS currently can't generate the relocations that
7604 are needed for the SOM linker under HP-UX using this
7605 sequence. The GNU linker doesn't generate the stubs
7606 that are needed for external calls on TARGET_ELF32
7607 with this sequence. For now, we have to use a
7608 longer plabel sequence when using GAS. */
7609 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7610 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7611 xoperands);
7612 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7613 xoperands);
7615 else
7617 /* Emit a long plabel-based call sequence. This is
7618 essentially an inline implementation of $$dyncall.
7619 We don't actually try to call $$dyncall as this is
7620 as difficult as calling the function itself. */
7621 xoperands[0] = get_deferred_plabel (call_dest);
7622 xoperands[1] = gen_label_rtx ();
7624 /* Since the call is indirect, FP arguments in registers
7625 need to be copied to the general registers. Then, the
7626 argument relocation stub will copy them back. */
7627 if (TARGET_SOM)
7628 copy_fp_args (insn);
7630 if (flag_pic)
7632 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7633 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7634 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7636 else
7638 output_asm_insn ("addil LR'%0-$global$,%%r27",
7639 xoperands);
7640 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7641 xoperands);
7644 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7645 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7646 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7647 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7649 if (!sibcall && !TARGET_PA_20)
7651 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7652 if (TARGET_NO_SPACE_REGS)
7653 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7654 else
7655 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7659 if (TARGET_PA_20)
7661 if (sibcall)
7662 output_asm_insn ("bve (%%r1)", xoperands);
7663 else
7665 if (indirect_call)
7667 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7668 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7669 delay_slot_filled = 1;
7671 else
7672 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7675 else
7677 if (!TARGET_NO_SPACE_REGS && flag_pic)
7678 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7679 xoperands);
7681 if (sibcall)
7683 if (TARGET_NO_SPACE_REGS || !flag_pic)
7684 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7685 else
7686 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7688 else
7690 if (TARGET_NO_SPACE_REGS || !flag_pic)
7691 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7692 else
7693 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7695 if (indirect_call)
7696 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7697 else
7698 output_asm_insn ("copy %%r31,%%r2", xoperands);
7699 delay_slot_filled = 1;
7706 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7707 output_asm_insn ("nop", xoperands);
7709 /* We are done if there isn't a jump in the delay slot. */
7710 if (seq_length == 0
7711 || delay_insn_deleted
7712 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7713 return "";
7715 /* A sibcall should never have a branch in the delay slot. */
7716 gcc_assert (!sibcall);
7718 /* This call has an unconditional jump in its delay slot. */
7719 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7721 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7723 /* See if the return address can be adjusted. Use the containing
7724 sequence insn's address. */
7725 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7726 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7727 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7729 if (VAL_14_BITS_P (distance))
7731 xoperands[1] = gen_label_rtx ();
7732 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7733 targetm.asm_out.internal_label (asm_out_file, "L",
7734 CODE_LABEL_NUMBER (xoperands[1]));
7736 else
7737 output_asm_insn ("nop\n\tb,n %0", xoperands);
7739 else
7740 output_asm_insn ("b,n %0", xoperands);
7742 /* Delete the jump. */
7743 SET_INSN_DELETED (NEXT_INSN (insn));
7745 return "";
7748 /* Return the attribute length of the indirect call instruction INSN.
7749 The length must match the code generated by output_indirect_call.
7750 The returned length includes the delay slot. Currently, the delay
7751 slot of an indirect call sequence is not exposed and it is used by
7752 the sequence itself. */
7755 attr_length_indirect_call (rtx insn)
7757 unsigned long distance = -1;
7758 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7760 if (INSN_ADDRESSES_SET_P ())
7762 distance = (total + insn_current_reference_address (insn));
7763 if (distance < total)
7764 distance = -1;
7767 if (TARGET_64BIT)
7768 return 12;
7770 if (TARGET_FAST_INDIRECT_CALLS
7771 || (!TARGET_PORTABLE_RUNTIME
7772 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7773 || distance < 240000)))
7774 return 8;
7776 if (flag_pic)
7777 return 24;
7779 if (TARGET_PORTABLE_RUNTIME)
7780 return 20;
7782 /* Out of reach, can use ble. */
7783 return 12;
7786 const char *
7787 output_indirect_call (rtx insn, rtx call_dest)
7789 rtx xoperands[1];
7791 if (TARGET_64BIT)
7793 xoperands[0] = call_dest;
7794 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7795 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7796 return "";
7799 /* First the special case for kernels, level 0 systems, etc. */
7800 if (TARGET_FAST_INDIRECT_CALLS)
7801 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7803 /* Now the normal case -- we can reach $$dyncall directly or
7804 we're sure that we can get there via a long-branch stub.
7806 No need to check target flags as the length uniquely identifies
7807 the remaining cases. */
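/* As a rough summary of the cases handled below, the computed lengths
correspond to these sequences: 8 for a direct {bl|b,l} to $$dyncall,
12 for the ldil/ble millicode sequence (and the 64-bit case above),
20 for the portable runtime sequence, and 24 for the long PIC call
to $$dyncall.  */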
7808 if (attr_length_indirect_call (insn) == 8)
7810 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7811 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7812 variant of the B,L instruction can't be used on the SOM target. */
7813 if (TARGET_PA_20 && !TARGET_SOM)
7814 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7815 else
7816 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7819 /* Long millicode call, but we are not generating PIC or portable runtime
7820 code. */
7821 if (attr_length_indirect_call (insn) == 12)
7822 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7824 /* Long millicode call for portable runtime. */
7825 if (attr_length_indirect_call (insn) == 20)
7826 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7828 /* We need a long PIC call to $$dyncall. */
7829 xoperands[0] = NULL_RTX;
7830 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7831 if (TARGET_SOM || !TARGET_GAS)
7833 xoperands[0] = gen_label_rtx ();
7834 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7835 targetm.asm_out.internal_label (asm_out_file, "L",
7836 CODE_LABEL_NUMBER (xoperands[0]));
7837 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7839 else
7841 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7842 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7843 xoperands);
7845 output_asm_insn ("blr %%r0,%%r2", xoperands);
7846 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7847 return "";
7850 /* Return the total length of the save and restore instructions needed for
7851 the data linkage table pointer (i.e., the PIC register) across the call
7852 instruction INSN. No-return calls do not require a save and restore.
7853 In addition, we may be able to avoid the save and restore for calls
7854 within the same translation unit. */
7857 attr_length_save_restore_dltp (rtx insn)
7859 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7860 return 0;
7862 return 8;
7865 /* In HPUX 8.0's shared library scheme, special relocations are needed
7866 for function labels if they might be passed to a function
7867 in a shared library (because shared libraries don't live in code
7868 space), and special magic is needed to construct their address. */
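/* For example, under this scheme the name "foo" becomes "@foo";
pa_strip_name_encoding below removes the prefix again when the
plain name is needed.  */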
7870 void
7871 hppa_encode_label (rtx sym)
7873 const char *str = XSTR (sym, 0);
7874 int len = strlen (str) + 1;
7875 char *newstr, *p;
7877 p = newstr = XALLOCAVEC (char, len + 1);
7878 *p++ = '@';
7879 strcpy (p, str);
7881 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7884 static void
7885 pa_encode_section_info (tree decl, rtx rtl, int first)
7887 int old_referenced = 0;
7889 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
7890 old_referenced
7891 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
7893 default_encode_section_info (decl, rtl, first);
7895 if (first && TEXT_SPACE_P (decl))
7897 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7898 if (TREE_CODE (decl) == FUNCTION_DECL)
7899 hppa_encode_label (XEXP (rtl, 0));
7901 else if (old_referenced)
7902 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
7905 /* This is sort of the inverse of pa_encode_section_info. */
7907 static const char *
7908 pa_strip_name_encoding (const char *str)
7910 str += (*str == '@');
7911 str += (*str == '*');
7912 return str;
7916 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7918 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7921 /* Returns 1 if OP is a function label involved in a simple addition
7922 with a constant. Used to keep certain patterns from matching
7923 during instruction combination. */
7925 is_function_label_plus_const (rtx op)
7927 /* Strip off any CONST. */
7928 if (GET_CODE (op) == CONST)
7929 op = XEXP (op, 0);
7931 return (GET_CODE (op) == PLUS
7932 && function_label_operand (XEXP (op, 0), Pmode)
7933 && GET_CODE (XEXP (op, 1)) == CONST_INT);
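/* Schematically, this matches RTL of the form
(const (plus (symbol_ref "@foo") (const_int 4))), where the
symbol_ref satisfies function_label_operand; the name "foo" and the
offset 4 are just illustrative.  */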
7936 /* Output assembly code for a thunk to FUNCTION. */
7938 static void
7939 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7940 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7941 tree function)
7943 static unsigned int current_thunk_number;
7944 int val_14 = VAL_14_BITS_P (delta);
7945 int nbytes = 0;
7946 char label[16];
7947 rtx xoperands[4];
7949 xoperands[0] = XEXP (DECL_RTL (function), 0);
7950 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7951 xoperands[2] = GEN_INT (delta);
7953 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7954 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7956 /* Output the thunk. We know that the function is in the same
7957 translation unit (i.e., the same space) as the thunk, and that
7958 thunks are output after their method. Thus, we don't need an
7959 external branch to reach the function. With SOM and GAS,
7960 functions and thunks are effectively in different sections.
7961 Thus, we can always use an IA-relative branch and the linker
7962 will add a long branch stub if necessary.
7964 However, we have to be careful when generating PIC code on the
7965 SOM port to ensure that the sequence does not transfer to an
7966 import stub for the target function as this could clobber the
7967 return value saved at SP-24. This would also apply to the
7968 32-bit linux port if the multi-space model is implemented. */
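/* In the simplest case below, when DELTA fits in 14 bits, the thunk
is just

	b	function
	ldo	delta(%r26),%r26

with the adjustment of the this pointer in %r26 done from the
delay slot of the branch.  */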
7969 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7970 && !(flag_pic && TREE_PUBLIC (function))
7971 && (TARGET_GAS || last_address < 262132))
7972 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7973 && ((targetm.have_named_sections
7974 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7975 /* The GNU 64-bit linker has rather poor stub management.
7976 So, we use a long branch from thunks that aren't in
7977 the same section as the target function. */
7978 && ((!TARGET_64BIT
7979 && (DECL_SECTION_NAME (thunk_fndecl)
7980 != DECL_SECTION_NAME (function)))
7981 || ((DECL_SECTION_NAME (thunk_fndecl)
7982 == DECL_SECTION_NAME (function))
7983 && last_address < 262132)))
7984 || (!targetm.have_named_sections && last_address < 262132))))
7986 if (!val_14)
7987 output_asm_insn ("addil L'%2,%%r26", xoperands);
7989 output_asm_insn ("b %0", xoperands);
7991 if (val_14)
7993 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7994 nbytes += 8;
7996 else
7998 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7999 nbytes += 12;
8002 else if (TARGET_64BIT)
8004 /* We only have one call-clobbered scratch register, so we can't
8005 make use of the delay slot if delta doesn't fit in 14 bits. */
8006 if (!val_14)
8008 output_asm_insn ("addil L'%2,%%r26", xoperands);
8009 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8012 output_asm_insn ("b,l .+8,%%r1", xoperands);
8014 if (TARGET_GAS)
8016 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8017 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8019 else
8021 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8022 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8025 if (val_14)
8027 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8028 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8029 nbytes += 20;
8031 else
8033 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8034 nbytes += 24;
8037 else if (TARGET_PORTABLE_RUNTIME)
8039 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8040 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8042 if (!val_14)
8043 output_asm_insn ("addil L'%2,%%r26", xoperands);
8045 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8047 if (val_14)
8049 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8050 nbytes += 16;
8052 else
8054 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8055 nbytes += 20;
8058 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8060 /* The function is accessible from outside this module. The only
8061 way to avoid an import stub between the thunk and function is to
8062 call the function directly with an indirect sequence similar to
8063 that used by $$dyncall. This is possible because $$dyncall acts
8064 as the import stub in an indirect call. */
8065 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8066 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8067 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8068 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8069 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8070 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8071 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8072 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8073 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8075 if (!val_14)
8077 output_asm_insn ("addil L'%2,%%r26", xoperands);
8078 nbytes += 4;
8081 if (TARGET_PA_20)
8083 output_asm_insn ("bve (%%r22)", xoperands);
8084 nbytes += 36;
8086 else if (TARGET_NO_SPACE_REGS)
8088 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8089 nbytes += 36;
8091 else
8093 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8094 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8095 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8096 nbytes += 44;
8099 if (val_14)
8100 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8101 else
8102 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8104 else if (flag_pic)
8106 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8108 if (TARGET_SOM || !TARGET_GAS)
8110 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8111 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8113 else
8115 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8116 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8119 if (!val_14)
8120 output_asm_insn ("addil L'%2,%%r26", xoperands);
8122 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8124 if (val_14)
8126 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8127 nbytes += 20;
8129 else
8131 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8132 nbytes += 24;
8135 else
8137 if (!val_14)
8138 output_asm_insn ("addil L'%2,%%r26", xoperands);
8140 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8141 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8143 if (val_14)
8145 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8146 nbytes += 12;
8148 else
8150 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8151 nbytes += 16;
8155 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8157 if (TARGET_SOM && TARGET_GAS)
8159 /* We're done with this subspace except possibly for some additional
8160 debug information. Forget that we are in this subspace to ensure
8161 that the next function is output in its own subspace. */
8162 in_section = NULL;
8163 cfun->machine->in_nsubspa = 2;
8166 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8168 switch_to_section (data_section);
8169 output_asm_insn (".align 4", xoperands);
8170 ASM_OUTPUT_LABEL (file, label);
8171 output_asm_insn (".word P'%0", xoperands);
8174 current_thunk_number++;
8175 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8176 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8177 last_address += nbytes;
8178 update_total_code_bytes (nbytes);
8181 /* Only direct calls to static functions are allowed to be sibling (tail)
8182 call optimized.
8184 This restriction is necessary because some linker generated stubs will
8185 store return pointers into rp' in some cases which might clobber a
8186 live value already in rp'.
8188 In a sibcall the current function and the target function share stack
8189 space. Thus if the path to the current function and the path to the
8190 target function save a value in rp', they save the value into the
8191 same stack slot, which has undesirable consequences.
8193 Because of the deferred binding nature of shared libraries any function
8194 with external scope could be in a different load module and thus require
8195 rp' to be saved when calling that function. So sibcall optimizations
8196 can only be safe for static functions.
8198 Note that GCC never needs return value relocations, so we don't have to
8199 worry about static calls with return value relocations (which require
8200 saving rp').
8202 It is safe to perform a sibcall optimization when the target function
8203 will never return. */
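/* Illustrative example: given

	static int g (int);
	int f (int x) { return g (x); }

the direct call to g may be sibcall optimized, while a call to a
function with external linkage generally may not (see the checks
below).  */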
8204 static bool
8205 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8207 if (TARGET_PORTABLE_RUNTIME)
8208 return false;
8210 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8211 single subspace mode and the call is not indirect. As far as I know,
8212 there is no operating system support for the multiple subspace mode.
8213 It might be possible to support indirect calls if we didn't use
8214 $$dyncall (see the indirect sequence generated in output_call). */
8215 if (TARGET_ELF32)
8216 return (decl != NULL_TREE);
8218 /* Sibcalls are not ok because the arg pointer register is not a fixed
8219 register. This prevents the sibcall optimization from occurring. In
8220 addition, there are problems with stub placement using GNU ld. This
8221 is because a normal sibcall branch uses a 17-bit relocation while
8222 a regular call branch uses a 22-bit relocation. As a result, more
8223 care needs to be taken in the placement of long-branch stubs. */
8224 if (TARGET_64BIT)
8225 return false;
8227 /* Sibcalls are only ok within a translation unit. */
8228 return (decl && !TREE_PUBLIC (decl));
8231 /* ??? Addition is not commutative on the PA due to the weird implicit
8232 space register selection rules for memory addresses. Therefore, we
8233 don't consider a + b == b + a, as this might be inside a MEM. */
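/* For example, in (mem:SI (plus:SI (reg:SI base) (reg:SI index)))
the implicit space register is selected from the base register, so
swapping the operands of the PLUS inside a MEM could change which
space the access uses.  */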
8234 static bool
8235 pa_commutative_p (const_rtx x, int outer_code)
8237 return (COMMUTATIVE_P (x)
8238 && (TARGET_NO_SPACE_REGS
8239 || (outer_code != UNKNOWN && outer_code != MEM)
8240 || GET_CODE (x) != PLUS));
8243 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8244 use in fmpyadd instructions. */
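/* The expected layout, implied by the checks below, is that
operands[0] through operands[2] are the output and inputs of the
multiply, and operands[3] through operands[5] are the output and
inputs of the add, with operands[3] doubling as one of the add
inputs.  */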
8246 fmpyaddoperands (rtx *operands)
8248 enum machine_mode mode = GET_MODE (operands[0]);
8250 /* Must be a floating point mode. */
8251 if (mode != SFmode && mode != DFmode)
8252 return 0;
8254 /* All modes must be the same. */
8255 if (! (mode == GET_MODE (operands[1])
8256 && mode == GET_MODE (operands[2])
8257 && mode == GET_MODE (operands[3])
8258 && mode == GET_MODE (operands[4])
8259 && mode == GET_MODE (operands[5])))
8260 return 0;
8262 /* All operands must be registers. */
8263 if (! (GET_CODE (operands[1]) == REG
8264 && GET_CODE (operands[2]) == REG
8265 && GET_CODE (operands[3]) == REG
8266 && GET_CODE (operands[4]) == REG
8267 && GET_CODE (operands[5]) == REG))
8268 return 0;
8270 /* Only 2 real operands to the addition. One of the input operands must
8271 be the same as the output operand. */
8272 if (! rtx_equal_p (operands[3], operands[4])
8273 && ! rtx_equal_p (operands[3], operands[5]))
8274 return 0;
8276 /* Inout operand of add cannot conflict with any operands from multiply. */
8277 if (rtx_equal_p (operands[3], operands[0])
8278 || rtx_equal_p (operands[3], operands[1])
8279 || rtx_equal_p (operands[3], operands[2]))
8280 return 0;
8282 /* The multiply cannot feed into the addition operands. */
8283 if (rtx_equal_p (operands[4], operands[0])
8284 || rtx_equal_p (operands[5], operands[0]))
8285 return 0;
8287 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8288 if (mode == SFmode
8289 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8290 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8291 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8292 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8293 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8294 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8295 return 0;
8297 /* Passed. Operands are suitable for fmpyadd. */
8298 return 1;
8301 #if !defined(USE_COLLECT2)
8302 static void
8303 pa_asm_out_constructor (rtx symbol, int priority)
8305 if (!function_label_operand (symbol, VOIDmode))
8306 hppa_encode_label (symbol);
8308 #ifdef CTORS_SECTION_ASM_OP
8309 default_ctor_section_asm_out_constructor (symbol, priority);
8310 #else
8311 # ifdef TARGET_ASM_NAMED_SECTION
8312 default_named_section_asm_out_constructor (symbol, priority);
8313 # else
8314 default_stabs_asm_out_constructor (symbol, priority);
8315 # endif
8316 #endif
8319 static void
8320 pa_asm_out_destructor (rtx symbol, int priority)
8322 if (!function_label_operand (symbol, VOIDmode))
8323 hppa_encode_label (symbol);
8325 #ifdef DTORS_SECTION_ASM_OP
8326 default_dtor_section_asm_out_destructor (symbol, priority);
8327 #else
8328 # ifdef TARGET_ASM_NAMED_SECTION
8329 default_named_section_asm_out_destructor (symbol, priority);
8330 # else
8331 default_stabs_asm_out_destructor (symbol, priority);
8332 # endif
8333 #endif
8335 #endif
8337 /* This function places uninitialized global data in the bss section.
8338 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8339 function on the SOM port to prevent uninitialized global data from
8340 being placed in the data section. */
8342 void
8343 pa_asm_output_aligned_bss (FILE *stream,
8344 const char *name,
8345 unsigned HOST_WIDE_INT size,
8346 unsigned int align)
8348 switch_to_section (bss_section);
8349 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8351 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8352 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8353 #endif
8355 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8356 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8357 #endif
8359 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8360 ASM_OUTPUT_LABEL (stream, name);
8361 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8364 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8365 that doesn't allow the alignment of global common storage to be directly
8366 specified. The SOM linker aligns common storage based on the rounded
8367 value of the NUM_BYTES parameter in the .comm directive. It's not
8368 possible to use the .align directive as it doesn't affect the alignment
8369 of the label associated with a .comm directive. */
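/* For example, on a 32-bit target a common symbol smaller than 4096
bytes can be aligned to at most 64 bits; a request for 128-bit
alignment would be diagnosed below and reduced to 64 bits.  */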
8371 void
8372 pa_asm_output_aligned_common (FILE *stream,
8373 const char *name,
8374 unsigned HOST_WIDE_INT size,
8375 unsigned int align)
8377 unsigned int max_common_align;
8379 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8380 if (align > max_common_align)
8382 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8383 "for global common data. Using %u",
8384 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8385 align = max_common_align;
8388 switch_to_section (bss_section);
8390 assemble_name (stream, name);
8391 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8392 MAX (size, align / BITS_PER_UNIT));
8395 /* We can't use .comm for local common storage as the SOM linker effectively
8396 treats the symbol as universal and uses the same storage for local symbols
8397 with the same name in different object files. The .block directive
8398 reserves an uninitialized block of storage. However, it's not common
8399 storage. Fortunately, GCC never requests common storage with the same
8400 name in any given translation unit. */
8402 void
8403 pa_asm_output_aligned_local (FILE *stream,
8404 const char *name,
8405 unsigned HOST_WIDE_INT size,
8406 unsigned int align)
8408 switch_to_section (bss_section);
8409 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8411 #ifdef LOCAL_ASM_OP
8412 fprintf (stream, "%s", LOCAL_ASM_OP);
8413 assemble_name (stream, name);
8414 fprintf (stream, "\n");
8415 #endif
8417 ASM_OUTPUT_LABEL (stream, name);
8418 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8421 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8422 use in fmpysub instructions. */
8424 fmpysuboperands (rtx *operands)
8426 enum machine_mode mode = GET_MODE (operands[0]);
8428 /* Must be a floating point mode. */
8429 if (mode != SFmode && mode != DFmode)
8430 return 0;
8432 /* All modes must be the same. */
8433 if (! (mode == GET_MODE (operands[1])
8434 && mode == GET_MODE (operands[2])
8435 && mode == GET_MODE (operands[3])
8436 && mode == GET_MODE (operands[4])
8437 && mode == GET_MODE (operands[5])))
8438 return 0;
8440 /* All operands must be registers. */
8441 if (! (GET_CODE (operands[1]) == REG
8442 && GET_CODE (operands[2]) == REG
8443 && GET_CODE (operands[3]) == REG
8444 && GET_CODE (operands[4]) == REG
8445 && GET_CODE (operands[5]) == REG))
8446 return 0;
8448 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8449 operation, so operands[4] must be the same as operands[3]. */
8450 if (! rtx_equal_p (operands[3], operands[4]))
8451 return 0;
8453 /* The multiply cannot feed into the subtraction. */
8454 if (rtx_equal_p (operands[5], operands[0]))
8455 return 0;
8457 /* Inout operand of sub cannot conflict with any operands from multiply. */
8458 if (rtx_equal_p (operands[3], operands[0])
8459 || rtx_equal_p (operands[3], operands[1])
8460 || rtx_equal_p (operands[3], operands[2]))
8461 return 0;
8463 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8464 if (mode == SFmode
8465 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8466 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8467 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8468 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8469 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8470 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8471 return 0;
8473 /* Passed. Operands are suitable for fmpysub. */
8474 return 1;
8477 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8478 constants for shadd instructions. */
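/* These values correspond to the sh1add, sh2add and sh3add
instructions, which shift their first operand left by 1, 2 or 3
bits before adding it to the second operand.  */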
8480 shadd_constant_p (int val)
8482 if (val == 2 || val == 4 || val == 8)
8483 return 1;
8484 else
8485 return 0;
8488 /* Return 1 if OP is valid as a base or index register in a
8489 REG+REG address. */
8492 borx_reg_operand (rtx op, enum machine_mode mode)
8494 if (GET_CODE (op) != REG)
8495 return 0;
8497 /* We must reject virtual registers as the only expressions that
8498 can be instantiated are REG and REG+CONST. */
8499 if (op == virtual_incoming_args_rtx
8500 || op == virtual_stack_vars_rtx
8501 || op == virtual_stack_dynamic_rtx
8502 || op == virtual_outgoing_args_rtx
8503 || op == virtual_cfa_rtx)
8504 return 0;
8506 /* While it's always safe to index off the frame pointer, it's not
8507 profitable to do so when the frame pointer is being eliminated. */
8508 if (!reload_completed
8509 && flag_omit_frame_pointer
8510 && !cfun->calls_alloca
8511 && op == frame_pointer_rtx)
8512 return 0;
8514 return register_operand (op, mode);
8517 /* Return 1 if this operand is anything other than a hard register. */
8520 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8522 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8525 /* Return 1 if INSN branches forward. Should be using insn_addresses
8526 to avoid walking through all the insns... */
8527 static int
8528 forward_branch_p (rtx insn)
8530 rtx label = JUMP_LABEL (insn);
8532 while (insn)
8534 if (insn == label)
8535 break;
8536 else
8537 insn = NEXT_INSN (insn);
8540 return (insn == label);
8543 /* Return 1 if OP is an equality comparison, else return 0. */
8545 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8547 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8550 /* Return 1 if INSN is in the delay slot of a call instruction. */
8552 jump_in_call_delay (rtx insn)
8555 if (GET_CODE (insn) != JUMP_INSN)
8556 return 0;
8558 if (PREV_INSN (insn)
8559 && PREV_INSN (PREV_INSN (insn))
8560 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8562 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8564 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8565 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8568 else
8569 return 0;
8572 /* Output an unconditional move and branch insn. */
8574 const char *
8575 output_parallel_movb (rtx *operands, rtx insn)
8577 int length = get_attr_length (insn);
8579 /* These are the cases in which we win. */
8580 if (length == 4)
8581 return "mov%I1b,tr %1,%0,%2";
8583 /* None of the following cases win, but they don't lose either. */
8584 if (length == 8)
8586 if (dbr_sequence_length () == 0)
8588 /* Nothing in the delay slot, fake it by putting the combined
8589 insn (the copy or add) in the delay slot of a bl. */
8590 if (GET_CODE (operands[1]) == CONST_INT)
8591 return "b %2\n\tldi %1,%0";
8592 else
8593 return "b %2\n\tcopy %1,%0";
8595 else
8597 /* Something in the delay slot, but we've got a long branch. */
8598 if (GET_CODE (operands[1]) == CONST_INT)
8599 return "ldi %1,%0\n\tb %2";
8600 else
8601 return "copy %1,%0\n\tb %2";
8605 if (GET_CODE (operands[1]) == CONST_INT)
8606 output_asm_insn ("ldi %1,%0", operands);
8607 else
8608 output_asm_insn ("copy %1,%0", operands);
8609 return output_lbranch (operands[2], insn, 1);
8612 /* Output an unconditional add and branch insn. */
8614 const char *
8615 output_parallel_addb (rtx *operands, rtx insn)
8617 int length = get_attr_length (insn);
8619 /* To make life easy we want operand0 to be the shared input/output
8620 operand and operand1 to be the readonly operand. */
8621 if (operands[0] == operands[1])
8622 operands[1] = operands[2];
8624 /* These are the cases in which we win. */
8625 if (length == 4)
8626 return "add%I1b,tr %1,%0,%3";
8628 /* None of the following cases win, but they don't lose either. */
8629 if (length == 8)
8631 if (dbr_sequence_length () == 0)
8632 /* Nothing in the delay slot, fake it by putting the combined
8633 insn (the copy or add) in the delay slot of a bl. */
8634 return "b %3\n\tadd%I1 %1,%0,%0";
8635 else
8636 /* Something in the delay slot, but we've got a long branch. */
8637 return "add%I1 %1,%0,%0\n\tb %3";
8640 output_asm_insn ("add%I1 %1,%0,%0", operands);
8641 return output_lbranch (operands[3], insn, 1);
8644 /* Return nonzero if INSN (a jump insn) immediately follows a call
8645 to a named function. This is used to avoid filling the delay slot
8646 of the jump since it can usually be eliminated by modifying RP in
8647 the delay slot of the call. */
8650 following_call (rtx insn)
8652 if (! TARGET_JUMP_IN_DELAY)
8653 return 0;
8655 /* Find the previous real insn, skipping NOTEs. */
8656 insn = PREV_INSN (insn);
8657 while (insn && GET_CODE (insn) == NOTE)
8658 insn = PREV_INSN (insn);
8660 /* Check for CALL_INSNs and millicode calls. */
8661 if (insn
8662 && ((GET_CODE (insn) == CALL_INSN
8663 && get_attr_type (insn) != TYPE_DYNCALL)
8664 || (GET_CODE (insn) == INSN
8665 && GET_CODE (PATTERN (insn)) != SEQUENCE
8666 && GET_CODE (PATTERN (insn)) != USE
8667 && GET_CODE (PATTERN (insn)) != CLOBBER
8668 && get_attr_type (insn) == TYPE_MILLI)))
8669 return 1;
8671 return 0;
8674 /* We use this hook to perform a PA specific optimization which is difficult
8675 to do in earlier passes.
8677 We want the delay slots of branches within jump tables to be filled.
8678 None of the compiler passes at the moment even has the notion that a
8679 PA jump table doesn't contain addresses, but instead contains actual
8680 instructions!
8682 Because we actually jump into the table, the addresses of each entry
8683 must stay constant in relation to the beginning of the table (which
8684 itself must stay constant relative to the instruction to jump into
8685 it). I don't believe we can guarantee earlier passes of the compiler
8686 will adhere to those rules.
8688 So, late in the compilation process we find all the jump tables, and
8689 expand them into real code -- e.g. each entry in the jump table vector
8690 will get an appropriate label followed by a jump to the final target.
8692 Reorg and the final jump pass can then optimize these branches and
8693 fill their delay slots. We end up with smaller, more efficient code.
8695 The jump instructions within the table are special; we must be able
8696 to identify them during assembly output (if the jumps don't get filled
8697 we need to emit a nop rather than nullifying the delay slot). We
8698 identify jumps in switch tables by using insns with the attribute
8699 type TYPE_BTABLE_BRANCH.
8701 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8702 insns. This serves two purposes: first, it prevents jump.c from
8703 noticing that the last N entries in the table jump to the instruction
8704 immediately after the table and deleting the jumps. Second, those
8705 insns mark where we should emit .begin_brtab and .end_brtab directives
8706 when using GAS (allows for better link time optimizations). */
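/* Schematically, a two-entry ADDR_VEC referencing labels L1 and L2
is rewritten below as

	begin_brtab marker
	Ln1:	jump to L1
	Ln2:	jump to L2
	end_brtab marker

where Ln1 and Ln2 are fresh labels and each jump is followed by a
barrier.  */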
8708 static void
8709 pa_reorg (void)
8711 rtx insn;
8713 remove_useless_addtr_insns (1);
8715 if (pa_cpu < PROCESSOR_8000)
8716 pa_combine_instructions ();
8719 /* This is fairly cheap, so always run it if optimizing. */
8720 if (optimize > 0 && !TARGET_BIG_SWITCH)
8722 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8723 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8725 rtx pattern, tmp, location, label;
8726 unsigned int length, i;
8728 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8729 if (GET_CODE (insn) != JUMP_INSN
8730 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8731 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8732 continue;
8734 /* Emit marker for the beginning of the branch table. */
8735 emit_insn_before (gen_begin_brtab (), insn);
8737 pattern = PATTERN (insn);
8738 location = PREV_INSN (insn);
8739 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8741 for (i = 0; i < length; i++)
8743 /* Emit a label before each jump to keep jump.c from
8744 removing this code. */
8745 tmp = gen_label_rtx ();
8746 LABEL_NUSES (tmp) = 1;
8747 emit_label_after (tmp, location);
8748 location = NEXT_INSN (location);
8750 if (GET_CODE (pattern) == ADDR_VEC)
8751 label = XEXP (XVECEXP (pattern, 0, i), 0);
8752 else
8753 label = XEXP (XVECEXP (pattern, 1, i), 0);
8755 tmp = gen_short_jump (label);
8757 /* Emit the jump itself. */
8758 tmp = emit_jump_insn_after (tmp, location);
8759 JUMP_LABEL (tmp) = label;
8760 LABEL_NUSES (label)++;
8761 location = NEXT_INSN (location);
8763 /* Emit a BARRIER after the jump. */
8764 emit_barrier_after (location);
8765 location = NEXT_INSN (location);
8768 /* Emit marker for the end of the branch table. */
8769 emit_insn_before (gen_end_brtab (), location);
8770 location = NEXT_INSN (location);
8771 emit_barrier_after (location);
8773 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8774 delete_insn (insn);
8777 else
8779 /* Still need brtab marker insns. FIXME: the presence of these
8780 markers disables output of the branch table to readonly memory,
8781 and any alignment directives that might be needed. Possibly,
8782 the begin_brtab insn should be output before the label for the
8783 table. This doesn't matter at the moment since the tables are
8784 always output in the text section. */
8785 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8787 /* Find an ADDR_VEC insn. */
8788 if (GET_CODE (insn) != JUMP_INSN
8789 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8790 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8791 continue;
8793 /* Now generate markers for the beginning and end of the
8794 branch table. */
8795 emit_insn_before (gen_begin_brtab (), insn);
8796 emit_insn_after (gen_end_brtab (), insn);
8801 /* The PA has a number of odd instructions which can perform multiple
8802 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8803 it may be profitable to combine two instructions into one instruction
8804 with two outputs. It's not profitable on PA2.0 machines because the
8805 two outputs would take two slots in the reorder buffers.
8807 This routine finds instructions which can be combined and combines
8808 them. We only support some of the potential combinations, and we
8809 only try common ways to find suitable instructions.
8811 * addb can add two registers or a register and a small integer
8812 and jump to a nearby (+-8k) location. Normally the jump to the
8813 nearby location is conditional on the result of the add, but by
8814 using the "true" condition we can make the jump unconditional.
8815 Thus addb can perform two independent operations in one insn.
8817 * movb is similar to addb in that it can perform a reg->reg
8818 or small immediate->reg copy and jump to a nearby (+-8k) location.
8820 * fmpyadd and fmpysub can perform an FP multiply and either an
8821 FP add or FP sub if the operands of the multiply and add/sub are
8822 independent (there are other minor restrictions). Note both
8823 the fmpy and fadd/fsub can in theory move to better spots according
8824 to data dependencies, but for now we require the fmpy stay at a
8825 fixed location.
8827 * Many of the memory operations can perform pre & post updates
8828 of index registers. GCC's pre/post increment/decrement addressing
8829 is far too simple to take advantage of all the possibilities. This
8830 pass may not be suitable since those insns may not be independent.
8832 * comclr can compare two ints or an int and a register, nullify
8833 the following instruction and zero some other register. This
8834 is more difficult to use as it's harder to find an insn which
8835 will generate a comclr than finding something like an unconditional
8836 branch. (conditional moves & long branches create comclr insns).
8838 * Most arithmetic operations can conditionally skip the next
8839 instruction. They can be viewed as "perform this operation
8840 and conditionally jump to this nearby location" (where nearby
8841 is an insn away). These are difficult to use due to the
8842 branch length restrictions. */
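/* As a concrete example of the movb case, the pair

	copy	%r4,%r5
	b	.L2

can become the single instruction

	movb,tr	%r4,%r5,.L2

using the always-true condition (see output_parallel_movb above;
the registers and label are illustrative).  */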
8844 static void
8845 pa_combine_instructions (void)
8847 rtx anchor, new_rtx;
8849 /* This can get expensive since the basic algorithm is on the
8850 order of O(n^2) (or worse). Only do it for -O2 or higher
8851 levels of optimization. */
8852 if (optimize < 2)
8853 return;
8855 /* Walk down the list of insns looking for "anchor" insns which
8856 may be combined with "floating" insns. As the name implies,
8857 "anchor" instructions don't move, while "floating" insns may
8858 move around. */
8859 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8860 new_rtx = make_insn_raw (new_rtx);
8862 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8864 enum attr_pa_combine_type anchor_attr;
8865 enum attr_pa_combine_type floater_attr;
8867 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8868 Also ignore any special USE insns. */
8869 if ((GET_CODE (anchor) != INSN
8870 && GET_CODE (anchor) != JUMP_INSN
8871 && GET_CODE (anchor) != CALL_INSN)
8872 || GET_CODE (PATTERN (anchor)) == USE
8873 || GET_CODE (PATTERN (anchor)) == CLOBBER
8874 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8875 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8876 continue;
8878 anchor_attr = get_attr_pa_combine_type (anchor);
8879 /* See if anchor is an insn suitable for combination. */
8880 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8881 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8882 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8883 && ! forward_branch_p (anchor)))
8885 rtx floater;
8887 for (floater = PREV_INSN (anchor);
8888 floater;
8889 floater = PREV_INSN (floater))
8891 if (GET_CODE (floater) == NOTE
8892 || (GET_CODE (floater) == INSN
8893 && (GET_CODE (PATTERN (floater)) == USE
8894 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8895 continue;
8897 /* Anything except a regular INSN will stop our search. */
8898 if (GET_CODE (floater) != INSN
8899 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8900 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8902 floater = NULL_RTX;
8903 break;
8906 /* See if FLOATER is suitable for combination with the
8907 anchor. */
8908 floater_attr = get_attr_pa_combine_type (floater);
8909 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8910 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8911 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8912 && floater_attr == PA_COMBINE_TYPE_FMPY))
8914 /* If ANCHOR and FLOATER can be combined, then we're
8915 done with this pass. */
8916 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8917 SET_DEST (PATTERN (floater)),
8918 XEXP (SET_SRC (PATTERN (floater)), 0),
8919 XEXP (SET_SRC (PATTERN (floater)), 1)))
8920 break;
8923 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8924 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8926 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8928 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8929 SET_DEST (PATTERN (floater)),
8930 XEXP (SET_SRC (PATTERN (floater)), 0),
8931 XEXP (SET_SRC (PATTERN (floater)), 1)))
8932 break;
8934 else
8936 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8937 SET_DEST (PATTERN (floater)),
8938 SET_SRC (PATTERN (floater)),
8939 SET_SRC (PATTERN (floater))))
8940 break;
8945 /* If we didn't find anything on the backwards scan try forwards. */
8946 if (!floater
8947 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8948 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8950 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8952 if (GET_CODE (floater) == NOTE
8953 || (GET_CODE (floater) == INSN
8954 && (GET_CODE (PATTERN (floater)) == USE
8955 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8957 continue;
8959 /* Anything except a regular INSN will stop our search. */
8960 if (GET_CODE (floater) != INSN
8961 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8962 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8964 floater = NULL_RTX;
8965 break;
8968 /* See if FLOATER is suitable for combination with the
8969 anchor. */
8970 floater_attr = get_attr_pa_combine_type (floater);
8971 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8972 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8973 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8974 && floater_attr == PA_COMBINE_TYPE_FMPY))
8976 /* If ANCHOR and FLOATER can be combined, then we're
8977 done with this pass. */
8978 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
8979 SET_DEST (PATTERN (floater)),
8980 XEXP (SET_SRC (PATTERN (floater)), 0),
8982 XEXP (SET_SRC (PATTERN (floater)),
8983 1)))
8984 break;
8989 /* FLOATER will be nonzero if we found a suitable floating
8990 insn for combination with ANCHOR. */
8991 if (floater
8992 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8993 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8995 /* Emit the new instruction and delete the old anchor. */
8996 emit_insn_before (gen_rtx_PARALLEL
8997 (VOIDmode,
8998 gen_rtvec (2, PATTERN (anchor),
8999 PATTERN (floater))),
9000 anchor);
9002 SET_INSN_DELETED (anchor);
9004 /* Emit a special USE insn for FLOATER, then delete
9005 the floating insn. */
9006 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9007 delete_insn (floater);
9009 continue;
9011 else if (floater
9012 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9014 rtx temp;
9015 /* Emit the new_jump instruction and delete the old anchor. */
9016 temp
9017 = emit_jump_insn_before (gen_rtx_PARALLEL
9018 (VOIDmode,
9019 gen_rtvec (2, PATTERN (anchor),
9020 PATTERN (floater))),
9021 anchor);
9023 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9024 SET_INSN_DELETED (anchor);
9026 /* Emit a special USE insn for FLOATER, then delete
9027 the floating insn. */
9028 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9029 delete_insn (floater);
9030 continue;
9036 static int
9037 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9038 rtx src1, rtx src2)
9040 int insn_code_number;
9041 rtx start, end;
9043 /* Create a PARALLEL with the patterns of ANCHOR and
9044 FLOATER, try to recognize it, then test constraints
9045 for the resulting pattern.
9047 If the pattern doesn't match or the constraints
9048 aren't met keep searching for a suitable floater
9049 insn. */
9050 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9051 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9052 INSN_CODE (new_rtx) = -1;
9053 insn_code_number = recog_memoized (new_rtx);
9054 if (insn_code_number < 0
9055 || (extract_insn (new_rtx), ! constrain_operands (1)))
9056 return 0;
9058 if (reversed)
9060 start = anchor;
9061 end = floater;
9063 else
9065 start = floater;
9066 end = anchor;
9069 /* There are up to three operands to consider. One
9070 output and two inputs.
9072 The output must not be used between FLOATER & ANCHOR
9073 exclusive. The inputs must not be set between
9074 FLOATER and ANCHOR exclusive. */
9076 if (reg_used_between_p (dest, start, end))
9077 return 0;
9079 if (reg_set_between_p (src1, start, end))
9080 return 0;
9082 if (reg_set_between_p (src2, start, end))
9083 return 0;
9085 /* If we get here, then everything is good. */
9086 return 1;
9089 /* Return nonzero if references for INSN are delayed.
9091 Millicode insns are actually function calls with some special
9092 constraints on arguments and register usage.
9094 Millicode calls always expect their arguments in the integer argument
9095 registers, and always return their result in %r29 (ret1). They
9096 are expected to clobber their arguments, %r1, %r29, and the return
9097 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9099 This function tells reorg that the references to arguments and
9100 millicode calls do not appear to happen until after the millicode call.
9101 This allows reorg to put insns which set the argument registers into the
9102 delay slot of the millicode call -- thus they act more like traditional
9103 CALL_INSNs.
9105 Note we cannot consider side effects of the insn to be delayed because
9106 the branch and link insn will clobber the return pointer. If we happened
9107 to use the return pointer in the delay slot of the call, then we lose.
9109 get_attr_type will try to recognize the given insn, so make sure to
9110 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9111 in particular. */
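/* Illustrative example: in

	ldi	10,%r26
	bl	$$mulI,%r31

reorg may move the ldi into the delay slot of the millicode call
since, as far as it is concerned, %r26 is not read until after the
branch.  */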
9113 insn_refs_are_delayed (rtx insn)
9115 return ((GET_CODE (insn) == INSN
9116 && GET_CODE (PATTERN (insn)) != SEQUENCE
9117 && GET_CODE (PATTERN (insn)) != USE
9118 && GET_CODE (PATTERN (insn)) != CLOBBER
9119 && get_attr_type (insn) == TYPE_MILLI));
9122 /* On the HP-PA the value is found in register(s) 28(-29), unless
9123 the mode is SF or DF. Then the value is returned in fr4 (32).
9125 This must perform the same promotions as PROMOTE_MODE, else
9126 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
9128 Small structures must be returned in a PARALLEL on PA64 in order
9129 to match the HP Compiler ABI. */
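/* For example, on TARGET_64BIT a 16-byte struct is returned as

	(parallel [(expr_list (reg:DI 28) (const_int 0))
		   (expr_list (reg:DI 29) (const_int 8))])

as constructed in the first branch below.  */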
9132 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
9134 enum machine_mode valmode;
9136 if (AGGREGATE_TYPE_P (valtype)
9137 || TREE_CODE (valtype) == COMPLEX_TYPE
9138 || TREE_CODE (valtype) == VECTOR_TYPE)
9140 if (TARGET_64BIT)
9142 /* Aggregates with a size less than or equal to 128 bits are
9143 returned in GR 28(-29). They are left justified. The pad
9144 bits are undefined. Larger aggregates are returned in
9145 memory. */
9146 rtx loc[2];
9147 int i, offset = 0;
9148 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9150 for (i = 0; i < ub; i++)
9152 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9153 gen_rtx_REG (DImode, 28 + i),
9154 GEN_INT (offset));
9155 offset += 8;
9158 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9160 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9162 /* Aggregates 5 to 8 bytes in size are returned in general
9163 registers r28-r29 in the same manner as other non
9164 floating-point objects. The data is right-justified and
9165 zero-extended to 64 bits. This is opposite to the normal
9166 justification used on big endian targets and requires
9167 special treatment. */
9168 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9169 gen_rtx_REG (DImode, 28), const0_rtx);
9170 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9174 if ((INTEGRAL_TYPE_P (valtype)
9175 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9176 || POINTER_TYPE_P (valtype))
9177 valmode = word_mode;
9178 else
9179 valmode = TYPE_MODE (valtype);
9181 if (TREE_CODE (valtype) == REAL_TYPE
9182 && !AGGREGATE_TYPE_P (valtype)
9183 && TYPE_MODE (valtype) != TFmode
9184 && !TARGET_SOFT_FLOAT)
9185 return gen_rtx_REG (valmode, 32);
9187 return gen_rtx_REG (valmode, 28);
9190 /* Return the location of a parameter that is passed in a register or NULL
9191 if the parameter has any component that is passed in memory.
9193 This is new code and will be pushed into the net sources after
9194 further testing.
9196 ??? We might want to restructure this so that it looks more like other
9197 ports. */
9199 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9200 int named ATTRIBUTE_UNUSED)
9202 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9203 int alignment = 0;
9204 int arg_size;
9205 int fpr_reg_base;
9206 int gpr_reg_base;
9207 rtx retval;
9209 if (mode == VOIDmode)
9210 return NULL_RTX;
9212 arg_size = FUNCTION_ARG_SIZE (mode, type);
9214 /* If this arg would be passed partially or totally on the stack, then
9215 this routine should return zero. pa_arg_partial_bytes will
9216 handle arguments which are split between regs and stack slots if
9217 the ABI mandates split arguments. */
9218 if (!TARGET_64BIT)
9220 /* The 32-bit ABI does not split arguments. */
9221 if (cum->words + arg_size > max_arg_words)
9222 return NULL_RTX;
9224 else
9226 if (arg_size > 1)
9227 alignment = cum->words & 1;
9228 if (cum->words + alignment >= max_arg_words)
9229 return NULL_RTX;
9232 /* The 32bit ABIs and the 64bit ABIs are rather different,
9233 particularly in their handling of FP registers. We might
9234 be able to cleverly share code between them, but I'm not
9235 going to bother in the hope that splitting them up results
9236 in code that is more easily understood. */
9238 if (TARGET_64BIT)
9240 /* Advance the base registers to their current locations.
9242 Remember, gprs grow towards smaller register numbers while
9243 fprs grow to higher register numbers. Also remember that
9244 although FP regs are 32-bit addressable, we pretend that
9245 the registers are 64-bits wide. */
9246 gpr_reg_base = 26 - cum->words;
9247 fpr_reg_base = 32 + cum->words;
9249 /* Arguments wider than one word and small aggregates need special
9250 treatment. */
9251 if (arg_size > 1
9252 || mode == BLKmode
9253 || (type && (AGGREGATE_TYPE_P (type)
9254 || TREE_CODE (type) == COMPLEX_TYPE
9255 || TREE_CODE (type) == VECTOR_TYPE)))
9257 /* Double-extended precision (80-bit), quad-precision (128-bit)
9258 and aggregates including complex numbers are aligned on
9259 128-bit boundaries. The first eight 64-bit argument slots
9260 are associated one-to-one, with general registers r26
9261 through r19, and also with floating-point registers fr4
9262 through fr11. Arguments larger than one word are always
9263 passed in general registers.
9265 Using a PARALLEL with a word mode register results in left
9266 justified data on a big-endian target. */
9268 rtx loc[8];
9269 int i, offset = 0, ub = arg_size;
9271 /* Align the base register. */
9272 gpr_reg_base -= alignment;
9274 ub = MIN (ub, max_arg_words - cum->words - alignment);
9275 for (i = 0; i < ub; i++)
9277 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9278 gen_rtx_REG (DImode, gpr_reg_base),
9279 GEN_INT (offset));
9280 gpr_reg_base -= 1;
9281 offset += 8;
9284 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9287 else
9289 /* If the argument is larger than a word, then we know precisely
9290 which registers we must use. */
9291 if (arg_size > 1)
9293 if (cum->words)
9295 gpr_reg_base = 23;
9296 fpr_reg_base = 38;
9298 else
9300 gpr_reg_base = 25;
9301 fpr_reg_base = 34;
9304 /* Structures 5 to 8 bytes in size are passed in the general
9305 registers in the same manner as other non floating-point
9306 objects. The data is right-justified and zero-extended
9307 to 64 bits. This is opposite to the normal justification
9308 used on big endian targets and requires special treatment.
9309 We now define BLOCK_REG_PADDING to pad these objects.
9310 Aggregates, complex and vector types are passed in the same
9311 manner as structures. */
9312 if (mode == BLKmode
9313 || (type && (AGGREGATE_TYPE_P (type)
9314 || TREE_CODE (type) == COMPLEX_TYPE
9315 || TREE_CODE (type) == VECTOR_TYPE)))
9317 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9318 gen_rtx_REG (DImode, gpr_reg_base),
9319 const0_rtx);
9320 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9323 else
9325 /* We have a single word (32 bits). A simple computation
9326 will get us the register #s we need. */
9327 gpr_reg_base = 26 - cum->words;
9328 fpr_reg_base = 32 + 2 * cum->words;
9332 /* Determine if the argument needs to be passed in both general and
9333 floating point registers. */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
          is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
          pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
          registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
          sibcall arguments set up with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
         calls with the 32-bit ABI and the HP assembler, since there is no
         way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
          && !TARGET_GAS
          && !cum->incoming
          && cum->indirect
          && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
        = gen_rtx_PARALLEL
            (mode,
             gen_rtvec (2,
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, fpr_reg_base),
                                           const0_rtx),
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, gpr_reg_base),
                                           const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
          /* Indirect calls in the normal 32-bit ABI require all arguments
             to be passed in general registers.  */
          || (!TARGET_PORTABLE_RUNTIME
              && !TARGET_64BIT
              && !TARGET_ELF32
              && cum->indirect)
          /* If the parameter is not a scalar floating-point parameter,
             then it belongs in GPRs.  */
          || GET_MODE_CLASS (mode) != MODE_FLOAT
          /* A structure with a single SFmode field belongs in a GPR.  */
          || (type && AGGREGATE_TYPE_P (type)))
        retval = gen_rtx_REG (mode, gpr_reg_base);
      else
        retval = gen_rtx_REG (mode, fpr_reg_base);
    }

  return retval;
}

/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
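
/* For example (assuming TARGET_64BIT, so UNITS_PER_WORD is 8): a four-word
   argument with cum->words == 6 and no alignment padding neither fits
   entirely in the eight argument registers (6 + 4 > 8) nor starts on the
   stack (6 < 8), so pa_arg_partial_bytes returns (8 - 6) * 8 == 16 bytes
   passed in registers; the remaining two words go on the stack.  */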

/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            {
              output_section_asm_op ("\t.SPACE $TEXT$\n"
                                     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
                                     "ACCESS=44,SORT=24,COMDAT");
              return;
            }
        }
      else
        {
          /* There isn't a current function or the body of the current
             function has been completed.  So, we are changing to the
             text section to output debugging information.  Thus, we
             need to forget that we are in the text section so that
             varasm.c will call us when text_section is selected again.  */
          gcc_assert (!cfun || !cfun->machine
                      || cfun->machine->in_nsubspa == 2);
          in_section = NULL;
        }
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}

/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}

/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems handling the
     difference of two symbols, which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}

/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc
   cannot be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}

static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here; functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
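
/* For a data symbol named foo, for example, the function above emits

       .EXPORT foo,DATA

   (an illustrative sketch of the assembler output; the name itself is
   written by assemble_name).  */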

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

bool
pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* The SOM ABI says that objects larger than 64 bits are returned in memory.
     The PA64 ABI says that objects larger than 128 bits are returned in
     memory.  Note that int_size_in_bytes can return -1 if the size of the
     object is variable or larger than the maximum value that can be
     expressed as a HOST_WIDE_INT.  It can also return zero for an empty
     type.  The simplest way to handle variable and empty types is to pass
     them in memory.  This avoids problems in defining the boundaries of
     argument slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
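
/* For example, a 16-byte structure is returned in registers under the
   64-bit ABI (16 <= 16) but in memory under the 32-bit SOM ABI (16 > 8);
   a 24-byte structure is returned in memory under both.  */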

/* Structure to hold the declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct extern_symbol GTY(())
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol objects.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;

#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol *p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}

/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif

/* Return true if a change from mode FROM to mode TO for a register
   in register class RCLASS is invalid.  */

bool
pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                             enum reg_class rclass)
{
  if (from == to)
    return false;

  /* Reject changes to/from complex and vector modes.  */
  if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
      || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
    return true;

  if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
    return false;

  /* There is no way to load QImode or HImode values directly from
     memory into the FP registers.  SImode loads to the FP registers
     are not zero-extended.  On the 64-bit target, this conflicts with
     the definition of LOAD_EXTEND_OP.  Thus, we can't allow changing
     between modes with different sizes in the floating-point registers.  */
  if (MAYBE_FP_REG_CLASS_P (rclass))
    return true;

  /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
     in specific sets of registers.  Thus, we cannot allow changing
     to a larger mode when it's larger than a word.  */
  if (GET_MODE_SIZE (to) > UNITS_PER_WORD
      && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
    return true;

  return false;
}
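
/* For example, changing an SImode value (4 bytes) to DImode (8 bytes) in
   a floating-point register class is rejected by the code above, while a
   same-size change such as SImode to SFmode falls through and is allowed:
   the sizes match and neither mode is complex or vector.  */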

/* Returns TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   We should return FALSE for QImode and HImode because these modes
   are not OK in the floating-point registers.  However, this prevents
   tying these modes to SImode and DImode in the general registers.
   So, this isn't a good idea.  We rely on HARD_REGNO_MODE_OK and
   CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
   in the floating-point registers.  */

bool
pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* Don't tie modes in different classes.  */
  if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
    return false;

  return true;
}
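
/* Thus SFmode and DFmode are tieable (both MODE_FLOAT), as are SImode and
   DImode (both MODE_INT), but SImode and SFmode are not, since they fall
   in different mode classes.  */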

#include "gt-pa.h"