/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static struct deferred_plabel *get_plabel (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use f32-f127.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
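
/* Illustrative usage (not from the original source; the exact register
   name spelling is whatever decode_reg_name accepts for this target):
   an option such as -mfixed-range=fr12-fr15,fr20-fr23 would mark those
   two FP register ranges fixed, and a range covering every FP register
   would also set MASK_DISABLE_FPREGS via the loop above.  */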

/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}

void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */

int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
	  || CONST_OK_FOR_LETTER_P (intval, 'N')
	  || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
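
/* The three constraint letters checked above correspond to the one-insn
   cases (per their definitions in pa.h): 'J' is a 14-bit signed immediate
   (ldo), 'N' is an ldil-style constant (roughly, low 11 bits zero), and
   'K' is a constant zdepi can deposit.  Illustrative values: 0x12345800
   passes 'N' and moves in one ldil, while 0x12345678 fails all three
   letters, needs the two-insn ldil/ldo sequence, and is rejected here.  */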

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */

int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */

int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
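
/* Worked example (illustrative): x = 0x7f0 gives lsb_mask = 0x10 and
   t = (0x7f + 0x10) & ~0xf = 0x80, a power of two, so it is accepted;
   0x7f0 is the 5-bit immediate -1 sign extended to a 7-bit field and
   deposited at bit 4.  By contrast, x = 0x101 gives t = 0x11, not a
   power of two, so it is rejected: bits 0 and 8 cannot both come from
   a single sign-extended 5-bit field.  */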

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */

int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
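
/* Worked example (illustrative): mask = 0xfff0ffff (the 1..10..01..1
   pattern) complements to the single run 0x000f0000; adding its lowest
   set bit gives the power of two 0x00100000, so it is accepted.  A mask
   such as 0xff0ff0ff complements to two separate runs and is rejected.  */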

/* True iff depi can be used to compute (reg | MASK).  */

int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
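
/* Same trick as and_mask_p, but without the complement: MASK itself must
   be a single contiguous run of 1s.  Illustrative values: 0x00ffff00 is
   accepted, 0x00ff00ff is not.  */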

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
		      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
				    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
	= gen_const_mem (Pmode,
			 gen_rtx_LO_SUM (Pmode, tmp_reg,
					 gen_rtx_UNSPEC (Pmode,
							 gen_rtvec (1, orig),
							 UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= (mask + 1) / 2
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
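
/* Worked numeric example (illustrative): for a MODE_INT reference to
   X + 70000, the mask is 0x3fff and 70000 & 0x3fff = 4464, which is
   below the halfway point of 8192, so Y rounds down to 65536.  Z = X +
   65536 is computed once and the reference becomes memory (Z + 4464),
   whose displacement fits in a 14-bit field.  */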

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* For the HPPA, REG, REG+CONST and LO_SUM addresses have cost 1,
   addresses involving symbolic constants (HIGH) have cost 2, and
   everything else costs 4.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
							     0),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, gen_rtx_MEM (GET_MODE (operand0),
						    scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress)
		  && flag_pic)
		{
		  operands[1] = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (operands[1], 0),
							mode, temp);
		  operands[1] = gen_rtx_MEM (mode, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
	    }
	  return 1;
	}
      else if (GET_CODE (operand1) != CONST_INT
	       || !cint_ok_for_move (INTVAL (operand1)))
	{
	  rtx insn, temp;
	  rtx op1 = operand1;
	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;
	  int insert = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	  if (TARGET_64BIT
	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	    {
	      HOST_WIDE_INT nval;

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we can
		 use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32-bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
	      if (value != nval)
		{
#if HOST_BITS_PER_WIDE_INT > 32
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
#endif
		  insert = 1;
		  value = nval;
		  operand1 = GEN_INT (nval);
		}
	    }

	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	  else
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !insert)
	    {
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
		 high part.  */
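	      /* Worked example (illustrative): value = 0x3000 has
		 low = 0x3000 >= 0x2000, so high becomes 0x4000 and low
		 becomes -0x1000; the split materializes the constant as
		 0x4000 + (-4096) = 0x3000 with the low part in ldo's
		 14-bit signed range.  */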
1765 HOST_WIDE_INT low = value & 0x3fff;
1766 HOST_WIDE_INT high = value & ~ 0x3fff;
1768 if (low >= 0x2000)
1770 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
1771 high += 0x2000;
1772 else
1773 high += 0x4000;
1776 low = value - high;
1778 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
1779 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1781 else
1783 emit_insn (gen_rtx_SET (VOIDmode, temp,
1784 gen_rtx_HIGH (mode, operand1)));
1785 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
1788 insn = emit_move_insn (operands[0], operands[1]);
1790 /* Now insert the most significant 32 bits of the value
1791 into the register. When we don't have a second register
1792 available, it could take up to nine instructions to load
1793 a 64-bit integer constant. Prior to reload, we force
1794 constants that would take more than three instructions
1795 to load to the constant pool. During and after reload,
1796 we have to handle all possible values. */
1797 if (insert)
1799 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
1800 register and the value to be inserted is outside the
1801 range that can be loaded with three depdi instructions. */
1802 if (temp != operand0 && (insv >= 16384 || insv < -16384))
1804 operand1 = GEN_INT (insv);
1806 emit_insn (gen_rtx_SET (VOIDmode, temp,
1807 gen_rtx_HIGH (mode, operand1)));
1808 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
1809 emit_insn (gen_insv (operand0, GEN_INT (32),
1810 const0_rtx, temp));
1812 else
1814 int len = 5, pos = 27;
1816 /* Insert the bits using the depdi instruction. */
1817 while (pos >= 0)
1819 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
1820 HOST_WIDE_INT sign = v5 < 0;
1822 /* Left extend the insertion. */
1823 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
1824 while (pos > 0 && (insv & 1) == sign)
1826 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
1827 len += 1;
1828 pos -= 1;
1831 emit_insn (gen_insv (operand0, GEN_INT (len),
1832 GEN_INT (pos), GEN_INT (v5)));
1834 len = pos > 0 && pos < 5 ? pos : 5;
1835 pos -= len;
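	      /* A standalone sketch (not GCC code) of the field
		 decomposition above, with the run-folding optimization
		 omitted for clarity; positions follow the big-endian
		 bit numbering insv uses on the PA:

		   #include <stdio.h>

		   static void decompose (long long insv)
		   {
		     int pos = 27, len = 5;
		     while (pos >= 0)
		       {
			 /* Sign-extended low 5 bits.  */
			 long long v5 = ((insv & 31) ^ 16) - 16;
			 printf ("depdi %lld,%d,%d\n", v5, pos, len);
			 insv = insv >= 0 ? insv >> len : ~(~insv >> len);
			 len = (pos > 0 && pos < 5) ? pos : 5;
			 pos -= len;
		       }
		   }

		 The real loop additionally widens a deposit while the
		 following bits equal its sign bit, which is what keeps
		 common values down to a few instructions.  */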
1840 REG_NOTES (insn)
1841 = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));
1843 return 1;
1846 /* Now have insn-emit do whatever it normally does. */
1847 return 0;
1850 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
1851 it will need a link/runtime reloc). */
1853 int
1854 reloc_needed (tree exp)
1856 int reloc = 0;
1858 switch (TREE_CODE (exp))
1860 case ADDR_EXPR:
1861 return 1;
1863 case PLUS_EXPR:
1864 case MINUS_EXPR:
1865 reloc = reloc_needed (TREE_OPERAND (exp, 0));
1866 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
1867 break;
1869 case NOP_EXPR:
1870 case CONVERT_EXPR:
1871 case NON_LVALUE_EXPR:
1872 reloc = reloc_needed (TREE_OPERAND (exp, 0));
1873 break;
1875 case CONSTRUCTOR:
1877 register tree link;
1878 for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
1879 if (TREE_VALUE (link) != 0)
1880 reloc |= reloc_needed (TREE_VALUE (link));
1882 break;
1884 case ERROR_MARK:
1885 break;
1887 default:
1888 break;
1890 return reloc;
1893 /* Does operand (which is a symbolic_operand) live in text space?
1894 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
1895 will be true. */
1897 int
1898 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
1900 if (GET_CODE (operand) == CONST)
1901 operand = XEXP (XEXP (operand, 0), 0);
1902 if (flag_pic)
1904 if (GET_CODE (operand) == SYMBOL_REF)
1905 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
1907 else
1909 if (GET_CODE (operand) == SYMBOL_REF)
1910 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
1912 return 1;
1916 /* Return the best assembler insn template
1917 for moving operands[1] into operands[0] as a fullword. */
1918 const char *
1919 singlemove_string (rtx *operands)
1921 HOST_WIDE_INT intval;
1923 if (GET_CODE (operands[0]) == MEM)
1924 return "stw %r1,%0";
1925 if (GET_CODE (operands[1]) == MEM)
1926 return "ldw %1,%0";
1927 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1929 long i;
1930 REAL_VALUE_TYPE d;
1932 gcc_assert (GET_MODE (operands[1]) == SFmode);
1934 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
1935 bit pattern. */
1936 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
1937 REAL_VALUE_TO_TARGET_SINGLE (d, i);
1939 operands[1] = GEN_INT (i);
1940 /* Fall through to CONST_INT case. */
1942 if (GET_CODE (operands[1]) == CONST_INT)
1944 intval = INTVAL (operands[1]);
1946 if (VAL_14_BITS_P (intval))
1947 return "ldi %1,%0";
1948 else if ((intval & 0x7ff) == 0)
1949 return "ldil L'%1,%0";
1950 else if (zdepi_cint_p (intval))
1951 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
1952 else
1953 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
1955 return "copy %1,%0";
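/* Illustrative examples of the template selection above (the target
   register %r4 is arbitrary): 42 fits in 14 bits and gives
   "ldi 42,%r4"; 0x20000 has its low 11 bits clear and gives
   "ldil L'0x20000,%r4"; 0x00ff0000, a contiguous bit string, can be
   deposited with a single zdepi/depwi; something like 0x12345 falls
   through to the two-instruction ldil/ldo sequence.  */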
1959 /* Compute position (in OP[1]) and width (in OP[2])
1960 useful for copying IMM to a register using the zdepi
1961 instructions. Store the immediate value to insert in OP[0]. */
1962 static void
1963 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
1965 int lsb, len;
1967 /* Find the least significant set bit in IMM. */
1968 for (lsb = 0; lsb < 32; lsb++)
1970 if ((imm & 1) != 0)
1971 break;
1972 imm >>= 1;
1975 /* Choose variants based on *sign* of the 5-bit field. */
1976 if ((imm & 0x10) == 0)
1977 len = (lsb <= 28) ? 4 : 32 - lsb;
1978 else
1980 /* Find the width of the bitstring in IMM. */
1981 for (len = 5; len < 32; len++)
1983 if ((imm & (1 << len)) == 0)
1984 break;
1987 /* Sign extend IMM as a 5-bit value. */
1988 imm = (imm & 0xf) - 0x10;
1991 op[0] = imm;
1992 op[1] = 31 - lsb;
1993 op[2] = len;
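/* Worked example (illustrative): for imm = 0x00ff0000 the low set bit
   is at lsb = 16 and imm >> 16 = 0xff has bit 4 set, so the width scan
   stops at the clear bit 8, giving len = 8; the 5-bit sign extension
   yields (0xff & 0xf) - 0x10 = -1.  The result is op[0] = -1,
   op[1] = 31 - 16 = 15, op[2] = 8: deposit eight 1-bits ending at PA
   bit position 15.  compute_zdepdi_operands below is the 64-bit
   analogue.  */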
1996 /* Compute position (in OP[1]) and width (in OP[2])
1997 useful for copying IMM to a register using the depdi,z
1998 instructions. Store the immediate value to insert in OP[0]. */
1999 void
2000 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2002 HOST_WIDE_INT lsb, len;
2004 /* Find the least significant set bit in IMM. */
2005 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2007 if ((imm & 1) != 0)
2008 break;
2009 imm >>= 1;
2012 /* Choose variants based on *sign* of the 5-bit field. */
2013 if ((imm & 0x10) == 0)
2014 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2015 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2016 else
2018 /* Find the width of the bitstring in IMM. */
2019 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2021 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2022 break;
2025 /* Sign extend IMM as a 5-bit value. */
2026 imm = (imm & 0xf) - 0x10;
2029 op[0] = imm;
2030 op[1] = 63 - lsb;
2031 op[2] = len;
2034 /* Output assembler code to perform a doubleword move insn
2035 with operands OPERANDS. */
2037 const char *
2038 output_move_double (rtx *operands)
2040 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2041 rtx latehalf[2];
2042 rtx addreg0 = 0, addreg1 = 0;
2044 /* First classify both operands. */
2046 if (REG_P (operands[0]))
2047 optype0 = REGOP;
2048 else if (offsettable_memref_p (operands[0]))
2049 optype0 = OFFSOP;
2050 else if (GET_CODE (operands[0]) == MEM)
2051 optype0 = MEMOP;
2052 else
2053 optype0 = RNDOP;
2055 if (REG_P (operands[1]))
2056 optype1 = REGOP;
2057 else if (CONSTANT_P (operands[1]))
2058 optype1 = CNSTOP;
2059 else if (offsettable_memref_p (operands[1]))
2060 optype1 = OFFSOP;
2061 else if (GET_CODE (operands[1]) == MEM)
2062 optype1 = MEMOP;
2063 else
2064 optype1 = RNDOP;
2066 /* Check for the cases that the operand constraints are not
2067 supposed to allow to happen. */
2068 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2070 /* Handle auto decrementing and incrementing loads and stores
2071 specifically, since the structure of the function doesn't work
2072 for them without major modification. Do it better when we teach
2073 this port about the general inc/dec addressing of PA.
2074 (This was written by tege. Chide him if it doesn't work.) */
2076 if (optype0 == MEMOP)
2078 /* We have to output the address syntax ourselves, since print_operand
2079 doesn't deal with the addresses we want to use. Fix this later. */
2081 rtx addr = XEXP (operands[0], 0);
2082 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2084 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2086 operands[0] = XEXP (addr, 0);
2087 gcc_assert (GET_CODE (operands[1]) == REG
2088 && GET_CODE (operands[0]) == REG);
2090 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2092 /* No overlap between high target register and address
2093 register. (We do this in a non-obvious way to
2094 save a register file writeback) */
2095 if (GET_CODE (addr) == POST_INC)
2096 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2097 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2099 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2101 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2103 operands[0] = XEXP (addr, 0);
2104 gcc_assert (GET_CODE (operands[1]) == REG
2105 && GET_CODE (operands[0]) == REG);
2107 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2108 /* No overlap between high target register and address
2109 register. (We do this in a non-obvious way to save a
2110 register file writeback) */
2111 if (GET_CODE (addr) == PRE_INC)
2112 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2113 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2116 if (optype1 == MEMOP)
2118 /* We have to output the address syntax ourselves, since print_operand
2119 doesn't deal with the addresses we want to use. Fix this later. */
2121 rtx addr = XEXP (operands[1], 0);
2122 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2124 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2126 operands[1] = XEXP (addr, 0);
2127 gcc_assert (GET_CODE (operands[0]) == REG
2128 && GET_CODE (operands[1]) == REG);
2130 if (!reg_overlap_mentioned_p (high_reg, addr))
2132 /* No overlap between high target register and address
2133 register. (We do this in a non-obvious way to
2134 save a register file writeback) */
2135 if (GET_CODE (addr) == POST_INC)
2136 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2137 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2139 else
2141 /* This is an undefined situation. We should load into the
2142 address register *and* update that register. Probably
2143 we don't need to handle this at all. */
2144 if (GET_CODE (addr) == POST_INC)
2145 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2146 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2149 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2151 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2153 operands[1] = XEXP (addr, 0);
2154 gcc_assert (GET_CODE (operands[0]) == REG
2155 && GET_CODE (operands[1]) == REG);
2157 if (!reg_overlap_mentioned_p (high_reg, addr))
2159 /* No overlap between high target register and address
2160 register. (We do this in a non-obvious way to
2161 save a register file writeback) */
2162 if (GET_CODE (addr) == PRE_INC)
2163 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2164 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2166 else
2168 /* This is an undefined situation. We should load into the
2169 address register *and* update that register. Probably
2170 we don't need to handle this at all. */
2171 if (GET_CODE (addr) == PRE_INC)
2172 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2173 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2176 else if (GET_CODE (addr) == PLUS
2177 && GET_CODE (XEXP (addr, 0)) == MULT)
2179 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2181 if (!reg_overlap_mentioned_p (high_reg, addr))
2183 rtx xoperands[4];
2185 xoperands[0] = high_reg;
2186 xoperands[1] = XEXP (addr, 1);
2187 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2188 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2189 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2190 xoperands);
2191 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2193 else
2195 rtx xoperands[4];
2197 xoperands[0] = high_reg;
2198 xoperands[1] = XEXP (addr, 1);
2199 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2200 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2201 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2202 xoperands);
2203 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2208 /* If an operand is an unoffsettable memory ref, find a register
2209 we can increment temporarily to make it refer to the second word. */
2211 if (optype0 == MEMOP)
2212 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2214 if (optype1 == MEMOP)
2215 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2217 /* Ok, we can do one word at a time.
2218 Normally we do the low-numbered word first.
2220 In either case, set up in LATEHALF the operands to use
2221 for the high-numbered word and in some cases alter the
2222 operands in OPERANDS to be suitable for the low-numbered word. */
2224 if (optype0 == REGOP)
2225 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2226 else if (optype0 == OFFSOP)
2227 latehalf[0] = adjust_address (operands[0], SImode, 4);
2228 else
2229 latehalf[0] = operands[0];
2231 if (optype1 == REGOP)
2232 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2233 else if (optype1 == OFFSOP)
2234 latehalf[1] = adjust_address (operands[1], SImode, 4);
2235 else if (optype1 == CNSTOP)
2236 split_double (operands[1], &operands[1], &latehalf[1]);
2237 else
2238 latehalf[1] = operands[1];
2240 /* If the first move would clobber the source of the second one,
2241 do them in the other order.
2243 This can happen in two cases:
2245 mem -> register where the first half of the destination register
2246 is the same register used in the memory's address. Reload
2247 can create such insns.
2249 mem in this case will be either register indirect or register
2250 indirect plus a valid offset.
2252 register -> register move where REGNO(dst) == REGNO(src + 1)
2253 someone (Tim/Tege?) claimed this can happen for parameter loads.
2255 Handle mem -> register case first. */
2256 if (optype0 == REGOP
2257 && (optype1 == MEMOP || optype1 == OFFSOP)
2258 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2259 operands[1], 0))
2261 /* Do the late half first. */
2262 if (addreg1)
2263 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2264 output_asm_insn (singlemove_string (latehalf), latehalf);
2266 /* Then clobber. */
2267 if (addreg1)
2268 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2269 return singlemove_string (operands);
2272 /* Now handle register -> register case. */
2273 if (optype0 == REGOP && optype1 == REGOP
2274 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2276 output_asm_insn (singlemove_string (latehalf), latehalf);
2277 return singlemove_string (operands);
2280 /* Normal case: do the two words, low-numbered first. */
2282 output_asm_insn (singlemove_string (operands), operands);
2284 /* Make any unoffsettable addresses point at high-numbered word. */
2285 if (addreg0)
2286 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2287 if (addreg1)
2288 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2290 /* Do that word. */
2291 output_asm_insn (singlemove_string (latehalf), latehalf);
2293 /* Undo the adds we just did. */
2294 if (addreg0)
2295 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2296 if (addreg1)
2297 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2299 return "";
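/* Illustrative examples of the overlap handling above (register
   numbers hypothetical).  A DImode load into %r4/%r5 from 0(%r4)
   would clobber its own address register if done low word first, so
   the late half is emitted first:

       ldw 4(%r4),%r5
       ldw 0(%r4),%r4

   Likewise for the register pair copy %r4/%r5 <- %r3/%r4: copying
   %r3 into %r4 first would destroy the source of the second copy.  */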
2302 const char *
2303 output_fp_move_double (rtx *operands)
2305 if (FP_REG_P (operands[0]))
2307 if (FP_REG_P (operands[1])
2308 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2309 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2310 else
2311 output_asm_insn ("fldd%F1 %1,%0", operands);
2313 else if (FP_REG_P (operands[1]))
2315 output_asm_insn ("fstd%F0 %1,%0", operands);
2317 else
2319 rtx xoperands[2];
2321 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2323 /* This is a pain. You have to be prepared to deal with an
2324 arbitrary address here including pre/post increment/decrement.
2326 So avoid this in the MD. */
2327 gcc_assert (GET_CODE (operands[0]) == REG);
2329 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2330 xoperands[0] = operands[0];
2331 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2333 return "";
2336 /* Return a REG that occurs in ADDR with coefficient 1.
2337 ADDR can be effectively incremented by incrementing REG. */
2339 static rtx
2340 find_addr_reg (rtx addr)
2342 while (GET_CODE (addr) == PLUS)
2344 if (GET_CODE (XEXP (addr, 0)) == REG)
2345 addr = XEXP (addr, 0);
2346 else if (GET_CODE (XEXP (addr, 1)) == REG)
2347 addr = XEXP (addr, 1);
2348 else if (CONSTANT_P (XEXP (addr, 0)))
2349 addr = XEXP (addr, 1);
2350 else if (CONSTANT_P (XEXP (addr, 1)))
2351 addr = XEXP (addr, 0);
2352 else
2353 gcc_unreachable ();
2355 gcc_assert (GET_CODE (addr) == REG);
2356 return addr;
2359 /* Emit code to perform a block move.
2361 OPERANDS[0] is the destination pointer as a REG, clobbered.
2362 OPERANDS[1] is the source pointer as a REG, clobbered.
2363 OPERANDS[2] is a register for temporary storage.
2364 OPERANDS[3] is a register for temporary storage.
2365 OPERANDS[4] is the size as a CONST_INT
2366 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2367 OPERANDS[6] is another temporary register. */
2369 const char *
2370 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2372 int align = INTVAL (operands[5]);
2373 unsigned long n_bytes = INTVAL (operands[4]);
2375 /* We can't move more than a word at a time because the PA
2376 has no integer move insns longer than a word. (Could use fp mem ops?) */
2377 if (align > (TARGET_64BIT ? 8 : 4))
2378 align = (TARGET_64BIT ? 8 : 4);
2380 /* Note that we know each loop below will execute at least twice
2381 (else we would have open-coded the copy). */
2382 switch (align)
2384 case 8:
2385 /* Pre-adjust the loop counter. */
2386 operands[4] = GEN_INT (n_bytes - 16);
2387 output_asm_insn ("ldi %4,%2", operands);
2389 /* Copying loop. */
2390 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2391 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2392 output_asm_insn ("std,ma %3,8(%0)", operands);
2393 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2394 output_asm_insn ("std,ma %6,8(%0)", operands);
2396 /* Handle the residual. There could be up to 15 bytes of
2397 residual to copy! */
2398 if (n_bytes % 16 != 0)
2400 operands[4] = GEN_INT (n_bytes % 8);
2401 if (n_bytes % 16 >= 8)
2402 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2403 if (n_bytes % 8 != 0)
2404 output_asm_insn ("ldd 0(%1),%6", operands);
2405 if (n_bytes % 16 >= 8)
2406 output_asm_insn ("std,ma %3,8(%0)", operands);
2407 if (n_bytes % 8 != 0)
2408 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2410 return "";
2412 case 4:
2413 /* Pre-adjust the loop counter. */
2414 operands[4] = GEN_INT (n_bytes - 8);
2415 output_asm_insn ("ldi %4,%2", operands);
2417 /* Copying loop. */
2418 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2419 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2420 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2421 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2422 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2424 /* Handle the residual. There could be up to 7 bytes of
2425 residual to copy! */
2426 if (n_bytes % 8 != 0)
2428 operands[4] = GEN_INT (n_bytes % 4);
2429 if (n_bytes % 8 >= 4)
2430 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2431 if (n_bytes % 4 != 0)
2432 output_asm_insn ("ldw 0(%1),%6", operands);
2433 if (n_bytes % 8 >= 4)
2434 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2435 if (n_bytes % 4 != 0)
2436 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2438 return "";
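      /* Illustrative expansion (hypothetical operands): for
	 n_bytes = 23 and align = 4, the counter is preloaded with
	 23 - 8 = 15 and each loop iteration copies 8 bytes; the
	 residual 23 % 8 = 7 is then handled by one extra word copy
	 plus a final stby,e that stores the remaining 23 % 4 = 3
	 bytes.  */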
2440 case 2:
2441 /* Pre-adjust the loop counter. */
2442 operands[4] = GEN_INT (n_bytes - 4);
2443 output_asm_insn ("ldi %4,%2", operands);
2445 /* Copying loop. */
2446 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2447 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2448 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2449 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2450 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2452 /* Handle the residual. */
2453 if (n_bytes % 4 != 0)
2455 if (n_bytes % 4 >= 2)
2456 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2457 if (n_bytes % 2 != 0)
2458 output_asm_insn ("ldb 0(%1),%6", operands);
2459 if (n_bytes % 4 >= 2)
2460 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2461 if (n_bytes % 2 != 0)
2462 output_asm_insn ("stb %6,0(%0)", operands);
2464 return "";
2466 case 1:
2467 /* Pre-adjust the loop counter. */
2468 operands[4] = GEN_INT (n_bytes - 2);
2469 output_asm_insn ("ldi %4,%2", operands);
2471 /* Copying loop. */
2472 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2473 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2474 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2475 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2476 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2478 /* Handle the residual. */
2479 if (n_bytes % 2 != 0)
2481 output_asm_insn ("ldb 0(%1),%3", operands);
2482 output_asm_insn ("stb %3,0(%0)", operands);
2484 return "";
2486 default:
2487 gcc_unreachable ();
2491 /* Count the number of insns necessary to handle this block move.
2493 Basic structure is the same as output_block_move, except that we
2494 count insns rather than emit them. */
2496 static int
2497 compute_movmem_length (rtx insn)
2499 rtx pat = PATTERN (insn);
2500 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2501 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2502 unsigned int n_insns = 0;
2504 /* We can't move more than a word at a time because the PA
2505 has no integer move insns longer than a word. (Could use fp mem ops?) */
2506 if (align > (TARGET_64BIT ? 8 : 4))
2507 align = (TARGET_64BIT ? 8 : 4);
2509 /* The basic copying loop. */
2510 n_insns = 6;
2512 /* Residuals. */
2513 if (n_bytes % (2 * align) != 0)
2515 if ((n_bytes % (2 * align)) >= align)
2516 n_insns += 2;
2518 if ((n_bytes % align) != 0)
2519 n_insns += 2;
2522 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2523 return n_insns * 4;
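/* Worked example (illustrative): for a copy with n_bytes = 23 and
   align = 4, the loop accounts for 6 insns, 23 % 8 = 7 >= 4 adds the
   residual word copy pair (2 insns) and 23 % 4 = 3 adds the byte
   store pair (2 insns), so the returned length is (6 + 2 + 2) * 4
   = 40 bytes.  */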
2526 /* Emit code to perform a block clear.
2528 OPERANDS[0] is the destination pointer as a REG, clobbered.
2529 OPERANDS[1] is a register for temporary storage.
2530 OPERANDS[2] is the size as a CONST_INT
2531 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2533 const char *
2534 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2536 int align = INTVAL (operands[3]);
2537 unsigned long n_bytes = INTVAL (operands[2]);
2539 /* We can't clear more than a word at a time because the PA
2540 has no integer move insns longer than a word. */
2541 if (align > (TARGET_64BIT ? 8 : 4))
2542 align = (TARGET_64BIT ? 8 : 4);
2544 /* Note that we know each loop below will execute at least twice
2545 (else we would have open-coded the copy). */
2546 switch (align)
2548 case 8:
2549 /* Pre-adjust the loop counter. */
2550 operands[2] = GEN_INT (n_bytes - 16);
2551 output_asm_insn ("ldi %2,%1", operands);
2553 /* Loop. */
2554 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2555 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2556 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2558 /* Handle the residual. There could be up to 15 bytes of
2559 residual to copy! */
2560 if (n_bytes % 16 != 0)
2562 operands[2] = GEN_INT (n_bytes % 8);
2563 if (n_bytes % 16 >= 8)
2564 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2565 if (n_bytes % 8 != 0)
2566 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2568 return "";
2570 case 4:
2571 /* Pre-adjust the loop counter. */
2572 operands[2] = GEN_INT (n_bytes - 8);
2573 output_asm_insn ("ldi %2,%1", operands);
2575 /* Loop. */
2576 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2577 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2578 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2580 /* Handle the residual. There could be up to 7 bytes of
2581 residual to copy! */
2582 if (n_bytes % 8 != 0)
2584 operands[2] = GEN_INT (n_bytes % 4);
2585 if (n_bytes % 8 >= 4)
2586 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2587 if (n_bytes % 4 != 0)
2588 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2590 return "";
2592 case 2:
2593 /* Pre-adjust the loop counter. */
2594 operands[2] = GEN_INT (n_bytes - 4);
2595 output_asm_insn ("ldi %2,%1", operands);
2597 /* Loop. */
2598 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2599 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2600 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2602 /* Handle the residual. */
2603 if (n_bytes % 4 != 0)
2605 if (n_bytes % 4 >= 2)
2606 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2607 if (n_bytes % 2 != 0)
2608 output_asm_insn ("stb %%r0,0(%0)", operands);
2610 return "";
2612 case 1:
2613 /* Pre-adjust the loop counter. */
2614 operands[2] = GEN_INT (n_bytes - 2);
2615 output_asm_insn ("ldi %2,%1", operands);
2617 /* Loop. */
2618 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2619 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2620 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2622 /* Handle the residual. */
2623 if (n_bytes % 2 != 0)
2624 output_asm_insn ("stb %%r0,0(%0)", operands);
2626 return "";
2628 default:
2629 gcc_unreachable ();
2633 /* Count the number of insns necessary to handle this block clear.
2635 Basic structure is the same as output_block_clear, except that we
2636 count insns rather than emit them. */
2638 static int
2639 compute_clrmem_length (rtx insn)
2641 rtx pat = PATTERN (insn);
2642 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2643 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2644 unsigned int n_insns = 0;
2646 /* We can't clear more than a word at a time because the PA
2647 has no integer move insns longer than a word. */
2648 if (align > (TARGET_64BIT ? 8 : 4))
2649 align = (TARGET_64BIT ? 8 : 4);
2651 /* The basic loop. */
2652 n_insns = 4;
2654 /* Residuals. */
2655 if (n_bytes % (2 * align) != 0)
2657 if ((n_bytes % (2 * align)) >= align)
2658 n_insns++;
2660 if ((n_bytes % align) != 0)
2661 n_insns++;
2664 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2665 return n_insns * 4;
2669 const char *
2670 output_and (rtx *operands)
2672 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2674 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2675 int ls0, ls1, ms0, p, len;
2677 for (ls0 = 0; ls0 < 32; ls0++)
2678 if ((mask & (1 << ls0)) == 0)
2679 break;
2681 for (ls1 = ls0; ls1 < 32; ls1++)
2682 if ((mask & (1 << ls1)) != 0)
2683 break;
2685 for (ms0 = ls1; ms0 < 32; ms0++)
2686 if ((mask & (1 << ms0)) == 0)
2687 break;
2689 gcc_assert (ms0 == 32);
2691 if (ls1 == 32)
2693 len = ls0;
2695 gcc_assert (len);
2697 operands[2] = GEN_INT (len);
2698 return "{extru|extrw,u} %1,31,%2,%0";
2700 else
2702 /* We could use this `depi' for the case above as well, but `depi'
2703 requires one more register file access than an `extru'. */
2705 p = 31 - ls0;
2706 len = ls1 - ls0;
2708 operands[2] = GEN_INT (p);
2709 operands[3] = GEN_INT (len);
2710 return "{depi|depwi} 0,%2,%3,%0";
2713 else
2714 return "and %1,%2,%0";
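/* Worked examples (illustrative): mask = 0x00ffffff gives ls0 = 24
   and ls1 = 32, so the AND is done as an extract of the low 24 bits,
   "extru %1,31,24,%0"; mask = 0xffff00ff gives ls0 = 8, ls1 = 16,
   ms0 = 32, so the zero field is cleared in place with
   "depi 0,23,8,%0" (p = 31 - 8, len = 16 - 8).  Masks with more than
   one zero field never reach this code -- the insn's operand
   predicate excludes them, hence the ms0 == 32 assertion.  */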
2717 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2718 storing the result in operands[0]. */
2719 const char *
2720 output_64bit_and (rtx *operands)
2722 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2724 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2725 int ls0, ls1, ms0, p, len;
2727 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2728 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2729 break;
2731 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2732 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2733 break;
2735 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2736 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2737 break;
2739 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2741 if (ls1 == HOST_BITS_PER_WIDE_INT)
2743 len = ls0;
2745 gcc_assert (len);
2747 operands[2] = GEN_INT (len);
2748 return "extrd,u %1,63,%2,%0";
2750 else
2752 /* We could use this `depi' for the case above as well, but `depi'
2753 requires one more register file access than an `extru'. */
2755 p = 63 - ls0;
2756 len = ls1 - ls0;
2758 operands[2] = GEN_INT (p);
2759 operands[3] = GEN_INT (len);
2760 return "depdi 0,%2,%3,%0";
2763 else
2764 return "and %1,%2,%0";
2767 const char *
2768 output_ior (rtx *operands)
2770 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2771 int bs0, bs1, p, len;
2773 if (INTVAL (operands[2]) == 0)
2774 return "copy %1,%0";
2776 for (bs0 = 0; bs0 < 32; bs0++)
2777 if ((mask & (1 << bs0)) != 0)
2778 break;
2780 for (bs1 = bs0; bs1 < 32; bs1++)
2781 if ((mask & (1 << bs1)) == 0)
2782 break;
2784 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2786 p = 31 - bs0;
2787 len = bs1 - bs0;
2789 operands[2] = GEN_INT (p);
2790 operands[3] = GEN_INT (len);
2791 return "{depi|depwi} -1,%2,%3,%0";
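/* Worked example (illustrative): mask = 0x00000ff0 gives bs0 = 4 and
   bs1 = 12, so p = 31 - 4 = 27 and len = 8, producing
   "depi -1,27,8,%0", a deposit of eight 1-bits ending at bit
   position 27.  Masks whose set bits are not one contiguous run are
   excluded by the insn's operand predicate, hence the assertion
   above.  */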
2794 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
2795 storing the result in operands[0]. */
2796 const char *
2797 output_64bit_ior (rtx *operands)
2799 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2800 int bs0, bs1, p, len;
2802 if (INTVAL (operands[2]) == 0)
2803 return "copy %1,%0";
2805 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
2806 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
2807 break;
2809 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
2810 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
2811 break;
2813 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
2814 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2816 p = 63 - bs0;
2817 len = bs1 - bs0;
2819 operands[2] = GEN_INT (p);
2820 operands[3] = GEN_INT (len);
2821 return "depdi -1,%2,%3,%0";
2824 /* Target hook for assembling integer objects. This code handles
2825 aligned SI and DI integers specially since function references
2826 must be preceded by P%. */
2828 static bool
2829 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
2831 if (size == UNITS_PER_WORD
2832 && aligned_p
2833 && function_label_operand (x, VOIDmode))
2835 fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
2836 output_addr_const (asm_out_file, x);
2837 fputc ('\n', asm_out_file);
2838 return true;
2840 return default_assemble_integer (x, size, aligned_p);
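/* Illustrative output (the symbol name foo is hypothetical): an
   aligned word-sized reference to a function foo is emitted as

       .word P%foo

   (".dword P%foo" on 64-bit targets); the P% marker asks the linker
   for a plabel (procedure label) rather than a raw code address.  */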
2843 /* Output an ASCII string. */
2844 void
2845 output_ascii (FILE *file, const char *p, int size)
2847 int i;
2848 int chars_output;
2849 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
2851 /* The HP assembler can only take strings of 256 characters at one
2852 time. This is a limitation on input line length, *not* the
2853 length of the string. Sigh. Even worse, it seems that the
2854 restriction is in number of input characters (see \xnn &
2855 \whatever). So we have to do this very carefully. */
2857 fputs ("\t.STRING \"", file);
2859 chars_output = 0;
2860 for (i = 0; i < size; i += 4)
2862 int co = 0;
2863 int io = 0;
2864 for (io = 0, co = 0; io < MIN (4, size - i); io++)
2866 register unsigned int c = (unsigned char) p[i + io];
2868 if (c == '\"' || c == '\\')
2869 partial_output[co++] = '\\';
2870 if (c >= ' ' && c < 0177)
2871 partial_output[co++] = c;
2872 else
2874 unsigned int hexd;
2875 partial_output[co++] = '\\';
2876 partial_output[co++] = 'x';
2877 hexd = c / 16 - 0 + '0';
2878 if (hexd > '9')
2879 hexd -= '9' - 'a' + 1;
2880 partial_output[co++] = hexd;
2881 hexd = c % 16 - 0 + '0';
2882 if (hexd > '9')
2883 hexd -= '9' - 'a' + 1;
2884 partial_output[co++] = hexd;
2887 if (chars_output + co > 243)
2889 fputs ("\"\n\t.STRING \"", file);
2890 chars_output = 0;
2892 fwrite (partial_output, 1, (size_t) co, file);
2893 chars_output += co;
2894 co = 0;
2896 fputs ("\"\n", file);
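/* Illustrative output (hypothetical input): for the 4-byte string
   "ab\"\n" this emits

       .STRING "ab\"\x0a"

   with quotes and backslashes escaped, nonprinting bytes rendered as
   \xNN, and the directive restarted whenever the 243-character budget
   for one assembler input line would be exceeded.  */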
2899 /* Try to rewrite floating point comparisons & branches to avoid
2900 useless add,tr insns.
2902 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
2903 to see if FPCC is dead. CHECK_NOTES is nonzero for the
2904 first attempt to remove useless add,tr insns. It is zero
2905 for the second pass as reorg sometimes leaves bogus REG_DEAD
2906 notes lying around.
2908 When CHECK_NOTES is zero we can only eliminate add,tr insns
2909 when there's a 1:1 correspondence between fcmp and ftest/fbranch
2910 instructions. */
2911 static void
2912 remove_useless_addtr_insns (int check_notes)
2914 rtx insn;
2915 static int pass = 0;
2917 /* This is fairly cheap, so always run it when optimizing. */
2918 if (optimize > 0)
2920 int fcmp_count = 0;
2921 int fbranch_count = 0;
2923 /* Walk all the insns in this function looking for fcmp & fbranch
2924 instructions. Keep track of how many of each we find. */
2925 for (insn = get_insns (); insn; insn = next_insn (insn))
2927 rtx tmp;
2929 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
2930 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
2931 continue;
2933 tmp = PATTERN (insn);
2935 /* It must be a set. */
2936 if (GET_CODE (tmp) != SET)
2937 continue;
2939 /* If the destination is CCFP, then we've found an fcmp insn. */
2940 tmp = SET_DEST (tmp);
2941 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
2943 fcmp_count++;
2944 continue;
2947 tmp = PATTERN (insn);
2948 /* If this is an fbranch instruction, bump the fbranch counter. */
2949 if (GET_CODE (tmp) == SET
2950 && SET_DEST (tmp) == pc_rtx
2951 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
2952 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
2953 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
2954 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
2956 fbranch_count++;
2957 continue;
2962 /* Find all floating point compare + branch insns. If possible,
2963 reverse the comparison & the branch to avoid add,tr insns. */
2964 for (insn = get_insns (); insn; insn = next_insn (insn))
2966 rtx tmp, next;
2968 /* Ignore anything that isn't an INSN. */
2969 if (GET_CODE (insn) != INSN)
2970 continue;
2972 tmp = PATTERN (insn);
2974 /* It must be a set. */
2975 if (GET_CODE (tmp) != SET)
2976 continue;
2978 /* The destination must be CCFP, which is register zero. */
2979 tmp = SET_DEST (tmp);
2980 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
2981 continue;
2983 /* INSN should be a set of CCFP.
2985 See if the result of this insn is used in a reversed FP
2986 conditional branch. If so, reverse our condition and
2987 the branch. Doing so avoids useless add,tr insns. */
2988 next = next_insn (insn);
2989 while (next)
2991 /* Jumps, calls and labels stop our search. */
2992 if (GET_CODE (next) == JUMP_INSN
2993 || GET_CODE (next) == CALL_INSN
2994 || GET_CODE (next) == CODE_LABEL)
2995 break;
2997 /* As does another fcmp insn. */
2998 if (GET_CODE (next) == INSN
2999 && GET_CODE (PATTERN (next)) == SET
3000 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3001 && REGNO (SET_DEST (PATTERN (next))) == 0)
3002 break;
3004 next = next_insn (next);
3007 /* Is NEXT_INSN a branch? */
3008 if (next
3009 && GET_CODE (next) == JUMP_INSN)
3011 rtx pattern = PATTERN (next);
3013 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3014 and CCFP dies, then reverse our conditional and the branch
3015 to avoid the add,tr. */
3016 if (GET_CODE (pattern) == SET
3017 && SET_DEST (pattern) == pc_rtx
3018 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3019 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3020 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3021 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3022 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3023 && (fcmp_count == fbranch_count
3024 || (check_notes
3025 && find_regno_note (next, REG_DEAD, 0))))
3027 /* Reverse the branch. */
3028 tmp = XEXP (SET_SRC (pattern), 1);
3029 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3030 XEXP (SET_SRC (pattern), 2) = tmp;
3031 INSN_CODE (next) = -1;
3033 /* Reverse our condition. */
3034 tmp = PATTERN (insn);
3035 PUT_CODE (XEXP (tmp, 1),
3036 (reverse_condition_maybe_unordered
3037 (GET_CODE (XEXP (tmp, 1)))));
3043 pass = !pass;
3047 /* You may have trouble believing this, but this is the 32 bit HP-PA
3048 stack layout. Wow.
3050 Offset Contents
3052 Variable arguments (optional; any number may be allocated)
3054 SP-(4*(N+9)) arg word N
3056 SP-56 arg word 5
3057 SP-52 arg word 4
3059 Fixed arguments (must be allocated; may remain unused)
3061 SP-48 arg word 3
3062 SP-44 arg word 2
3063 SP-40 arg word 1
3064 SP-36 arg word 0
3066 Frame Marker
3068 SP-32 External Data Pointer (DP)
3069 SP-28 External sr4
3070 SP-24 External/stub RP (RP')
3071 SP-20 Current RP
3072 SP-16 Static Link
3073 SP-12 Clean up
3074 SP-8 Calling Stub RP (RP'')
3075 SP-4 Previous SP
3077 Top of Frame
3079 SP-0 Stack Pointer (points to next available address)
3083 /* This function saves registers as follows. Registers marked with ' are
3084 this function's registers (as opposed to the previous function's).
3085 If a frame_pointer isn't needed, r4 is saved as a general register;
3086 the space for the frame pointer is still allocated, though, to keep
3087 things simple.
3090 Top of Frame
3092 SP (FP') Previous FP
3093 SP + 4 Alignment filler (sigh)
3094 SP + 8 Space for locals reserved here.
3098 SP + n All call saved register used.
3102 SP + o All call saved fp registers used.
3106 SP + p (SP') points to next available address.
3110 /* Global variables set by hppa_expand_prologue(). */
3111 /* Size of frame. Need to know this to emit return insns from
3112 leaf procedures. */
3113 static HOST_WIDE_INT actual_fsize, local_fsize;
3114 static int save_fregs;
3116 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3117 Handle case where DISP > 8k by using the add_high_const patterns.
3119 Note in DISP > 8k case, we will leave the high part of the address
3120 in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
3122 static void
3123 store_reg (int reg, HOST_WIDE_INT disp, int base)
3125 rtx insn, dest, src, basereg;
3127 src = gen_rtx_REG (word_mode, reg);
3128 basereg = gen_rtx_REG (Pmode, base);
3129 if (VAL_14_BITS_P (disp))
3131 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3132 insn = emit_move_insn (dest, src);
3134 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3136 rtx delta = GEN_INT (disp);
3137 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3139 emit_move_insn (tmpreg, delta);
3140 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3141 dest = gen_rtx_MEM (word_mode, tmpreg);
3142 insn = emit_move_insn (dest, src);
3143 if (DO_FRAME_NOTES)
3145 REG_NOTES (insn)
3146 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3147 gen_rtx_SET (VOIDmode,
3148 gen_rtx_MEM (word_mode,
3149 gen_rtx_PLUS (word_mode, basereg,
3150 delta)),
3151 src),
3152 REG_NOTES (insn));
3155 else
3157 rtx delta = GEN_INT (disp);
3158 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3159 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3161 emit_move_insn (tmpreg, high);
3162 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3163 insn = emit_move_insn (dest, src);
3164 if (DO_FRAME_NOTES)
3166 REG_NOTES (insn)
3167 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3168 gen_rtx_SET (VOIDmode,
3169 gen_rtx_MEM (word_mode,
3170 gen_rtx_PLUS (word_mode, basereg,
3171 delta)),
3172 src),
3173 REG_NOTES (insn));
3177 if (DO_FRAME_NOTES)
3178 RTX_FRAME_RELATED_P (insn) = 1;
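/* Illustrative expansions (hypothetical operands): storing %r3 at
   sp + 64 fits in 14 bits and becomes a single "stw %r3,64(%r30)";
   for sp + 0x5000 the displacement does not fit, so the high part
   goes through %r1 first, roughly

       addil L'0x5000,%r30
       stw %r3,R'0x5000(%r1)

   which is the %r1 convention described in the comment above.  */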
3181 /* Emit RTL to store REG at the memory location specified by BASE and then
3182 add MOD to BASE. MOD must be <= 8k. */
3184 static void
3185 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3187 rtx insn, basereg, srcreg, delta;
3189 gcc_assert (VAL_14_BITS_P (mod));
3191 basereg = gen_rtx_REG (Pmode, base);
3192 srcreg = gen_rtx_REG (word_mode, reg);
3193 delta = GEN_INT (mod);
3195 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3196 if (DO_FRAME_NOTES)
3198 RTX_FRAME_RELATED_P (insn) = 1;
3200 /* RTX_FRAME_RELATED_P must be set on each frame related set
3201 in a parallel with more than one element. Don't set
3202 RTX_FRAME_RELATED_P in the first set if reg is temporary
3203 register 1. The effect of this operation is recorded in
3204 the initial copy. */
3205 if (reg != 1)
3207 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3208 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3210 else
3212 /* The first element of a PARALLEL is always processed if it is
3213 a SET. Thus, we need an expression list for this case. */
3214 REG_NOTES (insn)
3215 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3216 gen_rtx_SET (VOIDmode, basereg,
3217 gen_rtx_PLUS (word_mode, basereg, delta)),
3218 REG_NOTES (insn));
3223 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3224 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3225 whether to add a frame note or not.
3227 In the DISP > 8k case, we leave the high part of the address in %r1.
3228 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3230 static void
3231 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3233 rtx insn;
3235 if (VAL_14_BITS_P (disp))
3237 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3238 plus_constant (gen_rtx_REG (Pmode, base), disp));
3240 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3242 rtx basereg = gen_rtx_REG (Pmode, base);
3243 rtx delta = GEN_INT (disp);
3244 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3246 emit_move_insn (tmpreg, delta);
3247 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3248 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3250 else
3252 rtx basereg = gen_rtx_REG (Pmode, base);
3253 rtx delta = GEN_INT (disp);
3254 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3256 emit_move_insn (tmpreg,
3257 gen_rtx_PLUS (Pmode, basereg,
3258 gen_rtx_HIGH (Pmode, delta)));
3259 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3260 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3263 if (DO_FRAME_NOTES && note)
3264 RTX_FRAME_RELATED_P (insn) = 1;
3267 HOST_WIDE_INT
3268 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3270 int freg_saved = 0;
3271 int i, j;
3273 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3274 be consistent with the rounding and size calculation done here.
3275 Change them at the same time. */
3277 /* We do our own stack alignment. First, round the size of the
3278 stack locals up to a word boundary. */
3279 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3281 /* Space for previous frame pointer + filler. If any frame is
3282 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3283 waste some space here for the sake of HP compatibility. The
3284 first slot is only used when the frame pointer is needed. */
3285 if (size || frame_pointer_needed)
3286 size += STARTING_FRAME_OFFSET;
3288 /* If the current function calls __builtin_eh_return, then we need
3289 to allocate stack space for registers that will hold data for
3290 the exception handler. */
3291 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3293 unsigned int i;
3295 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3296 continue;
3297 size += i * UNITS_PER_WORD;
3300 /* Account for space used by the callee general register saves. */
3301 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3302 if (regs_ever_live[i])
3303 size += UNITS_PER_WORD;
3305 /* Account for space used by the callee floating point register saves. */
3306 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3307 if (regs_ever_live[i]
3308 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3310 freg_saved = 1;
3312 /* We always save both halves of the FP register, so always
3313 increment the frame size by 8 bytes. */
3314 size += 8;
3317 /* If any of the floating registers are saved, account for the
3318 alignment needed for the floating point register save block. */
3319 if (freg_saved)
3321 size = (size + 7) & ~7;
3322 if (fregs_live)
3323 *fregs_live = 1;
3326 /* The various ABIs include space for the outgoing parameters in the
3327 size of the current function's stack frame. We don't need to align
3328 for the outgoing arguments as their alignment is set by the final
3329 rounding for the frame as a whole. */
3330 size += current_function_outgoing_args_size;
3332 /* Allocate space for the fixed frame marker. This space must be
3333 allocated for any function that makes calls or allocates
3334 stack space. */
3335 if (!current_function_is_leaf || size)
3336 size += TARGET_64BIT ? 48 : 32;
3338 /* Finally, round to the preferred stack boundary. */
3339 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3340 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
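/* Worked example (illustrative 32-bit figures; STARTING_FRAME_OFFSET
   = 8 and a 64-byte preferred boundary are assumptions here): 40
   bytes of locals plus the 8-byte frame-pointer slot give 48; three
   callee GR saves give 60; one FP register pair gives 68, aligned up
   to 72; 16 bytes of outgoing arguments give 88; the 32-byte frame
   marker gives 120; and the final rounding returns a 128-byte
   frame.  */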
3343 /* Generate the assembly code for function entry. FILE is a stdio
3344 stream to output the code to. SIZE is an int: how many units of
3345 temporary storage to allocate.
3347 Refer to the array `regs_ever_live' to determine which registers to
3348 save; `regs_ever_live[I]' is nonzero if register number I is ever
3349 used in the function. This function is responsible for knowing
3350 which registers should not be saved even if used. */
3352 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3353 of memory. If any fpu reg is used in the function, we allocate
3354 such a block here, at the bottom of the frame, just in case it's needed.
3356 If this function is a leaf procedure, then we may choose not
3357 to do a "save" insn. The decision about whether or not
3358 to do this is made in regclass.c. */
3360 static void
3361 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3363 /* The function's label and associated .PROC must never be
3364 separated and must be output *after* any profiling declarations
3365 to avoid changing spaces/subspaces within a procedure. */
3366 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3367 fputs ("\t.PROC\n", file);
3369 /* hppa_expand_prologue does the dirty work now. We just need
3370 to output the assembler directives which denote the start
3371 of a function. */
3372 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3373 if (regs_ever_live[2])
3374 fputs (",CALLS,SAVE_RP", file);
3375 else
3376 fputs (",NO_CALLS", file);
3378 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3379 at the beginning of the frame and that it is used as the frame
3380 pointer for the frame. We do this because our current frame
3381 layout doesn't conform to that specified in the HP runtime
3382 documentation and we need a way to indicate to programs such as
3383 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3384 isn't used by HP compilers but is supported by the assembler.
3385 However, SAVE_SP is supposed to indicate that the previous stack
3386 pointer has been saved in the frame marker. */
3387 if (frame_pointer_needed)
3388 fputs (",SAVE_SP", file);
3390 /* Pass on information about the number of callee register saves
3391 performed in the prologue.
3393 The compiler is supposed to pass the highest register number
3394 saved, the assembler then has to adjust that number before
3395 entering it into the unwind descriptor (to account for any
3396 caller saved registers with lower register numbers than the
3397 first callee saved register). */
3398 if (gr_saved)
3399 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3401 if (fr_saved)
3402 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3404 fputs ("\n\t.ENTRY\n", file);
3406 remove_useless_addtr_insns (0);
3409 void
3410 hppa_expand_prologue (void)
3412 int merge_sp_adjust_with_store = 0;
3413 HOST_WIDE_INT size = get_frame_size ();
3414 HOST_WIDE_INT offset;
3415 int i;
3416 rtx insn, tmpreg;
3418 gr_saved = 0;
3419 fr_saved = 0;
3420 save_fregs = 0;
3422 /* Compute total size for frame pointer, filler, locals and rounding to
3423 the next word boundary. Similar code appears in compute_frame_size
3424 and must be changed in tandem with this code. */
3425 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3426 if (local_fsize || frame_pointer_needed)
3427 local_fsize += STARTING_FRAME_OFFSET;
3429 actual_fsize = compute_frame_size (size, &save_fregs);
3431 /* Compute a few things we will use often. */
3432 tmpreg = gen_rtx_REG (word_mode, 1);
3434 /* Save RP first. The calling conventions manual states RP will
3435 always be stored into the caller's frame at sp - 20 or sp - 16
3436 depending on which ABI is in use. */
3437 if (regs_ever_live[2] || current_function_calls_eh_return)
3438 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3440 /* Allocate the local frame and set up the frame pointer if needed. */
3441 if (actual_fsize != 0)
3443 if (frame_pointer_needed)
3445 /* Copy the old frame pointer temporarily into %r1. Set up the
3446 new stack pointer, then store away the saved old frame pointer
3447 into the stack at sp and at the same time update the stack
3448 pointer by actual_fsize bytes. Two versions, first
3449 handles small (<8k) frames. The second handles large (>=8k)
3450 frames. */
3451 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3452 if (DO_FRAME_NOTES)
3454 /* We need to record the frame pointer save here since the
3455 new frame pointer is set in the following insn. */
3456 RTX_FRAME_RELATED_P (insn) = 1;
3457 REG_NOTES (insn)
3458 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3459 gen_rtx_SET (VOIDmode,
3460 gen_rtx_MEM (word_mode, stack_pointer_rtx),
3461 frame_pointer_rtx),
3462 REG_NOTES (insn));
3465 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3466 if (DO_FRAME_NOTES)
3467 RTX_FRAME_RELATED_P (insn) = 1;
3469 if (VAL_14_BITS_P (actual_fsize))
3470 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3471 else
3473 /* It is incorrect to store the saved frame pointer at *sp,
3474 then increment sp (writes beyond the current stack boundary).
3476 So instead use stwm to store at *sp and post-increment the
3477 stack pointer as an atomic operation. Then increment sp to
3478 finish allocating the new frame. */
3479 HOST_WIDE_INT adjust1 = 8192 - 64;
3480 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3482 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3483 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3484 adjust2, 1);
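	      /* Worked example (illustrative): for actual_fsize = 20000,
		 adjust1 = 8192 - 64 = 8128 is stored-and-bumped atomically
		 by the stwm, and adjust2 = 20000 - 8128 = 11872 is added
		 afterwards, so no store ever lands beyond the current
		 stack pointer.  */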
3487 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3488 we need to store the previous stack pointer (frame pointer)
3489 into the frame marker on targets that use the HP unwind
3490 library. This allows the HP unwind library to be used to
3491 unwind GCC frames. However, we are not fully compatible
3492 with the HP library because our frame layout differs from
3493 that specified in the HP runtime specification.
3495 We don't want a frame note on this instruction as the frame
3496 marker moves during dynamic stack allocation.
3498 This instruction also serves as a blockage to prevent
3499 register spills from being scheduled before the stack
3500 pointer is raised. This is necessary as we store
3501 registers using the frame pointer as a base register,
3502 and the frame pointer is set before sp is raised. */
3503 if (TARGET_HPUX_UNWIND_LIBRARY)
3505 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3506 GEN_INT (TARGET_64BIT ? -8 : -4));
3508 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3509 frame_pointer_rtx);
3511 else
3512 emit_insn (gen_blockage ());
3514 /* no frame pointer needed. */
3515 else
3517 /* In some cases we can perform the first callee register save
3518 and allocating the stack frame at the same time. If so, just
3519 make a note of it and defer allocating the frame until saving
3520 the callee registers. */
3521 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3522 merge_sp_adjust_with_store = 1;
3523 /* Cannot optimize. Adjust the stack frame by actual_fsize
3524 bytes. */
3525 else
3526 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3527 actual_fsize, 1);
3531 /* Normal register save.
3533 Do not save the frame pointer in the frame_pointer_needed case. It
3534 was done earlier. */
3535 if (frame_pointer_needed)
3537 offset = local_fsize;
3539 /* Saving the EH return data registers in the frame is the simplest
3540 way to get the frame unwind information emitted. We put them
3541 just before the general registers. */
3542 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3544 unsigned int i, regno;
3546 for (i = 0; ; ++i)
3548 regno = EH_RETURN_DATA_REGNO (i);
3549 if (regno == INVALID_REGNUM)
3550 break;
3552 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3553 offset += UNITS_PER_WORD;
3557 for (i = 18; i >= 4; i--)
3558 if (regs_ever_live[i] && ! call_used_regs[i])
3560 store_reg (i, offset, FRAME_POINTER_REGNUM);
3561 offset += UNITS_PER_WORD;
3562 gr_saved++;
3564 /* Account for %r3 which is saved in a special place. */
3565 gr_saved++;
3567 /* No frame pointer needed. */
3568 else
3570 offset = local_fsize - actual_fsize;
3572 /* Saving the EH return data registers in the frame is the simplest
3573 way to get the frame unwind information emitted. */
3574 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3576 unsigned int i, regno;
3578 for (i = 0; ; ++i)
3580 regno = EH_RETURN_DATA_REGNO (i);
3581 if (regno == INVALID_REGNUM)
3582 break;
3584 /* If merge_sp_adjust_with_store is nonzero, then we can
3585 optimize the first save. */
3586 if (merge_sp_adjust_with_store)
3588 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3589 merge_sp_adjust_with_store = 0;
3591 else
3592 store_reg (regno, offset, STACK_POINTER_REGNUM);
3593 offset += UNITS_PER_WORD;
3597 for (i = 18; i >= 3; i--)
3598 if (regs_ever_live[i] && ! call_used_regs[i])
3600 /* If merge_sp_adjust_with_store is nonzero, then we can
3601 optimize the first GR save. */
3602 if (merge_sp_adjust_with_store)
3604 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3605 merge_sp_adjust_with_store = 0;
3607 else
3608 store_reg (i, offset, STACK_POINTER_REGNUM);
3609 offset += UNITS_PER_WORD;
3610 gr_saved++;
3613 /* If we wanted to merge the SP adjustment with a GR save, but we never
3614 did any GR saves, then just emit the adjustment here. */
3615 if (merge_sp_adjust_with_store)
3616 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3617 actual_fsize, 1);
3620 /* The hppa calling conventions say that %r19, the pic offset
3621 register, is saved at sp - 32 (in this function's frame)
3622 when generating PIC code. FIXME: What is the correct thing
3623 to do for functions which make no calls and allocate no
3624 frame? Do we need to allocate a frame, or can we just omit
3625 the save? For now we'll just omit the save.
3627 We don't want a note on this insn as the frame marker can
3628 move if there is a dynamic stack allocation. */
3629 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3631 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3633 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3637 /* Align pointer properly (doubleword boundary). */
3638 offset = (offset + 7) & ~7;
3640 /* Floating point register store. */
3641 if (save_fregs)
3643 rtx base;
3645 /* First get the frame or stack pointer to the start of the FP register
3646 save area. */
3647 if (frame_pointer_needed)
3649 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3650 base = frame_pointer_rtx;
3652 else
3654 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3655 base = stack_pointer_rtx;
3658 /* Now actually save the FP registers. */
3659 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3661 if (regs_ever_live[i]
3662 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3664 rtx addr, insn, reg;
3665 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3666 reg = gen_rtx_REG (DFmode, i);
3667 insn = emit_move_insn (addr, reg);
3668 if (DO_FRAME_NOTES)
3670 RTX_FRAME_RELATED_P (insn) = 1;
3671 if (TARGET_64BIT)
3673 rtx mem = gen_rtx_MEM (DFmode,
3674 plus_constant (base, offset));
3675 REG_NOTES (insn)
3676 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3677 gen_rtx_SET (VOIDmode, mem, reg),
3678 REG_NOTES (insn));
3680 else
3682 rtx meml = gen_rtx_MEM (SFmode,
3683 plus_constant (base, offset));
3684 rtx memr = gen_rtx_MEM (SFmode,
3685 plus_constant (base, offset + 4));
3686 rtx regl = gen_rtx_REG (SFmode, i);
3687 rtx regr = gen_rtx_REG (SFmode, i + 1);
3688 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3689 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3690 rtvec vec;
3692 RTX_FRAME_RELATED_P (setl) = 1;
3693 RTX_FRAME_RELATED_P (setr) = 1;
3694 vec = gen_rtvec (2, setl, setr);
3695 REG_NOTES (insn)
3696 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3697 gen_rtx_SEQUENCE (VOIDmode, vec),
3698 REG_NOTES (insn));
3701 offset += GET_MODE_SIZE (DFmode);
3702 fr_saved++;
3708 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3709 Handle case where DISP > 8k by using the add_high_const patterns. */
3711 static void
3712 load_reg (int reg, HOST_WIDE_INT disp, int base)
3714 rtx dest = gen_rtx_REG (word_mode, reg);
3715 rtx basereg = gen_rtx_REG (Pmode, base);
3716 rtx src;
3718 if (VAL_14_BITS_P (disp))
3719 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3720 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3722 rtx delta = GEN_INT (disp);
3723 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3725 emit_move_insn (tmpreg, delta);
3726 if (TARGET_DISABLE_INDEXING)
3728 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3729 src = gen_rtx_MEM (word_mode, tmpreg);
3731 else
3732 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3734 else
3736 rtx delta = GEN_INT (disp);
3737 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3738 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3740 emit_move_insn (tmpreg, high);
3741 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3744 emit_move_insn (dest, src);
3747 /* Update the total code bytes output to the text section. */
3749 static void
3750 update_total_code_bytes (int nbytes)
3752 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3753 && !IN_NAMED_SECTION_P (cfun->decl))
3755 if (INSN_ADDRESSES_SET_P ())
3757 unsigned long old_total = total_code_bytes;
3759 total_code_bytes += nbytes;
3761 /* Be prepared to handle overflows. */
3762 if (old_total > total_code_bytes)
3763 total_code_bytes = -1;
3765 else
3766 total_code_bytes = -1;
3770 /* This function generates the assembly code for function exit.
3771 Args are as for output_function_prologue ().
3773 The function epilogue should not depend on the current stack
3774 pointer! It should use the frame pointer only. This is mandatory
3775 because of alloca; we also take advantage of it to omit stack
3776 adjustments before returning. */
3778 static void
3779 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3781 rtx insn = get_last_insn ();
3783 last_address = 0;
3785 /* hppa_expand_epilogue does the dirty work now. We just need
3786 to output the assembler directives which denote the end
3787 of a function.
3789 To make debuggers happy, emit a nop if the epilogue was completely
3790 eliminated due to a volatile call as the last insn in the
3791 current function. That way the return address (in %r2) will
3792 always point to a valid instruction in the current function. */
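/* Illustrative output when a noreturn call ends the function
   (mnemonics are a sketch; the exact call sequence depends on the
   target configuration):

       bl abort,%r2
       nop                  ; delay slot
       nop                  ; emitted below; keeps %r2 in-function
       .EXIT
       .PROCEND  */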
3794 /* Get the last real insn. */
3795 if (GET_CODE (insn) == NOTE)
3796 insn = prev_real_insn (insn);
3798 /* If it is a sequence, then look inside. */
3799 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
3800 insn = XVECEXP (PATTERN (insn), 0, 0);
3802 /* If insn is a CALL_INSN, then it must be a call to a volatile
3803 function (otherwise there would be epilogue insns). */
3804 if (insn && GET_CODE (insn) == CALL_INSN)
3806 fputs ("\tnop\n", file);
3807 last_address += 4;
3810 fputs ("\t.EXIT\n\t.PROCEND\n", file);
3812 if (TARGET_SOM && TARGET_GAS)
3814 /* We're done with this subspace except possibly for some additional
3815 debug information. Forget that we are in this subspace to ensure
3816 that the next function is output in its own subspace. */
3817 forget_section ();
3820 if (INSN_ADDRESSES_SET_P ())
3822 insn = get_last_nonnote_insn ();
3823 last_address += INSN_ADDRESSES (INSN_UID (insn));
3824 if (INSN_P (insn))
3825 last_address += insn_default_length (insn);
3826 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
3827 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
3830 /* Finally, update the total number of code bytes output so far. */
3831 update_total_code_bytes (last_address);
3834 void
3835 hppa_expand_epilogue (void)
3837 rtx tmpreg;
3838 HOST_WIDE_INT offset;
3839 HOST_WIDE_INT ret_off = 0;
3840 int i;
3841 int merge_sp_adjust_with_load = 0;
3843 /* We will use this often. */
3844 tmpreg = gen_rtx_REG (word_mode, 1);
3846 /* Try to restore RP early to avoid load/use interlocks when
3847 RP gets used in the return (bv) instruction. This appears to still
3848 be necessary even when we schedule the prologue and epilogue. */
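/* The interlock being avoided looks like this in the no-frame-pointer
   case (a sketch):

       ldw -20(%r30),%r2
       bv %r0(%r2)          ; would stall on the just-loaded %r2

   Restoring %r2 before the other epilogue work gives the load time to
   complete before the bv needs its value. */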
3849 if (regs_ever_live [2] || current_function_calls_eh_return)
3851 ret_off = TARGET_64BIT ? -16 : -20;
3852 if (frame_pointer_needed)
3854 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
3855 ret_off = 0;
3857 else
3859 /* No frame pointer, and stack is smaller than 8k. */
3860 if (VAL_14_BITS_P (ret_off - actual_fsize))
3862 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
3863 ret_off = 0;
3868 /* General register restores. */
3869 if (frame_pointer_needed)
3871 offset = local_fsize;
3873 /* If the current function calls __builtin_eh_return, then we need
3874 to restore the saved EH data registers. */
3875 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3877 unsigned int i, regno;
3879 for (i = 0; ; ++i)
3881 regno = EH_RETURN_DATA_REGNO (i);
3882 if (regno == INVALID_REGNUM)
3883 break;
3885 load_reg (regno, offset, FRAME_POINTER_REGNUM);
3886 offset += UNITS_PER_WORD;
3890 for (i = 18; i >= 4; i--)
3891 if (regs_ever_live[i] && ! call_used_regs[i])
3893 load_reg (i, offset, FRAME_POINTER_REGNUM);
3894 offset += UNITS_PER_WORD;
3897 else
3899 offset = local_fsize - actual_fsize;
3901 /* If the current function calls __builtin_eh_return, then we need
3902 to restore the saved EH data registers. */
3903 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3905 unsigned int i, regno;
3907 for (i = 0; ; ++i)
3909 regno = EH_RETURN_DATA_REGNO (i);
3910 if (regno == INVALID_REGNUM)
3911 break;
3913 /* Only for the first load.
3914 merge_sp_adjust_with_load holds the register load
3915 with which we will merge the sp adjustment. */
3916 if (merge_sp_adjust_with_load == 0
3917 && local_fsize == 0
3918 && VAL_14_BITS_P (-actual_fsize))
3919 merge_sp_adjust_with_load = regno;
3920 else
3921 load_reg (regno, offset, STACK_POINTER_REGNUM);
3922 offset += UNITS_PER_WORD;
3926 for (i = 18; i >= 3; i--)
3928 if (regs_ever_live[i] && ! call_used_regs[i])
3930 /* Only for the first load.
3931 merge_sp_adjust_with_load holds the register load
3932 with which we will merge the sp adjustment. */
3933 if (merge_sp_adjust_with_load == 0
3934 && local_fsize == 0
3935 && VAL_14_BITS_P (-actual_fsize))
3936 merge_sp_adjust_with_load = i;
3937 else
3938 load_reg (i, offset, STACK_POINTER_REGNUM);
3939 offset += UNITS_PER_WORD;
3944 /* Align pointer properly (doubleword boundary). */
3945 offset = (offset + 7) & ~7;
3947 /* FP register restores. */
3948 if (save_fregs)
3950 /* Adjust the register to index off of. */
3951 if (frame_pointer_needed)
3952 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3953 else
3954 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3956 /* Actually do the restores now. */
3957 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3958 if (regs_ever_live[i]
3959 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3961 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3962 rtx dest = gen_rtx_REG (DFmode, i);
3963 emit_move_insn (dest, src);
3967 /* Emit a blockage insn here to keep these insns from being moved to
3968 an earlier spot in the epilogue, or into the main instruction stream.
3970 This is necessary as we must not cut the stack back before all the
3971 restores are finished. */
3972 emit_insn (gen_blockage ());
3974 /* Reset stack pointer (and possibly frame pointer). The stack
3975 pointer is initially set to fp + 64 to avoid a race condition. */
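/* Sketch of the two insns emitted here (32-bit, frame pointer in %r3;
   PA 1.x assemblers spell the second insn ldwm):

       ldo 64(%r3),%r30       ; sp = fp + 64
       ldw,mb -64(%r30),%r3   ; sp = fp, reload the saved %r3

   Because PA stacks grow upward, keeping sp at fp + 64 until the old
   %r3 is reloaded keeps the word being read below the stack pointer,
   where an interrupt cannot clobber it. */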
3976 if (frame_pointer_needed)
3978 rtx delta = GEN_INT (-64);
3980 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
3981 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
3983 /* If we were deferring a callee register restore, do it now. */
3984 else if (merge_sp_adjust_with_load)
3986 rtx delta = GEN_INT (-actual_fsize);
3987 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
3989 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
3991 else if (actual_fsize != 0)
3992 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3993 - actual_fsize, 0);
3995 /* If we haven't restored %r2 yet (no frame pointer, and a stack
3996 frame greater than 8k), do so now. */
3997 if (ret_off != 0)
3998 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4000 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4002 rtx sa = EH_RETURN_STACKADJ_RTX;
4004 emit_insn (gen_blockage ());
4005 emit_insn (TARGET_64BIT
4006 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4007 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4011 rtx
4012 hppa_pic_save_rtx (void)
4014 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4017 void
4018 hppa_profile_hook (int label_no)
4020 /* We use SImode for the address of the function in both 32 and
4021 64-bit code to avoid having to provide DImode versions of the
4022 lcla2 and load_offset_label_address insn patterns. */
4023 rtx reg = gen_reg_rtx (SImode);
4024 rtx label_rtx = gen_label_rtx ();
4025 rtx begin_label_rtx, call_insn;
4026 char begin_label_name[16];
4028 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4029 label_no);
4030 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4032 if (TARGET_64BIT)
4033 emit_move_insn (arg_pointer_rtx,
4034 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4035 GEN_INT (64)));
4037 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4039 /* The address of the function is loaded into %r25 with an instruction-
4040 relative sequence that avoids the use of relocations. The sequence
4041 is split so that the load_offset_label_address instruction can
4042 occupy the delay slot of the call to _mcount. */
4043 if (TARGET_PA_20)
4044 emit_insn (gen_lcla2 (reg, label_rtx));
4045 else
4046 emit_insn (gen_lcla1 (reg, label_rtx));
4048 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4049 reg, begin_label_rtx, label_rtx));
4051 #ifndef NO_PROFILE_COUNTERS
4053 rtx count_label_rtx, addr, r24;
4054 char count_label_name[16];
4056 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4057 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4059 addr = force_reg (Pmode, count_label_rtx);
4060 r24 = gen_rtx_REG (Pmode, 24);
4061 emit_move_insn (r24, addr);
4063 call_insn =
4064 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4065 gen_rtx_SYMBOL_REF (Pmode,
4066 "_mcount")),
4067 GEN_INT (TARGET_64BIT ? 24 : 12)));
4069 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4071 #else
4073 call_insn =
4074 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4075 gen_rtx_SYMBOL_REF (Pmode,
4076 "_mcount")),
4077 GEN_INT (TARGET_64BIT ? 16 : 8)));
4079 #endif
4081 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4082 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4084 /* Indicate the _mcount call cannot throw, nor will it execute a
4085 non-local goto. */
4086 REG_NOTES (call_insn)
4087 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4090 /* Fetch the return address for the frame COUNT steps up from
4091 the current frame, after the prologue. FRAMEADDR is the
4092 frame pointer of the COUNT frame.
4094 We want to ignore any export stub remnants here. To handle this,
4095 we examine the code at the return address, and if it is an export
4096 stub, we return a memory rtx for the stub return address stored
4097 at frame-24.
4099 The value returned is used in two different ways:
4101 1. To find a function's caller.
4103 2. To change the return address for a function.
4105 This function handles most instances of case 1; however, it will
4106 fail if there are two levels of stubs to execute on the return
4107 path. The only way I believe that can happen is if the return value
4108 needs a parameter relocation, which never happens for C code.
4110 This function handles most instances of case 2; however, it will
4111 fail if we did not originally have stub code on the return path
4112 but will need stub code on the new return path. This can happen if
4113 the caller & callee are both in the main program, but the new
4114 return location is in a shared library. */
4116 rtx
4117 return_addr_rtx (int count, rtx frameaddr)
4119 rtx label;
4120 rtx rp;
4121 rtx saved_rp;
4122 rtx ins;
4124 if (count != 0)
4125 return NULL_RTX;
4127 rp = get_hard_reg_initial_val (Pmode, 2);
4129 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4130 return rp;
4132 saved_rp = gen_reg_rtx (Pmode);
4133 emit_move_insn (saved_rp, rp);
4135 /* Get pointer to the instruction stream. We have to mask out the
4136 privilege level from the two low order bits of the return address
4137 pointer here so that ins will point to the start of the first
4138 instruction that would have been executed if we returned. */
4139 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4140 label = gen_label_rtx ();
4142 /* Check the instruction stream at the normal return address for the
4143 export stub:
4145 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4146 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4147 0x00011820 | stub+16: mtsp r1,sr0
4148 0xe0400002 | stub+20: be,n 0(sr0,rp)
4150 If it is an export stub, then our return address is really in
4151 -24[frameaddr]. */
4153 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4154 NULL_RTX, SImode, 1);
4155 emit_jump_insn (gen_bne (label));
4157 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4158 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4159 emit_jump_insn (gen_bne (label));
4161 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4162 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4163 emit_jump_insn (gen_bne (label));
4165 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4166 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4168 /* If there is no export stub then just use the value saved from
4169 the return pointer register. */
4171 emit_jump_insn (gen_bne (label));
4173 /* Here we know that our return address points to an export
4174 stub. We don't want to return the address of the export stub,
4175 but rather the return address of the export stub. That return
4176 address is stored at -24[frameaddr]. */
4178 emit_move_insn (saved_rp,
4179 gen_rtx_MEM (Pmode,
4180 memory_address (Pmode,
4181 plus_constant (frameaddr,
4182 -24))));
4184 emit_label (label);
4185 return saved_rp;
4188 /* This is only valid once reload has completed because it depends on
4189 knowing exactly how much (if any) frame there is and...
4191 It's only valid if there is no frame marker to de-allocate and...
4193 It's only valid if %r2 hasn't been saved into the caller's frame
4194 (we're not profiling and %r2 isn't live anywhere). */
4195 int
4196 hppa_can_use_return_insn_p (void)
4198 return (reload_completed
4199 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4200 && ! regs_ever_live[2]
4201 && ! frame_pointer_needed);
4204 void
4205 emit_bcond_fp (enum rtx_code code, rtx operand0)
4207 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4208 gen_rtx_IF_THEN_ELSE (VOIDmode,
4209 gen_rtx_fmt_ee (code,
4210 VOIDmode,
4211 gen_rtx_REG (CCFPmode, 0),
4212 const0_rtx),
4213 gen_rtx_LABEL_REF (VOIDmode, operand0),
4214 pc_rtx)));
4218 rtx
4219 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4221 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4222 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4225 /* Adjust the cost of a scheduling dependency. Return the new cost of
4226 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4228 static int
4229 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4231 enum attr_type attr_type;
4233 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4234 true dependencies as they are described with bypasses now. */
4235 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4236 return cost;
4238 if (! recog_memoized (insn))
4239 return 0;
4241 attr_type = get_attr_type (insn);
4243 switch (REG_NOTE_KIND (link))
4245 case REG_DEP_ANTI:
4246 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4247 cycles later. */
4249 if (attr_type == TYPE_FPLOAD)
4251 rtx pat = PATTERN (insn);
4252 rtx dep_pat = PATTERN (dep_insn);
4253 if (GET_CODE (pat) == PARALLEL)
4255 /* This happens for the fldXs,mb patterns. */
4256 pat = XVECEXP (pat, 0, 0);
4258 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4259 /* If this happens, we have to extend this to schedule
4260 optimally. Return 0 for now. */
4261 return 0;
4263 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4265 if (! recog_memoized (dep_insn))
4266 return 0;
4267 switch (get_attr_type (dep_insn))
4269 case TYPE_FPALU:
4270 case TYPE_FPMULSGL:
4271 case TYPE_FPMULDBL:
4272 case TYPE_FPDIVSGL:
4273 case TYPE_FPDIVDBL:
4274 case TYPE_FPSQRTSGL:
4275 case TYPE_FPSQRTDBL:
4276 /* A fpload can't be issued until one cycle before a
4277 preceding arithmetic operation has finished if
4278 the target of the fpload is any of the sources
4279 (or destination) of the arithmetic operation. */
4280 return insn_default_latency (dep_insn) - 1;
4282 default:
4283 return 0;
4287 else if (attr_type == TYPE_FPALU)
4289 rtx pat = PATTERN (insn);
4290 rtx dep_pat = PATTERN (dep_insn);
4291 if (GET_CODE (pat) == PARALLEL)
4293 /* This happens for the fldXs,mb patterns. */
4294 pat = XVECEXP (pat, 0, 0);
4296 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4297 /* If this happens, we have to extend this to schedule
4298 optimally. Return 0 for now. */
4299 return 0;
4301 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4303 if (! recog_memoized (dep_insn))
4304 return 0;
4305 switch (get_attr_type (dep_insn))
4307 case TYPE_FPDIVSGL:
4308 case TYPE_FPDIVDBL:
4309 case TYPE_FPSQRTSGL:
4310 case TYPE_FPSQRTDBL:
4311 /* An ALU flop can't be issued until two cycles before a
4312 preceding divide or sqrt operation has finished if
4313 the target of the ALU flop is any of the sources
4314 (or destination) of the divide or sqrt operation. */
4315 return insn_default_latency (dep_insn) - 2;
4317 default:
4318 return 0;
4323 /* For other anti dependencies, the cost is 0. */
4324 return 0;
4326 case REG_DEP_OUTPUT:
4327 /* Output dependency; DEP_INSN writes a register that INSN writes some
4328 cycles later. */
4329 if (attr_type == TYPE_FPLOAD)
4331 rtx pat = PATTERN (insn);
4332 rtx dep_pat = PATTERN (dep_insn);
4333 if (GET_CODE (pat) == PARALLEL)
4335 /* This happens for the fldXs,mb patterns. */
4336 pat = XVECEXP (pat, 0, 0);
4338 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4339 /* If this happens, we have to extend this to schedule
4340 optimally. Return 0 for now. */
4341 return 0;
4343 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4345 if (! recog_memoized (dep_insn))
4346 return 0;
4347 switch (get_attr_type (dep_insn))
4349 case TYPE_FPALU:
4350 case TYPE_FPMULSGL:
4351 case TYPE_FPMULDBL:
4352 case TYPE_FPDIVSGL:
4353 case TYPE_FPDIVDBL:
4354 case TYPE_FPSQRTSGL:
4355 case TYPE_FPSQRTDBL:
4356 /* A fpload can't be issued until one cycle before a
4357 preceding arithmetic operation has finished if
4358 the target of the fpload is the destination of the
4359 arithmetic operation.
4361 Exception: For PA7100LC, PA7200 and PA7300, the cost
4362 is 3 cycles, unless they bundle together. We also
4363 pay the penalty if the second insn is a fpload. */
4364 return insn_default_latency (dep_insn) - 1;
4366 default:
4367 return 0;
4371 else if (attr_type == TYPE_FPALU)
4373 rtx pat = PATTERN (insn);
4374 rtx dep_pat = PATTERN (dep_insn);
4375 if (GET_CODE (pat) == PARALLEL)
4377 /* This happens for the fldXs,mb patterns. */
4378 pat = XVECEXP (pat, 0, 0);
4380 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4381 /* If this happens, we have to extend this to schedule
4382 optimally. Return 0 for now. */
4383 return 0;
4385 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4387 if (! recog_memoized (dep_insn))
4388 return 0;
4389 switch (get_attr_type (dep_insn))
4391 case TYPE_FPDIVSGL:
4392 case TYPE_FPDIVDBL:
4393 case TYPE_FPSQRTSGL:
4394 case TYPE_FPSQRTDBL:
4395 /* An ALU flop can't be issued until two cycles before a
4396 preceding divide or sqrt operation has finished if
4397 the target of the ALU flop is also the target of
4398 the divide or sqrt operation. */
4399 return insn_default_latency (dep_insn) - 2;
4401 default:
4402 return 0;
4407 /* For other output dependencies, the cost is 0. */
4408 return 0;
4410 default:
4411 gcc_unreachable ();
4415 /* Adjust scheduling priorities. We use this to try and keep addil
4416 and the next use of %r1 close together. */
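/* The pair being protected looks roughly like (a sketch):

       addil L'sym-$global$,%r27
       stw %r4,R'sym-$global$(%r1)

   addil implicitly targets %r1, so the insn consuming %r1 should
   issue soon after the addil, before anything else needs %r1.  The
   priority shifts below bias the scheduler toward keeping the two
   halves adjacent. */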
4417 static int
4418 pa_adjust_priority (rtx insn, int priority)
4420 rtx set = single_set (insn);
4421 rtx src, dest;
4422 if (set)
4424 src = SET_SRC (set);
4425 dest = SET_DEST (set);
4426 if (GET_CODE (src) == LO_SUM
4427 && symbolic_operand (XEXP (src, 1), VOIDmode)
4428 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4429 priority >>= 3;
4431 else if (GET_CODE (src) == MEM
4432 && GET_CODE (XEXP (src, 0)) == LO_SUM
4433 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4434 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4435 priority >>= 1;
4437 else if (GET_CODE (dest) == MEM
4438 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4439 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4440 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4441 priority >>= 3;
4443 return priority;
4446 /* The 700 can only issue a single insn at a time.
4447 The 7XXX processors can issue two insns at a time.
4448 The 8000 can issue 4 insns at a time. */
4449 static int
4450 pa_issue_rate (void)
4452 switch (pa_cpu)
4454 case PROCESSOR_700: return 1;
4455 case PROCESSOR_7100: return 2;
4456 case PROCESSOR_7100LC: return 2;
4457 case PROCESSOR_7200: return 2;
4458 case PROCESSOR_7300: return 2;
4459 case PROCESSOR_8000: return 4;
4461 default:
4462 gcc_unreachable ();
4468 /* Return any length adjustment needed by INSN which already has its length
4469 computed as LENGTH. Return zero if no adjustment is necessary.
4471 For the PA: function calls, millicode calls, and backwards short
4472 conditional branches with unfilled delay slots need an adjustment by +1
4473 (to account for the NOP which will be inserted into the instruction stream).
4475 Also compute the length of an inline block move here as it is too
4476 complicated to express as a length attribute in pa.md. */
4477 int
4478 pa_adjust_insn_length (rtx insn, int length)
4480 rtx pat = PATTERN (insn);
4482 /* Jumps inside switch tables which have unfilled delay slots need
4483 adjustment. */
4484 if (GET_CODE (insn) == JUMP_INSN
4485 && GET_CODE (pat) == PARALLEL
4486 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4487 return 4;
4488 /* Millicode insn with an unfilled delay slot. */
4489 else if (GET_CODE (insn) == INSN
4490 && GET_CODE (pat) != SEQUENCE
4491 && GET_CODE (pat) != USE
4492 && GET_CODE (pat) != CLOBBER
4493 && get_attr_type (insn) == TYPE_MILLI)
4494 return 4;
4495 /* Block move pattern. */
4496 else if (GET_CODE (insn) == INSN
4497 && GET_CODE (pat) == PARALLEL
4498 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4499 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4500 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4501 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4502 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4503 return compute_movmem_length (insn) - 4;
4504 /* Block clear pattern. */
4505 else if (GET_CODE (insn) == INSN
4506 && GET_CODE (pat) == PARALLEL
4507 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4508 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4509 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4510 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4511 return compute_clrmem_length (insn) - 4;
4512 /* Conditional branch with an unfilled delay slot. */
4513 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4515 /* Adjust a short backwards conditional with an unfilled delay slot. */
4516 if (GET_CODE (pat) == SET
4517 && length == 4
4518 && ! forward_branch_p (insn))
4519 return 4;
4520 else if (GET_CODE (pat) == PARALLEL
4521 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4522 && length == 4)
4523 return 4;
4524 /* Adjust dbra insn with short backwards conditional branch with
4525 unfilled delay slot -- only for case where counter is in a
4526 general register. */
4527 else if (GET_CODE (pat) == PARALLEL
4528 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4529 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4530 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4531 && length == 4
4532 && ! forward_branch_p (insn))
4533 return 4;
4534 else
4535 return 0;
4537 return 0;
4540 /* Print operand X (an rtx) in assembler syntax to file FILE.
4541 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4542 For `%' followed by punctuation, CODE is the punctuation and X is null. */
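/* A usage sketch: output_cbranch below builds templates such as
   "{com%I2b,|cmp%I2b,}%S3", where %I2 prints an "i" when operand 2 is
   a CONST_INT (selecting the immediate form of the compare) and %S3
   prints the swapped condition of operand 3; "%#" and "%*" handle
   delay-slot nops and nullification. */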
4544 void
4545 print_operand (FILE *file, rtx x, int code)
4547 switch (code)
4549 case '#':
4550 /* Output a 'nop' if there's nothing for the delay slot. */
4551 if (dbr_sequence_length () == 0)
4552 fputs ("\n\tnop", file);
4553 return;
4554 case '*':
4555 /* Output a nullification completer if there's nothing for the
4556 delay slot or nullification is requested. */
4557 if (dbr_sequence_length () == 0 ||
4558 (final_sequence &&
4559 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4560 fputs (",n", file);
4561 return;
4562 case 'R':
4563 /* Print out the second register name of a register pair.
4564 I.e., R (6) => 7. */
4565 fputs (reg_names[REGNO (x) + 1], file);
4566 return;
4567 case 'r':
4568 /* A register or zero. */
4569 if (x == const0_rtx
4570 || (x == CONST0_RTX (DFmode))
4571 || (x == CONST0_RTX (SFmode)))
4573 fputs ("%r0", file);
4574 return;
4576 else
4577 break;
4578 case 'f':
4579 /* A register or zero (floating point). */
4580 if (x == const0_rtx
4581 || (x == CONST0_RTX (DFmode))
4582 || (x == CONST0_RTX (SFmode)))
4584 fputs ("%fr0", file);
4585 return;
4587 else
4588 break;
4589 case 'A':
4591 rtx xoperands[2];
4593 xoperands[0] = XEXP (XEXP (x, 0), 0);
4594 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4595 output_global_address (file, xoperands[1], 0);
4596 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4597 return;
4600 case 'C': /* Plain (C)ondition */
4601 case 'X':
4602 switch (GET_CODE (x))
4604 case EQ:
4605 fputs ("=", file); break;
4606 case NE:
4607 fputs ("<>", file); break;
4608 case GT:
4609 fputs (">", file); break;
4610 case GE:
4611 fputs (">=", file); break;
4612 case GEU:
4613 fputs (">>=", file); break;
4614 case GTU:
4615 fputs (">>", file); break;
4616 case LT:
4617 fputs ("<", file); break;
4618 case LE:
4619 fputs ("<=", file); break;
4620 case LEU:
4621 fputs ("<<=", file); break;
4622 case LTU:
4623 fputs ("<<", file); break;
4624 default:
4625 gcc_unreachable ();
4627 return;
4628 case 'N': /* Condition, (N)egated */
4629 switch (GET_CODE (x))
4631 case EQ:
4632 fputs ("<>", file); break;
4633 case NE:
4634 fputs ("=", file); break;
4635 case GT:
4636 fputs ("<=", file); break;
4637 case GE:
4638 fputs ("<", file); break;
4639 case GEU:
4640 fputs ("<<", file); break;
4641 case GTU:
4642 fputs ("<<=", file); break;
4643 case LT:
4644 fputs (">=", file); break;
4645 case LE:
4646 fputs (">", file); break;
4647 case LEU:
4648 fputs (">>", file); break;
4649 case LTU:
4650 fputs (">>=", file); break;
4651 default:
4652 gcc_unreachable ();
4654 return;
4655 /* For floating point comparisons. Note that the output
4656 predicates are the complement of the desired mode. The
4657 conditions for GT, GE, LT, LE and LTGT cause an invalid
4658 operation exception if the result is unordered and this
4659 exception is enabled in the floating-point status register. */
4660 case 'Y':
4661 switch (GET_CODE (x))
4663 case EQ:
4664 fputs ("!=", file); break;
4665 case NE:
4666 fputs ("=", file); break;
4667 case GT:
4668 fputs ("!>", file); break;
4669 case GE:
4670 fputs ("!>=", file); break;
4671 case LT:
4672 fputs ("!<", file); break;
4673 case LE:
4674 fputs ("!<=", file); break;
4675 case LTGT:
4676 fputs ("!<>", file); break;
4677 case UNLE:
4678 fputs ("!?<=", file); break;
4679 case UNLT:
4680 fputs ("!?<", file); break;
4681 case UNGE:
4682 fputs ("!?>=", file); break;
4683 case UNGT:
4684 fputs ("!?>", file); break;
4685 case UNEQ:
4686 fputs ("!?=", file); break;
4687 case UNORDERED:
4688 fputs ("!?", file); break;
4689 case ORDERED:
4690 fputs ("?", file); break;
4691 default:
4692 gcc_unreachable ();
4694 return;
4695 case 'S': /* Condition, operands are (S)wapped. */
4696 switch (GET_CODE (x))
4698 case EQ:
4699 fputs ("=", file); break;
4700 case NE:
4701 fputs ("<>", file); break;
4702 case GT:
4703 fputs ("<", file); break;
4704 case GE:
4705 fputs ("<=", file); break;
4706 case GEU:
4707 fputs ("<<=", file); break;
4708 case GTU:
4709 fputs ("<<", file); break;
4710 case LT:
4711 fputs (">", file); break;
4712 case LE:
4713 fputs (">=", file); break;
4714 case LEU:
4715 fputs (">>=", file); break;
4716 case LTU:
4717 fputs (">>", file); break;
4718 default:
4719 gcc_unreachable ();
4721 return;
4722 case 'B': /* Condition, (B)oth swapped and negate. */
4723 switch (GET_CODE (x))
4725 case EQ:
4726 fputs ("<>", file); break;
4727 case NE:
4728 fputs ("=", file); break;
4729 case GT:
4730 fputs (">=", file); break;
4731 case GE:
4732 fputs (">", file); break;
4733 case GEU:
4734 fputs (">>", file); break;
4735 case GTU:
4736 fputs (">>=", file); break;
4737 case LT:
4738 fputs ("<=", file); break;
4739 case LE:
4740 fputs ("<", file); break;
4741 case LEU:
4742 fputs ("<<", file); break;
4743 case LTU:
4744 fputs ("<<=", file); break;
4745 default:
4746 gcc_unreachable ();
4748 return;
4749 case 'k':
4750 gcc_assert (GET_CODE (x) == CONST_INT);
4751 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4752 return;
4753 case 'Q':
4754 gcc_assert (GET_CODE (x) == CONST_INT);
4755 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4756 return;
4757 case 'L':
4758 gcc_assert (GET_CODE (x) == CONST_INT);
4759 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4760 return;
4761 case 'O':
4762 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4763 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4764 return;
4765 case 'p':
4766 gcc_assert (GET_CODE (x) == CONST_INT);
4767 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4768 return;
4769 case 'P':
4770 gcc_assert (GET_CODE (x) == CONST_INT);
4771 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4772 return;
4773 case 'I':
4774 if (GET_CODE (x) == CONST_INT)
4775 fputs ("i", file);
4776 return;
4777 case 'M':
4778 case 'F':
4779 switch (GET_CODE (XEXP (x, 0)))
4781 case PRE_DEC:
4782 case PRE_INC:
4783 if (ASSEMBLER_DIALECT == 0)
4784 fputs ("s,mb", file);
4785 else
4786 fputs (",mb", file);
4787 break;
4788 case POST_DEC:
4789 case POST_INC:
4790 if (ASSEMBLER_DIALECT == 0)
4791 fputs ("s,ma", file);
4792 else
4793 fputs (",ma", file);
4794 break;
4795 case PLUS:
4796 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4797 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4799 if (ASSEMBLER_DIALECT == 0)
4800 fputs ("x", file);
4802 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4803 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4805 if (ASSEMBLER_DIALECT == 0)
4806 fputs ("x,s", file);
4807 else
4808 fputs (",s", file);
4810 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4811 fputs ("s", file);
4812 break;
4813 default:
4814 if (code == 'F' && ASSEMBLER_DIALECT == 0)
4815 fputs ("s", file);
4816 break;
4818 return;
4819 case 'G':
4820 output_global_address (file, x, 0);
4821 return;
4822 case 'H':
4823 output_global_address (file, x, 1);
4824 return;
4825 case 0: /* Don't do anything special */
4826 break;
4827 case 'Z':
4829 unsigned op[3];
4830 compute_zdepwi_operands (INTVAL (x), op);
4831 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
4832 return;
4834 case 'z':
4836 unsigned op[3];
4837 compute_zdepdi_operands (INTVAL (x), op);
4838 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
4839 return;
4841 case 'c':
4842 /* We can get here from a .vtable_inherit due to our
4843 CONSTANT_ADDRESS_P rejecting perfectly good constant
4844 addresses. */
4845 break;
4846 default:
4847 gcc_unreachable ();
4849 if (GET_CODE (x) == REG)
4851 fputs (reg_names [REGNO (x)], file);
4852 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
4854 fputs ("R", file);
4855 return;
4857 if (FP_REG_P (x)
4858 && GET_MODE_SIZE (GET_MODE (x)) <= 4
4859 && (REGNO (x) & 1) == 0)
4860 fputs ("L", file);
4862 else if (GET_CODE (x) == MEM)
4864 int size = GET_MODE_SIZE (GET_MODE (x));
4865 rtx base = NULL_RTX;
4866 switch (GET_CODE (XEXP (x, 0)))
4868 case PRE_DEC:
4869 case POST_DEC:
4870 base = XEXP (XEXP (x, 0), 0);
4871 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
4872 break;
4873 case PRE_INC:
4874 case POST_INC:
4875 base = XEXP (XEXP (x, 0), 0);
4876 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
4877 break;
4878 case PLUS:
4879 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
4880 fprintf (file, "%s(%s)",
4881 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
4882 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
4883 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4884 fprintf (file, "%s(%s)",
4885 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
4886 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
4887 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4888 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4890 /* Because the REG_POINTER flag can get lost during reload,
4891 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
4892 index and base registers in the combined move patterns. */
4893 rtx base = XEXP (XEXP (x, 0), 1);
4894 rtx index = XEXP (XEXP (x, 0), 0);
4896 fprintf (file, "%s(%s)",
4897 reg_names [REGNO (index)], reg_names [REGNO (base)]);
4899 else
4900 output_address (XEXP (x, 0));
4901 break;
4902 default:
4903 output_address (XEXP (x, 0));
4904 break;
4907 else
4908 output_addr_const (file, x);
4911 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
4913 void
4914 output_global_address (FILE *file, rtx x, int round_constant)
4917 /* Imagine (high (const (plus ...))). */
4918 if (GET_CODE (x) == HIGH)
4919 x = XEXP (x, 0);
4921 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
4922 output_addr_const (file, x);
4923 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
4925 output_addr_const (file, x);
4926 fputs ("-$global$", file);
4928 else if (GET_CODE (x) == CONST)
4930 const char *sep = "";
4931 int offset = 0; /* assembler wants -$global$ at end */
4932 rtx base = NULL_RTX;
4934 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
4936 case SYMBOL_REF:
4937 base = XEXP (XEXP (x, 0), 0);
4938 output_addr_const (file, base);
4939 break;
4940 case CONST_INT:
4941 offset = INTVAL (XEXP (XEXP (x, 0), 0));
4942 break;
4943 default:
4944 gcc_unreachable ();
4947 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
4949 case SYMBOL_REF:
4950 base = XEXP (XEXP (x, 0), 1);
4951 output_addr_const (file, base);
4952 break;
4953 case CONST_INT:
4954 offset = INTVAL (XEXP (XEXP (x, 0), 1));
4955 break;
4956 default:
4957 gcc_unreachable ();
4960 /* How bogus. The compiler is apparently responsible for
4961 rounding the constant if it uses an LR field selector.
4963 The linker and/or assembler seem a better place since
4964 they have to do this kind of thing already.
4966 If we fail to do this, HP's optimizing linker may eliminate
4967 an addil, but not update the ldw/stw/ldo instruction that
4968 uses the result of the addil. */
4969 if (round_constant)
4970 offset = ((offset + 0x1000) & ~0x1fff);
4972 switch (GET_CODE (XEXP (x, 0)))
4974 case PLUS:
4975 if (offset < 0)
4977 offset = -offset;
4978 sep = "-";
4980 else
4981 sep = "+";
4982 break;
4984 case MINUS:
4985 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
4986 sep = "-";
4987 break;
4989 default:
4990 gcc_unreachable ();
4993 if (!read_only_operand (base, VOIDmode) && !flag_pic)
4994 fputs ("-$global$", file);
4995 if (offset)
4996 fprintf (file, "%s%d", sep, offset);
4998 else
4999 output_addr_const (file, x);
5002 /* Output boilerplate text to appear at the beginning of the file.
5003 There are several possible versions. */
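/* For instance, pa_som_file_start on a PA 1.1 target emits roughly:

       .LEVEL 1.1
       .SPACE $PRIVATE$
       .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
       ...
       .IMPORT $global$,DATA
       .IMPORT $$dyncall,MILLICODE

   (see the individual helpers below for the exact directives). */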
5004 #define aputs(x) fputs(x, asm_out_file)
5005 static inline void
5006 pa_file_start_level (void)
5008 if (TARGET_64BIT)
5009 aputs ("\t.LEVEL 2.0w\n");
5010 else if (TARGET_PA_20)
5011 aputs ("\t.LEVEL 2.0\n");
5012 else if (TARGET_PA_11)
5013 aputs ("\t.LEVEL 1.1\n");
5014 else
5015 aputs ("\t.LEVEL 1.0\n");
5018 static inline void
5019 pa_file_start_space (int sortspace)
5021 aputs ("\t.SPACE $PRIVATE$");
5022 if (sortspace)
5023 aputs (",SORT=16");
5024 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5025 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5026 "\n\t.SPACE $TEXT$");
5027 if (sortspace)
5028 aputs (",SORT=8");
5029 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5030 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5033 static inline void
5034 pa_file_start_file (int want_version)
5036 if (write_symbols != NO_DEBUG)
5038 output_file_directive (asm_out_file, main_input_filename);
5039 if (want_version)
5040 aputs ("\t.version\t\"01.01\"\n");
5044 static inline void
5045 pa_file_start_mcount (const char *aswhat)
5047 if (profile_flag)
5048 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5051 static void
5052 pa_elf_file_start (void)
5054 pa_file_start_level ();
5055 pa_file_start_mcount ("ENTRY");
5056 pa_file_start_file (0);
5059 static void
5060 pa_som_file_start (void)
5062 pa_file_start_level ();
5063 pa_file_start_space (0);
5064 aputs ("\t.IMPORT $global$,DATA\n"
5065 "\t.IMPORT $$dyncall,MILLICODE\n");
5066 pa_file_start_mcount ("CODE");
5067 pa_file_start_file (0);
5070 static void
5071 pa_linux_file_start (void)
5073 pa_file_start_file (1);
5074 pa_file_start_level ();
5075 pa_file_start_mcount ("CODE");
5078 static void
5079 pa_hpux64_gas_file_start (void)
5081 pa_file_start_level ();
5082 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5083 if (profile_flag)
5084 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5085 #endif
5086 pa_file_start_file (1);
5089 static void
5090 pa_hpux64_hpas_file_start (void)
5092 pa_file_start_level ();
5093 pa_file_start_space (1);
5094 pa_file_start_mcount ("CODE");
5095 pa_file_start_file (0);
5097 #undef aputs
5099 static struct deferred_plabel *
5100 get_plabel (rtx symbol)
5102 const char *fname = XSTR (symbol, 0);
5103 size_t i;
5105 /* See if we have already put this function on the list of deferred
5106 plabels. This list is generally small, so a linear search is not
5107 too ugly. If it proves too slow, replace it with something faster. */
5108 for (i = 0; i < n_deferred_plabels; i++)
5109 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5110 break;
5112 /* If the deferred plabel list is empty, or this entry was not found
5113 on the list, create a new entry on the list. */
5114 if (deferred_plabels == NULL || i == n_deferred_plabels)
5116 tree id;
5118 if (deferred_plabels == 0)
5119 deferred_plabels = (struct deferred_plabel *)
5120 ggc_alloc (sizeof (struct deferred_plabel));
5121 else
5122 deferred_plabels = (struct deferred_plabel *)
5123 ggc_realloc (deferred_plabels,
5124 ((n_deferred_plabels + 1)
5125 * sizeof (struct deferred_plabel)));
5127 i = n_deferred_plabels++;
5128 deferred_plabels[i].internal_label = gen_label_rtx ();
5129 deferred_plabels[i].symbol = symbol;
5131 /* Gross. We have just implicitly taken the address of this
5132 function. Mark it in the same manner as assemble_name. */
5133 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5134 if (id)
5135 mark_referenced (id);
5138 return &deferred_plabels[i];
5141 static void
5142 output_deferred_plabels (void)
5144 size_t i;
5145 /* If we have deferred plabels, then we need to switch into the data
5146 section and align it to a 4 byte boundary before we output the
5147 deferred plabels. */
5148 if (n_deferred_plabels)
5150 data_section ();
5151 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5154 /* Now output the deferred plabels. */
5155 for (i = 0; i < n_deferred_plabels; i++)
5157 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5158 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5159 assemble_integer (deferred_plabels[i].symbol,
5160 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
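/* The loop above produces one entry per plabel, along the lines of:

       .align 4
   L$0042:
       .word some_function

   (label and symbol are illustrative; 64-bit targets emit 8-byte,
   8-byte-aligned entries instead). */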
5164 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5165 /* Initialize optabs to point to HPUX long double emulation routines. */
5166 static void
5167 pa_hpux_init_libfuncs (void)
5169 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5170 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5171 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5172 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5173 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5174 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5175 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5176 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5177 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5179 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5180 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5181 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5182 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5183 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5184 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5185 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5187 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5188 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5189 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5190 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5192 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5193 ? "__U_Qfcnvfxt_quad_to_sgl"
5194 : "_U_Qfcnvfxt_quad_to_sgl");
5195 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5196 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5197 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5199 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5200 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5202 #endif
5204 /* HP's millicode routines mean something special to the assembler.
5205 Keep track of which ones we have used. */
5207 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5208 static void import_milli (enum millicodes);
5209 static char imported[(int) end1000];
5210 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5211 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5212 #define MILLI_START 10
5214 static void
5215 import_milli (enum millicodes code)
5217 char str[sizeof (import_string)];
5219 if (!imported[(int) code])
5221 imported[(int) code] = 1;
5222 strcpy (str, import_string);
5223 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5224 output_asm_insn (str, 0);
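/* E.g. import_milli (mulI) overwrites the "...." placeholder and emits

       .IMPORT $$mulI,MILLICODE

   exactly once per translation unit, guarded by the imported[] flags. */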
5228 /* The register constraints have put the operands and return value in
5229 the proper registers. */
5231 const char *
5232 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5234 import_milli (mulI);
5235 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5238 /* Emit the rtl for doing a division by a constant. */
5240 /* Do magic division millicodes exist for this value? */
5241 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5243 /* We'll use an array to keep track of the magic millicodes and
5244 whether or not we've used them already. [n][0] is signed, [n][1] is
5245 unsigned. */
5247 static int div_milli[16][2];
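/* Per the magic_milli table above, dedicated millicodes exist for the
   divisors 3, 5, 6, 7, 9, 10, 12, 14 and 15; e.g. a division by 7 can
   call $$divI_7 / $$divU_7 (see output_div_insn below) rather than the
   generic $$divI routine. */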
5249 int
5250 emit_hpdiv_const (rtx *operands, int unsignedp)
5252 if (GET_CODE (operands[2]) == CONST_INT
5253 && INTVAL (operands[2]) > 0
5254 && INTVAL (operands[2]) < 16
5255 && magic_milli[INTVAL (operands[2])])
5257 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5259 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5260 emit
5261 (gen_rtx_PARALLEL
5262 (VOIDmode,
5263 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5264 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5265 SImode,
5266 gen_rtx_REG (SImode, 26),
5267 operands[2])),
5268 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5269 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5270 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5271 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5272 gen_rtx_CLOBBER (VOIDmode, ret))));
5273 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5274 return 1;
5276 return 0;
5279 const char *
5280 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5282 int divisor;
5284 /* If the divisor is a constant, try to use one of the special
5285 opcodes. */
5286 if (GET_CODE (operands[0]) == CONST_INT)
5288 static char buf[100];
5289 divisor = INTVAL (operands[0]);
5290 if (!div_milli[divisor][unsignedp])
5292 div_milli[divisor][unsignedp] = 1;
5293 if (unsignedp)
5294 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5295 else
5296 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5298 if (unsignedp)
5300 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5301 INTVAL (operands[0]));
5302 return output_millicode_call (insn,
5303 gen_rtx_SYMBOL_REF (SImode, buf));
5305 else
5307 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5308 INTVAL (operands[0]));
5309 return output_millicode_call (insn,
5310 gen_rtx_SYMBOL_REF (SImode, buf));
5313 /* Divisor isn't a special constant. */
5314 else
5316 if (unsignedp)
5318 import_milli (divU);
5319 return output_millicode_call (insn,
5320 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5322 else
5324 import_milli (divI);
5325 return output_millicode_call (insn,
5326 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5331 /* Output a $$rem millicode to do mod. */
5333 const char *
5334 output_mod_insn (int unsignedp, rtx insn)
5336 if (unsignedp)
5338 import_milli (remU);
5339 return output_millicode_call (insn,
5340 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5342 else
5344 import_milli (remI);
5345 return output_millicode_call (insn,
5346 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5350 void
5351 output_arg_descriptor (rtx call_insn)
5353 const char *arg_regs[4];
5354 enum machine_mode arg_mode;
5355 rtx link;
5356 int i, output_flag = 0;
5357 int regno;
5359 /* We neither need nor want argument location descriptors for the
5360 64bit runtime environment or the ELF32 environment. */
5361 if (TARGET_64BIT || TARGET_ELF32)
5362 return;
5364 for (i = 0; i < 4; i++)
5365 arg_regs[i] = 0;
5367 /* Specify explicitly that no argument relocations should take place
5368 if using the portable runtime calling conventions. */
5369 if (TARGET_PORTABLE_RUNTIME)
5371 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5372 asm_out_file);
5373 return;
5376 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5377 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5378 link; link = XEXP (link, 1))
5380 rtx use = XEXP (link, 0);
5382 if (! (GET_CODE (use) == USE
5383 && GET_CODE (XEXP (use, 0)) == REG
5384 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5385 continue;
5387 arg_mode = GET_MODE (XEXP (use, 0));
5388 regno = REGNO (XEXP (use, 0));
5389 if (regno >= 23 && regno <= 26)
5391 arg_regs[26 - regno] = "GR";
5392 if (arg_mode == DImode)
5393 arg_regs[25 - regno] = "GR";
5395 else if (regno >= 32 && regno <= 39)
5397 if (arg_mode == SFmode)
5398 arg_regs[(regno - 32) / 2] = "FR";
5399 else
5401 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5402 arg_regs[(regno - 34) / 2] = "FR";
5403 arg_regs[(regno - 34) / 2 + 1] = "FU";
5404 #else
5405 arg_regs[(regno - 34) / 2] = "FU";
5406 arg_regs[(regno - 34) / 2 + 1] = "FR";
5407 #endif
5411 fputs ("\t.CALL ", asm_out_file);
5412 for (i = 0; i < 4; i++)
5414 if (arg_regs[i])
5416 if (output_flag++)
5417 fputc (',', asm_out_file);
5418 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5421 fputc ('\n', asm_out_file);
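/* Example: a call to int f (int a, int b) finds USEs of %r26 and %r25
   in the loop above and emits

       .CALL ARGW0=GR,ARGW1=GR

   while floating-point arguments mark their words FR/FU, with the
   ordering controlled by HP_FP_ARG_DESCRIPTOR_REVERSED. */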
5424 /* Return the class of any secondary reload register that is needed to
5425 move IN into a register in class CLASS using mode MODE.
5427 Profiling has shown that this routine and its descendants account for
5428 a significant amount of compile time (~7%). So it has been
5429 optimized to reduce redundant computations and eliminate useless
5430 function calls.
5432 It might be worthwhile to try and make this a leaf function too. */
5434 enum reg_class
5435 secondary_reload_class (enum reg_class class, enum machine_mode mode, rtx in)
5437 int regno, is_symbolic;
5439 /* Trying to load a constant into a FP register during PIC code
5440 generation will require %r1 as a scratch register. */
5441 if (flag_pic
5442 && GET_MODE_CLASS (mode) == MODE_INT
5443 && FP_REG_CLASS_P (class)
5444 && (GET_CODE (in) == CONST_INT || GET_CODE (in) == CONST_DOUBLE))
5445 return R1_REGS;
5447 /* Profiling showed the PA port spends about 1.3% of its compilation
5448 time in true_regnum from calls inside secondary_reload_class. */
5450 if (GET_CODE (in) == REG)
5452 regno = REGNO (in);
5453 if (regno >= FIRST_PSEUDO_REGISTER)
5454 regno = true_regnum (in);
5456 else if (GET_CODE (in) == SUBREG)
5457 regno = true_regnum (in);
5458 else
5459 regno = -1;
5461 /* If we have something like (mem (mem (...)), we can safely assume the
5462 inner MEM will end up in a general register after reloading, so there's
5463 no need for a secondary reload. */
5464 if (GET_CODE (in) == MEM
5465 && GET_CODE (XEXP (in, 0)) == MEM)
5466 return NO_REGS;
5468 /* Handle out of range displacement for integer mode loads/stores of
5469 FP registers. */
5470 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5471 && GET_MODE_CLASS (mode) == MODE_INT
5472 && FP_REG_CLASS_P (class))
5473 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5474 return GENERAL_REGS;
5476 /* A SAR<->FP register copy requires a secondary register (GPR) as
5477 well as secondary memory. */
5478 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5479 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5480 || (class == SHIFT_REGS && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5481 return GENERAL_REGS;
5483 if (GET_CODE (in) == HIGH)
5484 in = XEXP (in, 0);
5486 /* Profiling has shown that GCC spends about 2.6% of its compilation
5487 time in symbolic_operand from calls inside secondary_reload_class.
5489 We use an inline copy and only compute its return value once to avoid
5490 useless work. */
5491 switch (GET_CODE (in))
5493 rtx tmp;
5495 case SYMBOL_REF:
5496 case LABEL_REF:
5497 is_symbolic = 1;
5498 break;
5499 case CONST:
5500 tmp = XEXP (in, 0);
5501 is_symbolic = ((GET_CODE (XEXP (tmp, 0)) == SYMBOL_REF
5502 || GET_CODE (XEXP (tmp, 0)) == LABEL_REF)
5503 && GET_CODE (XEXP (tmp, 1)) == CONST_INT);
5504 break;
5506 default:
5507 is_symbolic = 0;
5508 break;
5511 if (!flag_pic
5512 && is_symbolic
5513 && read_only_operand (in, VOIDmode))
5514 return NO_REGS;
5516 if (class != R1_REGS && is_symbolic)
5517 return R1_REGS;
5519 return NO_REGS;
5522 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5523 by invisible reference. As a GCC extension, we also pass anything
5524 with a zero or variable size by reference.
5526 The 64-bit runtime does not describe passing any types by invisible
5527 reference. The internals of GCC can't currently handle passing
5528 empty structures, and zero or variable length arrays when they are
5529 not passed entirely on the stack or by reference. Thus, as a GCC
5530 extension, we pass these types by reference. The HP compiler doesn't
5531 support these types, so hopefully there shouldn't be any compatibility
5532 issues. This may have to be revisited when HP releases a C99 compiler
5533 or updates the ABI. */
5535 static bool
5536 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5537 enum machine_mode mode, tree type,
5538 bool named ATTRIBUTE_UNUSED)
5540 HOST_WIDE_INT size;
5542 if (type)
5543 size = int_size_in_bytes (type);
5544 else
5545 size = GET_MODE_SIZE (mode);
5547 if (TARGET_64BIT)
5548 return size <= 0;
5549 else
5550 return size <= 0 || size > 8;
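/* Concrete instances of the rules above: a 12-byte struct goes by
   reference in the 32-bit runtime (size > 8) but by value in the
   64-bit runtime, while a zero-sized struct goes by reference in
   both. */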
5553 enum direction
5554 function_arg_padding (enum machine_mode mode, tree type)
5556 if (mode == BLKmode
5557 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5559 /* Return none if justification is not required. */
5560 if (type
5561 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5562 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5563 return none;
5565 /* The directions set here are ignored when a BLKmode argument larger
5566 than a word is placed in a register. Different code is used for
5567 the stack and registers. This makes it difficult to have a
5568 consistent data representation for both the stack and registers.
5569 For both runtimes, the justification and padding for arguments on
5570 the stack and in registers should be identical. */
5571 if (TARGET_64BIT)
5572 /* The 64-bit runtime specifies left justification for aggregates. */
5573 return upward;
5574 else
5575 /* The 32-bit runtime architecture specifies right justification.
5576 When the argument is passed on the stack, the argument is padded
5577 with garbage on the left. The HP compiler pads with zeros. */
5578 return downward;
5581 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5582 return downward;
5583 else
5584 return none;
5588 /* Do what is necessary for `va_start'. We look at the current function
5589 to determine if stdargs or varargs is used and fill in an initial
5590 va_list. A pointer to this constructor is returned. */
5592 static rtx
5593 hppa_builtin_saveregs (void)
5595 rtx offset, dest;
5596 tree fntype = TREE_TYPE (current_function_decl);
5597 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5598 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5599 != void_type_node)))
5600 ? UNITS_PER_WORD : 0);
5602 if (argadj)
5603 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5604 else
5605 offset = current_function_arg_offset_rtx;
5607 if (TARGET_64BIT)
5609 int i, off;
5611 /* Adjust for varargs/stdarg differences. */
5612 if (argadj)
5613 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5614 else
5615 offset = current_function_arg_offset_rtx;
5617 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5618 from the incoming arg pointer and growing to larger addresses. */
5619 for (i = 26, off = -64; i >= 19; i--, off += 8)
5620 emit_move_insn (gen_rtx_MEM (word_mode,
5621 plus_constant (arg_pointer_rtx, off)),
5622 gen_rtx_REG (word_mode, i));
5624 /* The incoming args pointer points just beyond the flushback area;
5625 normally this is not a serious concern. However, when we are doing
5626 varargs/stdargs we want to make the arg pointer point to the start
5627 of the incoming argument area. */
5628 emit_move_insn (virtual_incoming_args_rtx,
5629 plus_constant (arg_pointer_rtx, -64));
5631 /* Now return a pointer to the first anonymous argument. */
5632 return copy_to_reg (expand_binop (Pmode, add_optab,
5633 virtual_incoming_args_rtx,
5634 offset, 0, 0, OPTAB_LIB_WIDEN));
5637 /* Store general registers on the stack. */
5638 dest = gen_rtx_MEM (BLKmode,
5639 plus_constant (current_function_internal_arg_pointer,
5640 -16));
5641 set_mem_alias_set (dest, get_varargs_alias_set ());
5642 set_mem_align (dest, BITS_PER_WORD);
5643 move_block_from_reg (23, dest, 4);
5645 /* move_block_from_reg will emit code to store the argument registers
5646 individually as scalar stores.
5648 However, other insns may later load from the same addresses for
5649 a structure load (passing a struct to a varargs routine).
5651 The alias code assumes that such aliasing can never happen, so we
5652 have to keep memory referencing insns from moving up beyond the
5653 last argument register store. So we emit a blockage insn here. */
5654 emit_insn (gen_blockage ());
5656 return copy_to_reg (expand_binop (Pmode, add_optab,
5657 current_function_internal_arg_pointer,
5658 offset, 0, 0, OPTAB_LIB_WIDEN));
5661 void
5662 hppa_va_start (tree valist, rtx nextarg)
5664 nextarg = expand_builtin_saveregs ();
5665 std_expand_builtin_va_start (valist, nextarg);
5668 static tree
5669 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5671 if (TARGET_64BIT)
5673 /* Args grow upward. We can use the generic routines. */
5674 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5676 else /* !TARGET_64BIT */
5678 tree ptr = build_pointer_type (type);
5679 tree valist_type;
5680 tree t, u;
5681 unsigned int size, ofs;
5682 bool indirect;
5684 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5685 if (indirect)
5687 type = ptr;
5688 ptr = build_pointer_type (type);
5690 size = int_size_in_bytes (type);
5691 valist_type = TREE_TYPE (valist);
5693 /* Args grow down. Not handled by generic routines. */
5695 u = fold_convert (valist_type, size_in_bytes (type));
5696 t = build (MINUS_EXPR, valist_type, valist, u);
5698 /* Copied from va-pa.h, but we probably don't need to align to
5699 word size, since we generate and preserve that invariant. */
5700 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5701 t = build (BIT_AND_EXPR, valist_type, t, u);
5703 t = build (MODIFY_EXPR, valist_type, valist, t);
5705 ofs = (8 - size) % 4;
5706 if (ofs != 0)
5708 u = fold_convert (valist_type, size_int (ofs));
5709 t = build (PLUS_EXPR, valist_type, t, u);
5712 t = fold_convert (ptr, t);
5713 t = build_fold_indirect_ref (t);
5715 if (indirect)
5716 t = build_fold_indirect_ref (t);
5718 return t;
5722 /* True if MODE is valid for the target. By "valid", we mean able to
5723 be manipulated in non-trivial ways. In particular, this means all
5724 the arithmetic is supported.
5726 Currently, TImode is not valid as the HP 64-bit runtime documentation
5727 doesn't document the alignment and calling conventions for this type.
5728 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5729 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
5731 static bool
5732 pa_scalar_mode_supported_p (enum machine_mode mode)
5734 int precision = GET_MODE_PRECISION (mode);
5736 switch (GET_MODE_CLASS (mode))
5738 case MODE_PARTIAL_INT:
5739 case MODE_INT:
5740 if (precision == CHAR_TYPE_SIZE)
5741 return true;
5742 if (precision == SHORT_TYPE_SIZE)
5743 return true;
5744 if (precision == INT_TYPE_SIZE)
5745 return true;
5746 if (precision == LONG_TYPE_SIZE)
5747 return true;
5748 if (precision == LONG_LONG_TYPE_SIZE)
5749 return true;
5750 return false;
5752 case MODE_FLOAT:
5753 if (precision == FLOAT_TYPE_SIZE)
5754 return true;
5755 if (precision == DOUBLE_TYPE_SIZE)
5756 return true;
5757 if (precision == LONG_DOUBLE_TYPE_SIZE)
5758 return true;
5759 return false;
5761 default:
5762 gcc_unreachable ();
5766 /* This routine handles all the normal conditional branch sequences we
5767 might need to generate. It handles compare immediate vs compare
5768 register, nullification of delay slots, varying length branches,
5769 negated branches, and all combinations of the above. It returns the
5770 output appropriate to emit the branch corresponding to all given
5771 parameters. */
5773 const char *
5774 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
5776 static char buf[100];
5777 int useskip = 0;
5778 rtx xoperands[5];
5780 /* A conditional branch to the following instruction (e.g. the delay slot)
5781 is asking for a disaster. This can happen when not optimizing and
5782 when jump optimization fails.
5784 While it is usually safe to emit nothing, this can fail if the
5785 preceding instruction is a nullified branch with an empty delay
5786 slot and the same branch target as this branch. We could check
5787 for this but jump optimization should eliminate nop jumps. It
5788 is always safe to emit a nop. */
5789 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5790 return "nop";
5792 /* The doubleword form of the cmpib instruction doesn't have the LEU
5793 and GTU conditions while the cmpb instruction does. Since we accept
5794 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5795 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5796 operands[2] = gen_rtx_REG (DImode, 0);
5798 /* If this is a long branch with its delay slot unfilled, set `nullify'
5799 as it can nullify the delay slot and save a nop. */
5800 if (length == 8 && dbr_sequence_length () == 0)
5801 nullify = 1;
5803 /* If this is a short forward conditional branch which did not get
5804 its delay slot filled, the delay slot can still be nullified. */
5805 if (! nullify && length == 4 && dbr_sequence_length () == 0)
5806 nullify = forward_branch_p (insn);
5808 /* A forward branch over a single nullified insn can be done with a
5809 comclr instruction. This avoids a single cycle penalty due to a
5810 mis-predicted branch if we fall through (branch not taken). */
5811 if (length == 4
5812 && next_real_insn (insn) != 0
5813 && get_attr_length (next_real_insn (insn)) == 4
5814 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
5815 && nullify)
5816 useskip = 1;
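5817 /* In the templates below, the "{...|...}" syntax selects the PA 1.x or PA 2.0 mnemonic according to the assembler dialect. */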
5818 switch (length)
5820 /* All short conditional branches except backwards with an unfilled
5821 delay slot. */
5822 case 4:
5823 if (useskip)
5824 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
5825 else
5826 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5827 if (GET_MODE (operands[1]) == DImode)
5828 strcat (buf, "*");
5829 if (negated)
5830 strcat (buf, "%B3");
5831 else
5832 strcat (buf, "%S3");
5833 if (useskip)
5834 strcat (buf, " %2,%r1,%%r0");
5835 else if (nullify)
5836 strcat (buf, ",n %2,%r1,%0");
5837 else
5838 strcat (buf, " %2,%r1,%0");
5839 break;
5841 /* All long conditionals. Note a short backward branch with an
5842 unfilled delay slot is treated just like a long backward branch
5843 with an unfilled delay slot. */
5844 case 8:
5845 /* Handle weird backwards branch with a filled delay slot
5846 which is nullified. */
5847 if (dbr_sequence_length () != 0
5848 && ! forward_branch_p (insn)
5849 && nullify)
5851 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5852 if (GET_MODE (operands[1]) == DImode)
5853 strcat (buf, "*");
5854 if (negated)
5855 strcat (buf, "%S3");
5856 else
5857 strcat (buf, "%B3");
5858 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
5860 /* Handle short backwards branch with an unfilled delay slot.
5861 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
5862 taken and untaken branches. */
5863 else if (dbr_sequence_length () == 0
5864 && ! forward_branch_p (insn)
5865 && INSN_ADDRESSES_SET_P ()
5866 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
5867 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
5869 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5870 if (GET_MODE (operands[1]) == DImode)
5871 strcat (buf, "*");
5872 if (negated)
5873 strcat (buf, "%B3 %2,%r1,%0%#");
5874 else
5875 strcat (buf, "%S3 %2,%r1,%0%#");
5877 else
5879 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
5880 if (GET_MODE (operands[1]) == DImode)
5881 strcat (buf, "*");
5882 if (negated)
5883 strcat (buf, "%S3");
5884 else
5885 strcat (buf, "%B3");
5886 if (nullify)
5887 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
5888 else
5889 strcat (buf, " %2,%r1,%%r0\n\tb %0");
5891 break;
5893 case 20:
5894 case 28:
5895 xoperands[0] = operands[0];
5896 xoperands[1] = operands[1];
5897 xoperands[2] = operands[2];
5898 xoperands[3] = operands[3];
5900 /* The reversed conditional branch must branch over one additional
5901 instruction if the delay slot is filled. If the delay slot
5902 is empty, the instruction after the reversed conditional branch
5903 must be nullified. */
5904 nullify = dbr_sequence_length () == 0;
5905 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
5907 /* Create a reversed conditional branch which branches around
5908 the following insns. */
5909 if (GET_MODE (operands[1]) != DImode)
5911 if (nullify)
5913 if (negated)
5914 strcpy (buf,
5915 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
5916 else
5917 strcpy (buf,
5918 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
5920 else
5922 if (negated)
5923 strcpy (buf,
5924 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
5925 else
5926 strcpy (buf,
5927 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
5930 else
5932 if (nullify)
5934 if (negated)
5935 strcpy (buf,
5936 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
5937 else
5938 strcpy (buf,
5939 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
5941 else
5943 if (negated)
5944 strcpy (buf,
5945 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
5946 else
5947 strcpy (buf,
5948 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
5952 output_asm_insn (buf, xoperands);
5953 return output_lbranch (operands[0], insn);
5955 default:
5956 gcc_unreachable ();
5958 return buf;
5961 /* This routine handles long unconditional branches that exceed the
5962 maximum range of a simple branch instruction. */
5964 const char *
5965 output_lbranch (rtx dest, rtx insn)
5967 rtx xoperands[2];
5969 xoperands[0] = dest;
5971 /* First, free up the delay slot. */
5972 if (dbr_sequence_length () != 0)
5974 /* We can't handle a jump in the delay slot. */
5975 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
5977 final_scan_insn (NEXT_INSN (insn), asm_out_file,
5978 optimize, 0, NULL);
5980 /* Now delete the delay insn. */
5981 PUT_CODE (NEXT_INSN (insn), NOTE);
5982 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
5983 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
5986 /* Output an insn to save %r1. The runtime documentation doesn't
5987 specify whether the "Clean Up" slot in the caller's frame can
5988 be clobbered by the callee. It isn't copied by HP's builtin
5989 alloca, so this suggests that it can be clobbered if necessary.
5990 The "Static Link" location is copied by HP builtin alloca, so
5991 we avoid using it. Using the cleanup slot might be a problem
5992 if we have to interoperate with languages that pass cleanup
5993 information. However, it should be possible to handle these
5994 situations with GCC's asm feature.
5996 The "Current RP" slot is reserved for the called procedure, so
5997 we try to use it when we don't have a frame of our own. It's
5998 rather unlikely that we won't have a frame when we need to emit
5999 a very long branch.
6001 Really, the way to go in the long term is a register scavenger; go to
6002 the target of the jump and find a register which we can use
6003 as a scratch to hold the value in %r1. Then, we wouldn't have
6004 to free up the delay slot or clobber a slot that may be needed
6005 for other purposes. */
6006 if (TARGET_64BIT)
6008 if (actual_fsize == 0 && !regs_ever_live[2])
6009 /* Use the return pointer slot in the frame marker. */
6010 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6011 else
6012 /* Use the slot at -40 in the frame marker since HP builtin
6013 alloca doesn't copy it. */
6014 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6016 else
6018 if (actual_fsize == 0 && !regs_ever_live[2])
6019 /* Use the return pointer slot in the frame marker. */
6020 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6021 else
6022 /* Use the "Clean Up" slot in the frame marker. In GCC,
6023 the only other use of this location is for copying a
6024 floating point double argument from a floating-point
6025 register to two general registers. The copy is done
6026 as an "atomic" operation when outputting a call, so it
6027 won't interfere with our using the location here. */
6028 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6031 if (TARGET_PORTABLE_RUNTIME)
6033 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6034 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6035 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6037 else if (flag_pic)
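6038 /* Use "{bl|b,l} .+8" to get the PC into %r1, then form the target address pc-relatively. */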
6039 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6040 if (TARGET_SOM || !TARGET_GAS)
6042 xoperands[1] = gen_label_rtx ();
6043 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6044 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6045 CODE_LABEL_NUMBER (xoperands[1]));
6046 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6048 else
6050 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6051 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6053 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6055 else
6056 /* Now output a very long branch to the original target. */
6057 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6059 /* Now restore the value of %r1 in the delay slot. */
6060 if (TARGET_64BIT)
6062 if (actual_fsize == 0 && !regs_ever_live[2])
6063 return "ldd -16(%%r30),%%r1";
6064 else
6065 return "ldd -40(%%r30),%%r1";
6067 else
6069 if (actual_fsize == 0 && !regs_ever_live[2])
6070 return "ldw -20(%%r30),%%r1";
6071 else
6072 return "ldw -12(%%r30),%%r1";
6076 /* This routine handles all the branch-on-bit conditional branch sequences we
6077 might need to generate. It handles nullification of delay slots,
6078 varying length branches, negated branches and all combinations of the
6079 above. It returns the appropriate output template to emit the branch. */
6081 const char *
6082 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6083 int negated, rtx insn, int which)
6085 static char buf[100];
6086 int useskip = 0;
6088 /* A conditional branch to the following instruction (e.g. the delay slot) is
6089 asking for a disaster. I do not think this can happen as this pattern
6090 is only used when optimizing; jump optimization should eliminate the
6091 jump. But be prepared just in case. */
6093 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6094 return "nop";
6096 /* If this is a long branch with its delay slot unfilled, set `nullify'
6097 as it can nullify the delay slot and save a nop. */
6098 if (length == 8 && dbr_sequence_length () == 0)
6099 nullify = 1;
6101 /* If this is a short forward conditional branch which did not get
6102 its delay slot filled, the delay slot can still be nullified. */
6103 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6104 nullify = forward_branch_p (insn);
6106 /* A forward branch over a single nullified insn can be done with an
6107 extrs instruction. This avoids a single cycle penalty due to a
6108 mis-predicted branch if we fall through (branch not taken). */
6110 if (length == 4
6111 && next_real_insn (insn) != 0
6112 && get_attr_length (next_real_insn (insn)) == 4
6113 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6114 && nullify)
6115 useskip = 1;
6117 switch (length)
6120 /* All short conditional branches except backwards with an unfilled
6121 delay slot. */
6122 case 4:
6123 if (useskip)
6124 strcpy (buf, "{extrs,|extrw,s,}");
6125 else
6126 strcpy (buf, "bb,");
6127 if (useskip && GET_MODE (operands[0]) == DImode)
6128 strcpy (buf, "extrd,s,*");
6129 else if (GET_MODE (operands[0]) == DImode)
6130 strcpy (buf, "bb,*");
6131 if ((which == 0 && negated)
6132 || (which == 1 && ! negated))
6133 strcat (buf, ">=");
6134 else
6135 strcat (buf, "<");
6136 if (useskip)
6137 strcat (buf, " %0,%1,1,%%r0");
6138 else if (nullify && negated)
6139 strcat (buf, ",n %0,%1,%3");
6140 else if (nullify && ! negated)
6141 strcat (buf, ",n %0,%1,%2");
6142 else if (! nullify && negated)
6143 strcat (buf, "%0,%1,%3");
6144 else if (! nullify && ! negated)
6145 strcat (buf, " %0,%1,%2");
6146 break;
6148 /* All long conditionals. Note a short backward branch with an
6149 unfilled delay slot is treated just like a long backward branch
6150 with an unfilled delay slot. */
6151 case 8:
6152 /* Handle weird backwards branch with a filled delay slot
6153 which is nullified. */
6154 if (dbr_sequence_length () != 0
6155 && ! forward_branch_p (insn)
6156 && nullify)
6158 strcpy (buf, "bb,");
6159 if (GET_MODE (operands[0]) == DImode)
6160 strcat (buf, "*");
6161 if ((which == 0 && negated)
6162 || (which == 1 && ! negated))
6163 strcat (buf, "<");
6164 else
6165 strcat (buf, ">=");
6166 if (negated)
6167 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6168 else
6169 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6171 /* Handle short backwards branch with an unfilled delay slot.
6172 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6173 taken and untaken branches. */
6174 else if (dbr_sequence_length () == 0
6175 && ! forward_branch_p (insn)
6176 && INSN_ADDRESSES_SET_P ()
6177 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6178 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6180 strcpy (buf, "bb,");
6181 if (GET_MODE (operands[0]) == DImode)
6182 strcat (buf, "*");
6183 if ((which == 0 && negated)
6184 || (which == 1 && ! negated))
6185 strcat (buf, ">=");
6186 else
6187 strcat (buf, "<");
6188 if (negated)
6189 strcat (buf, " %0,%1,%3%#");
6190 else
6191 strcat (buf, " %0,%1,%2%#");
6193 else
6195 strcpy (buf, "{extrs,|extrw,s,}");
6196 if (GET_MODE (operands[0]) == DImode)
6197 strcpy (buf, "extrd,s,*");
6198 if ((which == 0 && negated)
6199 || (which == 1 && ! negated))
6200 strcat (buf, "<");
6201 else
6202 strcat (buf, ">=");
6203 if (nullify && negated)
6204 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6205 else if (nullify && ! negated)
6206 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6207 else if (negated)
6208 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6209 else
6210 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6212 break;
6214 default:
6215 gcc_unreachable ();
6217 return buf;
6220 /* This routine handles all the branch-on-variable-bit conditional branch
6221 sequences we might need to generate. It handles nullification of delay
6222 slots, varying length branches, negated branches and all combinations
6223 of the above. It returns the appropriate output template to emit the
6224 branch. */
6226 const char *
6227 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6228 int negated, rtx insn, int which)
6230 static char buf[100];
6231 int useskip = 0;
6233 /* A conditional branch to the following instruction (e.g. the delay slot) is
6234 asking for a disaster. I do not think this can happen as this pattern
6235 is only used when optimizing; jump optimization should eliminate the
6236 jump. But be prepared just in case. */
6238 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6239 return "nop";
6241 /* If this is a long branch with its delay slot unfilled, set `nullify'
6242 as it can nullify the delay slot and save a nop. */
6243 if (length == 8 && dbr_sequence_length () == 0)
6244 nullify = 1;
6246 /* If this is a short forward conditional branch which did not get
6247 its delay slot filled, the delay slot can still be nullified. */
6248 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6249 nullify = forward_branch_p (insn);
6251 /* A forward branch over a single nullified insn can be done with an
6252 extrs instruction. This avoids a single cycle penalty due to a
6253 mis-predicted branch if we fall through (branch not taken). */
6255 if (length == 4
6256 && next_real_insn (insn) != 0
6257 && get_attr_length (next_real_insn (insn)) == 4
6258 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6259 && nullify)
6260 useskip = 1;
6262 switch (length)
6265 /* All short conditional branches except backwards with an unfilled
6266 delay slot. */
6267 case 4:
6268 if (useskip)
6269 strcpy (buf, "{vextrs,|extrw,s,}");
6270 else
6271 strcpy (buf, "{bvb,|bb,}");
6272 if (useskip && GET_MODE (operands[0]) == DImode)
6273 strcpy (buf, "extrd,s,*");
6274 else if (GET_MODE (operands[0]) == DImode)
6275 strcpy (buf, "bb,*");
6276 if ((which == 0 && negated)
6277 || (which == 1 && ! negated))
6278 strcat (buf, ">=");
6279 else
6280 strcat (buf, "<");
6281 if (useskip)
6282 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6283 else if (nullify && negated)
6284 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6285 else if (nullify && ! negated)
6286 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6287 else if (! nullify && negated)
6288 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6289 else if (! nullify && ! negated)
6290 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6291 break;
6293 /* All long conditionals. Note a short backward branch with an
6294 unfilled delay slot is treated just like a long backward branch
6295 with an unfilled delay slot. */
6296 case 8:
6297 /* Handle weird backwards branch with a filled delay slot
6298 which is nullified. */
6299 if (dbr_sequence_length () != 0
6300 && ! forward_branch_p (insn)
6301 && nullify)
6303 strcpy (buf, "{bvb,|bb,}");
6304 if (GET_MODE (operands[0]) == DImode)
6305 strcat (buf, "*");
6306 if ((which == 0 && negated)
6307 || (which == 1 && ! negated))
6308 strcat (buf, "<");
6309 else
6310 strcat (buf, ">=");
6311 if (negated)
6312 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6313 else
6314 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6316 /* Handle short backwards branch with an unfilled delay slot.
6317 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6318 taken and untaken branches. */
6319 else if (dbr_sequence_length () == 0
6320 && ! forward_branch_p (insn)
6321 && INSN_ADDRESSES_SET_P ()
6322 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6323 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6325 strcpy (buf, "{bvb,|bb,}");
6326 if (GET_MODE (operands[0]) == DImode)
6327 strcat (buf, "*");
6328 if ((which == 0 && negated)
6329 || (which == 1 && ! negated))
6330 strcat (buf, ">=");
6331 else
6332 strcat (buf, "<");
6333 if (negated)
6334 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6335 else
6336 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6338 else
6340 strcpy (buf, "{vextrs,|extrw,s,}");
6341 if (GET_MODE (operands[0]) == DImode)
6342 strcpy (buf, "extrd,s,*");
6343 if ((which == 0 && negated)
6344 || (which == 1 && ! negated))
6345 strcat (buf, "<");
6346 else
6347 strcat (buf, ">=");
6348 if (nullify && negated)
6349 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6350 else if (nullify && ! negated)
6351 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6352 else if (negated)
6353 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6354 else
6355 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6357 break;
6359 default:
6360 gcc_unreachable ();
6362 return buf;
6365 /* Return the output template for emitting a dbra type insn.
6367 Note it may perform some output operations on its own before
6368 returning the final output string. */
6369 const char *
6370 output_dbra (rtx *operands, rtx insn, int which_alternative)
6373 /* A conditional branch to the following instruction (e.g. the delay slot) is
6374 asking for a disaster. Be prepared! */
6376 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6378 if (which_alternative == 0)
6379 return "ldo %1(%0),%0";
6380 else if (which_alternative == 1)
6382 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6383 output_asm_insn ("ldw -16(%%r30),%4", operands);
6384 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6385 return "{fldws|fldw} -16(%%r30),%0";
6387 else
6389 output_asm_insn ("ldw %0,%4", operands);
6390 return "ldo %1(%4),%4\n\tstw %4,%0";
6394 if (which_alternative == 0)
6396 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6397 int length = get_attr_length (insn);
6399 /* If this is a long branch with its delay slot unfilled, set `nullify'
6400 as it can nullify the delay slot and save a nop. */
6401 if (length == 8 && dbr_sequence_length () == 0)
6402 nullify = 1;
6404 /* If this is a short forward conditional branch which did not get
6405 its delay slot filled, the delay slot can still be nullified. */
6406 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6407 nullify = forward_branch_p (insn);
6409 switch (length)
6411 case 4:
6412 if (nullify)
6413 return "addib,%C2,n %1,%0,%3";
6414 else
6415 return "addib,%C2 %1,%0,%3";
6417 case 8:
6418 /* Handle weird backwards branch with a filled delay slot
6419 which is nullified. */
6420 if (dbr_sequence_length () != 0
6421 && ! forward_branch_p (insn)
6422 && nullify)
6423 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6424 /* Handle short backwards branch with an unfilled delay slot.
6425 Using a addb;nop rather than addi;bl saves 1 cycle for both
6426 taken and untaken branches. */
6427 else if (dbr_sequence_length () == 0
6428 && ! forward_branch_p (insn)
6429 && INSN_ADDRESSES_SET_P ()
6430 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6431 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6432 return "addib,%C2 %1,%0,%3%#";
6434 /* Handle normal cases. */
6435 if (nullify)
6436 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6437 else
6438 return "addi,%N2 %1,%0,%0\n\tb %3";
6440 default:
6441 gcc_unreachable ();
6445 /* Deal with gross reload from FP register case. */
6446 else if (which_alternative == 1)
6448 /* Move loop counter from FP register to MEM then into a GR,
6449 increment the GR, store the GR into MEM, and finally reload
6450 the FP register from MEM from within the branch's delay slot. */
6451 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6452 operands);
6453 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6454 if (get_attr_length (insn) == 24)
6455 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6456 else
6457 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6459 /* Deal with gross reload from memory case. */
6460 else
6462 /* Reload loop counter from memory, the store back to memory
6463 happens in the branch's delay slot. */
6464 output_asm_insn ("ldw %0,%4", operands);
6465 if (get_attr_length (insn) == 12)
6466 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6467 else
6468 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6472 /* Return the output template for emitting a movb type insn.
6474 Note it may perform some output operations on its own before
6475 returning the final output string. */
6476 const char *
6477 output_movb (rtx *operands, rtx insn, int which_alternative,
6478 int reverse_comparison)
6481 /* A conditional branch to the following instruction (e.g. the delay slot) is
6482 asking for a disaster. Be prepared! */
6484 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6486 if (which_alternative == 0)
6487 return "copy %1,%0";
6488 else if (which_alternative == 1)
6490 output_asm_insn ("stw %1,-16(%%r30)", operands);
6491 return "{fldws|fldw} -16(%%r30),%0";
6493 else if (which_alternative == 2)
6494 return "stw %1,%0";
6495 else
6496 return "mtsar %r1";
6499 /* Support the second variant. */
6500 if (reverse_comparison)
6501 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6503 if (which_alternative == 0)
6505 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6506 int length = get_attr_length (insn);
6508 /* If this is a long branch with its delay slot unfilled, set `nullify'
6509 as it can nullify the delay slot and save a nop. */
6510 if (length == 8 && dbr_sequence_length () == 0)
6511 nullify = 1;
6513 /* If this is a short forward conditional branch which did not get
6514 its delay slot filled, the delay slot can still be nullified. */
6515 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6516 nullify = forward_branch_p (insn);
6518 switch (length)
6520 case 4:
6521 if (nullify)
6522 return "movb,%C2,n %1,%0,%3";
6523 else
6524 return "movb,%C2 %1,%0,%3";
6526 case 8:
6527 /* Handle weird backwards branch with a filled delay slot
6528 which is nullified. */
6529 if (dbr_sequence_length () != 0
6530 && ! forward_branch_p (insn)
6531 && nullify)
6532 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6534 /* Handle short backwards branch with an unfilled delay slot.
6535 Using a movb;nop rather than or;bl saves 1 cycle for both
6536 taken and untaken branches. */
6537 else if (dbr_sequence_length () == 0
6538 && ! forward_branch_p (insn)
6539 && INSN_ADDRESSES_SET_P ()
6540 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6541 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6542 return "movb,%C2 %1,%0,%3%#";
6543 /* Handle normal cases. */
6544 if (nullify)
6545 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6546 else
6547 return "or,%N2 %1,%%r0,%0\n\tb %3";
6549 default:
6550 gcc_unreachable ();
6553 /* Deal with gross reload from FP register case. */
6554 else if (which_alternative == 1)
6556 /* Move loop counter from FP register to MEM then into a GR,
6557 increment the GR, store the GR into MEM, and finally reload
6558 the FP register from MEM from within the branch's delay slot. */
6559 output_asm_insn ("stw %1,-16(%%r30)", operands);
6560 if (get_attr_length (insn) == 12)
6561 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6562 else
6563 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6565 /* Deal with gross reload from memory case. */
6566 else if (which_alternative == 2)
6568 /* Reload loop counter from memory, the store back to memory
6569 happens in the branch's delay slot. */
6570 if (get_attr_length (insn) == 8)
6571 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6572 else
6573 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6575 /* Handle SAR as a destination. */
6576 else
6578 if (get_attr_length (insn) == 8)
6579 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6580 else
6581 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6585 /* Copy any FP arguments in INSN into integer registers. */
6586 static void
6587 copy_fp_args (rtx insn)
6589 rtx link;
6590 rtx xoperands[2];
6592 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6594 int arg_mode, regno;
6595 rtx use = XEXP (link, 0);
6597 if (! (GET_CODE (use) == USE
6598 && GET_CODE (XEXP (use, 0)) == REG
6599 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6600 continue;
6602 arg_mode = GET_MODE (XEXP (use, 0));
6603 regno = REGNO (XEXP (use, 0));
6605 /* Is it a floating point register? */
6606 if (regno >= 32 && regno <= 39)
6608 /* Copy the FP register into an integer register via memory. */
6609 if (arg_mode == SFmode)
6611 xoperands[0] = XEXP (use, 0);
6612 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6613 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6614 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6616 else
6618 xoperands[0] = XEXP (use, 0);
6619 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6620 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6621 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6622 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6628 /* Compute length of the FP argument copy sequence for INSN. */
6629 static int
6630 length_fp_args (rtx insn)
6632 int length = 0;
6633 rtx link;
6635 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6637 int arg_mode, regno;
6638 rtx use = XEXP (link, 0);
6640 if (! (GET_CODE (use) == USE
6641 && GET_CODE (XEXP (use, 0)) == REG
6642 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6643 continue;
6645 arg_mode = GET_MODE (XEXP (use, 0));
6646 regno = REGNO (XEXP (use, 0));
6648 /* Is it a floating point register? */
6649 if (regno >= 32 && regno <= 39)
6651 if (arg_mode == SFmode)
6652 length += 8;
6653 else
6654 length += 12;
6658 return length;
6661 /* Return the attribute length for the millicode call instruction INSN.
6662 The length must match the code generated by output_millicode_call.
6663 We include the delay slot in the returned length as it is better to
6664 overestimate the length than to underestimate it. */
6666 int
6667 attr_length_millicode_call (rtx insn)
6669 unsigned long distance = -1;
6670 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
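6671 /* Estimate how far INSN is from the start of its code space; if the unsigned addition wraps, fall back to the maximum distance. */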
6672 if (INSN_ADDRESSES_SET_P ())
6674 distance = (total + insn_current_reference_address (insn));
6675 if (distance < total)
6676 distance = -1;
6679 if (TARGET_64BIT)
6681 if (!TARGET_LONG_CALLS && distance < 7600000)
6682 return 8;
6684 return 20;
6686 else if (TARGET_PORTABLE_RUNTIME)
6687 return 24;
6688 else
6690 if (!TARGET_LONG_CALLS && distance < 240000)
6691 return 8;
6693 if (TARGET_LONG_ABS_CALL && !flag_pic)
6694 return 12;
6696 return 24;
6700 /* INSN is a function call. It may have an unconditional jump
6701 in its delay slot.
6703 CALL_DEST is the routine we are calling. */
6705 const char *
6706 output_millicode_call (rtx insn, rtx call_dest)
6708 int attr_length = get_attr_length (insn);
6709 int seq_length = dbr_sequence_length ();
6710 int distance;
6711 rtx seq_insn;
6712 rtx xoperands[3];
6714 xoperands[0] = call_dest;
6715 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
6717 /* Handle the common case where we are sure that the branch will
6718 reach the beginning of the $CODE$ subspace. The within reach
6719 form of the $$sh_func_adrs call has a length of 28. Because
6720 it has an attribute type of multi, it never has a nonzero
6721 sequence length. The length of the $$sh_func_adrs is the same
6722 as certain out of reach PIC calls to other routines. */
6723 if (!TARGET_LONG_CALLS
6724 && ((seq_length == 0
6725 && (attr_length == 12
6726 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
6727 || (seq_length != 0 && attr_length == 8)))
6729 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
6731 else
6733 if (TARGET_64BIT)
6735 /* It might seem that one insn could be saved by accessing
6736 the millicode function using the linkage table. However,
6737 this doesn't work in shared libraries and other dynamically
6738 loaded objects. Using a pc-relative sequence also avoids
6739 problems related to the implicit use of the gp register. */
6740 output_asm_insn ("b,l .+8,%%r1", xoperands);
6742 if (TARGET_GAS)
6744 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
6745 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6747 else
6749 xoperands[1] = gen_label_rtx ();
6750 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6751 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6752 CODE_LABEL_NUMBER (xoperands[1]));
6753 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6756 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
6758 else if (TARGET_PORTABLE_RUNTIME)
6760 /* Pure portable runtime doesn't allow be/ble; we also don't
6761 have PIC support in the assembler/linker, so this sequence
6762 is needed. */
6764 /* Get the address of our target into %r1. */
6765 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6766 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6768 /* Get our return address into %r31. */
6769 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
6770 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
6772 /* Jump to our target address in %r1. */
6773 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6775 else if (!flag_pic)
6777 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6778 if (TARGET_PA_20)
6779 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
6780 else
6781 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
6783 else
6785 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6786 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
6788 if (TARGET_SOM || !TARGET_GAS)
6790 /* The HP assembler can generate relocations for the
6791 difference of two symbols. GAS can do this for a
6792 millicode symbol but not an arbitrary external
6793 symbol when generating SOM output. */
6794 xoperands[1] = gen_label_rtx ();
6795 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6796 CODE_LABEL_NUMBER (xoperands[1]));
6797 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6798 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6800 else
6802 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
6803 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
6804 xoperands);
6807 /* Jump to our target address in %r1. */
6808 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6812 if (seq_length == 0)
6813 output_asm_insn ("nop", xoperands);
6815 /* We are done if there isn't a jump in the delay slot. */
6816 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
6817 return "";
6819 /* This call has an unconditional jump in its delay slot. */
6820 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
6822 /* See if the return address can be adjusted. Use the containing
6823 sequence insn's address. */
6824 if (INSN_ADDRESSES_SET_P ())
6826 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
6827 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
6828 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
6830 if (VAL_14_BITS_P (distance))
6832 xoperands[1] = gen_label_rtx ();
6833 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
6834 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6835 CODE_LABEL_NUMBER (xoperands[1]));
6837 else
6838 /* ??? This branch may not reach its target. */
6839 output_asm_insn ("nop\n\tb,n %0", xoperands);
6841 else
6842 /* ??? This branch may not reach its target. */
6843 output_asm_insn ("nop\n\tb,n %0", xoperands);
6845 /* Delete the jump. */
6846 PUT_CODE (NEXT_INSN (insn), NOTE);
6847 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6848 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6850 return "";
6853 /* Return the attribute length of the call instruction INSN. The SIBCALL
6854 flag indicates whether INSN is a regular call or a sibling call. The
6855 length returned must be longer than the code actually generated by
6856 output_call. Since branch shortening is done before delay branch
6857 sequencing, there is no way to determine whether or not the delay
6858 slot will be filled during branch shortening. Even when the delay
6859 slot is filled, we may have to add a nop if the delay slot contains
6860 a branch that can't reach its target. Thus, we always have to include
6861 the delay slot in the length estimate. This used to be done in
6862 pa_adjust_insn_length but we do it here now as some sequences always
6863 fill the delay slot and we can save four bytes in the estimate for
6864 these sequences. */
6866 int
6867 attr_length_call (rtx insn, int sibcall)
6869 int local_call;
6870 rtx call_dest;
6871 tree call_decl;
6872 int length = 0;
6873 rtx pat = PATTERN (insn);
6874 unsigned long distance = -1;
6876 if (INSN_ADDRESSES_SET_P ())
6878 unsigned long total;
6880 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
6881 distance = (total + insn_current_reference_address (insn));
6882 if (distance < total)
6883 distance = -1;
6886 /* Determine if this is a local call. */
6887 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
6888 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
6889 else
6890 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
6892 call_decl = SYMBOL_REF_DECL (call_dest);
6893 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
6895 /* pc-relative branch. */
6896 if (!TARGET_LONG_CALLS
6897 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
6898 || distance < 240000))
6899 length += 8;
6901 /* 64-bit plabel sequence. */
6902 else if (TARGET_64BIT && !local_call)
6903 length += sibcall ? 28 : 24;
6905 /* non-pic long absolute branch sequence. */
6906 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
6907 length += 12;
6909 /* long pc-relative branch sequence. */
6910 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
6911 || (TARGET_64BIT && !TARGET_GAS)
6912 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
6914 length += 20;
6916 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
6917 length += 8;
6920 /* 32-bit plabel sequence. */
6921 else
6923 length += 32;
6925 if (TARGET_SOM)
6926 length += length_fp_args (insn);
6928 if (flag_pic)
6929 length += 4;
6931 if (!TARGET_PA_20)
6933 if (!sibcall)
6934 length += 8;
6936 if (!TARGET_NO_SPACE_REGS)
6937 length += 8;
6941 return length;
6944 /* INSN is a function call. It may have an unconditional jump
6945 in its delay slot.
6947 CALL_DEST is the routine we are calling. */
6949 const char *
6950 output_call (rtx insn, rtx call_dest, int sibcall)
6952 int delay_insn_deleted = 0;
6953 int delay_slot_filled = 0;
6954 int seq_length = dbr_sequence_length ();
6955 tree call_decl = SYMBOL_REF_DECL (call_dest);
6956 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
6957 rtx xoperands[2];
6959 xoperands[0] = call_dest;
6961 /* Handle the common case where we're sure that the branch will reach
6962 the beginning of the "$CODE$" subspace. This is the beginning of
6963 the current function if we are in a named section. */
6964 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
6966 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
6967 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
6969 else
6971 if (TARGET_64BIT && !local_call)
6973 /* ??? As far as I can tell, the HP linker doesn't support the
6974 long pc-relative sequence described in the 64-bit runtime
6975 architecture. So, we use a slightly longer indirect call. */
6976 struct deferred_plabel *p = get_plabel (call_dest);
6978 xoperands[0] = p->internal_label;
6979 xoperands[1] = gen_label_rtx ();
6981 /* If this isn't a sibcall, we put the load of %r27 into the
6982 delay slot. We can't do this in a sibcall as we don't
6983 have a second call-clobbered scratch register available. */
6984 if (seq_length != 0
6985 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
6986 && !sibcall)
6988 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6989 optimize, 0, NULL);
6991 /* Now delete the delay insn. */
6992 PUT_CODE (NEXT_INSN (insn), NOTE);
6993 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6994 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6995 delay_insn_deleted = 1;
6998 output_asm_insn ("addil LT'%0,%%r27", xoperands);
6999 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7000 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7002 if (sibcall)
7004 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7005 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7006 output_asm_insn ("bve (%%r1)", xoperands);
7008 else
7010 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7011 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7012 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7013 delay_slot_filled = 1;
7016 else
7018 int indirect_call = 0;
7020 /* Emit a long call. There are several different sequences
7021 of increasing length and complexity. In most cases,
7022 they don't allow an instruction in the delay slot. */
7023 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7024 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7025 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7026 && !TARGET_64BIT)
7027 indirect_call = 1;
7029 if (seq_length != 0
7030 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7031 && !sibcall
7032 && (!TARGET_PA_20 || indirect_call))
7034 /* A non-jump insn in the delay slot. By definition we can
7035 emit this insn before the call (and in fact before argument
7036 relocating). */
7037 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7038 NULL);
7040 /* Now delete the delay insn. */
7041 PUT_CODE (NEXT_INSN (insn), NOTE);
7042 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7043 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7044 delay_insn_deleted = 1;
7047 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7049 /* This is the best sequence for making long calls in
7050 non-pic code. Unfortunately, GNU ld doesn't provide
7051 the stub needed for external calls, and GAS's support
7052 for this with the SOM linker is buggy. It is safe
7053 to use this for local calls. */
7054 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7055 if (sibcall)
7056 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7057 else
7059 if (TARGET_PA_20)
7060 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7061 xoperands);
7062 else
7063 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7065 output_asm_insn ("copy %%r31,%%r2", xoperands);
7066 delay_slot_filled = 1;
7069 else
7071 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7072 || (TARGET_64BIT && !TARGET_GAS))
7074 /* The HP assembler and linker can handle relocations
7075 for the difference of two symbols. GAS and the HP
7076 linker can't do this when one of the symbols is
7077 external. */
7078 xoperands[1] = gen_label_rtx ();
7079 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7080 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7081 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7082 CODE_LABEL_NUMBER (xoperands[1]));
7083 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7085 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7087 /* GAS currently can't generate the relocations that
7088 are needed for the SOM linker under HP-UX using this
7089 sequence. The GNU linker doesn't generate the stubs
7090 that are needed for external calls on TARGET_ELF32
7091 with this sequence. For now, we have to use a
7092 longer plabel sequence when using GAS. */
7093 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7094 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7095 xoperands);
7096 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7097 xoperands);
7099 else
7101 /* Emit a long plabel-based call sequence. This is
7102 essentially an inline implementation of $$dyncall.
7103 We don't actually try to call $$dyncall as this is
7104 as difficult as calling the function itself. */
7105 struct deferred_plabel *p = get_plabel (call_dest);
7107 xoperands[0] = p->internal_label;
7108 xoperands[1] = gen_label_rtx ();
7110 /* Since the call is indirect, FP arguments in registers
7111 need to be copied to the general registers. Then, the
7112 argument relocation stub will copy them back. */
7113 if (TARGET_SOM)
7114 copy_fp_args (insn);
7116 if (flag_pic)
7118 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7119 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7120 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7122 else
7124 output_asm_insn ("addil LR'%0-$global$,%%r27",
7125 xoperands);
7126 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7127 xoperands);
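7129 /* If bit 30 of %r1 is set, %r1 points to a plabel: clear the low bits, then load the new global pointer and the real target address from it. */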
7130 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7131 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7132 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7133 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7135 if (!sibcall && !TARGET_PA_20)
7137 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7138 if (TARGET_NO_SPACE_REGS)
7139 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7140 else
7141 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7145 if (TARGET_PA_20)
7147 if (sibcall)
7148 output_asm_insn ("bve (%%r1)", xoperands);
7149 else
7151 if (indirect_call)
7153 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7154 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7155 delay_slot_filled = 1;
7157 else
7158 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7161 else
7163 if (!TARGET_NO_SPACE_REGS)
7164 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7165 xoperands);
7167 if (sibcall)
7169 if (TARGET_NO_SPACE_REGS)
7170 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7171 else
7172 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7174 else
7176 if (TARGET_NO_SPACE_REGS)
7177 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7178 else
7179 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7181 if (indirect_call)
7182 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7183 else
7184 output_asm_insn ("copy %%r31,%%r2", xoperands);
7185 delay_slot_filled = 1;
7192 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7193 output_asm_insn ("nop", xoperands);
7195 /* We are done if there isn't a jump in the delay slot. */
7196 if (seq_length == 0
7197 || delay_insn_deleted
7198 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7199 return "";
7201 /* A sibcall should never have a branch in the delay slot. */
7202 gcc_assert (!sibcall);
7204 /* This call has an unconditional jump in its delay slot. */
7205 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7207 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7209 /* See if the return address can be adjusted. Use the containing
7210 sequence insn's address. */
7211 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7212 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7213 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7215 if (VAL_14_BITS_P (distance))
7217 xoperands[1] = gen_label_rtx ();
7218 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7219 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7220 CODE_LABEL_NUMBER (xoperands[1]));
7222 else
7223 output_asm_insn ("nop\n\tb,n %0", xoperands);
7225 else
7226 output_asm_insn ("b,n %0", xoperands);
7228 /* Delete the jump. */
7229 PUT_CODE (NEXT_INSN (insn), NOTE);
7230 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7231 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7233 return "";
7236 /* Return the attribute length of the indirect call instruction INSN.
7237 The length must match the code generated by output_indirect_call.
7238 The returned length includes the delay slot. Currently, the delay
7239 slot of an indirect call sequence is not exposed and it is used by
7240 the sequence itself. */
7242 int
7243 attr_length_indirect_call (rtx insn)
7245 unsigned long distance = -1;
7246 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7248 if (INSN_ADDRESSES_SET_P ())
7250 distance = (total + insn_current_reference_address (insn));
7251 if (distance < total)
7252 distance = -1;
7255 if (TARGET_64BIT)
7256 return 12;
7258 if (TARGET_FAST_INDIRECT_CALLS
7259 || (!TARGET_PORTABLE_RUNTIME
7260 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7261 return 8;
7263 if (flag_pic)
7264 return 24;
7266 if (TARGET_PORTABLE_RUNTIME)
7267 return 20;
7269 /* Out of reach, can use ble. */
7270 return 12;
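7272 /* Output the code for an indirect call. Except on TARGET_64BIT, the call is made through $$dyncall with the target address in %r22. */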
7273 const char *
7274 output_indirect_call (rtx insn, rtx call_dest)
7276 rtx xoperands[1];
7278 if (TARGET_64BIT)
7280 xoperands[0] = call_dest;
7281 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7282 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7283 return "";
7286 /* First the special case for kernels, level 0 systems, etc. */
7287 if (TARGET_FAST_INDIRECT_CALLS)
7288 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7290 /* Now the normal case -- we can reach $$dyncall directly or
7291 we're sure that we can get there via a long-branch stub.
7293 No need to check target flags as the length uniquely identifies
7294 the remaining cases. */
7295 if (attr_length_indirect_call (insn) == 8)
7297 /* The HP linker substitutes a BLE for millicode calls using
7298 the short PIC PCREL form. Thus, we must use %r31 as the
7299 link register when generating PA 1.x code. */
7300 if (TARGET_PA_20)
7301 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7302 else
7303 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7306 /* Long millicode call, but we are not generating PIC or portable runtime
7307 code. */
7308 if (attr_length_indirect_call (insn) == 12)
7309 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7311 /* Long millicode call for portable runtime. */
7312 if (attr_length_indirect_call (insn) == 20)
7313 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7315 /* We need a long PIC call to $$dyncall. */
7316 xoperands[0] = NULL_RTX;
7317 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7318 if (TARGET_SOM || !TARGET_GAS)
7320 xoperands[0] = gen_label_rtx ();
7321 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7322 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7323 CODE_LABEL_NUMBER (xoperands[0]));
7324 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7326 else
7328 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7329 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7330 xoperands);
7332 output_asm_insn ("blr %%r0,%%r2", xoperands);
7333 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7334 return "";
7337 /* Return the total length of the save and restore instructions needed for
7338 the data linkage table pointer (i.e., the PIC register) across the call
7339 instruction INSN. No-return calls do not require a save and restore.
7340 In addition, we may be able to avoid the save and restore for calls
7341 within the same translation unit. */
7343 int
7344 attr_length_save_restore_dltp (rtx insn)
7346 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7347 return 0;
7349 return 8;
7352 /* In HPUX 8.0's shared library scheme, special relocations are needed
7353 for function labels if they might be passed to a function
7354 in a shared library (because shared libraries don't live in code
7355 space), and special magic is needed to construct their address. */
7357 void
7358 hppa_encode_label (rtx sym)
7360 const char *str = XSTR (sym, 0);
7361 int len = strlen (str) + 1;
7362 char *newstr, *p;
7364 p = newstr = alloca (len + 1);
7365 *p++ = '@';
7366 strcpy (p, str);
7368 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
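7370 /* If DECL is placed in text space, mark its symbol and give function labels the "@" encoding. */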
7371 static void
7372 pa_encode_section_info (tree decl, rtx rtl, int first)
7374 if (first && TEXT_SPACE_P (decl))
7376 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7377 if (TREE_CODE (decl) == FUNCTION_DECL)
7378 hppa_encode_label (XEXP (rtl, 0));
7382 /* This is sort of inverse to pa_encode_section_info. */
7384 static const char *
7385 pa_strip_name_encoding (const char *str)
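7386 /* Skip the "@" function label marker and any "*" user-label prefix. */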
7387 str += (*str == '@');
7388 str += (*str == '*');
7389 return str;
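7391 /* Return 1 if OP is a SYMBOL_REF for an encoded function label. */
7392 int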
7393 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7395 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7398 /* Returns 1 if OP is a function label involved in a simple addition
7399 with a constant. Used to keep certain patterns from matching
7400 during instruction combination. */
7401 int
7402 is_function_label_plus_const (rtx op)
7404 /* Strip off any CONST. */
7405 if (GET_CODE (op) == CONST)
7406 op = XEXP (op, 0);
7408 return (GET_CODE (op) == PLUS
7409 && function_label_operand (XEXP (op, 0), Pmode)
7410 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7413 /* Output assembly code for a thunk to FUNCTION. */
7415 static void
7416 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7417 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7418 tree function)
7420 static unsigned int current_thunk_number;
7421 int val_14 = VAL_14_BITS_P (delta);
7422 int nbytes = 0;
7423 char label[16];
7424 rtx xoperands[4];
7426 xoperands[0] = XEXP (DECL_RTL (function), 0);
7427 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7428 xoperands[2] = GEN_INT (delta);
7430 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7431 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7433 /* Output the thunk. We know that the function is in the same
7434 translation unit (i.e., the same space) as the thunk, and that
7435 thunks are output after their method. Thus, we don't need an
7436 external branch to reach the function. With SOM and GAS,
7437 functions and thunks are effectively in different sections.
7438 Thus, we can always use an IA-relative branch and the linker
7439 will add a long branch stub if necessary.
7441 However, we have to be careful when generating PIC code on the
7442 SOM port to ensure that the sequence does not transfer to an
7443 import stub for the target function as this could clobber the
7444 return value saved at SP-24. This would also apply to the
7445 32-bit linux port if the multi-space model is implemented. */
7446 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7447 && !(flag_pic && TREE_PUBLIC (function))
7448 && (TARGET_GAS || last_address < 262132))
7449 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7450 && ((targetm.have_named_sections
7451 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7452 /* The GNU 64-bit linker has rather poor stub management.
7453 So, we use a long branch from thunks that aren't in
7454 the same section as the target function. */
7455 && ((!TARGET_64BIT
7456 && (DECL_SECTION_NAME (thunk_fndecl)
7457 != DECL_SECTION_NAME (function)))
7458 || ((DECL_SECTION_NAME (thunk_fndecl)
7459 == DECL_SECTION_NAME (function))
7460 && last_address < 262132)))
7461 || (!targetm.have_named_sections && last_address < 262132))))
7463 if (!val_14)
7464 output_asm_insn ("addil L'%2,%%r26", xoperands);
7466 output_asm_insn ("b %0", xoperands);
7468 if (val_14)
7470 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7471 nbytes += 8;
7473 else
7475 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7476 nbytes += 12;
7479 else if (TARGET_64BIT)
7481 /* We only have one call-clobbered scratch register, so we can't
7482 make use of the delay slot if delta doesn't fit in 14 bits. */
7483 if (!val_14)
7485 output_asm_insn ("addil L'%2,%%r26", xoperands);
7486 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7489 output_asm_insn ("b,l .+8,%%r1", xoperands);
7491 if (TARGET_GAS)
7493 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7494 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7496 else
7498 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7499 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7502 if (val_14)
7504 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7505 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7506 nbytes += 20;
7508 else
7510 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7511 nbytes += 24;
7514 else if (TARGET_PORTABLE_RUNTIME)
7516 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7517 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7519 if (!val_14)
7520 output_asm_insn ("addil L'%2,%%r26", xoperands);
7522 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7524 if (val_14)
7526 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7527 nbytes += 16;
7529 else
7531 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7532 nbytes += 20;
7535 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7537 /* The function is accessible from outside this module. The only
7538 way to avoid an import stub between the thunk and function is to
7539 call the function directly with an indirect sequence similar to
7540 that used by $$dyncall. This is possible because $$dyncall acts
7541 as the import stub in an indirect call. */
7542 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7543 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7544 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7545 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7546 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7547 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7548 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7549 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7550 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7552 if (!val_14)
7554 output_asm_insn ("addil L'%2,%%r26", xoperands);
7555 nbytes += 4;
7558 if (TARGET_PA_20)
7560 output_asm_insn ("bve (%%r22)", xoperands);
7561 nbytes += 36;
7563 else if (TARGET_NO_SPACE_REGS)
7565 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7566 nbytes += 36;
7568 else
7570 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7571 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7572 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7573 nbytes += 44;
7576 if (val_14)
7577 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7578 else
7579 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7581 else if (flag_pic)
7583 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7585 if (TARGET_SOM || !TARGET_GAS)
7587 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7588 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7590 else
7592 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7593 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7596 if (!val_14)
7597 output_asm_insn ("addil L'%2,%%r26", xoperands);
7599 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7601 if (val_14)
7603 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7604 nbytes += 20;
7606 else
7608 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7609 nbytes += 24;
7612 else
7614 if (!val_14)
7615 output_asm_insn ("addil L'%2,%%r26", xoperands);
7617 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7618 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
7620 if (val_14)
7622 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7623 nbytes += 12;
7625 else
7627 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7628 nbytes += 16;
7632 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7634 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7636 data_section ();
7637 output_asm_insn (".align 4", xoperands);
7638 ASM_OUTPUT_LABEL (file, label);
7639 output_asm_insn (".word P'%0", xoperands);
7641 else if (TARGET_SOM && TARGET_GAS)
7642 forget_section ();
7644 current_thunk_number++;
7645 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
7646 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
7647 last_address += nbytes;
7648 update_total_code_bytes (nbytes);
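/* For illustration of the rounding just above, assuming a
   FUNCTION_BOUNDARY of 64 bits (8 bytes): a thunk of nbytes == 20
   becomes (20 + 8 - 1) & ~(8 - 1) == 24, so last_address always
   advances to a full function-alignment boundary.  */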
7651 /* Only direct calls to static functions are allowed to be sibling (tail)
7652 call optimized.
7654 This restriction is necessary because some linker-generated stubs will
7655 store return pointers into rp' in some cases, which might clobber a
7656 live value already in rp'.
7658 In a sibcall the current function and the target function share stack
7659 space. Thus if the path to the current function and the path to the
7660 target function save a value in rp', they save the value into the
7661 same stack slot, which has undesirable consequences.
7663 Because of the deferred binding nature of shared libraries, any function
7664 with external scope could be in a different load module and thus require
7665 rp' to be saved when calling that function. So sibcall optimizations
7666 can only be safe for static functions.
7668 Note that GCC never needs return value relocations, so we don't have to
7669 worry about static calls with return value relocations (which require
7670 saving rp').
7672 It is safe to perform a sibcall optimization when the target function
7673 will never return. */
7674 static bool
7675 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7677 if (TARGET_PORTABLE_RUNTIME)
7678 return false;
7680 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
7681 single subspace mode and the call is not indirect. As far as I know,
7682 there is no operating system support for the multiple subspace mode.
7683 It might be possible to support indirect calls if we didn't use
7684 $$dyncall (see the indirect sequence generated in output_call). */
7685 if (TARGET_ELF32)
7686 return (decl != NULL_TREE);
7688 /* Sibcalls are not ok because the arg pointer register is not a fixed
7689 register. This prevents the sibcall optimization from occurring. In
7690 addition, there are problems with stub placement using GNU ld. This
7691 is because a normal sibcall branch uses a 17-bit relocation while
7692 a regular call branch uses a 22-bit relocation. As a result, more
7693 care needs to be taken in the placement of long-branch stubs. */
7694 if (TARGET_64BIT)
7695 return false;
7697 /* Sibcalls are only ok within a translation unit. */
7698 return (decl && !TREE_PUBLIC (decl));
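/* A minimal illustration of these rules (hypothetical user code, not
   part of GCC): on the 32-bit SOM target only the first call below is
   a sibcall candidate, since "helper" is static while "ext_fn" is
   externally visible:

     static int helper (int);
     extern int ext_fn (int);

     int f (int x) { return helper (x); }    sibcall possible
     int g (int x) { return ext_fn (x); }    never a sibcall  */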
7701 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7702 use in fmpyadd instructions. */
7704 fmpyaddoperands (rtx *operands)
7706 enum machine_mode mode = GET_MODE (operands[0]);
7708 /* Must be a floating point mode. */
7709 if (mode != SFmode && mode != DFmode)
7710 return 0;
7712 /* All modes must be the same. */
7713 if (! (mode == GET_MODE (operands[1])
7714 && mode == GET_MODE (operands[2])
7715 && mode == GET_MODE (operands[3])
7716 && mode == GET_MODE (operands[4])
7717 && mode == GET_MODE (operands[5])))
7718 return 0;
7720 /* All operands must be registers. */
7721 if (! (GET_CODE (operands[1]) == REG
7722 && GET_CODE (operands[2]) == REG
7723 && GET_CODE (operands[3]) == REG
7724 && GET_CODE (operands[4]) == REG
7725 && GET_CODE (operands[5]) == REG))
7726 return 0;
7728 /* Only 2 real operands to the addition. One of the input operands must
7729 be the same as the output operand. */
7730 if (! rtx_equal_p (operands[3], operands[4])
7731 && ! rtx_equal_p (operands[3], operands[5]))
7732 return 0;
7734 /* Inout operand of add cannot conflict with any operands from multiply. */
7735 if (rtx_equal_p (operands[3], operands[0])
7736 || rtx_equal_p (operands[3], operands[1])
7737 || rtx_equal_p (operands[3], operands[2]))
7738 return 0;
7740 /* The multiply result cannot feed the addition operands. */
7741 if (rtx_equal_p (operands[4], operands[0])
7742 || rtx_equal_p (operands[5], operands[0]))
7743 return 0;
7745 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
7746 if (mode == SFmode
7747 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7748 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7749 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7750 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7751 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
7752 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
7753 return 0;
7755 /* Passed. Operands are suitable for fmpyadd. */
7756 return 1;
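/* As an illustration of the checks above, with hypothetical register
   choices: the pair { fr22 = fr4 * fr5; fr6 = fr6 + fr7 } qualifies
   (the add is two-operand and shares no register with the multiply),
   while { fr22 = fr4 * fr5; fr6 = fr7 + fr8 } is rejected because
   the addition's output is not also one of its inputs.  */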
7759 #if !defined(USE_COLLECT2)
7760 static void
7761 pa_asm_out_constructor (rtx symbol, int priority)
7763 if (!function_label_operand (symbol, VOIDmode))
7764 hppa_encode_label (symbol);
7766 #ifdef CTORS_SECTION_ASM_OP
7767 default_ctor_section_asm_out_constructor (symbol, priority);
7768 #else
7769 # ifdef TARGET_ASM_NAMED_SECTION
7770 default_named_section_asm_out_constructor (symbol, priority);
7771 # else
7772 default_stabs_asm_out_constructor (symbol, priority);
7773 # endif
7774 #endif
7777 static void
7778 pa_asm_out_destructor (rtx symbol, int priority)
7780 if (!function_label_operand (symbol, VOIDmode))
7781 hppa_encode_label (symbol);
7783 #ifdef DTORS_SECTION_ASM_OP
7784 default_dtor_section_asm_out_destructor (symbol, priority);
7785 #else
7786 # ifdef TARGET_ASM_NAMED_SECTION
7787 default_named_section_asm_out_destructor (symbol, priority);
7788 # else
7789 default_stabs_asm_out_destructor (symbol, priority);
7790 # endif
7791 #endif
7793 #endif
7795 /* This function places uninitialized global data in the bss section.
7796 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
7797 function on the SOM port to prevent uninitialized global data from
7798 being placed in the data section. */
7800 void
7801 pa_asm_output_aligned_bss (FILE *stream,
7802 const char *name,
7803 unsigned HOST_WIDE_INT size,
7804 unsigned int align)
7806 bss_section ();
7807 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7809 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7810 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7811 #endif
7813 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7814 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7815 #endif
7817 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7818 ASM_OUTPUT_LABEL (stream, name);
7819 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
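/* For illustration: with SIZE == 16 and ALIGN == 64 (bits), the
   fprintf calls above emit ".align 8" before the label and
   ".block 16" after it, reserving 16 uninitialized bytes at an
   8-byte boundary in the bss subspace.  */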
7822 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
7823 that doesn't allow the alignment of global common storage to be directly
7824 specified. The SOM linker aligns common storage based on the rounded
7825 value of the NUM_BYTES parameter in the .comm directive. It's not
7826 possible to use the .align directive as it doesn't affect the alignment
7827 of the label associated with a .comm directive. */
7829 void
7830 pa_asm_output_aligned_common (FILE *stream,
7831 const char *name,
7832 unsigned HOST_WIDE_INT size,
7833 unsigned int align)
7835 unsigned int max_common_align;
7837 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
7838 if (align > max_common_align)
7840 warning (0, "alignment (%u) for %s exceeds maximum alignment "
7841 "for global common data. Using %u",
7842 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
7843 align = max_common_align;
7846 bss_section ();
7848 assemble_name (stream, name);
7849 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
7850 MAX (size, align / BITS_PER_UNIT));
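/* Worked example of the workaround above: for a 4-byte object with a
   requested ALIGN of 64 bits, MAX (size, align / BITS_PER_UNIT)
   yields ".comm 8". The SOM linker derives the alignment from that
   rounded size, giving the 8-byte alignment that a .align directive
   could not provide here.  */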
7853 /* We can't use .comm for local common storage as the SOM linker effectively
7854 treats the symbol as universal and uses the same storage for local symbols
7855 with the same name in different object files. The .block directive
7856 reserves an uninitialized block of storage. However, it's not common
7857 storage. Fortunately, GCC never requests common storage with the same
7858 name in any given translation unit. */
7860 void
7861 pa_asm_output_aligned_local (FILE *stream,
7862 const char *name,
7863 unsigned HOST_WIDE_INT size,
7864 unsigned int align)
7866 bss_section ();
7867 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7869 #ifdef LOCAL_ASM_OP
7870 fprintf (stream, "%s", LOCAL_ASM_OP);
7871 assemble_name (stream, name);
7872 fprintf (stream, "\n");
7873 #endif
7875 ASM_OUTPUT_LABEL (stream, name);
7876 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
7879 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7880 use in fmpysub instructions. */
7882 fmpysuboperands (rtx *operands)
7884 enum machine_mode mode = GET_MODE (operands[0]);
7886 /* Must be a floating point mode. */
7887 if (mode != SFmode && mode != DFmode)
7888 return 0;
7890 /* All modes must be the same. */
7891 if (! (mode == GET_MODE (operands[1])
7892 && mode == GET_MODE (operands[2])
7893 && mode == GET_MODE (operands[3])
7894 && mode == GET_MODE (operands[4])
7895 && mode == GET_MODE (operands[5])))
7896 return 0;
7898 /* All operands must be registers. */
7899 if (! (GET_CODE (operands[1]) == REG
7900 && GET_CODE (operands[2]) == REG
7901 && GET_CODE (operands[3]) == REG
7902 && GET_CODE (operands[4]) == REG
7903 && GET_CODE (operands[5]) == REG))
7904 return 0;
7906 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
7907 operation, so operands[4] must be the same as operands[3]. */
7908 if (! rtx_equal_p (operands[3], operands[4]))
7909 return 0;
7911 /* The multiply result cannot feed into the subtraction. */
7912 if (rtx_equal_p (operands[5], operands[0]))
7913 return 0;
7915 /* Inout operand of sub cannot conflict with any operands from multiply. */
7916 if (rtx_equal_p (operands[3], operands[0])
7917 || rtx_equal_p (operands[3], operands[1])
7918 || rtx_equal_p (operands[3], operands[2]))
7919 return 0;
7921 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
7922 if (mode == SFmode
7923 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7924 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7925 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7926 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7927 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
7928 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
7929 return 0;
7931 /* Passed. Operands are suitable for fmpysub. */
7932 return 1;
7935 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
7936 constants for shadd instructions. */
7938 shadd_constant_p (int val)
7940 if (val == 2 || val == 4 || val == 8)
7941 return 1;
7942 else
7943 return 0;
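/* For example, an address computation such as x * 4 + y can then be
   done with a single sh2add instruction; the scales 2, 4 and 8
   correspond to the sh1add, sh2add and sh3add instructions
   respectively.  */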
7946 /* Return 1 if OP is valid as a base or index register in a
7947 REG+REG address. */
7950 borx_reg_operand (rtx op, enum machine_mode mode)
7952 if (GET_CODE (op) != REG)
7953 return 0;
7955 /* We must reject virtual registers as the only expressions that
7956 can be instantiated are REG and REG+CONST. */
7957 if (op == virtual_incoming_args_rtx
7958 || op == virtual_stack_vars_rtx
7959 || op == virtual_stack_dynamic_rtx
7960 || op == virtual_outgoing_args_rtx
7961 || op == virtual_cfa_rtx)
7962 return 0;
7964 /* While it's always safe to index off the frame pointer, it's not
7965 profitable to do so when the frame pointer is being eliminated. */
7966 if (!reload_completed
7967 && flag_omit_frame_pointer
7968 && !current_function_calls_alloca
7969 && op == frame_pointer_rtx)
7970 return 0;
7972 return register_operand (op, mode);
7975 /* Return 1 if this operand is anything other than a hard register. */
7978 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7980 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
7983 /* Return 1 if INSN branches forward. Should be using insn_addresses
7984 to avoid walking through all the insns... */
7985 static int
7986 forward_branch_p (rtx insn)
7988 rtx label = JUMP_LABEL (insn);
7990 while (insn)
7992 if (insn == label)
7993 break;
7994 else
7995 insn = NEXT_INSN (insn);
7998 return (insn == label);
8001 /* Return 1 if OP is an equality comparison, else return 0. */
8003 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8005 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8008 /* Return 1 if INSN is in the delay slot of a call instruction. */
8010 jump_in_call_delay (rtx insn)
8013 if (GET_CODE (insn) != JUMP_INSN)
8014 return 0;
8016 if (PREV_INSN (insn)
8017 && PREV_INSN (PREV_INSN (insn))
8018 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8020 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8022 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8023 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8026 else
8027 return 0;
8030 /* Output an unconditional move and branch insn. */
8032 const char *
8033 output_parallel_movb (rtx *operands, int length)
8035 /* These are the cases in which we win. */
8036 if (length == 4)
8037 return "mov%I1b,tr %1,%0,%2";
8039 /* None of these cases wins, but they don't lose either. */
8040 if (dbr_sequence_length () == 0)
8042 /* Nothing in the delay slot, fake it by putting the combined
8043 insn (the copy or add) in the delay slot of a bl. */
8044 if (GET_CODE (operands[1]) == CONST_INT)
8045 return "b %2\n\tldi %1,%0";
8046 else
8047 return "b %2\n\tcopy %1,%0";
8049 else
8051 /* Something in the delay slot, but we've got a long branch. */
8052 if (GET_CODE (operands[1]) == CONST_INT)
8053 return "ldi %1,%0\n\tb %2";
8054 else
8055 return "copy %1,%0\n\tb %2";
8059 /* Output an unconditional add and branch insn. */
8061 const char *
8062 output_parallel_addb (rtx *operands, int length)
8064 /* To make life easy we want operand0 to be the shared input/output
8065 operand and operand1 to be the readonly operand. */
8066 if (operands[0] == operands[1])
8067 operands[1] = operands[2];
8069 /* These are the cases in which we win. */
8070 if (length == 4)
8071 return "add%I1b,tr %1,%0,%3";
8073 /* None of these cases wins, but they don't lose either. */
8074 if (dbr_sequence_length () == 0)
8076 /* Nothing in the delay slot, fake it by putting the combined
8077 insn (the copy or add) in the delay slot of a bl. */
8078 return "b %3\n\tadd%I1 %1,%0,%0";
8080 else
8082 /* Something in the delay slot, but we've got a long branch. */
8083 return "add%I1 %1,%0,%0\n\tb %3";
8087 /* Return nonzero if INSN (a jump insn) immediately follows a call
8088 to a named function. This is used to avoid filling the delay slot
8089 of the jump since it can usually be eliminated by modifying RP in
8090 the delay slot of the call. */
8093 following_call (rtx insn)
8095 if (! TARGET_JUMP_IN_DELAY)
8096 return 0;
8098 /* Find the previous real insn, skipping NOTEs. */
8099 insn = PREV_INSN (insn);
8100 while (insn && GET_CODE (insn) == NOTE)
8101 insn = PREV_INSN (insn);
8103 /* Check for CALL_INSNs and millicode calls. */
8104 if (insn
8105 && ((GET_CODE (insn) == CALL_INSN
8106 && get_attr_type (insn) != TYPE_DYNCALL)
8107 || (GET_CODE (insn) == INSN
8108 && GET_CODE (PATTERN (insn)) != SEQUENCE
8109 && GET_CODE (PATTERN (insn)) != USE
8110 && GET_CODE (PATTERN (insn)) != CLOBBER
8111 && get_attr_type (insn) == TYPE_MILLI)))
8112 return 1;
8114 return 0;
8117 /* We use this hook to perform a PA specific optimization which is difficult
8118 to do in earlier passes.
8120 We want the delay slots of branches within jump tables to be filled.
8121 None of the compiler passes at the moment even has the notion that a
8122 PA jump table doesn't contain addresses, but instead contains actual
8123 instructions!
8125 Because we actually jump into the table, the addresses of each entry
8126 must stay constant in relation to the beginning of the table (which
8127 itself must stay constant relative to the instruction to jump into
8128 it). I don't believe we can guarantee earlier passes of the compiler
8129 will adhere to those rules.
8131 So, late in the compilation process we find all the jump tables, and
8132 expand them into real code -- e.g. each entry in the jump table vector
8133 will get an appropriate label followed by a jump to the final target.
8135 Reorg and the final jump pass can then optimize these branches and
8136 fill their delay slots. We end up with smaller, more efficient code.
8138 The jump instructions within the table are special; we must be able
8139 to identify them during assembly output (if the jumps don't get filled
8140 we need to emit a nop rather than nullifying the delay slot). We
8141 identify jumps in switch tables by using insns with the attribute
8142 type TYPE_BTABLE_BRANCH.
8144 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8145 insns. This serves two purposes: first, it prevents jump.c from
8146 noticing that the last N entries in the table jump to the instruction
8147 immediately after the table and deleting the jumps. Second, those
8148 insns mark where we should emit .begin_brtab and .end_brtab directives
8149 when using GAS (allows for better link time optimizations). */
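/* A sketch of the expansion described above (label names are made
   up): a two-entry ADDR_VEC whose entries point at L$10 and L$20 is
   rewritten into real code of the form

   L$e0:	b L$10
		nop
   L$e1:	b L$20
		nop

   where each nop disappears whenever reorg manages to fill the
   corresponding delay slot.  */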
8151 static void
8152 pa_reorg (void)
8154 rtx insn;
8156 remove_useless_addtr_insns (1);
8158 if (pa_cpu < PROCESSOR_8000)
8159 pa_combine_instructions ();
8162 /* This is fairly cheap, so always run it if optimizing. */
8163 if (optimize > 0 && !TARGET_BIG_SWITCH)
8165 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8166 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8168 rtx pattern, tmp, location, label;
8169 unsigned int length, i;
8171 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8172 if (GET_CODE (insn) != JUMP_INSN
8173 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8174 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8175 continue;
8177 /* Emit marker for the beginning of the branch table. */
8178 emit_insn_before (gen_begin_brtab (), insn);
8180 pattern = PATTERN (insn);
8181 location = PREV_INSN (insn);
8182 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8184 for (i = 0; i < length; i++)
8186 /* Emit a label before each jump to keep jump.c from
8187 removing this code. */
8188 tmp = gen_label_rtx ();
8189 LABEL_NUSES (tmp) = 1;
8190 emit_label_after (tmp, location);
8191 location = NEXT_INSN (location);
8193 if (GET_CODE (pattern) == ADDR_VEC)
8194 label = XEXP (XVECEXP (pattern, 0, i), 0);
8195 else
8196 label = XEXP (XVECEXP (pattern, 1, i), 0);
8198 tmp = gen_short_jump (label);
8200 /* Emit the jump itself. */
8201 tmp = emit_jump_insn_after (tmp, location);
8202 JUMP_LABEL (tmp) = label;
8203 LABEL_NUSES (label)++;
8204 location = NEXT_INSN (location);
8206 /* Emit a BARRIER after the jump. */
8207 emit_barrier_after (location);
8208 location = NEXT_INSN (location);
8211 /* Emit marker for the end of the branch table. */
8212 emit_insn_before (gen_end_brtab (), location);
8213 location = NEXT_INSN (location);
8214 emit_barrier_after (location);
8216 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8217 delete_insn (insn);
8220 else
8222 /* Still need brtab marker insns. FIXME: the presence of these
8223 markers disables output of the branch table to readonly memory,
8224 and any alignment directives that might be needed. Possibly,
8225 the begin_brtab insn should be output before the label for the
8226 table. This doesn't matter at the moment since the tables are
8227 always output in the text section. */
8228 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8230 /* Find an ADDR_VEC insn. */
8231 if (GET_CODE (insn) != JUMP_INSN
8232 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8233 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8234 continue;
8236 /* Now generate markers for the beginning and end of the
8237 branch table. */
8238 emit_insn_before (gen_begin_brtab (), insn);
8239 emit_insn_after (gen_end_brtab (), insn);
8244 /* The PA has a number of odd instructions which can perform multiple
8245 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8246 it may be profitable to combine two instructions into one instruction
8247 with two outputs. It's not profitable on PA2.0 machines because the
8248 two outputs would take two slots in the reorder buffers.
8250 This routine finds instructions which can be combined and combines
8251 them. We only support some of the potential combinations, and we
8252 only try common ways to find suitable instructions.
8254 * addb can add two registers or a register and a small integer
8255 and jump to a nearby (+-8k) location. Normally the jump to the
8256 nearby location is conditional on the result of the add, but by
8257 using the "true" condition we can make the jump unconditional.
8258 Thus addb can perform two independent operations in one insn.
8260 * movb is similar to addb in that it can perform a reg->reg
8261 or small immediate->reg copy and jump to a nearby (+-8k) location.
8263 * fmpyadd and fmpysub can perform a FP multiply and either an
8264 FP add or FP sub if the operands of the multiply and add/sub are
8265 independent (there are other minor restrictions). Note both
8266 the fmpy and fadd/fsub can in theory move to better spots according
8267 to data dependencies, but for now we require the fmpy stay at a
8268 fixed location.
8270 * Many of the memory operations can perform pre & post updates
8271 of index registers. GCC's pre/post increment/decrement addressing
8272 is far too simple to take advantage of all the possibilities. This
8273 pass may not be suitable since those insns may not be independent.
8275 * comclr can compare two ints or an int and a register, nullify
8276 the following instruction and zero some other register. This
8277 is more difficult to use as it's harder to find an insn which
8278 will generate a comclr than finding something like an unconditional
8279 branch. (conditional moves & long branches create comclr insns).
8281 * Most arithmetic operations can conditionally skip the next
8282 instruction. They can be viewed as "perform this operation
8283 and conditionally jump to this nearby location" (where nearby
8284 is an insn away). These are difficult to use due to the
8285 branch length restrictions. */
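/* A sketch of the addb case (hypothetical registers and label): an
   independent pair such as

	add %r25,%r26,%r26
	b,n L$30

   can be combined into the single instruction

	addb,tr %r25,%r26,L$30

   where the ",tr" (true) condition makes the branch unconditional,
   as described above.  */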
8287 static void
8288 pa_combine_instructions (void)
8290 rtx anchor, new;
8292 /* This can get expensive since the basic algorithm is
8293 O(n^2) (or worse). Only do it for -O2 or higher
8294 levels of optimization. */
8295 if (optimize < 2)
8296 return;
8298 /* Walk down the list of insns looking for "anchor" insns which
8299 may be combined with "floating" insns. As the name implies,
8300 "anchor" instructions don't move, while "floating" insns may
8301 move around. */
8302 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8303 new = make_insn_raw (new);
8305 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8307 enum attr_pa_combine_type anchor_attr;
8308 enum attr_pa_combine_type floater_attr;
8310 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8311 Also ignore any special USE insns. */
8312 if ((GET_CODE (anchor) != INSN
8313 && GET_CODE (anchor) != JUMP_INSN
8314 && GET_CODE (anchor) != CALL_INSN)
8315 || GET_CODE (PATTERN (anchor)) == USE
8316 || GET_CODE (PATTERN (anchor)) == CLOBBER
8317 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8318 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8319 continue;
8321 anchor_attr = get_attr_pa_combine_type (anchor);
8322 /* See if anchor is an insn suitable for combination. */
8323 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8324 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8325 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8326 && ! forward_branch_p (anchor)))
8328 rtx floater;
8330 for (floater = PREV_INSN (anchor);
8331 floater;
8332 floater = PREV_INSN (floater))
8334 if (GET_CODE (floater) == NOTE
8335 || (GET_CODE (floater) == INSN
8336 && (GET_CODE (PATTERN (floater)) == USE
8337 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8338 continue;
8340 /* Anything except a regular INSN will stop our search. */
8341 if (GET_CODE (floater) != INSN
8342 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8343 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8345 floater = NULL_RTX;
8346 break;
8349 /* See if FLOATER is suitable for combination with the
8350 anchor. */
8351 floater_attr = get_attr_pa_combine_type (floater);
8352 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8353 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8354 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8355 && floater_attr == PA_COMBINE_TYPE_FMPY))
8357 /* If ANCHOR and FLOATER can be combined, then we're
8358 done with this pass. */
8359 if (pa_can_combine_p (new, anchor, floater, 0,
8360 SET_DEST (PATTERN (floater)),
8361 XEXP (SET_SRC (PATTERN (floater)), 0),
8362 XEXP (SET_SRC (PATTERN (floater)), 1)))
8363 break;
8366 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8367 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8369 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8371 if (pa_can_combine_p (new, anchor, floater, 0,
8372 SET_DEST (PATTERN (floater)),
8373 XEXP (SET_SRC (PATTERN (floater)), 0),
8374 XEXP (SET_SRC (PATTERN (floater)), 1)))
8375 break;
8377 else
8379 if (pa_can_combine_p (new, anchor, floater, 0,
8380 SET_DEST (PATTERN (floater)),
8381 SET_SRC (PATTERN (floater)),
8382 SET_SRC (PATTERN (floater))))
8383 break;
8388 /* If we didn't find anything on the backwards scan try forwards. */
8389 if (!floater
8390 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8391 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8393 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8395 if (GET_CODE (floater) == NOTE
8396 || (GET_CODE (floater) == INSN
8397 && (GET_CODE (PATTERN (floater)) == USE
8398 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8400 continue;
8402 /* Anything except a regular INSN will stop our search. */
8403 if (GET_CODE (floater) != INSN
8404 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8405 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8407 floater = NULL_RTX;
8408 break;
8411 /* See if FLOATER is suitable for combination with the
8412 anchor. */
8413 floater_attr = get_attr_pa_combine_type (floater);
8414 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8415 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8416 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8417 && floater_attr == PA_COMBINE_TYPE_FMPY))
8419 /* If ANCHOR and FLOATER can be combined, then we're
8420 done with this pass. */
8421 if (pa_can_combine_p (new, anchor, floater, 1,
8422 SET_DEST (PATTERN (floater)),
8423 XEXP (SET_SRC (PATTERN (floater)),
8425 XEXP (SET_SRC (PATTERN (floater)),
8426 1)))
8427 break;
8432 /* FLOATER will be nonzero if we found a suitable floating
8433 insn for combination with ANCHOR. */
8434 if (floater
8435 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8436 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8438 /* Emit the new instruction and delete the old anchor. */
8439 emit_insn_before (gen_rtx_PARALLEL
8440 (VOIDmode,
8441 gen_rtvec (2, PATTERN (anchor),
8442 PATTERN (floater))),
8443 anchor);
8445 PUT_CODE (anchor, NOTE);
8446 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8447 NOTE_SOURCE_FILE (anchor) = 0;
8449 /* Emit a special USE insn for FLOATER, then delete
8450 the floating insn. */
8451 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8452 delete_insn (floater);
8454 continue;
8456 else if (floater
8457 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8459 rtx temp;
8460 /* Emit the new_jump instruction and delete the old anchor. */
8461 temp
8462 = emit_jump_insn_before (gen_rtx_PARALLEL
8463 (VOIDmode,
8464 gen_rtvec (2, PATTERN (anchor),
8465 PATTERN (floater))),
8466 anchor);
8468 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8469 PUT_CODE (anchor, NOTE);
8470 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8471 NOTE_SOURCE_FILE (anchor) = 0;
8473 /* Emit a special USE insn for FLOATER, then delete
8474 the floating insn. */
8475 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8476 delete_insn (floater);
8477 continue;
8483 static int
8484 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8485 rtx src1, rtx src2)
8487 int insn_code_number;
8488 rtx start, end;
8490 /* Create a PARALLEL with the patterns of ANCHOR and
8491 FLOATER, try to recognize it, then test constraints
8492 for the resulting pattern.
8494 If the pattern doesn't match or the constraints
8495 aren't met, keep searching for a suitable floater
8496 insn. */
8497 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8498 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8499 INSN_CODE (new) = -1;
8500 insn_code_number = recog_memoized (new);
8501 if (insn_code_number < 0
8502 || (extract_insn (new), ! constrain_operands (1)))
8503 return 0;
8505 if (reversed)
8507 start = anchor;
8508 end = floater;
8510 else
8512 start = floater;
8513 end = anchor;
8516 /* There are up to three operands to consider: one
8517 output and two inputs.
8519 The output must not be used between FLOATER & ANCHOR
8520 exclusive. The inputs must not be set between
8521 FLOATER and ANCHOR exclusive. */
8523 if (reg_used_between_p (dest, start, end))
8524 return 0;
8526 if (reg_set_between_p (src1, start, end))
8527 return 0;
8529 if (reg_set_between_p (src2, start, end))
8530 return 0;
8532 /* If we get here, then everything is good. */
8533 return 1;
8536 /* Return nonzero if references for INSN are delayed.
8538 Millicode insns are actually function calls with some special
8539 constraints on arguments and register usage.
8541 Millicode calls always expect their arguments in the integer argument
8542 registers, and always return their result in %r29 (ret1). They
8543 are expected to clobber their arguments, %r1, %r29, and the return
8544 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8546 This function tells reorg that the references to arguments and
8547 millicode calls do not appear to happen until after the millicode call.
8548 This allows reorg to put insns which set the argument registers into the
8549 delay slot of the millicode call -- thus they act more like traditional
8550 CALL_INSNs.
8552 Note we cannot consider side effects of the insn to be delayed because
8553 the branch and link insn will clobber the return pointer. If we happened
8554 to use the return pointer in the delay slot of the call, then we lose.
8556 get_attr_type will try to recognize the given insn, so make sure to
8557 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8558 in particular. */
8560 insn_refs_are_delayed (rtx insn)
8562 return ((GET_CODE (insn) == INSN
8563 && GET_CODE (PATTERN (insn)) != SEQUENCE
8564 && GET_CODE (PATTERN (insn)) != USE
8565 && GET_CODE (PATTERN (insn)) != CLOBBER
8566 && get_attr_type (insn) == TYPE_MILLI));
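/* For example (hypothetical insns), because the argument references
   are treated as delayed, reorg may move the argument setup of a
   multiply millicode call into its delay slot, turning

	ldi 10,%r25
	bl $$mulI,%r31

   into

	bl $$mulI,%r31
	ldi 10,%r25  */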
8569 /* On the HP-PA the value is found in register(s) 28(-29), unless
8570 the mode is SF or DF. Then the value is returned in fr4 (32).
8572 This must perform the same promotions as PROMOTE_MODE, else
8573 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8575 Small structures must be returned in a PARALLEL on PA64 in order
8576 to match the HP Compiler ABI. */
8579 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8581 enum machine_mode valmode;
8583 /* Aggregates with a size less than or equal to 128 bits are returned
8584 in GR 28(-29). They are left justified. The pad bits are undefined.
8585 Larger aggregates are returned in memory. */
8586 if (TARGET_64BIT && AGGREGATE_TYPE_P (valtype))
8588 rtx loc[2];
8589 int i, offset = 0;
8590 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8592 for (i = 0; i < ub; i++)
8594 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8595 gen_rtx_REG (DImode, 28 + i),
8596 GEN_INT (offset));
8597 offset += 8;
8600 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8603 if ((INTEGRAL_TYPE_P (valtype)
8604 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8605 || POINTER_TYPE_P (valtype))
8606 valmode = word_mode;
8607 else
8608 valmode = TYPE_MODE (valtype);
8610 if (TREE_CODE (valtype) == REAL_TYPE
8611 && TYPE_MODE (valtype) != TFmode
8612 && !TARGET_SOFT_FLOAT)
8613 return gen_rtx_REG (valmode, 32);
8615 return gen_rtx_REG (valmode, 28);
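/* Examples of the conventions above (32-bit ABI): a char or short
   result is promoted to word_mode and returned in %r28, a 64-bit
   integer comes back in %r28/%r29, and a float or double is returned
   in %fr4 (register 32) unless soft float is in effect.  */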
8618 /* Return the location of a parameter that is passed in a register or NULL
8619 if the parameter has any component that is passed in memory.
8621 This is new code and will be pushed into the net sources after
8622 further testing.
8624 ??? We might want to restructure this so that it looks more like other
8625 ports. */
8627 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8628 int named ATTRIBUTE_UNUSED)
8630 int max_arg_words = (TARGET_64BIT ? 8 : 4);
8631 int alignment = 0;
8632 int arg_size;
8633 int fpr_reg_base;
8634 int gpr_reg_base;
8635 rtx retval;
8637 if (mode == VOIDmode)
8638 return NULL_RTX;
8640 arg_size = FUNCTION_ARG_SIZE (mode, type);
8642 /* If this arg would be passed partially or totally on the stack, then
8643 this routine should return zero. pa_arg_partial_bytes will
8644 handle arguments which are split between regs and stack slots if
8645 the ABI mandates split arguments. */
8646 if (! TARGET_64BIT)
8648 /* The 32-bit ABI does not split arguments. */
8649 if (cum->words + arg_size > max_arg_words)
8650 return NULL_RTX;
8652 else
8654 if (arg_size > 1)
8655 alignment = cum->words & 1;
8656 if (cum->words + alignment >= max_arg_words)
8657 return NULL_RTX;
8660 /* The 32bit ABIs and the 64bit ABIs are rather different,
8661 particularly in their handling of FP registers. We might
8662 be able to cleverly share code between them, but I'm not
8663 going to bother in the hope that splitting them up results
8664 in code that is more easily understood. */
8666 if (TARGET_64BIT)
8668 /* Advance the base registers to their current locations.
8670 Remember, gprs grow towards smaller register numbers while
8671 fprs grow towards higher register numbers. Also remember that
8672 although FP regs are 32-bit addressable, we pretend that
8673 the registers are 64-bits wide. */
8674 gpr_reg_base = 26 - cum->words;
8675 fpr_reg_base = 32 + cum->words;
8677 /* Arguments wider than one word and small aggregates need special
8678 treatment. */
8679 if (arg_size > 1
8680 || mode == BLKmode
8681 || (type && AGGREGATE_TYPE_P (type)))
8683 /* Double-extended precision (80-bit), quad-precision (128-bit)
8684 and aggregates including complex numbers are aligned on
8685 128-bit boundaries. The first eight 64-bit argument slots
8686 are associated one-to-one, with general registers r26
8687 through r19, and also with floating-point registers fr4
8688 through fr11. Arguments larger than one word are always
8689 passed in general registers.
8691 Using a PARALLEL with a word mode register results in left
8692 justified data on a big-endian target. */
8694 rtx loc[8];
8695 int i, offset = 0, ub = arg_size;
8697 /* Align the base register. */
8698 gpr_reg_base -= alignment;
8700 ub = MIN (ub, max_arg_words - cum->words - alignment);
8701 for (i = 0; i < ub; i++)
8703 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8704 gen_rtx_REG (DImode, gpr_reg_base),
8705 GEN_INT (offset));
8706 gpr_reg_base -= 1;
8707 offset += 8;
8710 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
8713 else
8715 /* If the argument is larger than a word, then we know precisely
8716 which registers we must use. */
8717 if (arg_size > 1)
8719 if (cum->words)
8721 gpr_reg_base = 23;
8722 fpr_reg_base = 38;
8724 else
8726 gpr_reg_base = 25;
8727 fpr_reg_base = 34;
8730 /* Structures 5 to 8 bytes in size are passed in the general
8731 registers in the same manner as other non floating-point
8732 objects. The data is right-justified and zero-extended
8733 to 64 bits. This is opposite to the normal justification
8734 used on big endian targets and requires special treatment.
8735 We now define BLOCK_REG_PADDING to pad these objects. */
8736 if (mode == BLKmode)
8738 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8739 gen_rtx_REG (DImode, gpr_reg_base),
8740 const0_rtx);
8741 return gen_rtx_PARALLEL (mode, gen_rtvec (1, loc));
8744 else
8746 /* We have a single word (32 bits). A simple computation
8747 will get us the register #s we need. */
8748 gpr_reg_base = 26 - cum->words;
8749 fpr_reg_base = 32 + 2 * cum->words;
8753 /* Determine if the argument needs to be passed in both general and
8754 floating point registers. */
8755 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
8756 /* If we are doing soft-float with portable runtime, then there
8757 is no need to worry about FP regs. */
8758 && !TARGET_SOFT_FLOAT
8759 /* The parameter must be some kind of float, else we can just
8760 pass it in integer registers. */
8761 && FLOAT_MODE_P (mode)
8762 /* The target function must not have a prototype. */
8763 && cum->nargs_prototype <= 0
8764 /* libcalls do not need to pass items in both FP and general
8765 registers. */
8766 && type != NULL_TREE
8767 /* All this hair applies to "outgoing" args only. This includes
8768 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
8769 && !cum->incoming)
8770 /* Also pass outgoing floating arguments in both registers in indirect
8771 calls with the 32 bit ABI and the HP assembler since there is no
8772 way to specify the argument locations in static functions. */
8773 || (!TARGET_64BIT
8774 && !TARGET_GAS
8775 && !cum->incoming
8776 && cum->indirect
8777 && FLOAT_MODE_P (mode)))
8779 retval
8780 = gen_rtx_PARALLEL
8781 (mode,
8782 gen_rtvec (2,
8783 gen_rtx_EXPR_LIST (VOIDmode,
8784 gen_rtx_REG (mode, fpr_reg_base),
8785 const0_rtx),
8786 gen_rtx_EXPR_LIST (VOIDmode,
8787 gen_rtx_REG (mode, gpr_reg_base),
8788 const0_rtx)));
8790 else
8792 /* See if we should pass this parameter in a general register. */
8793 if (TARGET_SOFT_FLOAT
8794 /* Indirect calls in the normal 32bit ABI require all arguments
8795 to be passed in general registers. */
8796 || (!TARGET_PORTABLE_RUNTIME
8797 && !TARGET_64BIT
8798 && !TARGET_ELF32
8799 && cum->indirect)
8800 /* If the parameter is not a floating point parameter, then
8801 it belongs in GPRs. */
8802 || !FLOAT_MODE_P (mode))
8803 retval = gen_rtx_REG (mode, gpr_reg_base);
8804 else
8805 retval = gen_rtx_REG (mode, fpr_reg_base);
8807 return retval;
8811 /* If this arg would be passed totally in registers or totally on the stack,
8812 then this routine should return zero. */
8814 static int
8815 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8816 tree type, bool named ATTRIBUTE_UNUSED)
8818 unsigned int max_arg_words = 8;
8819 unsigned int offset = 0;
8821 if (!TARGET_64BIT)
8822 return 0;
8824 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
8825 offset = 1;
8827 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
8828 /* Arg fits fully into registers. */
8829 return 0;
8830 else if (cum->words + offset >= max_arg_words)
8831 /* Arg fully on the stack. */
8832 return 0;
8833 else
8834 /* Arg is split. */
8835 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
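/* Worked example (PA64, UNITS_PER_WORD == 8): with cum->words == 6
   and a four-word argument, 6 + 4 > 8 while 6 < 8, so the argument
   is split and (8 - 6) * 8 == 16 bytes are passed in registers, with
   the remainder on the stack.  */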
8839 /* Return a string to output before text in the current function.
8841 This function is only used with SOM. Because we don't support
8842 named subspaces, we can only create a new subspace or switch back
8843 to the default text subspace. */
8844 const char *
8845 som_text_section_asm_op (void)
8847 if (!TARGET_SOM)
8848 return "";
8850 if (TARGET_GAS)
8852 if (cfun && !cfun->machine->in_nsubspa)
8854 /* We only want to emit a .nsubspa directive once at the
8855 start of the function. */
8856 cfun->machine->in_nsubspa = 1;
8858 /* Create a new subspace for the text. This provides
8859 better stub placement and one-only functions. */
8860 if (cfun->decl
8861 && DECL_ONE_ONLY (cfun->decl)
8862 && !DECL_WEAK (cfun->decl))
8863 return
8864 "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=24,COMDAT";
8866 return "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$";
8868 else
8870 /* There isn't a current function or the body of the current
8871 function has been completed. So, we are changing to the
8872 text section to output debugging information. Do this in
8873 the default text section. We need to forget that we are
8874 in the text section so that the function text_section in
8875 varasm.c will call us the next time around. */
8876 forget_section ();
8880 return "\t.SPACE $TEXT$\n\t.SUBSPA $CODE$";
8883 /* On hpux10, the linker will give an error if we have a reference
8884 in the read-only data section to a symbol defined in a shared
8885 library. Therefore, expressions that might require a reloc
8886 cannot be placed in the read-only data section. */
8888 static void
8889 pa_select_section (tree exp, int reloc,
8890 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
8892 if (TREE_CODE (exp) == VAR_DECL
8893 && TREE_READONLY (exp)
8894 && !TREE_THIS_VOLATILE (exp)
8895 && DECL_INITIAL (exp)
8896 && (DECL_INITIAL (exp) == error_mark_node
8897 || TREE_CONSTANT (DECL_INITIAL (exp)))
8898 && !reloc)
8900 if (TARGET_SOM
8901 && DECL_ONE_ONLY (exp)
8902 && !DECL_WEAK (exp))
8903 som_one_only_readonly_data_section ();
8904 else
8905 readonly_data_section ();
8907 else if (CONSTANT_CLASS_P (exp) && !reloc)
8908 readonly_data_section ();
8909 else if (TARGET_SOM
8910 && TREE_CODE (exp) == VAR_DECL
8911 && DECL_ONE_ONLY (exp)
8912 && !DECL_WEAK (exp))
8913 som_one_only_data_section ();
8914 else
8915 data_section ();
8918 static void
8919 pa_globalize_label (FILE *stream, const char *name)
8921 /* We only handle DATA objects here, functions are globalized in
8922 ASM_DECLARE_FUNCTION_NAME. */
8923 if (! FUNCTION_NAME_P (name))
8925 fputs ("\t.EXPORT ", stream);
8926 assemble_name (stream, name);
8927 fputs (",DATA\n", stream);
8931 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
8933 static rtx
8934 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
8935 int incoming ATTRIBUTE_UNUSED)
8937 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
8940 /* Worker function for TARGET_RETURN_IN_MEMORY. */
8942 bool
8943 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
8945 /* SOM ABI says that objects larger than 64 bits are returned in memory.
8946 PA64 ABI says that objects larger than 128 bits are returned in memory.
8947 Note, int_size_in_bytes can return -1 if the size of the object is
8948 variable or larger than the maximum value that can be expressed as
8949 a HOST_WIDE_INT. It can also return zero for an empty type. The
8950 simplest way to handle variable and empty types is to pass them in
8951 memory. This avoids problems in defining the boundaries of argument
8952 slots, allocating registers, etc. */
8953 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
8954 || int_size_in_bytes (type) <= 0);
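/* For instance, a 12-byte structure is returned in memory with the
   32-bit SOM ABI (12 > 8) but in registers with the PA64 ABI
   (12 <= 16), while variable-sized and empty types always go in
   memory.  */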
8957 /* Structure to hold declaration and name of external symbols that are
8958 emitted by GCC. We generate a vector of these symbols and output them
8959 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
8960 This avoids putting out names that are never really used. */
8962 typedef struct extern_symbol GTY(())
8964 tree decl;
8965 const char *name;
8966 } extern_symbol;
8968 /* Define gc'd vector type for extern_symbol. */
8969 DEF_VEC_O(extern_symbol);
8970 DEF_VEC_ALLOC_O(extern_symbol,gc);
8972 /* Vector of extern_symbol pointers. */
8973 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
8975 #ifdef ASM_OUTPUT_EXTERNAL_REAL
8976 /* Mark DECL (name NAME) as an external reference (assembler output
8977 file FILE). This saves the names to output at the end of the file
8978 if actually referenced. */
8980 void
8981 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
8983 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
8985 gcc_assert (file == asm_out_file);
8986 p->decl = decl;
8987 p->name = name;
8990 /* Output text required at the end of an assembler file.
8991 This includes deferred plabels and .import directives for
8992 all external symbols that were actually referenced. */
8994 static void
8995 pa_hpux_file_end (void)
8997 unsigned int i;
8998 extern_symbol *p;
9000 output_deferred_plabels ();
9002 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9004 tree decl = p->decl;
9006 if (!TREE_ASM_WRITTEN (decl)
9007 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9008 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9011 VEC_free (extern_symbol, gc, extern_symbols);
9013 #endif
9015 #include "gt-pa.h"