/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
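
/* For illustration (not an exhaustive rule): a DFmode fmpy result
   feeding an fstds store has matching 8-byte mode sizes, so the bypass
   applies; an SFmode producer feeding a DFmode store does not.  */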

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static struct deferred_plabel *get_plabel (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                 tree, bool);
static struct machine_function * pa_init_machine_status (void);

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}*, where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
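
/* An illustrative example: given -mfixed-range=fr20-fr23, fix_range
   receives "fr20-fr23" and marks fr20 through fr23 as both fixed and
   call-used, so the register allocator never assigns them.  If every
   register in FP_REG_FIRST..FP_REG_LAST ends up fixed, the final loop
   above also sets MASK_DISABLE_FPREGS.  */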

/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
        pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
        pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
        pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
        pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
        pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
        pa_cpu = PROCESSOR_7300;
      else
        return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}

void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
          || CONST_OK_FOR_LETTER_P (intval, 'N')
          || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
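
/* For instance, 5000 fits the 14-bit signed range of ldo ('J');
   0x12345000 has its low 11 bits clear and is an ldil immediate ('N');
   0x12345 satisfies none of the three letters, so moving it takes an
   ldil/ldo pair rather than a single instruction.  */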

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
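
/* A worked example: x = 0x170 (1 0111 0000) gives lsb_mask = 0x10 and
   t = (0x17 + 0x10) & ~0xf = 0x20, a power of two, so a 5-bit field
   deposited at bit 4 generates it.  For x = 0x21 (10 0001), t = 0x3,
   not a power of two; the set bits span more than one sign-extended
   5-bit field, so zdepi cannot generate it.  */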

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
        0....01....1
        1....10....0
        1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
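
/* Worked examples for the two mask predicates: for and_mask_p,
   mask = ~0xf (1...10000) yields ~mask = 0xf, which rounds up to the
   power of two 0x10 and is accepted, while mask = 5 (0...0101) is
   rejected.  For ior_mask_p, mask = 0x38 (111000) rounds up to 0x40
   and is accepted; depi can deposit any contiguous run of ones.  */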

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
                      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
        = gen_const_mem (Pmode,
                         gen_rtx_LO_SUM (Pmode, tmp_reg,
                                         gen_rtx_UNSPEC (Pmode,
                                                         gen_rtvec (1, orig),
                                                         UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
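
/* As an illustration, the SYMBOL_REF case above emits a sequence of
   the shape

        (set (reg tmp) (plus (reg %r19) (high (symbol_ref X))))
        (set (reg R) (mem (lo_sum (reg tmp)
                                  (unspec [(symbol_ref X)] UNSPEC_DLTIND14R))))

   that is, the DLT entry for X is addressed relative to the PIC
   register and the pointer is loaded from it.  */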

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory (X + <large int>)

   into:

        if (<large int> & mask) >= (mask + 1) / 2
          Y = (<large int> & ~mask) + mask + 1    Round up.
        else
          Y = (<large int> & ~mask)               Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (This allows more shadd insns and shifted
   indexed addressing modes to be used.)

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
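
/* A worked example of the transformation above, for a MODE_INT
   reference (mask 0x3fff): given memory (X + 0x6543), the residue
   0x6543 & 0x3fff = 0x2543 is >= 0x2000, so we round up to Y = 0x8000,
   emit Z = X + Y, and rewrite the reference as memory (Z + (-0x1abd)),
   whose displacement fits the signed 14-bit field of ldw/ldo.  Nearby
   references such as X + 0x6547 then share the same Z via CSE.  */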

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          orig_base = XEXP (XEXP (x, 0), 1);
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          orig_base = XEXP (x, 1);
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
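      /* E.g., with idx = (plus (mult A 8) (const_int 32768)), 32768 is
         too big for a 14-bit immediate, but 32768 / 8 = 4096 fits, so
         the code below emits A' = A + 4096 and uses base + A'*8 as a
         scaled indexed address (an illustrative trace, not an extra
         case).  */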
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
             (plus (mult (reg) (shadd_const))
                   (const (plus (symbol_ref) (const_int))))

             where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             Alternatively, const_int may be big but divisible evenly by
             shadd_const, in which case it can be added to (reg); this
             allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
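  /* An illustrative case: for (set (reg:DF fr4) (mem:DF (plus (reg:SI %r3)
     (const_int 8192)))), 8192 fits neither the 5-bit nor the 14-bit
     displacement field, so the code below emits scratch = 8192, then
     scratch = %r3 + scratch, and finally loads fr4 from (mem:DF scratch).  */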
1390 if (scratch_reg
1391 && fp_reg_operand (operand0, mode)
1392 && ((GET_CODE (operand1) == MEM
1393 && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
1394 XEXP (operand1, 0)))
1395 || ((GET_CODE (operand1) == SUBREG
1396 && GET_CODE (XEXP (operand1, 0)) == MEM
1397 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1398 ? SFmode : DFmode),
1399 XEXP (XEXP (operand1, 0), 0))))))
1401 if (GET_CODE (operand1) == SUBREG)
1402 operand1 = XEXP (operand1, 0);
1404 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1405 it in WORD_MODE regardless of what mode it was originally given
1406 to us. */
1407 scratch_reg = force_mode (word_mode, scratch_reg);
1409 /* D might not fit in 14 bits either; for such cases load D into
1410 scratch reg. */
1411 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
1413 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1414 emit_move_insn (scratch_reg,
1415 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1416 Pmode,
1417 XEXP (XEXP (operand1, 0), 0),
1418 scratch_reg));
1420 else
1421 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1422 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1423 gen_rtx_MEM (mode, scratch_reg)));
1424 return 1;
1426 else if (scratch_reg
1427 && fp_reg_operand (operand1, mode)
1428 && ((GET_CODE (operand0) == MEM
1429 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1430 ? SFmode : DFmode),
1431 XEXP (operand0, 0)))
1432 || ((GET_CODE (operand0) == SUBREG)
1433 && GET_CODE (XEXP (operand0, 0)) == MEM
1434 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1435 ? SFmode : DFmode),
1436 XEXP (XEXP (operand0, 0), 0)))))
1438 if (GET_CODE (operand0) == SUBREG)
1439 operand0 = XEXP (operand0, 0);
1441 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1442 it in WORD_MODE regardless of what mode it was originally given
1443 to us. */
1444 scratch_reg = force_mode (word_mode, scratch_reg);
1446 /* D might not fit in 14 bits either; for such cases load D into
1447 scratch reg. */
1448 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
1450 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1451 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1452 0)),
1453 Pmode,
1454 XEXP (XEXP (operand0, 0),
1456 scratch_reg));
1458 else
1459 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1460 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
1461 operand1));
1462 return 1;
1464 /* Handle secondary reloads for loads of FP registers from constant
1465 expressions by forcing the constant into memory.
1467 Use scratch_reg to hold the address of the memory location.
1469 The proper fix is to change PREFERRED_RELOAD_CLASS to return
1470 NO_REGS when presented with a const_int and a register class
1471 containing only FP registers. Doing so unfortunately creates
1472 more problems than it solves. Fix this for 2.5. */
1473 else if (scratch_reg
1474 && CONSTANT_P (operand1)
1475 && fp_reg_operand (operand0, mode))
1477 rtx xoperands[2];
1479 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1480 it in WORD_MODE regardless of what mode it was originally given
1481 to us. */
1482 scratch_reg = force_mode (word_mode, scratch_reg);
1484 /* Force the constant into memory and put the address of the
1485 memory location into scratch_reg. */
1486 xoperands[0] = scratch_reg;
1487 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
1488 emit_move_sequence (xoperands, Pmode, 0);
1490 /* Now load the destination register. */
1491 emit_insn (gen_rtx_SET (mode, operand0,
1492 gen_rtx_MEM (mode, scratch_reg)));
1493 return 1;
1495 /* Handle secondary reloads for SAR. These occur when trying to load
1496 the SAR from memory, FP register, or with a constant. */
1497 else if (scratch_reg
1498 && GET_CODE (operand0) == REG
1499 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1500 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1501 && (GET_CODE (operand1) == MEM
1502 || GET_CODE (operand1) == CONST_INT
1503 || (GET_CODE (operand1) == REG
1504 && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
1506 /* D might not fit in 14 bits either; for such cases load D into
1507 scratch reg. */
1508 if (GET_CODE (operand1) == MEM
1509 && !memory_address_p (Pmode, XEXP (operand1, 0)))
1511 /* We are reloading the address into the scratch register, so we
1512 want to make sure the scratch register is a full register. */
1513 scratch_reg = force_mode (word_mode, scratch_reg);
1515 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1516 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1517 0)),
1518 Pmode,
1519 XEXP (XEXP (operand1, 0),
1521 scratch_reg));
1523 /* Now we are going to load the scratch register from memory,
1524 we want to load it in the same width as the original MEM,
1525 which must be the same as the width of the ultimate destination,
1526 OPERAND0. */
1527 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1529 emit_move_insn (scratch_reg, gen_rtx_MEM (GET_MODE (operand0),
1530 scratch_reg));
1532 else
1534 /* We want to load the scratch register using the same mode as
1535 the ultimate destination. */
1536 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1538 emit_move_insn (scratch_reg, operand1);
1541 /* And emit the insn to set the ultimate destination. We know that
1542 the scratch register has the same mode as the destination at this
1543 point. */
1544 emit_move_insn (operand0, scratch_reg);
1545 return 1;
1547 /* Handle the most common case: storing into a register. */
1548 else if (register_operand (operand0, mode))
1550 if (register_operand (operand1, mode)
1551 || (GET_CODE (operand1) == CONST_INT
1552 && cint_ok_for_move (INTVAL (operand1)))
1553 || (operand1 == CONST0_RTX (mode))
1554 || (GET_CODE (operand1) == HIGH
1555 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1556 /* Only `general_operands' can come here, so MEM is ok. */
1557 || GET_CODE (operand1) == MEM)
1559 /* Various sets are created during RTL generation which don't
1560 have the REG_POINTER flag correctly set. After the CSE pass,
1561 instruction recognition can fail if we don't consistently
1562 set this flag when performing register copies. This should
1563 also improve the opportunities for creating insns that use
1564 unscaled indexing. */
1565 if (REG_P (operand0) && REG_P (operand1))
1567 if (REG_POINTER (operand1)
1568 && !REG_POINTER (operand0)
1569 && !HARD_REGISTER_P (operand0))
1570 copy_reg_pointer (operand0, operand1);
1571 else if (REG_POINTER (operand0)
1572 && !REG_POINTER (operand1)
1573 && !HARD_REGISTER_P (operand1))
1574 copy_reg_pointer (operand1, operand0);
1577 /* When MEMs are broken out, the REG_POINTER flag doesn't
1578 get set. In some cases, we can set the REG_POINTER flag
1579 from the declaration for the MEM. */
1580 if (REG_P (operand0)
1581 && GET_CODE (operand1) == MEM
1582 && !REG_POINTER (operand0))
1584 tree decl = MEM_EXPR (operand1);
1586 /* Set the register pointer flag and register alignment
1587 if the declaration for this memory reference is a
1588 pointer type. Fortran indirect argument references
1589 are ignored. */
1590 if (decl
1591 && !(flag_argument_noalias > 1
1592 && TREE_CODE (decl) == INDIRECT_REF
1593 && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
1595 tree type;
1597 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1598 tree operand 1. */
1599 if (TREE_CODE (decl) == COMPONENT_REF)
1600 decl = TREE_OPERAND (decl, 1);
1602 type = TREE_TYPE (decl);
1603 if (TREE_CODE (type) == ARRAY_TYPE)
1604 type = get_inner_array_type (type);
1606 if (POINTER_TYPE_P (type))
1608 int align;
1610 type = TREE_TYPE (type);
1611 /* Using TYPE_ALIGN_OK is rather conservative as
1612 only the ada frontend actually sets it. */
1613 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1614 : BITS_PER_UNIT);
1615 mark_reg_pointer (operand0, align);
1620 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1621 return 1;
1624 else if (GET_CODE (operand0) == MEM)
1626 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1627 && !(reload_in_progress || reload_completed))
1629 rtx temp = gen_reg_rtx (DFmode);
1631 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1632 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1633 return 1;
1635 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1637 /* Run this case quickly. */
1638 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1639 return 1;
1641 if (! (reload_in_progress || reload_completed))
1643 operands[0] = validize_mem (operand0);
1644 operands[1] = operand1 = force_reg (mode, operand1);
1648 /* Simplify the source if we need to.
1649 Note we do have to handle function labels here, even though we do
1650 not consider them legitimate constants. Loop optimizations can
1651 call the emit_move_xxx with one as a source. */
1652 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1653 || function_label_operand (operand1, mode)
1654 || (GET_CODE (operand1) == HIGH
1655 && symbolic_operand (XEXP (operand1, 0), mode)))
1657 int ishighonly = 0;
1659 if (GET_CODE (operand1) == HIGH)
1661 ishighonly = 1;
1662 operand1 = XEXP (operand1, 0);
1664 if (symbolic_operand (operand1, mode))
1666 /* Argh. The assembler and linker can't handle arithmetic
1667 involving plabels.
1669 So we force the plabel into memory, load operand0 from
1670 the memory location, then add in the constant part. */
1671 if ((GET_CODE (operand1) == CONST
1672 && GET_CODE (XEXP (operand1, 0)) == PLUS
1673 && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
1674 || function_label_operand (operand1, mode))
1676 rtx temp, const_part;
1678 /* Figure out what (if any) scratch register to use. */
1679 if (reload_in_progress || reload_completed)
1681 scratch_reg = scratch_reg ? scratch_reg : operand0;
1682 /* SCRATCH_REG will hold an address and maybe the actual
1683 data. We want it in WORD_MODE regardless of what mode it
1684 was originally given to us. */
1685 scratch_reg = force_mode (word_mode, scratch_reg);
1687 else if (flag_pic)
1688 scratch_reg = gen_reg_rtx (Pmode);
1690 if (GET_CODE (operand1) == CONST)
1692 /* Save away the constant part of the expression. */
1693 const_part = XEXP (XEXP (operand1, 0), 1);
1694 gcc_assert (GET_CODE (const_part) == CONST_INT);
1696 /* Force the function label into memory. */
1697 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1699 else
1701 /* No constant part. */
1702 const_part = NULL_RTX;
1704 /* Force the function label into memory. */
1705 temp = force_const_mem (mode, operand1);
1709 /* Get the address of the memory location. PIC-ify it if
1710 necessary. */
1711 temp = XEXP (temp, 0);
1712 if (flag_pic)
1713 temp = legitimize_pic_address (temp, mode, scratch_reg);
1715 /* Put the address of the memory location into our destination
1716 register. */
1717 operands[1] = temp;
1718 emit_move_sequence (operands, mode, scratch_reg);
1720 /* Now load from the memory location into our destination
1721 register. */
1722 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1723 emit_move_sequence (operands, mode, scratch_reg);
1725 /* And add back in the constant part. */
1726 if (const_part != NULL_RTX)
1727 expand_inc (operand0, const_part);
1729 return 1;
1732 if (flag_pic)
1734 rtx temp;
1736 if (reload_in_progress || reload_completed)
1738 temp = scratch_reg ? scratch_reg : operand0;
1739 /* TEMP will hold an address and maybe the actual
1740 data. We want it in WORD_MODE regardless of what mode it
1741 was originally given to us. */
1742 temp = force_mode (word_mode, temp);
1744 else
1745 temp = gen_reg_rtx (Pmode);
1747 /* (const (plus (symbol) (const_int))) must be forced to
1748 memory during/after reload if the const_int will not fit
1749 in 14 bits. */
1750 if (GET_CODE (operand1) == CONST
1751 && GET_CODE (XEXP (operand1, 0)) == PLUS
1752 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
1753 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
1754 && (reload_completed || reload_in_progress)
1755 && flag_pic)
1757 operands[1] = force_const_mem (mode, operand1);
1758 operands[1] = legitimize_pic_address (XEXP (operands[1], 0),
1759 mode, temp);
1760 operands[1] = gen_rtx_MEM (mode, operands[1]);
1761 emit_move_sequence (operands, mode, temp);
1763 else
1765 operands[1] = legitimize_pic_address (operand1, mode, temp);
1766 if (REG_P (operand0) && REG_P (operands[1]))
1767 copy_reg_pointer (operand0, operands[1]);
1768 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
1771 /* On the HPPA, references to data space are supposed to use dp,
1772 register 27, but showing it in the RTL inhibits various cse
1773 and loop optimizations. */
1774 else
1776 rtx temp, set;
1778 if (reload_in_progress || reload_completed)
1780 temp = scratch_reg ? scratch_reg : operand0;
1781 /* TEMP will hold an address and maybe the actual
1782 data. We want it in WORD_MODE regardless of what mode it
1783 was originally given to us. */
1784 temp = force_mode (word_mode, temp);
1786 else
1787 temp = gen_reg_rtx (mode);
1789 /* Loading a SYMBOL_REF into a register makes that register
1790 safe to be used as the base in an indexed address.
1792 Don't mark hard registers though. That loses. */
1793 if (GET_CODE (operand0) == REG
1794 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1795 mark_reg_pointer (operand0, BITS_PER_UNIT);
1796 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
1797 mark_reg_pointer (temp, BITS_PER_UNIT);
1799 if (ishighonly)
1800 set = gen_rtx_SET (mode, operand0, temp);
1801 else
1802 set = gen_rtx_SET (VOIDmode,
1803 operand0,
1804 gen_rtx_LO_SUM (mode, temp, operand1));
1806 emit_insn (gen_rtx_SET (VOIDmode,
1807 temp,
1808 gen_rtx_HIGH (mode, operand1)));
1809 emit_insn (set);
1812 return 1;
1814 else if (pa_tls_referenced_p (operand1))
1816 rtx tmp = operand1;
1817 rtx addend = NULL;
1819 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
1821 addend = XEXP (XEXP (tmp, 0), 1);
1822 tmp = XEXP (XEXP (tmp, 0), 0);
1825 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
1826 tmp = legitimize_tls_address (tmp);
1827 if (addend)
1829 tmp = gen_rtx_PLUS (mode, tmp, addend);
1830 tmp = force_operand (tmp, operands[0]);
1832 operands[1] = tmp;
1834 else if (GET_CODE (operand1) != CONST_INT
1835 || !cint_ok_for_move (INTVAL (operand1)))
1837 rtx insn, temp;
1838 rtx op1 = operand1;
1839 HOST_WIDE_INT value = 0;
1840 HOST_WIDE_INT insv = 0;
1841 int insert = 0;
1843 if (GET_CODE (operand1) == CONST_INT)
1844 value = INTVAL (operand1);
1846 if (TARGET_64BIT
1847 && GET_CODE (operand1) == CONST_INT
1848 && HOST_BITS_PER_WIDE_INT > 32
1849 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1851 HOST_WIDE_INT nval;
1853 /* Extract the low order 32 bits of the value and sign extend.
1854 If the new value is the same as the original value, we can
1855 use the original value as-is. If the new value is
1856 different, we use it and insert the most-significant 32-bits
1857 of the original value into the final result. */
1858 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1859 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
1860 if (value != nval)
1862 #if HOST_BITS_PER_WIDE_INT > 32
1863 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1864 #endif
1865 insert = 1;
1866 value = nval;
1867 operand1 = GEN_INT (nval);
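/* An illustrative trace (not from the sources): for value =
   0x123456789, the sign-extended low word NVAL is 0x23456789, which
   differs from VALUE, so INSV is set to 0x1 (the high 32 bits) and a
   separate insertion of that part is arranged below.  */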
1871 if (reload_in_progress || reload_completed)
1872 temp = scratch_reg ? scratch_reg : operand0;
1873 else
1874 temp = gen_reg_rtx (mode);
1876 /* We don't directly split DImode constants on 32-bit targets
1877 because PLUS uses an 11-bit immediate and the insn sequence
1878 generated is not as efficient as the one using HIGH/LO_SUM. */
1879 if (GET_CODE (operand1) == CONST_INT
1880 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1881 && !insert)
1883 /* Directly break constant into high and low parts. This
1884 provides better optimization opportunities because various
1885 passes recognize constants split with PLUS but not LO_SUM.
1886 We use a 14-bit signed low part except when the addition
1887 of 0x4000 to the high part might change the sign of the
1888 high part. */
1889 HOST_WIDE_INT low = value & 0x3fff;
1890 HOST_WIDE_INT high = value & ~ 0x3fff;
1892 if (low >= 0x2000)
1894 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
1895 high += 0x2000;
1896 else
1897 high += 0x4000;
1900 low = value - high;
1902 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
1903 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
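/* An illustrative example (not from the sources): for value = 0x41234
   in SImode, low = 0x1234 and high = 0x40000, so the two insns emitted
   are roughly

       ldil L'0x40000,%temp
       ldo 0x1234(%temp),%dest

   For value = 0x12345, low would be 0x2345 (>= 0x2000), so 0x4000 is
   added to the high part and the low part becomes
   0x12345 - 0x14000 = -7355, keeping both parts in range.  */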
1905 else
1907 emit_insn (gen_rtx_SET (VOIDmode, temp,
1908 gen_rtx_HIGH (mode, operand1)));
1909 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
1912 insn = emit_move_insn (operands[0], operands[1]);
1914 /* Now insert the most significant 32 bits of the value
1915 into the register. When we don't have a second register
1916 available, it could take up to nine instructions to load
1917 a 64-bit integer constant. Prior to reload, we force
1918 constants that would take more than three instructions
1919 to load to the constant pool. During and after reload,
1920 we have to handle all possible values. */
1921 if (insert)
1923 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
1924 register and the value to be inserted is outside the
1925 range that can be loaded with three depdi instructions. */
1926 if (temp != operand0 && (insv >= 16384 || insv < -16384))
1928 operand1 = GEN_INT (insv);
1930 emit_insn (gen_rtx_SET (VOIDmode, temp,
1931 gen_rtx_HIGH (mode, operand1)));
1932 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
1933 emit_insn (gen_insv (operand0, GEN_INT (32),
1934 const0_rtx, temp));
1936 else
1938 int len = 5, pos = 27;
1940 /* Insert the bits using the depdi instruction. */
1941 while (pos >= 0)
1943 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
1944 HOST_WIDE_INT sign = v5 < 0;
1946 /* Left extend the insertion. */
1947 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
1948 while (pos > 0 && (insv & 1) == sign)
1950 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
1951 len += 1;
1952 pos -= 1;
1955 emit_insn (gen_insv (operand0, GEN_INT (len),
1956 GEN_INT (pos), GEN_INT (v5)));
1958 len = pos > 0 && pos < 5 ? pos : 5;
1959 pos -= len;
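/* An illustrative trace (not from the sources): for insv = 1, the
   first pass computes v5 = 1 and the inner loop then widens the field
   until POS reaches 0, so a single depdi deposits the value 1,
   sign-extended to a 32-bit field, into the upper half of OPERAND0.  */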
1964 REG_NOTES (insn)
1965 = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));
1967 return 1;
1970 /* Now have insn-emit do whatever it normally does. */
1971 return 0;
1974 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
1975 it will need a link/runtime reloc). */
1978 reloc_needed (tree exp)
1980 int reloc = 0;
1982 switch (TREE_CODE (exp))
1984 case ADDR_EXPR:
1985 return 1;
1987 case PLUS_EXPR:
1988 case MINUS_EXPR:
1989 reloc = reloc_needed (TREE_OPERAND (exp, 0));
1990 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
1991 break;
1993 case NOP_EXPR:
1994 case CONVERT_EXPR:
1995 case NON_LVALUE_EXPR:
1996 reloc = reloc_needed (TREE_OPERAND (exp, 0));
1997 break;
1999 case CONSTRUCTOR:
2001 tree value;
2002 unsigned HOST_WIDE_INT ix;
2004 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2005 if (value)
2006 reloc |= reloc_needed (value);
2008 break;
2010 case ERROR_MARK:
2011 break;
2013 default:
2014 break;
2016 return reloc;
2019 /* Does operand (which is a symbolic_operand) live in text space?
2020 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2021 will be true. */
2024 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2026 if (GET_CODE (operand) == CONST)
2027 operand = XEXP (XEXP (operand, 0), 0);
2028 if (flag_pic)
2030 if (GET_CODE (operand) == SYMBOL_REF)
2031 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2033 else
2035 if (GET_CODE (operand) == SYMBOL_REF)
2036 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2038 return 1;
2042 /* Return the best assembler insn template
2043 for moving operands[1] into operands[0] as a fullword. */
2044 const char *
2045 singlemove_string (rtx *operands)
2047 HOST_WIDE_INT intval;
2049 if (GET_CODE (operands[0]) == MEM)
2050 return "stw %r1,%0";
2051 if (GET_CODE (operands[1]) == MEM)
2052 return "ldw %1,%0";
2053 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2055 long i;
2056 REAL_VALUE_TYPE d;
2058 gcc_assert (GET_MODE (operands[1]) == SFmode);
2060 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2061 bit pattern. */
2062 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2063 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2065 operands[1] = GEN_INT (i);
2066 /* Fall through to CONST_INT case. */
2068 if (GET_CODE (operands[1]) == CONST_INT)
2070 intval = INTVAL (operands[1]);
2072 if (VAL_14_BITS_P (intval))
2073 return "ldi %1,%0";
2074 else if ((intval & 0x7ff) == 0)
2075 return "ldil L'%1,%0";
2076 else if (zdepi_cint_p (intval))
2077 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2078 else
2079 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2081 return "copy %1,%0";
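/* Illustrative examples (not from the sources): a 14-bit constant such
   as 0x1234 yields "ldi", 0x12345800 (low 11 bits zero) yields "ldil",
   and an arbitrary constant such as 0x12345 needs the two-insn
   "ldil"/"ldo" sequence.  */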
2085 /* Compute position (in OP[1]) and width (in OP[2])
2086 useful for copying IMM to a register using the zdepi
2087 instructions. Store the immediate value to insert in OP[0]. */
2088 static void
2089 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2091 int lsb, len;
2093 /* Find the least significant set bit in IMM. */
2094 for (lsb = 0; lsb < 32; lsb++)
2096 if ((imm & 1) != 0)
2097 break;
2098 imm >>= 1;
2101 /* Choose variants based on *sign* of the 5-bit field. */
2102 if ((imm & 0x10) == 0)
2103 len = (lsb <= 28) ? 4 : 32 - lsb;
2104 else
2106 /* Find the width of the bitstring in IMM. */
2107 for (len = 5; len < 32; len++)
2109 if ((imm & (1 << len)) == 0)
2110 break;
2113 /* Sign extend IMM as a 5-bit value. */
2114 imm = (imm & 0xf) - 0x10;
2117 op[0] = imm;
2118 op[1] = 31 - lsb;
2119 op[2] = len;
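/* An illustrative example (not from the sources): for IMM = 0x00f00000
   the least significant set bit is 20 and the shifted value 0xf has
   bit 4 clear, so OP[0] = 0xf, OP[1] = 31 - 20 = 11 and OP[2] = 4.  */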
2122 /* Compute position (in OP[1]) and width (in OP[2])
2123 useful for copying IMM to a register using the depdi,z
2124 instructions. Store the immediate value to insert in OP[0]. */
2125 void
2126 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2128 HOST_WIDE_INT lsb, len;
2130 /* Find the least significant set bit in IMM. */
2131 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2133 if ((imm & 1) != 0)
2134 break;
2135 imm >>= 1;
2138 /* Choose variants based on *sign* of the 5-bit field. */
2139 if ((imm & 0x10) == 0)
2140 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2141 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2142 else
2144 /* Find the width of the bitstring in IMM. */
2145 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2147 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2148 break;
2151 /* Sign extend IMM as a 5-bit value. */
2152 imm = (imm & 0xf) - 0x10;
2155 op[0] = imm;
2156 op[1] = 63 - lsb;
2157 op[2] = len;
2160 /* Output assembler code to perform a doubleword move insn
2161 with operands OPERANDS. */
2163 const char *
2164 output_move_double (rtx *operands)
2166 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2167 rtx latehalf[2];
2168 rtx addreg0 = 0, addreg1 = 0;
2170 /* First classify both operands. */
2172 if (REG_P (operands[0]))
2173 optype0 = REGOP;
2174 else if (offsettable_memref_p (operands[0]))
2175 optype0 = OFFSOP;
2176 else if (GET_CODE (operands[0]) == MEM)
2177 optype0 = MEMOP;
2178 else
2179 optype0 = RNDOP;
2181 if (REG_P (operands[1]))
2182 optype1 = REGOP;
2183 else if (CONSTANT_P (operands[1]))
2184 optype1 = CNSTOP;
2185 else if (offsettable_memref_p (operands[1]))
2186 optype1 = OFFSOP;
2187 else if (GET_CODE (operands[1]) == MEM)
2188 optype1 = MEMOP;
2189 else
2190 optype1 = RNDOP;
2192 /* Check for the cases that the operand constraints are not
2193 supposed to allow to happen. */
2194 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2196 /* Handle auto decrementing and incrementing loads and stores
2197 specifically, since the structure of the function doesn't work
2198 for them without major modification. Do it better once this
2199 port is taught about the general inc/dec addressing of the PA.
2200 (This was written by tege. Chide him if it doesn't work.) */
2202 if (optype0 == MEMOP)
2204 /* We have to output the address syntax ourselves, since print_operand
2205 doesn't deal with the addresses we want to use. Fix this later. */
2207 rtx addr = XEXP (operands[0], 0);
2208 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2210 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2212 operands[0] = XEXP (addr, 0);
2213 gcc_assert (GET_CODE (operands[1]) == REG
2214 && GET_CODE (operands[0]) == REG);
2216 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2218 /* No overlap between high target register and address
2219 register. (We do this in a non-obvious way to
2220 save a register file writeback) */
2221 if (GET_CODE (addr) == POST_INC)
2222 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2223 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2225 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2227 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2229 operands[0] = XEXP (addr, 0);
2230 gcc_assert (GET_CODE (operands[1]) == REG
2231 && GET_CODE (operands[0]) == REG);
2233 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2234 /* No overlap between high target register and address
2235 register. (We do this in a non-obvious way to save a
2236 register file writeback) */
2237 if (GET_CODE (addr) == PRE_INC)
2238 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2239 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2242 if (optype1 == MEMOP)
2244 /* We have to output the address syntax ourselves, since print_operand
2245 doesn't deal with the addresses we want to use. Fix this later. */
2247 rtx addr = XEXP (operands[1], 0);
2248 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2250 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2252 operands[1] = XEXP (addr, 0);
2253 gcc_assert (GET_CODE (operands[0]) == REG
2254 && GET_CODE (operands[1]) == REG);
2256 if (!reg_overlap_mentioned_p (high_reg, addr))
2258 /* No overlap between high target register and address
2259 register. (We do this in a non-obvious way to
2260 save a register file writeback) */
2261 if (GET_CODE (addr) == POST_INC)
2262 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2263 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2265 else
2267 /* This is an undefined situation. We should load into the
2268 address register *and* update that register. Probably
2269 we don't need to handle this at all. */
2270 if (GET_CODE (addr) == POST_INC)
2271 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2272 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2275 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2277 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2279 operands[1] = XEXP (addr, 0);
2280 gcc_assert (GET_CODE (operands[0]) == REG
2281 && GET_CODE (operands[1]) == REG);
2283 if (!reg_overlap_mentioned_p (high_reg, addr))
2285 /* No overlap between high target register and address
2286 register. (We do this in a non-obvious way to
2287 save a register file writeback) */
2288 if (GET_CODE (addr) == PRE_INC)
2289 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2290 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2292 else
2294 /* This is an undefined situation. We should load into the
2295 address register *and* update that register. Probably
2296 we don't need to handle this at all. */
2297 if (GET_CODE (addr) == PRE_INC)
2298 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2299 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2302 else if (GET_CODE (addr) == PLUS
2303 && GET_CODE (XEXP (addr, 0)) == MULT)
2305 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2307 if (!reg_overlap_mentioned_p (high_reg, addr))
2309 rtx xoperands[4];
2311 xoperands[0] = high_reg;
2312 xoperands[1] = XEXP (addr, 1);
2313 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2314 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2315 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2316 xoperands);
2317 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2319 else
2321 rtx xoperands[4];
2323 xoperands[0] = high_reg;
2324 xoperands[1] = XEXP (addr, 1);
2325 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2326 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2327 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2328 xoperands);
2329 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2334 /* If an operand is an unoffsettable memory ref, find a register
2335 we can increment temporarily to make it refer to the second word. */
2337 if (optype0 == MEMOP)
2338 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2340 if (optype1 == MEMOP)
2341 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2343 /* Ok, we can do one word at a time.
2344 Normally we do the low-numbered word first.
2346 In either case, set up in LATEHALF the operands to use
2347 for the high-numbered word and in some cases alter the
2348 operands in OPERANDS to be suitable for the low-numbered word. */
2350 if (optype0 == REGOP)
2351 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2352 else if (optype0 == OFFSOP)
2353 latehalf[0] = adjust_address (operands[0], SImode, 4);
2354 else
2355 latehalf[0] = operands[0];
2357 if (optype1 == REGOP)
2358 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2359 else if (optype1 == OFFSOP)
2360 latehalf[1] = adjust_address (operands[1], SImode, 4);
2361 else if (optype1 == CNSTOP)
2362 split_double (operands[1], &operands[1], &latehalf[1]);
2363 else
2364 latehalf[1] = operands[1];
2366 /* If the first move would clobber the source of the second one,
2367 do them in the other order.
2369 This can happen in two cases:
2371 mem -> register where the first half of the destination register
2372 is the same register used in the memory's address. Reload
2373 can create such insns.
2375 mem in this case will be either register indirect or register
2376 indirect plus a valid offset.
2378 register -> register move where REGNO(dst) == REGNO(src + 1)
2379 someone (Tim/Tege?) claimed this can happen for parameter loads.
2381 Handle mem -> register case first. */
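/* An illustrative example (not from the sources): copying the
   doubleword at 0(%r4) into %r4/%r5 must load the high-numbered
   word first,

       ldw 4(%r4),%r5
       ldw 0(%r4),%r4

   since loading the low word first would clobber the address.  */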
2382 if (optype0 == REGOP
2383 && (optype1 == MEMOP || optype1 == OFFSOP)
2384 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2385 operands[1], 0))
2387 /* Do the late half first. */
2388 if (addreg1)
2389 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2390 output_asm_insn (singlemove_string (latehalf), latehalf);
2392 /* Then clobber. */
2393 if (addreg1)
2394 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2395 return singlemove_string (operands);
2398 /* Now handle register -> register case. */
2399 if (optype0 == REGOP && optype1 == REGOP
2400 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2402 output_asm_insn (singlemove_string (latehalf), latehalf);
2403 return singlemove_string (operands);
2406 /* Normal case: do the two words, low-numbered first. */
2408 output_asm_insn (singlemove_string (operands), operands);
2410 /* Make any unoffsettable addresses point at high-numbered word. */
2411 if (addreg0)
2412 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2413 if (addreg1)
2414 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2416 /* Do that word. */
2417 output_asm_insn (singlemove_string (latehalf), latehalf);
2419 /* Undo the adds we just did. */
2420 if (addreg0)
2421 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2422 if (addreg1)
2423 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2425 return "";
2428 const char *
2429 output_fp_move_double (rtx *operands)
2431 if (FP_REG_P (operands[0]))
2433 if (FP_REG_P (operands[1])
2434 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2435 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2436 else
2437 output_asm_insn ("fldd%F1 %1,%0", operands);
2439 else if (FP_REG_P (operands[1]))
2441 output_asm_insn ("fstd%F0 %1,%0", operands);
2443 else
2445 rtx xoperands[2];
2447 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2449 /* This is a pain. You have to be prepared to deal with an
2450 arbitrary address here, including pre/post increment/decrement,
2452 so we avoid this situation in the MD. */
2453 gcc_assert (GET_CODE (operands[0]) == REG);
2455 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2456 xoperands[0] = operands[0];
2457 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2459 return "";
2462 /* Return a REG that occurs in ADDR with coefficient 1.
2463 ADDR can be effectively incremented by incrementing REG. */
2465 static rtx
2466 find_addr_reg (rtx addr)
2468 while (GET_CODE (addr) == PLUS)
2470 if (GET_CODE (XEXP (addr, 0)) == REG)
2471 addr = XEXP (addr, 0);
2472 else if (GET_CODE (XEXP (addr, 1)) == REG)
2473 addr = XEXP (addr, 1);
2474 else if (CONSTANT_P (XEXP (addr, 0)))
2475 addr = XEXP (addr, 1);
2476 else if (CONSTANT_P (XEXP (addr, 1)))
2477 addr = XEXP (addr, 0);
2478 else
2479 gcc_unreachable ();
2481 gcc_assert (GET_CODE (addr) == REG);
2482 return addr;
2485 /* Emit code to perform a block move.
2487 OPERANDS[0] is the destination pointer as a REG, clobbered.
2488 OPERANDS[1] is the source pointer as a REG, clobbered.
2489 OPERANDS[2] is a register for temporary storage.
2490 OPERANDS[3] is a register for temporary storage.
2491 OPERANDS[4] is the size as a CONST_INT
2492 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2493 OPERANDS[6] is another temporary register. */
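/* An illustrative example (not from the sources): with 4-byte
   alignment and a 16-byte copy, the case below emits roughly

       ldi 8,%cnt
       ldw,ma 4(%src),%t1
       ldw,ma 4(%src),%t2
       stw,ma %t1,4(%dst)
       addib,>= -8,%cnt,.-12
       stw,ma %t2,4(%dst)

   where %cnt, %src, %dst, %t1 and %t2 stand for the operands listed
   above; the loop runs twice and no residual code is needed.  */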
2495 const char *
2496 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2498 int align = INTVAL (operands[5]);
2499 unsigned long n_bytes = INTVAL (operands[4]);
2501 /* We can't move more than a word at a time because the PA
2502 has no integer move insns longer than a word. (Could use fp mem ops?) */
2503 if (align > (TARGET_64BIT ? 8 : 4))
2504 align = (TARGET_64BIT ? 8 : 4);
2506 /* Note that we know each loop below will execute at least twice
2507 (else we would have open-coded the copy). */
2508 switch (align)
2510 case 8:
2511 /* Pre-adjust the loop counter. */
2512 operands[4] = GEN_INT (n_bytes - 16);
2513 output_asm_insn ("ldi %4,%2", operands);
2515 /* Copying loop. */
2516 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2517 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2518 output_asm_insn ("std,ma %3,8(%0)", operands);
2519 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2520 output_asm_insn ("std,ma %6,8(%0)", operands);
2522 /* Handle the residual. There could be up to 15 bytes of
2523 residual to copy! */
2524 if (n_bytes % 16 != 0)
2526 operands[4] = GEN_INT (n_bytes % 8);
2527 if (n_bytes % 16 >= 8)
2528 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2529 if (n_bytes % 8 != 0)
2530 output_asm_insn ("ldd 0(%1),%6", operands);
2531 if (n_bytes % 16 >= 8)
2532 output_asm_insn ("std,ma %3,8(%0)", operands);
2533 if (n_bytes % 8 != 0)
2534 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2536 return "";
2538 case 4:
2539 /* Pre-adjust the loop counter. */
2540 operands[4] = GEN_INT (n_bytes - 8);
2541 output_asm_insn ("ldi %4,%2", operands);
2543 /* Copying loop. */
2544 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2545 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2546 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2547 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2548 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2550 /* Handle the residual. There could be up to 7 bytes of
2551 residual to copy! */
2552 if (n_bytes % 8 != 0)
2554 operands[4] = GEN_INT (n_bytes % 4);
2555 if (n_bytes % 8 >= 4)
2556 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2557 if (n_bytes % 4 != 0)
2558 output_asm_insn ("ldw 0(%1),%6", operands);
2559 if (n_bytes % 8 >= 4)
2560 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2561 if (n_bytes % 4 != 0)
2562 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2564 return "";
2566 case 2:
2567 /* Pre-adjust the loop counter. */
2568 operands[4] = GEN_INT (n_bytes - 4);
2569 output_asm_insn ("ldi %4,%2", operands);
2571 /* Copying loop. */
2572 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2573 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2574 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2575 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2576 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2578 /* Handle the residual. */
2579 if (n_bytes % 4 != 0)
2581 if (n_bytes % 4 >= 2)
2582 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2583 if (n_bytes % 2 != 0)
2584 output_asm_insn ("ldb 0(%1),%6", operands);
2585 if (n_bytes % 4 >= 2)
2586 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2587 if (n_bytes % 2 != 0)
2588 output_asm_insn ("stb %6,0(%0)", operands);
2590 return "";
2592 case 1:
2593 /* Pre-adjust the loop counter. */
2594 operands[4] = GEN_INT (n_bytes - 2);
2595 output_asm_insn ("ldi %4,%2", operands);
2597 /* Copying loop. */
2598 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2599 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2600 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2601 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2602 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2604 /* Handle the residual. */
2605 if (n_bytes % 2 != 0)
2607 output_asm_insn ("ldb 0(%1),%3", operands);
2608 output_asm_insn ("stb %3,0(%0)", operands);
2610 return "";
2612 default:
2613 gcc_unreachable ();
2617 /* Count the number of insns necessary to handle this block move.
2619 Basic structure is the same as emit_block_move, except that we
2620 count insns rather than emit them. */
2622 static int
2623 compute_movmem_length (rtx insn)
2625 rtx pat = PATTERN (insn);
2626 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2627 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2628 unsigned int n_insns = 0;
2630 /* We can't move more than a word at a time because the PA
2631 has no integer move insns longer than a word. (Could use fp mem ops?) */
2632 if (align > (TARGET_64BIT ? 8 : 4))
2633 align = (TARGET_64BIT ? 8 : 4);
2635 /* The basic copying loop. */
2636 n_insns = 6;
2638 /* Residuals. */
2639 if (n_bytes % (2 * align) != 0)
2641 if ((n_bytes % (2 * align)) >= align)
2642 n_insns += 2;
2644 if ((n_bytes % align) != 0)
2645 n_insns += 2;
2648 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2649 return n_insns * 4;
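/* An illustrative example (not from the sources): for a 22-byte move
   with 4-byte alignment, the basic loop accounts for 6 insns, the
   4-byte residual (22 % 8 >= 4) adds 2, the trailing 2 bytes
   (22 % 4 != 0) add 2 more, so the length is 10 insns = 40 bytes.  */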
2652 /* Emit code to perform a block clear.
2654 OPERANDS[0] is the destination pointer as a REG, clobbered.
2655 OPERANDS[1] is a register for temporary storage.
2656 OPERANDS[2] is the size as a CONST_INT
2657 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2659 const char *
2660 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2662 int align = INTVAL (operands[3]);
2663 unsigned long n_bytes = INTVAL (operands[2]);
2665 /* We can't clear more than a word at a time because the PA
2666 has no integer move insns longer than a word. */
2667 if (align > (TARGET_64BIT ? 8 : 4))
2668 align = (TARGET_64BIT ? 8 : 4);
2670 /* Note that we know each loop below will execute at least twice
2671 (else we would have open-coded the clear). */
2672 switch (align)
2674 case 8:
2675 /* Pre-adjust the loop counter. */
2676 operands[2] = GEN_INT (n_bytes - 16);
2677 output_asm_insn ("ldi %2,%1", operands);
2679 /* Loop. */
2680 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2681 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2682 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2684 /* Handle the residual. There could be up to 15 bytes of
2685 residual to clear! */
2686 if (n_bytes % 16 != 0)
2688 operands[2] = GEN_INT (n_bytes % 8);
2689 if (n_bytes % 16 >= 8)
2690 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2691 if (n_bytes % 8 != 0)
2692 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2694 return "";
2696 case 4:
2697 /* Pre-adjust the loop counter. */
2698 operands[2] = GEN_INT (n_bytes - 8);
2699 output_asm_insn ("ldi %2,%1", operands);
2701 /* Loop. */
2702 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2703 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2704 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2706 /* Handle the residual. There could be up to 7 bytes of
2707 residual to clear! */
2708 if (n_bytes % 8 != 0)
2710 operands[2] = GEN_INT (n_bytes % 4);
2711 if (n_bytes % 8 >= 4)
2712 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2713 if (n_bytes % 4 != 0)
2714 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2716 return "";
2718 case 2:
2719 /* Pre-adjust the loop counter. */
2720 operands[2] = GEN_INT (n_bytes - 4);
2721 output_asm_insn ("ldi %2,%1", operands);
2723 /* Loop. */
2724 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2725 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2726 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2728 /* Handle the residual. */
2729 if (n_bytes % 4 != 0)
2731 if (n_bytes % 4 >= 2)
2732 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2733 if (n_bytes % 2 != 0)
2734 output_asm_insn ("stb %%r0,0(%0)", operands);
2736 return "";
2738 case 1:
2739 /* Pre-adjust the loop counter. */
2740 operands[2] = GEN_INT (n_bytes - 2);
2741 output_asm_insn ("ldi %2,%1", operands);
2743 /* Loop. */
2744 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2745 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2746 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2748 /* Handle the residual. */
2749 if (n_bytes % 2 != 0)
2750 output_asm_insn ("stb %%r0,0(%0)", operands);
2752 return "";
2754 default:
2755 gcc_unreachable ();
2759 /* Count the number of insns necessary to handle this block clear.
2761 Basic structure is the same as emit_block_move, except that we
2762 count insns rather than emit them. */
2764 static int
2765 compute_clrmem_length (rtx insn)
2767 rtx pat = PATTERN (insn);
2768 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2769 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2770 unsigned int n_insns = 0;
2772 /* We can't clear more than a word at a time because the PA
2773 has no integer move insns longer than a word. */
2774 if (align > (TARGET_64BIT ? 8 : 4))
2775 align = (TARGET_64BIT ? 8 : 4);
2777 /* The basic loop. */
2778 n_insns = 4;
2780 /* Residuals. */
2781 if (n_bytes % (2 * align) != 0)
2783 if ((n_bytes % (2 * align)) >= align)
2784 n_insns++;
2786 if ((n_bytes % align) != 0)
2787 n_insns++;
2790 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2791 return n_insns * 4;
2795 const char *
2796 output_and (rtx *operands)
2798 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2800 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2801 int ls0, ls1, ms0, p, len;
2803 for (ls0 = 0; ls0 < 32; ls0++)
2804 if ((mask & (1 << ls0)) == 0)
2805 break;
2807 for (ls1 = ls0; ls1 < 32; ls1++)
2808 if ((mask & (1 << ls1)) != 0)
2809 break;
2811 for (ms0 = ls1; ms0 < 32; ms0++)
2812 if ((mask & (1 << ms0)) == 0)
2813 break;
2815 gcc_assert (ms0 == 32);
2817 if (ls1 == 32)
2819 len = ls0;
2821 gcc_assert (len);
2823 operands[2] = GEN_INT (len);
2824 return "{extru|extrw,u} %1,31,%2,%0";
2826 else
2828 /* We could use this `depi' for the case above as well, but `depi'
2829 requires one more register file access than an `extru'. */
2831 p = 31 - ls0;
2832 len = ls1 - ls0;
2834 operands[2] = GEN_INT (p);
2835 operands[3] = GEN_INT (len);
2836 return "{depi|depwi} 0,%2,%3,%0";
2839 else
2840 return "and %1,%2,%0";
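/* Illustrative examples (not from the sources): a low-order mask such
   as 0x7fffffff (ls1 == 32) becomes "extru %1,31,31,%0", while a
   notched mask such as 0xffff00ff clears its zero field with
   "depi 0,23,8,%0" (p = 31 - 8, len = 16 - 8).  */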
2843 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2844 storing the result in operands[0]. */
2845 const char *
2846 output_64bit_and (rtx *operands)
2848 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2850 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2851 int ls0, ls1, ms0, p, len;
2853 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2854 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2855 break;
2857 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2858 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2859 break;
2861 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2862 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2863 break;
2865 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2867 if (ls1 == HOST_BITS_PER_WIDE_INT)
2869 len = ls0;
2871 gcc_assert (len);
2873 operands[2] = GEN_INT (len);
2874 return "extrd,u %1,63,%2,%0";
2876 else
2878 /* We could use this `depdi' for the case above as well, but `depdi'
2879 requires one more register file access than an `extrd,u'. */
2881 p = 63 - ls0;
2882 len = ls1 - ls0;
2884 operands[2] = GEN_INT (p);
2885 operands[3] = GEN_INT (len);
2886 return "depdi 0,%2,%3,%0";
2889 else
2890 return "and %1,%2,%0";
2893 const char *
2894 output_ior (rtx *operands)
2896 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2897 int bs0, bs1, p, len;
2899 if (INTVAL (operands[2]) == 0)
2900 return "copy %1,%0";
2902 for (bs0 = 0; bs0 < 32; bs0++)
2903 if ((mask & (1 << bs0)) != 0)
2904 break;
2906 for (bs1 = bs0; bs1 < 32; bs1++)
2907 if ((mask & (1 << bs1)) == 0)
2908 break;
2910 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2912 p = 31 - bs0;
2913 len = bs1 - bs0;
2915 operands[2] = GEN_INT (p);
2916 operands[3] = GEN_INT (len);
2917 return "{depi|depwi} -1,%2,%3,%0";
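/* An illustrative example (not from the sources): for the contiguous
   mask 0x00000ff0, bs0 = 4 and bs1 = 12, so the insn emitted is
   "depi -1,27,8,%0" (p = 31 - 4, len = 12 - 4).  */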
2920 /* Return a string to perform a bitwise inclusive-or of operands[1] with operands[2]
2921 storing the result in operands[0]. */
2922 const char *
2923 output_64bit_ior (rtx *operands)
2925 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2926 int bs0, bs1, p, len;
2928 if (INTVAL (operands[2]) == 0)
2929 return "copy %1,%0";
2931 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
2932 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
2933 break;
2935 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
2936 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
2937 break;
2939 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
2940 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2942 p = 63 - bs0;
2943 len = bs1 - bs0;
2945 operands[2] = GEN_INT (p);
2946 operands[3] = GEN_INT (len);
2947 return "depdi -1,%2,%3,%0";
2950 /* Target hook for assembling integer objects. This code handles
2951 aligned SI and DI integers specially since function references
2952 must be preceded by P%. */
2954 static bool
2955 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
2957 if (size == UNITS_PER_WORD
2958 && aligned_p
2959 && function_label_operand (x, VOIDmode))
2961 fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
2962 output_addr_const (asm_out_file, x);
2963 fputc ('\n', asm_out_file);
2964 return true;
2966 return default_assemble_integer (x, size, aligned_p);
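/* For example, an aligned word-sized reference to a function foo
   (name chosen for illustration) is emitted as ".word P%foo"
   (".dword P%foo" when the word size is 8 bytes); everything else
   falls through to the default hook.  */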
2969 /* Output an ascii string. */
2970 void
2971 output_ascii (FILE *file, const char *p, int size)
2973 int i;
2974 int chars_output;
2975 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
2977 /* The HP assembler can only take strings of 256 characters at one
2978 time. This is a limitation on input line length, *not* the
2979 length of the string. Sigh. Even worse, it seems that the
2980 restriction is in number of input characters (see \xnn &
2981 \whatever). So we have to do this very carefully. */
2983 fputs ("\t.STRING \"", file);
2985 chars_output = 0;
2986 for (i = 0; i < size; i += 4)
2988 int co = 0;
2989 int io = 0;
2990 for (io = 0, co = 0; io < MIN (4, size - i); io++)
2992 register unsigned int c = (unsigned char) p[i + io];
2994 if (c == '\"' || c == '\\')
2995 partial_output[co++] = '\\';
2996 if (c >= ' ' && c < 0177)
2997 partial_output[co++] = c;
2998 else
3000 unsigned int hexd;
3001 partial_output[co++] = '\\';
3002 partial_output[co++] = 'x';
3003 hexd = c / 16 + '0';
3004 if (hexd > '9')
3005 hexd -= '9' - 'a' + 1;
3006 partial_output[co++] = hexd;
3007 hexd = c % 16 + '0';
3008 if (hexd > '9')
3009 hexd -= '9' - 'a' + 1;
3010 partial_output[co++] = hexd;
3013 if (chars_output + co > 243)
3015 fputs ("\"\n\t.STRING \"", file);
3016 chars_output = 0;
3018 fwrite (partial_output, 1, (size_t) co, file);
3019 chars_output += co;
3020 co = 0;
3022 fputs ("\"\n", file);
3025 /* Try to rewrite floating point comparisons & branches to avoid
3026 useless add,tr insns.
3028 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3029 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3030 first attempt to remove useless add,tr insns. It is zero
3031 for the second pass as reorg sometimes leaves bogus REG_DEAD
3032 notes lying around.
3034 When CHECK_NOTES is zero we can only eliminate add,tr insns
3035 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3036 instructions. */
3037 static void
3038 remove_useless_addtr_insns (int check_notes)
3040 rtx insn;
3041 static int pass = 0;
3043 /* This is fairly cheap, so always run it when optimizing. */
3044 if (optimize > 0)
3046 int fcmp_count = 0;
3047 int fbranch_count = 0;
3049 /* Walk all the insns in this function looking for fcmp & fbranch
3050 instructions. Keep track of how many of each we find. */
3051 for (insn = get_insns (); insn; insn = next_insn (insn))
3053 rtx tmp;
3055 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3056 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3057 continue;
3059 tmp = PATTERN (insn);
3061 /* It must be a set. */
3062 if (GET_CODE (tmp) != SET)
3063 continue;
3065 /* If the destination is CCFP, then we've found an fcmp insn. */
3066 tmp = SET_DEST (tmp);
3067 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3069 fcmp_count++;
3070 continue;
3073 tmp = PATTERN (insn);
3074 /* If this is an fbranch instruction, bump the fbranch counter. */
3075 if (GET_CODE (tmp) == SET
3076 && SET_DEST (tmp) == pc_rtx
3077 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3078 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3079 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3080 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3082 fbranch_count++;
3083 continue;
3088 /* Find all floating point compare + branch insns. If possible,
3089 reverse the comparison & the branch to avoid add,tr insns. */
3090 for (insn = get_insns (); insn; insn = next_insn (insn))
3092 rtx tmp, next;
3094 /* Ignore anything that isn't an INSN. */
3095 if (GET_CODE (insn) != INSN)
3096 continue;
3098 tmp = PATTERN (insn);
3100 /* It must be a set. */
3101 if (GET_CODE (tmp) != SET)
3102 continue;
3104 /* The destination must be CCFP, which is register zero. */
3105 tmp = SET_DEST (tmp);
3106 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3107 continue;
3109 /* INSN should be a set of CCFP.
3111 See if the result of this insn is used in a reversed FP
3112 conditional branch. If so, reverse our condition and
3113 the branch. Doing so avoids useless add,tr insns. */
3114 next = next_insn (insn);
3115 while (next)
3117 /* Jumps, calls and labels stop our search. */
3118 if (GET_CODE (next) == JUMP_INSN
3119 || GET_CODE (next) == CALL_INSN
3120 || GET_CODE (next) == CODE_LABEL)
3121 break;
3123 /* As does another fcmp insn. */
3124 if (GET_CODE (next) == INSN
3125 && GET_CODE (PATTERN (next)) == SET
3126 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3127 && REGNO (SET_DEST (PATTERN (next))) == 0)
3128 break;
3130 next = next_insn (next);
3133 /* Is NEXT_INSN a branch? */
3134 if (next
3135 && GET_CODE (next) == JUMP_INSN)
3137 rtx pattern = PATTERN (next);
3139 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3140 and CCFP dies, then reverse our conditional and the branch
3141 to avoid the add,tr. */
3142 if (GET_CODE (pattern) == SET
3143 && SET_DEST (pattern) == pc_rtx
3144 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3145 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3146 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3147 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3148 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3149 && (fcmp_count == fbranch_count
3150 || (check_notes
3151 && find_regno_note (next, REG_DEAD, 0))))
3153 /* Reverse the branch. */
3154 tmp = XEXP (SET_SRC (pattern), 1);
3155 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3156 XEXP (SET_SRC (pattern), 2) = tmp;
3157 INSN_CODE (next) = -1;
3159 /* Reverse our condition. */
3160 tmp = PATTERN (insn);
3161 PUT_CODE (XEXP (tmp, 1),
3162 (reverse_condition_maybe_unordered
3163 (GET_CODE (XEXP (tmp, 1)))));
3169 pass = !pass;
3173 /* You may have trouble believing this, but this is the 32 bit HP-PA
3174 stack layout. Wow.
3176 Offset Contents
3178 Variable arguments (optional; any number may be allocated)
3180 SP-(4*(N+9)) arg word N
3182 SP-56 arg word 5
3183 SP-52 arg word 4
3185 Fixed arguments (must be allocated; may remain unused)
3187 SP-48 arg word 3
3188 SP-44 arg word 2
3189 SP-40 arg word 1
3190 SP-36 arg word 0
3192 Frame Marker
3194 SP-32 External Data Pointer (DP)
3195 SP-28 External sr4
3196 SP-24 External/stub RP (RP')
3197 SP-20 Current RP
3198 SP-16 Static Link
3199 SP-12 Clean up
3200 SP-8 Calling Stub RP (RP'')
3201 SP-4 Previous SP
3203 Top of Frame
3205 SP-0 Stack Pointer (points to next available address)
3209 /* This function saves registers as follows. Registers marked with ' are
3210 this function's registers (as opposed to the previous function's).
3211 If a frame_pointer isn't needed, r4 is saved as a general register;
3212 the space for the frame pointer is still allocated, though, to keep
3213 things simple.
3216 Top of Frame
3218 SP (FP') Previous FP
3219 SP + 4 Alignment filler (sigh)
3220 SP + 8 Space for locals reserved here.
3224 SP + n All call saved registers used.
3228 SP + o All call saved fp registers used.
3232 SP + p (SP') points to next available address.
3236 /* Global variables set by output_function_prologue(). */
3237 /* Size of frame. Need to know this to emit return insns from
3238 leaf procedures. */
3239 static HOST_WIDE_INT actual_fsize, local_fsize;
3240 static int save_fregs;
3242 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3243 Handle case where DISP > 8k by using the add_high_const patterns.
3245 Note that in the DISP > 8k case, we will leave the high part of the
3246 address in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
3248 static void
3249 store_reg (int reg, HOST_WIDE_INT disp, int base)
3251 rtx insn, dest, src, basereg;
3253 src = gen_rtx_REG (word_mode, reg);
3254 basereg = gen_rtx_REG (Pmode, base);
3255 if (VAL_14_BITS_P (disp))
3257 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3258 insn = emit_move_insn (dest, src);
3260 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3262 rtx delta = GEN_INT (disp);
3263 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3265 emit_move_insn (tmpreg, delta);
3266 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3267 dest = gen_rtx_MEM (word_mode, tmpreg);
3268 insn = emit_move_insn (dest, src);
3269 if (DO_FRAME_NOTES)
3271 REG_NOTES (insn)
3272 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3273 gen_rtx_SET (VOIDmode,
3274 gen_rtx_MEM (word_mode,
3275 gen_rtx_PLUS (word_mode, basereg,
3276 delta)),
3277 src),
3278 REG_NOTES (insn));
3281 else
3283 rtx delta = GEN_INT (disp);
3284 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3285 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3287 emit_move_insn (tmpreg, high);
3288 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3289 insn = emit_move_insn (dest, src);
3290 if (DO_FRAME_NOTES)
3292 REG_NOTES (insn)
3293 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3294 gen_rtx_SET (VOIDmode,
3295 gen_rtx_MEM (word_mode,
3296 gen_rtx_PLUS (word_mode, basereg,
3297 delta)),
3298 src),
3299 REG_NOTES (insn));
3303 if (DO_FRAME_NOTES)
3304 RTX_FRAME_RELATED_P (insn) = 1;
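/* An illustrative example (not from the sources): for DISP = 0x5000,
   which does not fit in 14 bits, the 32-bit path above emits the
   equivalent of

       addil L'0x5000,%base
       stw %reg,R'0x5000(%r1)

   leaving the high part of the address in %r1 as noted above.  */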
3307 /* Emit RTL to store REG at the memory location specified by BASE and then
3308 add MOD to BASE. MOD must be <= 8k. */
3310 static void
3311 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3313 rtx insn, basereg, srcreg, delta;
3315 gcc_assert (VAL_14_BITS_P (mod));
3317 basereg = gen_rtx_REG (Pmode, base);
3318 srcreg = gen_rtx_REG (word_mode, reg);
3319 delta = GEN_INT (mod);
3321 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3322 if (DO_FRAME_NOTES)
3324 RTX_FRAME_RELATED_P (insn) = 1;
3326 /* RTX_FRAME_RELATED_P must be set on each frame related set
3327 in a parallel with more than one element. Don't set
3328 RTX_FRAME_RELATED_P in the first set if reg is temporary
3329 register 1. The effect of this operation is recorded in
3330 the initial copy. */
3331 if (reg != 1)
3333 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3334 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3336 else
3338 /* The first element of a PARALLEL is always processed if it is
3339 a SET. Thus, we need an expression list for this case. */
3340 REG_NOTES (insn)
3341 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3342 gen_rtx_SET (VOIDmode, basereg,
3343 gen_rtx_PLUS (word_mode, basereg, delta)),
3344 REG_NOTES (insn));
3349 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3350 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3351 whether to add a frame note or not.
3353 In the DISP > 8k case, we leave the high part of the address in %r1.
3354 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3356 static void
3357 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3359 rtx insn;
3361 if (VAL_14_BITS_P (disp))
3363 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3364 plus_constant (gen_rtx_REG (Pmode, base), disp));
3366 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3368 rtx basereg = gen_rtx_REG (Pmode, base);
3369 rtx delta = GEN_INT (disp);
3370 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3372 emit_move_insn (tmpreg, delta);
3373 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3374 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3376 else
3378 rtx basereg = gen_rtx_REG (Pmode, base);
3379 rtx delta = GEN_INT (disp);
3380 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3382 emit_move_insn (tmpreg,
3383 gen_rtx_PLUS (Pmode, basereg,
3384 gen_rtx_HIGH (Pmode, delta)));
3385 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3386 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3389 if (DO_FRAME_NOTES && note)
3390 RTX_FRAME_RELATED_P (insn) = 1;
3393 HOST_WIDE_INT
3394 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3396 int freg_saved = 0;
3397 int i, j;
3399 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3400 be consistent with the rounding and size calculation done here.
3401 Change them at the same time. */
3403 /* We do our own stack alignment. First, round the size of the
3404 stack locals up to a word boundary. */
3405 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3407 /* Space for previous frame pointer + filler. If any frame is
3408 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3409 waste some space here for the sake of HP compatibility. The
3410 first slot is only used when the frame pointer is needed. */
3411 if (size || frame_pointer_needed)
3412 size += STARTING_FRAME_OFFSET;
3414 /* If the current function calls __builtin_eh_return, then we need
3415 to allocate stack space for registers that will hold data for
3416 the exception handler. */
3417 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3419 unsigned int i;
3421 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3422 continue;
3423 size += i * UNITS_PER_WORD;
3426 /* Account for space used by the callee general register saves. */
3427 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3428 if (regs_ever_live[i])
3429 size += UNITS_PER_WORD;
3431 /* Account for space used by the callee floating point register saves. */
3432 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3433 if (regs_ever_live[i]
3434 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3436 freg_saved = 1;
3438 /* We always save both halves of the FP register, so always
3439 increment the frame size by 8 bytes. */
3440 size += 8;
3443 /* If any of the floating registers are saved, account for the
3444 alignment needed for the floating point register save block. */
3445 if (freg_saved)
3447 size = (size + 7) & ~7;
3448 if (fregs_live)
3449 *fregs_live = 1;
3452 /* The various ABIs include space for the outgoing parameters in the
3453 size of the current function's stack frame. We don't need to align
3454 for the outgoing arguments as their alignment is set by the final
3455 rounding for the frame as a whole. */
3456 size += current_function_outgoing_args_size;
3458 /* Allocate space for the fixed frame marker. This space must be
3459 allocated for any function that makes calls or allocates
3460 stack space. */
3461 if (!current_function_is_leaf || size)
3462 size += TARGET_64BIT ? 48 : 32;
3464 /* Finally, round to the preferred stack boundary. */
3465 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3466 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
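/* An illustrative walk-through (not from the sources, 32-bit
   assumptions): 100 bytes of locals round up to 104 (word boundary),
   STARTING_FRAME_OFFSET and any register-save and outgoing-argument
   space are added, a 32-byte frame marker is appended for a non-leaf
   function, and the total is then rounded up to the (typically
   64-byte) preferred stack boundary.  */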
3469 /* Generate the assembly code for function entry. FILE is a stdio
3470 stream to output the code to. SIZE is an int: how many units of
3471 temporary storage to allocate.
3473 Refer to the array `regs_ever_live' to determine which registers to
3474 save; `regs_ever_live[I]' is nonzero if register number I is ever
3475 used in the function. This function is responsible for knowing
3476 which registers should not be saved even if used. */
3478 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3479 of memory. If any fpu reg is used in the function, we allocate
3480 such a block here, at the bottom of the frame, just in case it's needed.
3482 If this function is a leaf procedure, then we may choose not
3483 to do a "save" insn. The decision about whether or not
3484 to do this is made in regclass.c. */
3486 static void
3487 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3489 /* The function's label and associated .PROC must never be
3490 separated and must be output *after* any profiling declarations
3491 to avoid changing spaces/subspaces within a procedure. */
3492 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3493 fputs ("\t.PROC\n", file);
3495 /* hppa_expand_prologue does the dirty work now. We just need
3496 to output the assembler directives which denote the start
3497 of a function. */
3498 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3499 if (regs_ever_live[2])
3500 fputs (",CALLS,SAVE_RP", file);
3501 else
3502 fputs (",NO_CALLS", file);
3504 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3505 at the beginning of the frame and that it is used as the frame
3506 pointer for the frame. We do this because our current frame
3507 layout doesn't conform to that specified in the HP runtime
3508 documentation and we need a way to indicate to programs such as
3509 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3510 isn't used by HP compilers but is supported by the assembler.
3511 However, SAVE_SP is supposed to indicate that the previous stack
3512 pointer has been saved in the frame marker. */
3513 if (frame_pointer_needed)
3514 fputs (",SAVE_SP", file);
3516 /* Pass on information about the number of callee register saves
3517 performed in the prologue.
3519 The compiler is supposed to pass the highest register number
3520 saved, the assembler then has to adjust that number before
3521 entering it into the unwind descriptor (to account for any
3522 caller saved registers with lower register numbers than the
3523 first callee saved register). */
3524 if (gr_saved)
3525 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3527 if (fr_saved)
3528 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3530 fputs ("\n\t.ENTRY\n", file);
3532 remove_useless_addtr_insns (0);
3535 void
3536 hppa_expand_prologue (void)
3538 int merge_sp_adjust_with_store = 0;
3539 HOST_WIDE_INT size = get_frame_size ();
3540 HOST_WIDE_INT offset;
3541 int i;
3542 rtx insn, tmpreg;
3544 gr_saved = 0;
3545 fr_saved = 0;
3546 save_fregs = 0;
3548 /* Compute total size for frame pointer, filler, locals and rounding to
3549 the next word boundary. Similar code appears in compute_frame_size
3550 and must be changed in tandem with this code. */
3551 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3552 if (local_fsize || frame_pointer_needed)
3553 local_fsize += STARTING_FRAME_OFFSET;
3555 actual_fsize = compute_frame_size (size, &save_fregs);
3557 /* Compute a few things we will use often. */
3558 tmpreg = gen_rtx_REG (word_mode, 1);
3560 /* Save RP first. The calling conventions manual states RP will
3561 always be stored into the caller's frame at sp - 20 or sp - 16
3562 depending on which ABI is in use. */
3563 if (regs_ever_live[2] || current_function_calls_eh_return)
3564 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3566 /* Allocate the local frame and set up the frame pointer if needed. */
3567 if (actual_fsize != 0)
3569 if (frame_pointer_needed)
3571 /* Copy the old frame pointer temporarily into %r1. Set up the
3572 new stack pointer, then store away the saved old frame pointer
3573 into the stack at sp and at the same time update the stack
3574 pointer by actual_fsize bytes. Two versions, first
3575 handles small (<8k) frames. The second handles large (>=8k)
3576 frames. */
3577 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3578 if (DO_FRAME_NOTES)
3580 /* We need to record the frame pointer save here since the
3581 new frame pointer is set in the following insn. */
3582 RTX_FRAME_RELATED_P (insn) = 1;
3583 REG_NOTES (insn)
3584 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3585 gen_rtx_SET (VOIDmode,
3586 gen_rtx_MEM (word_mode, stack_pointer_rtx),
3587 frame_pointer_rtx),
3588 REG_NOTES (insn));
3591 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3592 if (DO_FRAME_NOTES)
3593 RTX_FRAME_RELATED_P (insn) = 1;
3595 if (VAL_14_BITS_P (actual_fsize))
3596 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3597 else
3599 /* It is incorrect to store the saved frame pointer at *sp,
3600 then increment sp (writes beyond the current stack boundary).
3602 So instead use stwm to store at *sp and post-increment the
3603 stack pointer as an atomic operation. Then increment sp to
3604 finish allocating the new frame. */
3605 HOST_WIDE_INT adjust1 = 8192 - 64;
3606 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3608 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3609 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3610 adjust2, 1);
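/* An illustrative example (not from the sources): for
   actual_fsize = 20000, adjust1 = 8128 is applied with the atomic
   store-and-modify above and the remaining adjust2 = 11872 with
   set_reg_plus_d, so the store never writes beyond the current
   stack boundary.  */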
3613 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3614 we need to store the previous stack pointer (frame pointer)
3615 into the frame marker on targets that use the HP unwind
3616 library. This allows the HP unwind library to be used to
3617 unwind GCC frames. However, we are not fully compatible
3618 with the HP library because our frame layout differs from
3619 that specified in the HP runtime specification.
3621 We don't want a frame note on this instruction as the frame
3622 marker moves during dynamic stack allocation.
3624 This instruction also serves as a blockage to prevent
3625 register spills from being scheduled before the stack
3626 pointer is raised. This is necessary as we store
3627 registers using the frame pointer as a base register,
3628 and the frame pointer is set before sp is raised. */
3629 if (TARGET_HPUX_UNWIND_LIBRARY)
3631 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3632 GEN_INT (TARGET_64BIT ? -8 : -4));
3634 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3635 frame_pointer_rtx);
3637 else
3638 emit_insn (gen_blockage ());
3640 /* no frame pointer needed. */
3641 else
3643 /* In some cases we can perform the first callee register save
3644 and allocating the stack frame at the same time. If so, just
3645 make a note of it and defer allocating the frame until saving
3646 the callee registers. */
3647 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3648 merge_sp_adjust_with_store = 1;
3649 /* Cannot optimize. Adjust the stack frame by actual_fsize
3650 bytes. */
3651 else
3652 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3653 actual_fsize, 1);
3657 /* Normal register save.
3659 Do not save the frame pointer in the frame_pointer_needed case. It
3660 was done earlier. */
3661 if (frame_pointer_needed)
3663 offset = local_fsize;
3665 /* Saving the EH return data registers in the frame is the simplest
3666 way to get the frame unwind information emitted. We put them
3667 just before the general registers. */
3668 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3670 unsigned int i, regno;
3672 for (i = 0; ; ++i)
3674 regno = EH_RETURN_DATA_REGNO (i);
3675 if (regno == INVALID_REGNUM)
3676 break;
3678 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3679 offset += UNITS_PER_WORD;
3683 for (i = 18; i >= 4; i--)
3684 if (regs_ever_live[i] && ! call_used_regs[i])
3686 store_reg (i, offset, FRAME_POINTER_REGNUM);
3687 offset += UNITS_PER_WORD;
3688 gr_saved++;
3690 /* Account for %r3 which is saved in a special place. */
3691 gr_saved++;
3693 /* No frame pointer needed. */
3694 else
3696 offset = local_fsize - actual_fsize;
3698 /* Saving the EH return data registers in the frame is the simplest
3699 way to get the frame unwind information emitted. */
3700 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3702 unsigned int i, regno;
3704 for (i = 0; ; ++i)
3706 regno = EH_RETURN_DATA_REGNO (i);
3707 if (regno == INVALID_REGNUM)
3708 break;
3710 /* If merge_sp_adjust_with_store is nonzero, then we can
3711 optimize the first save. */
3712 if (merge_sp_adjust_with_store)
3714 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3715 merge_sp_adjust_with_store = 0;
3717 else
3718 store_reg (regno, offset, STACK_POINTER_REGNUM);
3719 offset += UNITS_PER_WORD;
3723 for (i = 18; i >= 3; i--)
3724 if (regs_ever_live[i] && ! call_used_regs[i])
3726 /* If merge_sp_adjust_with_store is nonzero, then we can
3727 optimize the first GR save. */
3728 if (merge_sp_adjust_with_store)
3730 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3731 merge_sp_adjust_with_store = 0;
3733 else
3734 store_reg (i, offset, STACK_POINTER_REGNUM);
3735 offset += UNITS_PER_WORD;
3736 gr_saved++;
3739 /* If we wanted to merge the SP adjustment with a GR save, but we never
3740 did any GR saves, then just emit the adjustment here. */
3741 if (merge_sp_adjust_with_store)
3742 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3743 actual_fsize, 1);
3746 /* The hppa calling conventions say that %r19, the pic offset
3747 register, is saved at sp - 32 (in this function's frame)
3748 when generating PIC code. FIXME: What is the correct thing
3749 to do for functions which make no calls and allocate no
3750 frame? Do we need to allocate a frame, or can we just omit
3751 the save? For now we'll just omit the save.
3753 We don't want a note on this insn as the frame marker can
3754 move if there is a dynamic stack allocation. */
3755 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3757 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3759 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3763 /* Align pointer properly (doubleword boundary). */
3764 offset = (offset + 7) & ~7;
3766 /* Floating point register store. */
3767 if (save_fregs)
3769 rtx base;
3771 /* First get the frame or stack pointer to the start of the FP register
3772 save area. */
3773 if (frame_pointer_needed)
3775 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3776 base = frame_pointer_rtx;
3778 else
3780 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3781 base = stack_pointer_rtx;
3784 /* Now actually save the FP registers. */
3785 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3787 if (regs_ever_live[i]
3788 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3790 rtx addr, insn, reg;
3791 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3792 reg = gen_rtx_REG (DFmode, i);
3793 insn = emit_move_insn (addr, reg);
3794 if (DO_FRAME_NOTES)
3796 RTX_FRAME_RELATED_P (insn) = 1;
3797 if (TARGET_64BIT)
3799 rtx mem = gen_rtx_MEM (DFmode,
3800 plus_constant (base, offset));
3801 REG_NOTES (insn)
3802 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3803 gen_rtx_SET (VOIDmode, mem, reg),
3804 REG_NOTES (insn));
3806 else
3808 rtx meml = gen_rtx_MEM (SFmode,
3809 plus_constant (base, offset));
3810 rtx memr = gen_rtx_MEM (SFmode,
3811 plus_constant (base, offset + 4));
3812 rtx regl = gen_rtx_REG (SFmode, i);
3813 rtx regr = gen_rtx_REG (SFmode, i + 1);
3814 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3815 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3816 rtvec vec;
3818 RTX_FRAME_RELATED_P (setl) = 1;
3819 RTX_FRAME_RELATED_P (setr) = 1;
3820 vec = gen_rtvec (2, setl, setr);
3821 REG_NOTES (insn)
3822 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3823 gen_rtx_SEQUENCE (VOIDmode, vec),
3824 REG_NOTES (insn));
3827 offset += GET_MODE_SIZE (DFmode);
3828 fr_saved++;
3834 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3835 Handle case where DISP > 8k by using the add_high_const patterns. */
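/* Sketch of the three cases below (the instruction pairing is an
   assumption, not taken from this file): a displacement that fits in
   14 bits loads directly; a 64-bit displacement wider than 32 bits is
   materialized in %r1 first; otherwise the HIGH/LO_SUM pair typically
   assembles to an addil followed by the load, again using %r1. */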
3837 static void
3838 load_reg (int reg, HOST_WIDE_INT disp, int base)
3840 rtx dest = gen_rtx_REG (word_mode, reg);
3841 rtx basereg = gen_rtx_REG (Pmode, base);
3842 rtx src;
3844 if (VAL_14_BITS_P (disp))
3845 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3846 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3848 rtx delta = GEN_INT (disp);
3849 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3851 emit_move_insn (tmpreg, delta);
3852 if (TARGET_DISABLE_INDEXING)
3854 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3855 src = gen_rtx_MEM (word_mode, tmpreg);
3857 else
3858 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3860 else
3862 rtx delta = GEN_INT (disp);
3863 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3864 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3866 emit_move_insn (tmpreg, high);
3867 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3870 emit_move_insn (dest, src);
3873 /* Update the total code bytes output to the text section. */
3875 static void
3876 update_total_code_bytes (int nbytes)
3878 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3879 && !IN_NAMED_SECTION_P (cfun->decl))
3881 if (INSN_ADDRESSES_SET_P ())
3883 unsigned long old_total = total_code_bytes;
3885 total_code_bytes += nbytes;
3887 /* Be prepared to handle overflows. */
3888 if (old_total > total_code_bytes)
3889 total_code_bytes = -1;
3891 else
3892 total_code_bytes = -1;
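/* Note (an inference from the code above): total_code_bytes is
   unsigned, so a wrapped sum makes old_total compare greater than the
   new total; -1 then acts as a saturating "size unknown" marker, just
   as it does when insn addresses are unavailable. */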
3896 /* This function generates the assembly code for function exit.
3897 Args are as for output_function_prologue ().
3899 The function epilogue should not depend on the current stack
3900 pointer! It should use the frame pointer only. This is mandatory
3901 because of alloca; we also take advantage of it to omit stack
3902 adjustments before returning. */
3904 static void
3905 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3907 rtx insn = get_last_insn ();
3909 last_address = 0;
3911 /* hppa_expand_epilogue does the dirty work now. We just need
3912 to output the assembler directives which denote the end
3913 of a function.
3915 To make debuggers happy, emit a nop if the epilogue was completely
3916 eliminated due to a volatile call as the last insn in the
3917 current function. That way the return address (in %r2) will
3918 always point to a valid instruction in the current function. */
3920 /* Get the last real insn. */
3921 if (GET_CODE (insn) == NOTE)
3922 insn = prev_real_insn (insn);
3924 /* If it is a sequence, then look inside. */
3925 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
3926 insn = XVECEXP (PATTERN (insn), 0, 0);
3928 /* If insn is a CALL_INSN, then it must be a call to a volatile
3929 function (otherwise there would be epilogue insns). */
3930 if (insn && GET_CODE (insn) == CALL_INSN)
3932 fputs ("\tnop\n", file);
3933 last_address += 4;
3936 fputs ("\t.EXIT\n\t.PROCEND\n", file);
3938 if (TARGET_SOM && TARGET_GAS)
3940 /* We're done with this subspace except possibly for some additional
3941 debug information. Forget that we are in this subspace to ensure
3942 that the next function is output in its own subspace. */

3943 forget_section ();
3946 if (INSN_ADDRESSES_SET_P ())
3948 insn = get_last_nonnote_insn ();
3949 last_address += INSN_ADDRESSES (INSN_UID (insn));
3950 if (INSN_P (insn))
3951 last_address += insn_default_length (insn);
3952 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
3953 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
3956 /* Finally, update the total number of code bytes output so far. */
3957 update_total_code_bytes (last_address);
3960 void
3961 hppa_expand_epilogue (void)
3963 rtx tmpreg;
3964 HOST_WIDE_INT offset;
3965 HOST_WIDE_INT ret_off = 0;
3966 int i;
3967 int merge_sp_adjust_with_load = 0;
3969 /* We will use this often. */
3970 tmpreg = gen_rtx_REG (word_mode, 1);
3972 /* Try to restore RP early to avoid load/use interlocks when
3973 RP gets used in the return (bv) instruction. This appears to still
3974 be necessary even when we schedule the prologue and epilogue. */
3975 if (regs_ever_live [2] || current_function_calls_eh_return)
3977 ret_off = TARGET_64BIT ? -16 : -20;
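/* These are the frame-marker slots for the return pointer: -16(sp) in
   the 64-bit runtime and -20(sp) in the 32-bit runtime, presumed (per
   the PA calling convention) to match the offsets used when RP was
   saved in the prologue. */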
3978 if (frame_pointer_needed)
3980 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
3981 ret_off = 0;
3983 else
3985 /* No frame pointer, and stack is smaller than 8k. */
3986 if (VAL_14_BITS_P (ret_off - actual_fsize))
3988 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
3989 ret_off = 0;
3994 /* General register restores. */
3995 if (frame_pointer_needed)
3997 offset = local_fsize;
3999 /* If the current function calls __builtin_eh_return, then we need
4000 to restore the saved EH data registers. */
4001 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4003 unsigned int i, regno;
4005 for (i = 0; ; ++i)
4007 regno = EH_RETURN_DATA_REGNO (i);
4008 if (regno == INVALID_REGNUM)
4009 break;
4011 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4012 offset += UNITS_PER_WORD;
4016 for (i = 18; i >= 4; i--)
4017 if (regs_ever_live[i] && ! call_used_regs[i])
4019 load_reg (i, offset, FRAME_POINTER_REGNUM);
4020 offset += UNITS_PER_WORD;
4023 else
4025 offset = local_fsize - actual_fsize;
4027 /* If the current function calls __builtin_eh_return, then we need
4028 to restore the saved EH data registers. */
4029 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4031 unsigned int i, regno;
4033 for (i = 0; ; ++i)
4035 regno = EH_RETURN_DATA_REGNO (i);
4036 if (regno == INVALID_REGNUM)
4037 break;
4039 /* Only for the first load.
4040 merge_sp_adjust_with_load holds the register load
4041 with which we will merge the sp adjustment. */
4042 if (merge_sp_adjust_with_load == 0
4043 && local_fsize == 0
4044 && VAL_14_BITS_P (-actual_fsize))
4045 merge_sp_adjust_with_load = regno;
4046 else
4047 load_reg (regno, offset, STACK_POINTER_REGNUM);
4048 offset += UNITS_PER_WORD;
4052 for (i = 18; i >= 3; i--)
4054 if (regs_ever_live[i] && ! call_used_regs[i])
4056 /* Only for the first load.
4057 merge_sp_adjust_with_load holds the register load
4058 with which we will merge the sp adjustment. */
4059 if (merge_sp_adjust_with_load == 0
4060 && local_fsize == 0
4061 && VAL_14_BITS_P (-actual_fsize))
4062 merge_sp_adjust_with_load = i;
4063 else
4064 load_reg (i, offset, STACK_POINTER_REGNUM);
4065 offset += UNITS_PER_WORD;
4070 /* Align pointer properly (doubleword boundary). */
4071 offset = (offset + 7) & ~7;
4073 /* FP register restores. */
4074 if (save_fregs)
4076 /* Adjust the register to index off of. */
4077 if (frame_pointer_needed)
4078 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4079 else
4080 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4082 /* Actually do the restores now. */
4083 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4084 if (regs_ever_live[i]
4085 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4087 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4088 rtx dest = gen_rtx_REG (DFmode, i);
4089 emit_move_insn (dest, src);
4093 /* Emit a blockage insn here to keep these insns from being moved to
4094 an earlier spot in the epilogue, or into the main instruction stream.
4096 This is necessary as we must not cut the stack back before all the
4097 restores are finished. */
4098 emit_insn (gen_blockage ());
4100 /* Reset stack pointer (and possibly frame pointer). The stack
4101 pointer is initially set to fp + 64 to avoid a race condition. */
4102 if (frame_pointer_needed)
4104 rtx delta = GEN_INT (-64);
4106 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4107 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4109 /* If we were deferring a callee register restore, do it now. */
4110 else if (merge_sp_adjust_with_load)
4112 rtx delta = GEN_INT (-actual_fsize);
4113 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4115 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4117 else if (actual_fsize != 0)
4118 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4119 - actual_fsize, 0);
4121 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4122 frame greater than 8k), do so now. */
4123 if (ret_off != 0)
4124 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4126 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4128 rtx sa = EH_RETURN_STACKADJ_RTX;
4130 emit_insn (gen_blockage ());
4131 emit_insn (TARGET_64BIT
4132 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4133 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4137 rtx
4138 hppa_pic_save_rtx (void)
4140 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4143 void
4144 hppa_profile_hook (int label_no)
4146 /* We use SImode for the address of the function in both 32 and
4147 64-bit code to avoid having to provide DImode versions of the
4148 lcla2 and load_offset_label_address insn patterns. */
4149 rtx reg = gen_reg_rtx (SImode);
4150 rtx label_rtx = gen_label_rtx ();
4151 rtx begin_label_rtx, call_insn;
4152 char begin_label_name[16];
4154 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4155 label_no);
4156 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4158 if (TARGET_64BIT)
4159 emit_move_insn (arg_pointer_rtx,
4160 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4161 GEN_INT (64)));
4163 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4165 /* The address of the function is loaded into %r25 with an instruction-
4166 relative sequence that avoids the use of relocations. The sequence
4167 is split so that the load_offset_label_address instruction can
4168 occupy the delay slot of the call to _mcount. */
4169 if (TARGET_PA_20)
4170 emit_insn (gen_lcla2 (reg, label_rtx));
4171 else
4172 emit_insn (gen_lcla1 (reg, label_rtx));
4174 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4175 reg, begin_label_rtx, label_rtx));
4177 #ifndef NO_PROFILE_COUNTERS
4179 rtx count_label_rtx, addr, r24;
4180 char count_label_name[16];
4182 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4183 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4185 addr = force_reg (Pmode, count_label_rtx);
4186 r24 = gen_rtx_REG (Pmode, 24);
4187 emit_move_insn (r24, addr);
4189 call_insn =
4190 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4191 gen_rtx_SYMBOL_REF (Pmode,
4192 "_mcount")),
4193 GEN_INT (TARGET_64BIT ? 24 : 12)));
4195 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4197 #else
4199 call_insn =
4200 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4201 gen_rtx_SYMBOL_REF (Pmode,
4202 "_mcount")),
4203 GEN_INT (TARGET_64BIT ? 16 : 8)));
4205 #endif
4207 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4208 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4210 /* Indicate the _mcount call cannot throw, nor will it execute a
4211 non-local goto. */
4212 REG_NOTES (call_insn)
4213 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4216 /* Fetch the return address for the frame COUNT steps up from
4217 the current frame, after the prologue. FRAMEADDR is the
4218 frame pointer of the COUNT frame.
4220 We want to ignore any export stub remnants here. To handle this,
4221 we examine the code at the return address, and if it is an export
4222 stub, we return a memory rtx for the stub return address stored
4223 at frame-24.
4225 The value returned is used in two different ways:
4227 1. To find a function's caller.
4229 2. To change the return address for a function.
4231 This function handles most instances of case 1; however, it will
4232 fail if there are two levels of stubs to execute on the return
4233 path. The only way I believe that can happen is if the return value
4234 needs a parameter relocation, which never happens for C code.
4236 This function handles most instances of case 2; however, it will
4237 fail if we did not originally have stub code on the return path
4238 but will need stub code on the new return path. This can happen if
4239 the caller & callee are both in the main program, but the new
4240 return location is in a shared library. */
4242 rtx
4243 return_addr_rtx (int count, rtx frameaddr)
4245 rtx label;
4246 rtx rp;
4247 rtx saved_rp;
4248 rtx ins;
4250 if (count != 0)
4251 return NULL_RTX;
4253 rp = get_hard_reg_initial_val (Pmode, 2);
4255 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4256 return rp;
4258 saved_rp = gen_reg_rtx (Pmode);
4259 emit_move_insn (saved_rp, rp);
4261 /* Get pointer to the instruction stream. We have to mask out the
4262 privilege level from the two low order bits of the return address
4263 pointer here so that ins will point to the start of the first
4264 instruction that would have been executed if we returned. */
4265 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4266 label = gen_label_rtx ();
4268 /* Check the instruction stream at the normal return address for the
4269 export stub:
4271 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4272 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4273 0x00011820 | stub+16: mtsp r1,sr0
4274 0xe0400002 | stub+20: be,n 0(sr0,rp)
4276 If it is an export stub, then our return address is really in
4277 -24[frameaddr]. */
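/* The four comparisons below check those stub words in order; the
   first mismatch branches to LABEL, so SAVED_RP keeps the normal
   return address. */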
4279 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4280 NULL_RTX, SImode, 1);
4281 emit_jump_insn (gen_bne (label));
4283 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4284 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4285 emit_jump_insn (gen_bne (label));
4287 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4288 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4289 emit_jump_insn (gen_bne (label));
4291 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4292 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4294 /* If there is no export stub then just use the value saved from
4295 the return pointer register. */
4297 emit_jump_insn (gen_bne (label));
4299 /* Here we know that our return address points to an export
4300 stub. We don't want to return the address of the export stub,
4301 but rather the return address of the export stub. That return
4302 address is stored at -24[frameaddr]. */
4304 emit_move_insn (saved_rp,
4305 gen_rtx_MEM (Pmode,
4306 memory_address (Pmode,
4307 plus_constant (frameaddr,
4308 -24))));
4310 emit_label (label);
4311 return saved_rp;
4314 /* This is only valid once reload has completed because it depends on
4315 knowing exactly how much (if any) frame there is and...
4317 It's only valid if there is no frame marker to de-allocate and...
4319 It's only valid if %r2 hasn't been saved into the caller's frame
4320 (we're not profiling and %r2 isn't live anywhere). */
4321 int
4322 hppa_can_use_return_insn_p (void)
4324 return (reload_completed
4325 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4326 && ! regs_ever_live[2]
4327 && ! frame_pointer_needed);
4330 void
4331 emit_bcond_fp (enum rtx_code code, rtx operand0)
4333 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4334 gen_rtx_IF_THEN_ELSE (VOIDmode,
4335 gen_rtx_fmt_ee (code,
4336 VOIDmode,
4337 gen_rtx_REG (CCFPmode, 0),
4338 const0_rtx),
4339 gen_rtx_LABEL_REF (VOIDmode, operand0),
4340 pc_rtx)));
4344 rtx
4345 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4347 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4348 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4351 /* Adjust the cost of a scheduling dependency. Return the new cost of
4352 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4354 static int
4355 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4357 enum attr_type attr_type;
4359 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4360 true dependencies, as they are described with bypasses now. */
4361 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4362 return cost;
4364 if (! recog_memoized (insn))
4365 return 0;
4367 attr_type = get_attr_type (insn);
4369 switch (REG_NOTE_KIND (link))
4371 case REG_DEP_ANTI:
4372 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4373 cycles later. */
4375 if (attr_type == TYPE_FPLOAD)
4377 rtx pat = PATTERN (insn);
4378 rtx dep_pat = PATTERN (dep_insn);
4379 if (GET_CODE (pat) == PARALLEL)
4381 /* This happens for the fldXs,mb patterns. */
4382 pat = XVECEXP (pat, 0, 0);
4384 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4385 /* If this happens, we have to extend this to schedule
4386 optimally. Return 0 for now. */
4387 return 0;
4389 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4391 if (! recog_memoized (dep_insn))
4392 return 0;
4393 switch (get_attr_type (dep_insn))
4395 case TYPE_FPALU:
4396 case TYPE_FPMULSGL:
4397 case TYPE_FPMULDBL:
4398 case TYPE_FPDIVSGL:
4399 case TYPE_FPDIVDBL:
4400 case TYPE_FPSQRTSGL:
4401 case TYPE_FPSQRTDBL:
4402 /* A fpload can't be issued until one cycle before a
4403 preceding arithmetic operation has finished if
4404 the target of the fpload is any of the sources
4405 (or destination) of the arithmetic operation. */
4406 return insn_default_latency (dep_insn) - 1;
4408 default:
4409 return 0;
4413 else if (attr_type == TYPE_FPALU)
4415 rtx pat = PATTERN (insn);
4416 rtx dep_pat = PATTERN (dep_insn);
4417 if (GET_CODE (pat) == PARALLEL)
4419 /* This happens for the fldXs,mb patterns. */
4420 pat = XVECEXP (pat, 0, 0);
4422 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4423 /* If this happens, we have to extend this to schedule
4424 optimally. Return 0 for now. */
4425 return 0;
4427 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4429 if (! recog_memoized (dep_insn))
4430 return 0;
4431 switch (get_attr_type (dep_insn))
4433 case TYPE_FPDIVSGL:
4434 case TYPE_FPDIVDBL:
4435 case TYPE_FPSQRTSGL:
4436 case TYPE_FPSQRTDBL:
4437 /* An ALU flop can't be issued until two cycles before a
4438 preceding divide or sqrt operation has finished if
4439 the target of the ALU flop is any of the sources
4440 (or destination) of the divide or sqrt operation. */
4441 return insn_default_latency (dep_insn) - 2;
4443 default:
4444 return 0;
4449 /* For other anti dependencies, the cost is 0. */
4450 return 0;
4452 case REG_DEP_OUTPUT:
4453 /* Output dependency; DEP_INSN writes a register that INSN writes some
4454 cycles later. */
4455 if (attr_type == TYPE_FPLOAD)
4457 rtx pat = PATTERN (insn);
4458 rtx dep_pat = PATTERN (dep_insn);
4459 if (GET_CODE (pat) == PARALLEL)
4461 /* This happens for the fldXs,mb patterns. */
4462 pat = XVECEXP (pat, 0, 0);
4464 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4465 /* If this happens, we have to extend this to schedule
4466 optimally. Return 0 for now. */
4467 return 0;
4469 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4471 if (! recog_memoized (dep_insn))
4472 return 0;
4473 switch (get_attr_type (dep_insn))
4475 case TYPE_FPALU:
4476 case TYPE_FPMULSGL:
4477 case TYPE_FPMULDBL:
4478 case TYPE_FPDIVSGL:
4479 case TYPE_FPDIVDBL:
4480 case TYPE_FPSQRTSGL:
4481 case TYPE_FPSQRTDBL:
4482 /* A fpload can't be issued until one cycle before a
4483 preceding arithmetic operation has finished if
4484 the target of the fpload is the destination of the
4485 arithmetic operation.
4487 Exception: For PA7100LC, PA7200 and PA7300, the cost
4488 is 3 cycles, unless they bundle together. We also
4489 pay the penalty if the second insn is a fpload. */
4490 return insn_default_latency (dep_insn) - 1;
4492 default:
4493 return 0;
4497 else if (attr_type == TYPE_FPALU)
4499 rtx pat = PATTERN (insn);
4500 rtx dep_pat = PATTERN (dep_insn);
4501 if (GET_CODE (pat) == PARALLEL)
4503 /* This happens for the fldXs,mb patterns. */
4504 pat = XVECEXP (pat, 0, 0);
4506 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4507 /* If this happens, we have to extend this to schedule
4508 optimally. Return 0 for now. */
4509 return 0;
4511 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4513 if (! recog_memoized (dep_insn))
4514 return 0;
4515 switch (get_attr_type (dep_insn))
4517 case TYPE_FPDIVSGL:
4518 case TYPE_FPDIVDBL:
4519 case TYPE_FPSQRTSGL:
4520 case TYPE_FPSQRTDBL:
4521 /* An ALU flop can't be issued until two cycles before a
4522 preceding divide or sqrt operation has finished if
4523 the target of the ALU flop is also the target of
4524 the divide or sqrt operation. */
4525 return insn_default_latency (dep_insn) - 2;
4527 default:
4528 return 0;
4533 /* For other output dependencies, the cost is 0. */
4534 return 0;
4536 default:
4537 gcc_unreachable ();
4541 /* Adjust scheduling priorities. We use this to try and keep addil
4542 and the next use of %r1 close together. */
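/* Illustrative note (an interpretation, not from the original source):
   an addil leaves its result in %r1, so lowering the priority of insns
   that consume a symbolic LO_SUM address keeps them from drifting away
   from the addil that computed %r1. */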
4543 static int
4544 pa_adjust_priority (rtx insn, int priority)
4546 rtx set = single_set (insn);
4547 rtx src, dest;
4548 if (set)
4550 src = SET_SRC (set);
4551 dest = SET_DEST (set);
4552 if (GET_CODE (src) == LO_SUM
4553 && symbolic_operand (XEXP (src, 1), VOIDmode)
4554 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4555 priority >>= 3;
4557 else if (GET_CODE (src) == MEM
4558 && GET_CODE (XEXP (src, 0)) == LO_SUM
4559 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4560 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4561 priority >>= 1;
4563 else if (GET_CODE (dest) == MEM
4564 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4565 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4566 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4567 priority >>= 3;
4569 return priority;
4572 /* The 700 can only issue a single insn at a time.
4573 The 7XXX processors can issue two insns at a time.
4574 The 8000 can issue 4 insns at a time. */
4575 static int
4576 pa_issue_rate (void)
4578 switch (pa_cpu)
4580 case PROCESSOR_700: return 1;
4581 case PROCESSOR_7100: return 2;
4582 case PROCESSOR_7100LC: return 2;
4583 case PROCESSOR_7200: return 2;
4584 case PROCESSOR_7300: return 2;
4585 case PROCESSOR_8000: return 4;
4587 default:
4588 gcc_unreachable ();
4594 /* Return any length adjustment needed by INSN which already has its length
4595 computed as LENGTH. Return zero if no adjustment is necessary.
4597 For the PA: function calls, millicode calls, and backwards short
4598 conditional branches with unfilled delay slots need an adjustment by +1
4599 (to account for the NOP which will be inserted into the instruction stream).
4601 Also compute the length of an inline block move here as it is too
4602 complicated to express as a length attribute in pa.md. */
4603 int
4604 pa_adjust_insn_length (rtx insn, int length)
4606 rtx pat = PATTERN (insn);
4608 /* Jumps inside switch tables which have unfilled delay slots need
4609 adjustment. */
4610 if (GET_CODE (insn) == JUMP_INSN
4611 && GET_CODE (pat) == PARALLEL
4612 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4613 return 4;
4614 /* Millicode insn with an unfilled delay slot. */
4615 else if (GET_CODE (insn) == INSN
4616 && GET_CODE (pat) != SEQUENCE
4617 && GET_CODE (pat) != USE
4618 && GET_CODE (pat) != CLOBBER
4619 && get_attr_type (insn) == TYPE_MILLI)
4620 return 4;
4621 /* Block move pattern. */
4622 else if (GET_CODE (insn) == INSN
4623 && GET_CODE (pat) == PARALLEL
4624 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4625 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4626 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4627 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4628 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4629 return compute_movmem_length (insn) - 4;
4630 /* Block clear pattern. */
4631 else if (GET_CODE (insn) == INSN
4632 && GET_CODE (pat) == PARALLEL
4633 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4634 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4635 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4636 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4637 return compute_clrmem_length (insn) - 4;
4638 /* Conditional branch with an unfilled delay slot. */
4639 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4641 /* Adjust a short backwards conditional with an unfilled delay slot. */
4642 if (GET_CODE (pat) == SET
4643 && length == 4
4644 && ! forward_branch_p (insn))
4645 return 4;
4646 else if (GET_CODE (pat) == PARALLEL
4647 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4648 && length == 4)
4649 return 4;
4650 /* Adjust dbra insn with short backwards conditional branch with
4651 unfilled delay slot -- only for the case where the counter is
4652 in a general register. */
4653 else if (GET_CODE (pat) == PARALLEL
4654 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4655 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4656 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4657 && length == 4
4658 && ! forward_branch_p (insn))
4659 return 4;
4660 else
4661 return 0;
4663 return 0;
4666 /* Print operand X (an rtx) in assembler syntax to file FILE.
4667 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4668 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4670 void
4671 print_operand (FILE *file, rtx x, int code)
4673 switch (code)
4675 case '#':
4676 /* Output a 'nop' if there's nothing for the delay slot. */
4677 if (dbr_sequence_length () == 0)
4678 fputs ("\n\tnop", file);
4679 return;
4680 case '*':
4681 /* Output a nullification completer if there's nothing for the
4682 delay slot or nullification is requested. */
4683 if (dbr_sequence_length () == 0 ||
4684 (final_sequence &&
4685 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4686 fputs (",n", file);
4687 return;
4688 case 'R':
4689 /* Print out the second register name of a register pair.
4690 I.e., R (6) => 7. */
4691 fputs (reg_names[REGNO (x) + 1], file);
4692 return;
4693 case 'r':
4694 /* A register or zero. */
4695 if (x == const0_rtx
4696 || (x == CONST0_RTX (DFmode))
4697 || (x == CONST0_RTX (SFmode)))
4699 fputs ("%r0", file);
4700 return;
4702 else
4703 break;
4704 case 'f':
4705 /* A register or zero (floating point). */
4706 if (x == const0_rtx
4707 || (x == CONST0_RTX (DFmode))
4708 || (x == CONST0_RTX (SFmode)))
4710 fputs ("%fr0", file);
4711 return;
4713 else
4714 break;
4715 case 'A':
4717 rtx xoperands[2];
4719 xoperands[0] = XEXP (XEXP (x, 0), 0);
4720 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4721 output_global_address (file, xoperands[1], 0);
4722 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4723 return;
4726 case 'C': /* Plain (C)ondition */
4727 case 'X':
4728 switch (GET_CODE (x))
4730 case EQ:
4731 fputs ("=", file); break;
4732 case NE:
4733 fputs ("<>", file); break;
4734 case GT:
4735 fputs (">", file); break;
4736 case GE:
4737 fputs (">=", file); break;
4738 case GEU:
4739 fputs (">>=", file); break;
4740 case GTU:
4741 fputs (">>", file); break;
4742 case LT:
4743 fputs ("<", file); break;
4744 case LE:
4745 fputs ("<=", file); break;
4746 case LEU:
4747 fputs ("<<=", file); break;
4748 case LTU:
4749 fputs ("<<", file); break;
4750 default:
4751 gcc_unreachable ();
4753 return;
4754 case 'N': /* Condition, (N)egated */
4755 switch (GET_CODE (x))
4757 case EQ:
4758 fputs ("<>", file); break;
4759 case NE:
4760 fputs ("=", file); break;
4761 case GT:
4762 fputs ("<=", file); break;
4763 case GE:
4764 fputs ("<", file); break;
4765 case GEU:
4766 fputs ("<<", file); break;
4767 case GTU:
4768 fputs ("<<=", file); break;
4769 case LT:
4770 fputs (">=", file); break;
4771 case LE:
4772 fputs (">", file); break;
4773 case LEU:
4774 fputs (">>", file); break;
4775 case LTU:
4776 fputs (">>=", file); break;
4777 default:
4778 gcc_unreachable ();
4780 return;
4781 /* For floating point comparisons. Note that the output
4782 predicates are the complement of the desired mode. The
4783 conditions for GT, GE, LT, LE and LTGT cause an invalid
4784 operation exception if the result is unordered and this
4785 exception is enabled in the floating-point status register. */
4786 case 'Y':
4787 switch (GET_CODE (x))
4789 case EQ:
4790 fputs ("!=", file); break;
4791 case NE:
4792 fputs ("=", file); break;
4793 case GT:
4794 fputs ("!>", file); break;
4795 case GE:
4796 fputs ("!>=", file); break;
4797 case LT:
4798 fputs ("!<", file); break;
4799 case LE:
4800 fputs ("!<=", file); break;
4801 case LTGT:
4802 fputs ("!<>", file); break;
4803 case UNLE:
4804 fputs ("!?<=", file); break;
4805 case UNLT:
4806 fputs ("!?<", file); break;
4807 case UNGE:
4808 fputs ("!?>=", file); break;
4809 case UNGT:
4810 fputs ("!?>", file); break;
4811 case UNEQ:
4812 fputs ("!?=", file); break;
4813 case UNORDERED:
4814 fputs ("!?", file); break;
4815 case ORDERED:
4816 fputs ("?", file); break;
4817 default:
4818 gcc_unreachable ();
4820 return;
4821 case 'S': /* Condition, operands are (S)wapped. */
4822 switch (GET_CODE (x))
4824 case EQ:
4825 fputs ("=", file); break;
4826 case NE:
4827 fputs ("<>", file); break;
4828 case GT:
4829 fputs ("<", file); break;
4830 case GE:
4831 fputs ("<=", file); break;
4832 case GEU:
4833 fputs ("<<=", file); break;
4834 case GTU:
4835 fputs ("<<", file); break;
4836 case LT:
4837 fputs (">", file); break;
4838 case LE:
4839 fputs (">=", file); break;
4840 case LEU:
4841 fputs (">>=", file); break;
4842 case LTU:
4843 fputs (">>", file); break;
4844 default:
4845 gcc_unreachable ();
4847 return;
4848 case 'B': /* Condition, (B)oth swapped and negated. */
4849 switch (GET_CODE (x))
4851 case EQ:
4852 fputs ("<>", file); break;
4853 case NE:
4854 fputs ("=", file); break;
4855 case GT:
4856 fputs (">=", file); break;
4857 case GE:
4858 fputs (">", file); break;
4859 case GEU:
4860 fputs (">>", file); break;
4861 case GTU:
4862 fputs (">>=", file); break;
4863 case LT:
4864 fputs ("<=", file); break;
4865 case LE:
4866 fputs ("<", file); break;
4867 case LEU:
4868 fputs ("<<", file); break;
4869 case LTU:
4870 fputs ("<<=", file); break;
4871 default:
4872 gcc_unreachable ();
4874 return;
4875 case 'k':
4876 gcc_assert (GET_CODE (x) == CONST_INT);
4877 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4878 return;
4879 case 'Q':
4880 gcc_assert (GET_CODE (x) == CONST_INT);
4881 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4882 return;
4883 case 'L':
4884 gcc_assert (GET_CODE (x) == CONST_INT);
4885 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4886 return;
4887 case 'O':
4888 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4889 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4890 return;
4891 case 'p':
4892 gcc_assert (GET_CODE (x) == CONST_INT);
4893 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4894 return;
4895 case 'P':
4896 gcc_assert (GET_CODE (x) == CONST_INT);
4897 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4898 return;
4899 case 'I':
4900 if (GET_CODE (x) == CONST_INT)
4901 fputs ("i", file);
4902 return;
4903 case 'M':
4904 case 'F':
4905 switch (GET_CODE (XEXP (x, 0)))
4907 case PRE_DEC:
4908 case PRE_INC:
4909 if (ASSEMBLER_DIALECT == 0)
4910 fputs ("s,mb", file);
4911 else
4912 fputs (",mb", file);
4913 break;
4914 case POST_DEC:
4915 case POST_INC:
4916 if (ASSEMBLER_DIALECT == 0)
4917 fputs ("s,ma", file);
4918 else
4919 fputs (",ma", file);
4920 break;
4921 case PLUS:
4922 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4923 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4925 if (ASSEMBLER_DIALECT == 0)
4926 fputs ("x", file);
4928 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4929 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4931 if (ASSEMBLER_DIALECT == 0)
4932 fputs ("x,s", file);
4933 else
4934 fputs (",s", file);
4936 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4937 fputs ("s", file);
4938 break;
4939 default:
4940 if (code == 'F' && ASSEMBLER_DIALECT == 0)
4941 fputs ("s", file);
4942 break;
4944 return;
4945 case 'G':
4946 output_global_address (file, x, 0);
4947 return;
4948 case 'H':
4949 output_global_address (file, x, 1);
4950 return;
4951 case 0: /* Don't do anything special */
4952 break;
4953 case 'Z':
4955 unsigned op[3];
4956 compute_zdepwi_operands (INTVAL (x), op);
4957 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
4958 return;
4960 case 'z':
4962 unsigned op[3];
4963 compute_zdepdi_operands (INTVAL (x), op);
4964 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
4965 return;
4967 case 'c':
4968 /* We can get here from a .vtable_inherit due to our
4969 CONSTANT_ADDRESS_P rejecting perfectly good constant
4970 addresses. */
4971 break;
4972 default:
4973 gcc_unreachable ();
4975 if (GET_CODE (x) == REG)
4977 fputs (reg_names [REGNO (x)], file);
4978 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
4980 fputs ("R", file);
4981 return;
4983 if (FP_REG_P (x)
4984 && GET_MODE_SIZE (GET_MODE (x)) <= 4
4985 && (REGNO (x) & 1) == 0)
4986 fputs ("L", file);
4988 else if (GET_CODE (x) == MEM)
4990 int size = GET_MODE_SIZE (GET_MODE (x));
4991 rtx base = NULL_RTX;
4992 switch (GET_CODE (XEXP (x, 0)))
4994 case PRE_DEC:
4995 case POST_DEC:
4996 base = XEXP (XEXP (x, 0), 0);
4997 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
4998 break;
4999 case PRE_INC:
5000 case POST_INC:
5001 base = XEXP (XEXP (x, 0), 0);
5002 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5003 break;
5004 case PLUS:
5005 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5006 fprintf (file, "%s(%s)",
5007 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5008 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5009 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5010 fprintf (file, "%s(%s)",
5011 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5012 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5013 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5014 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5016 /* Because the REG_POINTER flag can get lost during reload,
5017 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5018 index and base registers in the combined move patterns. */
5019 rtx base = XEXP (XEXP (x, 0), 1);
5020 rtx index = XEXP (XEXP (x, 0), 0);
5022 fprintf (file, "%s(%s)",
5023 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5025 else
5026 output_address (XEXP (x, 0));
5027 break;
5028 default:
5029 output_address (XEXP (x, 0));
5030 break;
5033 else
5034 output_addr_const (file, x);
5037 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5039 void
5040 output_global_address (FILE *file, rtx x, int round_constant)
5043 /* Imagine (high (const (plus ...))). */
5044 if (GET_CODE (x) == HIGH)
5045 x = XEXP (x, 0);
5047 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5048 output_addr_const (file, x);
5049 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5051 output_addr_const (file, x);
5052 fputs ("-$global$", file);
5054 else if (GET_CODE (x) == CONST)
5056 const char *sep = "";
5057 int offset = 0; /* assembler wants -$global$ at end */
5058 rtx base = NULL_RTX;
5060 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5062 case SYMBOL_REF:
5063 base = XEXP (XEXP (x, 0), 0);
5064 output_addr_const (file, base);
5065 break;
5066 case CONST_INT:
5067 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5068 break;
5069 default:
5070 gcc_unreachable ();
5073 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5075 case SYMBOL_REF:
5076 base = XEXP (XEXP (x, 0), 1);
5077 output_addr_const (file, base);
5078 break;
5079 case CONST_INT:
5080 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5081 break;
5082 default:
5083 gcc_unreachable ();
5086 /* How bogus. The compiler is apparently responsible for
5087 rounding the constant if it uses an LR field selector.
5089 The linker and/or assembler seem like a better place, since
5090 they have to do this kind of thing already.
5092 If we fail to do this, HP's optimizing linker may eliminate
5093 an addil, but not update the ldw/stw/ldo instruction that
5094 uses the result of the addil. */
5095 if (round_constant)
5096 offset = ((offset + 0x1000) & ~0x1fff);
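/* A worked example (not from the original source): an offset of 0x1234
   becomes (0x1234 + 0x1000) & ~0x1fff == 0x2000, i.e. the offset is
   rounded to the nearest multiple of 0x2000 (8k). */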
5098 switch (GET_CODE (XEXP (x, 0)))
5100 case PLUS:
5101 if (offset < 0)
5103 offset = -offset;
5104 sep = "-";
5106 else
5107 sep = "+";
5108 break;
5110 case MINUS:
5111 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5112 sep = "-";
5113 break;
5115 default:
5116 gcc_unreachable ();
5119 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5120 fputs ("-$global$", file);
5121 if (offset)
5122 fprintf (file, "%s%d", sep, offset);
5124 else
5125 output_addr_const (file, x);
5128 /* Output boilerplate text to appear at the beginning of the file.
5129 There are several possible versions. */
5130 #define aputs(x) fputs(x, asm_out_file)
5131 static inline void
5132 pa_file_start_level (void)
5134 if (TARGET_64BIT)
5135 aputs ("\t.LEVEL 2.0w\n");
5136 else if (TARGET_PA_20)
5137 aputs ("\t.LEVEL 2.0\n");
5138 else if (TARGET_PA_11)
5139 aputs ("\t.LEVEL 1.1\n");
5140 else
5141 aputs ("\t.LEVEL 1.0\n");
5144 static inline void
5145 pa_file_start_space (int sortspace)
5147 aputs ("\t.SPACE $PRIVATE$");
5148 if (sortspace)
5149 aputs (",SORT=16");
5150 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5151 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5152 "\n\t.SPACE $TEXT$");
5153 if (sortspace)
5154 aputs (",SORT=8");
5155 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5156 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5159 static inline void
5160 pa_file_start_file (int want_version)
5162 if (write_symbols != NO_DEBUG)
5164 output_file_directive (asm_out_file, main_input_filename);
5165 if (want_version)
5166 aputs ("\t.version\t\"01.01\"\n");
5170 static inline void
5171 pa_file_start_mcount (const char *aswhat)
5173 if (profile_flag)
5174 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5177 static void
5178 pa_elf_file_start (void)
5180 pa_file_start_level ();
5181 pa_file_start_mcount ("ENTRY");
5182 pa_file_start_file (0);
5185 static void
5186 pa_som_file_start (void)
5188 pa_file_start_level ();
5189 pa_file_start_space (0);
5190 aputs ("\t.IMPORT $global$,DATA\n"
5191 "\t.IMPORT $$dyncall,MILLICODE\n");
5192 pa_file_start_mcount ("CODE");
5193 pa_file_start_file (0);
5196 static void
5197 pa_linux_file_start (void)
5199 pa_file_start_file (1);
5200 pa_file_start_level ();
5201 pa_file_start_mcount ("CODE");
5204 static void
5205 pa_hpux64_gas_file_start (void)
5207 pa_file_start_level ();
5208 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5209 if (profile_flag)
5210 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5211 #endif
5212 pa_file_start_file (1);
5215 static void
5216 pa_hpux64_hpas_file_start (void)
5218 pa_file_start_level ();
5219 pa_file_start_space (1);
5220 pa_file_start_mcount ("CODE");
5221 pa_file_start_file (0);
5223 #undef aputs
5225 static struct deferred_plabel *
5226 get_plabel (rtx symbol)
5228 const char *fname = XSTR (symbol, 0);
5229 size_t i;
5231 /* See if we have already put this function on the list of deferred
5232 plabels. This list is generally small, so a linear search is not
5233 too ugly. If it proves too slow, replace it with something faster. */
5234 for (i = 0; i < n_deferred_plabels; i++)
5235 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5236 break;
5238 /* If the deferred plabel list is empty, or this entry was not found
5239 on the list, create a new entry on the list. */
5240 if (deferred_plabels == NULL || i == n_deferred_plabels)
5242 tree id;
5244 if (deferred_plabels == 0)
5245 deferred_plabels = (struct deferred_plabel *)
5246 ggc_alloc (sizeof (struct deferred_plabel));
5247 else
5248 deferred_plabels = (struct deferred_plabel *)
5249 ggc_realloc (deferred_plabels,
5250 ((n_deferred_plabels + 1)
5251 * sizeof (struct deferred_plabel)));
5253 i = n_deferred_plabels++;
5254 deferred_plabels[i].internal_label = gen_label_rtx ();
5255 deferred_plabels[i].symbol = symbol;
5257 /* Gross. We have just implicitly taken the address of this
5258 function. Mark it in the same manner as assemble_name. */
5259 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5260 if (id)
5261 mark_referenced (id);
5264 return &deferred_plabels[i];
5267 static void
5268 output_deferred_plabels (void)
5270 size_t i;
5271 /* If we have deferred plabels, then we need to switch into the data
5272 section and align it to a 4 byte boundary before we output the
5273 deferred plabels. */
5274 if (n_deferred_plabels)
5276 data_section ();
5277 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5280 /* Now output the deferred plabels. */
5281 for (i = 0; i < n_deferred_plabels; i++)
5283 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5284 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5285 assemble_integer (deferred_plabels[i].symbol,
5286 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5290 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5291 /* Initialize optabs to point to HPUX long double emulation routines. */
5292 static void
5293 pa_hpux_init_libfuncs (void)
5295 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5296 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5297 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5298 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5299 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5300 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5301 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5302 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5303 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5305 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5306 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5307 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5308 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5309 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5310 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5311 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5313 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5314 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5315 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5316 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5318 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5319 ? "__U_Qfcnvfxt_quad_to_sgl"
5320 : "_U_Qfcnvfxt_quad_to_sgl");
5321 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5322 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5323 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5325 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5326 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5328 #endif
5330 /* HP's millicode routines mean something special to the assembler.
5331 Keep track of which ones we have used. */
5333 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5334 static void import_milli (enum millicodes);
5335 static char imported[(int) end1000];
5336 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5337 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5338 #define MILLI_START 10
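/* ".IMPORT $$" occupies the first ten characters of import_string, so
   the strncpy in import_milli overwrites the four dots at MILLI_START
   with the routine name; e.g. import_milli (mulI) emits
   ".IMPORT $$mulI,MILLICODE". */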
5340 static void
5341 import_milli (enum millicodes code)
5343 char str[sizeof (import_string)];
5345 if (!imported[(int) code])
5347 imported[(int) code] = 1;
5348 strcpy (str, import_string);
5349 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5350 output_asm_insn (str, 0);
5354 /* The register constraints have put the operands and return value in
5355 the proper registers. */
5357 const char *
5358 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5360 import_milli (mulI);
5361 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5364 /* Emit the rtl for doing a division by a constant. */
5366 /* Do magic division millicodes exist for this value? */
5367 const int magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
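/* That is, magic millicode routines exist for the divisors
   3, 5, 6, 7, 9, 10, 12, 14 and 15. */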
5369 /* We'll use an array to keep track of the magic millicodes and
5370 whether or not we've used them already. [n][0] is signed, [n][1] is
5371 unsigned. */
5373 static int div_milli[16][2];
5375 int
5376 emit_hpdiv_const (rtx *operands, int unsignedp)
5378 if (GET_CODE (operands[2]) == CONST_INT
5379 && INTVAL (operands[2]) > 0
5380 && INTVAL (operands[2]) < 16
5381 && magic_milli[INTVAL (operands[2])])
5383 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5385 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5386 emit
5387 (gen_rtx_PARALLEL
5388 (VOIDmode,
5389 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5390 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5391 SImode,
5392 gen_rtx_REG (SImode, 26),
5393 operands[2])),
5394 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5395 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5396 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5397 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5398 gen_rtx_CLOBBER (VOIDmode, ret))));
5399 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5400 return 1;
5402 return 0;
5405 const char *
5406 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5408 int divisor;
5410 /* If the divisor is a constant, try to use one of the special
5411 opcodes. */
5412 if (GET_CODE (operands[0]) == CONST_INT)
5414 static char buf[100];
5415 divisor = INTVAL (operands[0]);
5416 if (!div_milli[divisor][unsignedp])
5418 div_milli[divisor][unsignedp] = 1;
5419 if (unsignedp)
5420 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5421 else
5422 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5424 if (unsignedp)
5426 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5427 INTVAL (operands[0]));
5428 return output_millicode_call (insn,
5429 gen_rtx_SYMBOL_REF (SImode, buf));
5431 else
5433 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5434 INTVAL (operands[0]));
5435 return output_millicode_call (insn,
5436 gen_rtx_SYMBOL_REF (SImode, buf));
5439 /* Divisor isn't a special constant. */
5440 else
5442 if (unsignedp)
5444 import_milli (divU);
5445 return output_millicode_call (insn,
5446 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5448 else
5450 import_milli (divI);
5451 return output_millicode_call (insn,
5452 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5457 /* Output a $$rem millicode to do mod. */
5459 const char *
5460 output_mod_insn (int unsignedp, rtx insn)
5462 if (unsignedp)
5464 import_milli (remU);
5465 return output_millicode_call (insn,
5466 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5468 else
5470 import_milli (remI);
5471 return output_millicode_call (insn,
5472 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5476 void
5477 output_arg_descriptor (rtx call_insn)
5479 const char *arg_regs[4];
5480 enum machine_mode arg_mode;
5481 rtx link;
5482 int i, output_flag = 0;
5483 int regno;
5485 /* We neither need nor want argument location descriptors for the
5486 64-bit runtime environment or the ELF32 environment. */
5487 if (TARGET_64BIT || TARGET_ELF32)
5488 return;
5490 for (i = 0; i < 4; i++)
5491 arg_regs[i] = 0;
5493 /* Specify explicitly that no argument relocations should take place
5494 if using the portable runtime calling conventions. */
5495 if (TARGET_PORTABLE_RUNTIME)
5497 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5498 asm_out_file);
5499 return;
5502 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5503 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5504 link; link = XEXP (link, 1))
5506 rtx use = XEXP (link, 0);
5508 if (! (GET_CODE (use) == USE
5509 && GET_CODE (XEXP (use, 0)) == REG
5510 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5511 continue;
5513 arg_mode = GET_MODE (XEXP (use, 0));
5514 regno = REGNO (XEXP (use, 0));
5515 if (regno >= 23 && regno <= 26)
5517 arg_regs[26 - regno] = "GR";
5518 if (arg_mode == DImode)
5519 arg_regs[25 - regno] = "GR";
5521 else if (regno >= 32 && regno <= 39)
5523 if (arg_mode == SFmode)
5524 arg_regs[(regno - 32) / 2] = "FR";
5525 else
5527 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5528 arg_regs[(regno - 34) / 2] = "FR";
5529 arg_regs[(regno - 34) / 2 + 1] = "FU";
5530 #else
5531 arg_regs[(regno - 34) / 2] = "FU";
5532 arg_regs[(regno - 34) / 2 + 1] = "FR";
5533 #endif
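/* At this point arg_regs[0..3] describe ARGW0..ARGW3: general argument
   registers %r26..%r23 map to indices 0..3, and FP argument registers
   fill the corresponding word slots in pairs. */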
5537 fputs ("\t.CALL ", asm_out_file);
5538 for (i = 0; i < 4; i++)
5540 if (arg_regs[i])
5542 if (output_flag++)
5543 fputc (',', asm_out_file);
5544 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5547 fputc ('\n', asm_out_file);
5550 /* Return the class of any secondary reload register that is needed to
5551 move IN into a register in class CLASS using mode MODE.
5553 Profiling has shown this routine and its descendants account for
5554 a significant amount of compile time (~7%). So it has been
5555 optimized to reduce redundant computations and eliminate useless
5556 function calls.
5558 It might be worthwhile to try and make this a leaf function too. */
5560 enum reg_class
5561 secondary_reload_class (enum reg_class class, enum machine_mode mode, rtx in)
5563 int regno, is_symbolic;
5565 /* Trying to load a constant into a FP register during PIC code
5566 generation will require %r1 as a scratch register. */
5567 if (flag_pic
5568 && GET_MODE_CLASS (mode) == MODE_INT
5569 && FP_REG_CLASS_P (class)
5570 && (GET_CODE (in) == CONST_INT || GET_CODE (in) == CONST_DOUBLE))
5571 return R1_REGS;
5573 /* Profiling showed the PA port spends about 1.3% of its compilation
5574 time in true_regnum from calls inside secondary_reload_class. */
5576 if (GET_CODE (in) == REG)
5578 regno = REGNO (in);
5579 if (regno >= FIRST_PSEUDO_REGISTER)
5580 regno = true_regnum (in);
5582 else if (GET_CODE (in) == SUBREG)
5583 regno = true_regnum (in);
5584 else
5585 regno = -1;
5587 /* If we have something like (mem (mem (...))), we can safely assume the
5588 inner MEM will end up in a general register after reloading, so there's
5589 no need for a secondary reload. */
5590 if (GET_CODE (in) == MEM
5591 && GET_CODE (XEXP (in, 0)) == MEM)
5592 return NO_REGS;
5594 /* Handle out of range displacement for integer mode loads/stores of
5595 FP registers. */
5596 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5597 && GET_MODE_CLASS (mode) == MODE_INT
5598 && FP_REG_CLASS_P (class))
5599 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5600 return GENERAL_REGS;
5602 /* A SAR<->FP register copy requires a secondary register (GPR) as
5603 well as secondary memory. */
5604 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5605 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5606 || (class == SHIFT_REGS && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5607 return GENERAL_REGS;
5609 if (GET_CODE (in) == HIGH)
5610 in = XEXP (in, 0);
5612 /* Profiling has shown GCC spends about 2.6% of its compilation
5613 time in symbolic_operand from calls inside secondary_reload_class.
5615 We use an inline copy and only compute its return value once to avoid
5616 useless work. */
5617 switch (GET_CODE (in))
5619 rtx tmp;
5621 case SYMBOL_REF:
5622 case LABEL_REF:
5623 is_symbolic = 1;
5624 break;
5625 case CONST:
5626 tmp = XEXP (in, 0);
5627 is_symbolic = ((GET_CODE (XEXP (tmp, 0)) == SYMBOL_REF
5628 || GET_CODE (XEXP (tmp, 0)) == LABEL_REF)
5629 && GET_CODE (XEXP (tmp, 1)) == CONST_INT);
5630 break;
5632 default:
5633 is_symbolic = 0;
5634 break;
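/* is_symbolic is now 1 for a SYMBOL_REF, a LABEL_REF, or a CONST of
   the form (plus (symbol_ref/label_ref) (const_int)). */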
5637 if (!flag_pic
5638 && is_symbolic
5639 && read_only_operand (in, VOIDmode))
5640 return NO_REGS;
5642 if (class != R1_REGS && is_symbolic)
5643 return R1_REGS;
5645 return NO_REGS;
5648 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5649 by invisible reference. As a GCC extension, we also pass anything
5650 with a zero or variable size by reference.
5652 The 64-bit runtime does not describe passing any types by invisible
5653 reference. The internals of GCC can't currently handle passing
5654 empty structures, and zero or variable length arrays when they are
5655 not passed entirely on the stack or by reference. Thus, as a GCC
5656 extension, we pass these types by reference. The HP compiler doesn't
5657 support these types, so hopefully there shouldn't be any compatibility
5658 issues. This may have to be revisited when HP releases a C99 compiler
5659 or updates the ABI. */
5661 static bool
5662 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5663 enum machine_mode mode, tree type,
5664 bool named ATTRIBUTE_UNUSED)
5666 HOST_WIDE_INT size;
5668 if (type)
5669 size = int_size_in_bytes (type);
5670 else
5671 size = GET_MODE_SIZE (mode);
5673 if (TARGET_64BIT)
5674 return size <= 0;
5675 else
5676 return size <= 0 || size > 8;
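/* For example, a 12-byte struct is passed by invisible reference on
   the 32-bit runtime (size > 8), but by value on the 64-bit runtime,
   where only zero- and variable-sized types take the reference path. */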
5679 enum direction
5680 function_arg_padding (enum machine_mode mode, tree type)
5682 if (mode == BLKmode
5683 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5685 /* Return none if justification is not required. */
5686 if (type
5687 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5688 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5689 return none;
5691 /* The directions set here are ignored when a BLKmode argument larger
5692 than a word is placed in a register. Different code is used for
5693 the stack and registers. This makes it difficult to have a
5694 consistent data representation for both the stack and registers.
5695 For both runtimes, the justification and padding for arguments on
5696 the stack and in registers should be identical. */
5697 if (TARGET_64BIT)
5698 /* The 64-bit runtime specifies left justification for aggregates. */
5699 return upward;
5700 else
5701 /* The 32-bit runtime architecture specifies right justification.
5702 When the argument is passed on the stack, the argument is padded
5703 with garbage on the left. The HP compiler pads with zeros. */
5704 return downward;
5707 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5708 return downward;
5709 else
5710 return none;
5714 /* Do what is necessary for `va_start'. We look at the current function
5715 to determine if stdargs or varargs is used and fill in an initial
5716 va_list. A pointer to this constructor is returned. */
5718 static rtx
5719 hppa_builtin_saveregs (void)
5721 rtx offset, dest;
5722 tree fntype = TREE_TYPE (current_function_decl);
5723 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5724 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5725 != void_type_node)))
5726 ? UNITS_PER_WORD : 0);
5728 if (argadj)
5729 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5730 else
5731 offset = current_function_arg_offset_rtx;
5733 if (TARGET_64BIT)
5735 int i, off;
5737 /* Adjust for varargs/stdarg differences. */
5738 if (argadj)
5739 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5740 else
5741 offset = current_function_arg_offset_rtx;
5743 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5744 from the incoming arg pointer and growing to larger addresses. */
5745 for (i = 26, off = -64; i >= 19; i--, off += 8)
5746 emit_move_insn (gen_rtx_MEM (word_mode,
5747 plus_constant (arg_pointer_rtx, off)),
5748 gen_rtx_REG (word_mode, i));
5750 /* The incoming args pointer points just beyond the flushback area;
5751 normally this is not a serious concern. However, when we are doing
5752 varargs/stdargs we want to make the arg pointer point to the start
5753 of the incoming argument area. */
5754 emit_move_insn (virtual_incoming_args_rtx,
5755 plus_constant (arg_pointer_rtx, -64));
5757 /* Now return a pointer to the first anonymous argument. */
5758 return copy_to_reg (expand_binop (Pmode, add_optab,
5759 virtual_incoming_args_rtx,
5760 offset, 0, 0, OPTAB_LIB_WIDEN));
5763 /* Store general registers on the stack. */
5764 dest = gen_rtx_MEM (BLKmode,
5765 plus_constant (current_function_internal_arg_pointer,
5766 -16));
5767 set_mem_alias_set (dest, get_varargs_alias_set ());
5768 set_mem_align (dest, BITS_PER_WORD);
5769 move_block_from_reg (23, dest, 4);
5771 /* move_block_from_reg will emit code to store the argument registers
5772 individually as scalar stores.
5774 However, other insns may later load from the same addresses for
5775 a structure load (passing a struct to a varargs routine).
5777 The alias code assumes that such aliasing can never happen, so we
5778 have to keep memory referencing insns from moving up beyond the
5779 last argument register store. So we emit a blockage insn here. */
5780 emit_insn (gen_blockage ());
5782 return copy_to_reg (expand_binop (Pmode, add_optab,
5783 current_function_internal_arg_pointer,
5784 offset, 0, 0, OPTAB_LIB_WIDEN));
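/* A sketch of the 32-bit layout created above: move_block_from_reg
   stores the four argument registers %r23 through %r26 into the word
   slots at offsets -16 through -4 from the internal argument pointer,
   so later va_arg fetches can walk the anonymous arguments in memory
   exactly as if they had been passed on the stack.  */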
5787 void
5788 hppa_va_start (tree valist, rtx nextarg)
5790 nextarg = expand_builtin_saveregs ();
5791 std_expand_builtin_va_start (valist, nextarg);
5794 static tree
5795 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5797 if (TARGET_64BIT)
5799 /* Args grow upward. We can use the generic routines. */
5800 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5802 else /* !TARGET_64BIT */
5804 tree ptr = build_pointer_type (type);
5805 tree valist_type;
5806 tree t, u;
5807 unsigned int size, ofs;
5808 bool indirect;
5810 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5811 if (indirect)
5813 type = ptr;
5814 ptr = build_pointer_type (type);
5816 size = int_size_in_bytes (type);
5817 valist_type = TREE_TYPE (valist);
5819 /* Args grow down. Not handled by generic routines. */
5821 u = fold_convert (valist_type, size_in_bytes (type));
5822 t = build (MINUS_EXPR, valist_type, valist, u);
5824 /* Copied from va-pa.h, but we probably don't need to align to
5825 word size, since we generate and preserve that invariant. */
5826 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5827 t = build (BIT_AND_EXPR, valist_type, t, u);
5829 t = build (MODIFY_EXPR, valist_type, valist, t);
5831 ofs = (8 - size) % 4;
5832 if (ofs != 0)
5834 u = fold_convert (valist_type, size_int (ofs));
5835 t = build (PLUS_EXPR, valist_type, t, u);
5838 t = fold_convert (ptr, t);
5839 t = build_va_arg_indirect_ref (t);
5841 if (indirect)
5842 t = build_va_arg_indirect_ref (t);
5844 return t;
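/* A worked example of the 32-bit pointer arithmetic above: fetching an
   8-byte double decrements the va_list pointer by 8 and rounds it down
   to an 8-byte boundary (the -8 mask), while fetching a 1-byte char
   decrements by 1, rounds down to a 4-byte boundary, and then adds
   ofs = (8 - 1) % 4 = 3 so the value is read right justified from its
   slot.  */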
5848 /* True if MODE is valid for the target. By "valid", we mean able to
5849 be manipulated in non-trivial ways. In particular, this means all
5850 the arithmetic is supported.
5852 Currently, TImode is not valid as the HP 64-bit runtime documentation
5853 doesn't document the alignment and calling conventions for this type.
5854 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5855 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
5857 static bool
5858 pa_scalar_mode_supported_p (enum machine_mode mode)
5860 int precision = GET_MODE_PRECISION (mode);
5862 switch (GET_MODE_CLASS (mode))
5864 case MODE_PARTIAL_INT:
5865 case MODE_INT:
5866 if (precision == CHAR_TYPE_SIZE)
5867 return true;
5868 if (precision == SHORT_TYPE_SIZE)
5869 return true;
5870 if (precision == INT_TYPE_SIZE)
5871 return true;
5872 if (precision == LONG_TYPE_SIZE)
5873 return true;
5874 if (precision == LONG_LONG_TYPE_SIZE)
5875 return true;
5876 return false;
5878 case MODE_FLOAT:
5879 if (precision == FLOAT_TYPE_SIZE)
5880 return true;
5881 if (precision == DOUBLE_TYPE_SIZE)
5882 return true;
5883 if (precision == LONG_DOUBLE_TYPE_SIZE)
5884 return true;
5885 return false;
5887 default:
5888 gcc_unreachable ();
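/* A sketch of how the checks above play out on a typical HP-UX
   configuration: SImode (32 bits) matches INT_TYPE_SIZE and DFmode
   (64 bits) matches DOUBLE_TYPE_SIZE, so both are supported, whereas
   TImode (128 bits) matches none of the C type sizes and is rejected
   as explained in the comment before this function.  */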
5892 /* This routine handles all the normal conditional branch sequences we
5893 might need to generate. It handles compare immediate vs compare
5894 register, nullification of delay slots, varying length branches,
5895 negated branches, and all combinations of the above. It returns the
5896 output template appropriate for emitting the branch described by the
5897 given parameters. */
5899 const char *
5900 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
5902 static char buf[100];
5903 int useskip = 0;
5904 rtx xoperands[5];
5906 /* A conditional branch to the following instruction (e.g. the delay slot)
5907 is asking for a disaster. This can happen when not optimizing and
5908 when jump optimization fails.
5910 While it is usually safe to emit nothing, this can fail if the
5911 preceding instruction is a nullified branch with an empty delay
5912 slot and the same branch target as this branch. We could check
5913 for this but jump optimization should eliminate nop jumps. It
5914 is always safe to emit a nop. */
5915 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5916 return "nop";
5918 /* The doubleword form of the cmpib instruction doesn't have the LEU
5919 and GTU conditions while the cmpb instruction does. Since we accept
5920 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5921 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5922 operands[2] = gen_rtx_REG (DImode, 0);
5924 /* If this is a long branch with its delay slot unfilled, set `nullify'
5925 as it can nullify the delay slot and save a nop. */
5926 if (length == 8 && dbr_sequence_length () == 0)
5927 nullify = 1;
5929 /* If this is a short forward conditional branch which did not get
5930 its delay slot filled, the delay slot can still be nullified. */
5931 if (! nullify && length == 4 && dbr_sequence_length () == 0)
5932 nullify = forward_branch_p (insn);
5934 /* A forward branch over a single nullified insn can be done with a
5935 comclr instruction. This avoids a single cycle penalty due to a
5936 mis-predicted branch if we fall through (branch not taken). */
5937 if (length == 4
5938 && next_real_insn (insn) != 0
5939 && get_attr_length (next_real_insn (insn)) == 4
5940 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
5941 && nullify)
5942 useskip = 1;
5944 switch (length)
5946 /* All short conditional branches except backwards with an unfilled
5947 delay slot. */
5948 case 4:
5949 if (useskip)
5950 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
5951 else
5952 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5953 if (GET_MODE (operands[1]) == DImode)
5954 strcat (buf, "*");
5955 if (negated)
5956 strcat (buf, "%B3");
5957 else
5958 strcat (buf, "%S3");
5959 if (useskip)
5960 strcat (buf, " %2,%r1,%%r0");
5961 else if (nullify)
5962 strcat (buf, ",n %2,%r1,%0");
5963 else
5964 strcat (buf, " %2,%r1,%0");
5965 break;
5967 /* All long conditionals. Note a short backward branch with an
5968 unfilled delay slot is treated just like a long backward branch
5969 with an unfilled delay slot. */
5970 case 8:
5971 /* Handle weird backwards branch with a filled delay slot
5972 which is nullified. */
5973 if (dbr_sequence_length () != 0
5974 && ! forward_branch_p (insn)
5975 && nullify)
5977 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5978 if (GET_MODE (operands[1]) == DImode)
5979 strcat (buf, "*");
5980 if (negated)
5981 strcat (buf, "%S3");
5982 else
5983 strcat (buf, "%B3");
5984 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
5986 /* Handle short backwards branch with an unfilled delay slot.
5987 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
5988 taken and untaken branches. */
5989 else if (dbr_sequence_length () == 0
5990 && ! forward_branch_p (insn)
5991 && INSN_ADDRESSES_SET_P ()
5992 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
5993 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
5995 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5996 if (GET_MODE (operands[1]) == DImode)
5997 strcat (buf, "*");
5998 if (negated)
5999 strcat (buf, "%B3 %2,%r1,%0%#");
6000 else
6001 strcat (buf, "%S3 %2,%r1,%0%#");
6003 else
6005 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6006 if (GET_MODE (operands[1]) == DImode)
6007 strcat (buf, "*");
6008 if (negated)
6009 strcat (buf, "%S3");
6010 else
6011 strcat (buf, "%B3");
6012 if (nullify)
6013 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6014 else
6015 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6017 break;
6019 case 20:
6020 case 28:
6021 xoperands[0] = operands[0];
6022 xoperands[1] = operands[1];
6023 xoperands[2] = operands[2];
6024 xoperands[3] = operands[3];
6026 /* The reversed conditional branch must branch over one additional
6027 instruction if the delay slot is filled. If the delay slot
6028 is empty, the instruction after the reversed condition branch
6029 must be nullified. */
6030 nullify = dbr_sequence_length () == 0;
6031 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
6033 /* Create a reversed conditional branch which branches around
6034 the following insns. */
6035 if (GET_MODE (operands[1]) != DImode)
6037 if (nullify)
6039 if (negated)
6040 strcpy (buf,
6041 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6042 else
6043 strcpy (buf,
6044 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6046 else
6048 if (negated)
6049 strcpy (buf,
6050 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6051 else
6052 strcpy (buf,
6053 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6056 else
6058 if (nullify)
6060 if (negated)
6061 strcpy (buf,
6062 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6063 else
6064 strcpy (buf,
6065 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6067 else
6069 if (negated)
6070 strcpy (buf,
6071 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6072 else
6073 strcpy (buf,
6074 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6078 output_asm_insn (buf, xoperands);
6079 return output_lbranch (operands[0], insn);
6081 default:
6082 gcc_unreachable ();
6084 return buf;
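/* Example output (a sketch of the length 4 case above): a nullified
   forward DImode compare-and-branch might be emitted as

       cmpb,*<>,n %r25,%r26,L$0012

   where "*" selects the doubleword form and ",n" nullifies the
   instruction in the delay slot.  */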
6087 /* This routine handles long unconditional branches that exceed the
6088 maximum range of a simple branch instruction. */
6090 const char *
6091 output_lbranch (rtx dest, rtx insn)
6093 rtx xoperands[2];
6095 xoperands[0] = dest;
6097 /* First, free up the delay slot. */
6098 if (dbr_sequence_length () != 0)
6100 /* We can't handle a jump in the delay slot. */
6101 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6103 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6104 optimize, 0, NULL);
6106 /* Now delete the delay insn. */
6107 PUT_CODE (NEXT_INSN (insn), NOTE);
6108 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6109 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6112 /* Output an insn to save %r1. The runtime documentation doesn't
6113 specify whether the "Clean Up" slot in the caller's frame can
6114 be clobbered by the callee. It isn't copied by HP's builtin
6115 alloca, so this suggests that it can be clobbered if necessary.
6116 The "Static Link" location is copied by HP builtin alloca, so
6117 we avoid using it. Using the cleanup slot might be a problem
6118 if we have to interoperate with languages that pass cleanup
6119 information. However, it should be possible to handle these
6120 situations with GCC's asm feature.
6122 The "Current RP" slot is reserved for the called procedure, so
6123 we try to use it when we don't have a frame of our own. It's
6124 rather unlikely that we won't have a frame when we need to emit
6125 a very long branch.
6127 Really the way to go long term is a register scavenger; go to
6128 the target of the jump and find a register which we can use
6129 as a scratch to hold the value in %r1. Then, we wouldn't have
6130 to free up the delay slot or clobber a slot that may be needed
6131 for other purposes. */
6132 if (TARGET_64BIT)
6134 if (actual_fsize == 0 && !regs_ever_live[2])
6135 /* Use the return pointer slot in the frame marker. */
6136 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6137 else
6138 /* Use the slot at -40 in the frame marker since HP builtin
6139 alloca doesn't copy it. */
6140 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6142 else
6144 if (actual_fsize == 0 && !regs_ever_live[2])
6145 /* Use the return pointer slot in the frame marker. */
6146 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6147 else
6148 /* Use the "Clean Up" slot in the frame marker. In GCC,
6149 the only other use of this location is for copying a
6150 floating-point double argument from a floating-point
6151 register to two general registers. The copy is done
6152 as an "atomic" operation when outputting a call, so it
6153 won't interfere with our using the location here. */
6154 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6157 if (TARGET_PORTABLE_RUNTIME)
6159 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6160 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6161 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6163 else if (flag_pic)
6165 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6166 if (TARGET_SOM || !TARGET_GAS)
6168 xoperands[1] = gen_label_rtx ();
6169 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6170 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6171 CODE_LABEL_NUMBER (xoperands[1]));
6172 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6174 else
6176 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6177 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6179 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6181 else
6182 /* Now output a very long branch to the original target. */
6183 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6185 /* Now restore the value of %r1 in the delay slot. */
6186 if (TARGET_64BIT)
6188 if (actual_fsize == 0 && !regs_ever_live[2])
6189 return "ldd -16(%%r30),%%r1";
6190 else
6191 return "ldd -40(%%r30),%%r1";
6193 else
6195 if (actual_fsize == 0 && !regs_ever_live[2])
6196 return "ldw -20(%%r30),%%r1";
6197 else
6198 return "ldw -12(%%r30),%%r1";
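/* A sketch of a complete long-branch sequence for the common 32-bit,
   non-PIC case with a frameless leaf function:

       stw %r1,-20(%r30)
       ldil L'target,%r1
       be R'target(%sr4,%r1)
       ldw -20(%r30),%r1

   %r1 is saved, loaded with the target address, used for the external
   branch, and finally restored in the branch's delay slot.  */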
6202 /* This routine handles all the branch-on-bit conditional branch sequences we
6203 might need to generate. It handles nullification of delay slots,
6204 varying length branches, negated branches and all combinations of the
6205 above. It returns the appropriate output template to emit the branch. */
6207 const char *
6208 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6209 int negated, rtx insn, int which)
6211 static char buf[100];
6212 int useskip = 0;
6214 /* A conditional branch to the following instruction (e.g. the delay slot) is
6215 asking for a disaster. I do not think this can happen as this pattern
6216 is only used when optimizing; jump optimization should eliminate the
6217 jump. But be prepared just in case. */
6219 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6220 return "nop";
6222 /* If this is a long branch with its delay slot unfilled, set `nullify'
6223 as it can nullify the delay slot and save a nop. */
6224 if (length == 8 && dbr_sequence_length () == 0)
6225 nullify = 1;
6227 /* If this is a short forward conditional branch which did not get
6228 its delay slot filled, the delay slot can still be nullified. */
6229 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6230 nullify = forward_branch_p (insn);
6232 /* A forward branch over a single nullified insn can be done with an
6233 extrs instruction. This avoids a single cycle penalty due to a
6234 mis-predicted branch if we fall through (branch not taken). */
6236 if (length == 4
6237 && next_real_insn (insn) != 0
6238 && get_attr_length (next_real_insn (insn)) == 4
6239 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6240 && nullify)
6241 useskip = 1;
6243 switch (length)
6246 /* All short conditional branches except backwards with an unfilled
6247 delay slot. */
6248 case 4:
6249 if (useskip)
6250 strcpy (buf, "{extrs,|extrw,s,}");
6251 else
6252 strcpy (buf, "bb,");
6253 if (useskip && GET_MODE (operands[0]) == DImode)
6254 strcpy (buf, "extrd,s,*");
6255 else if (GET_MODE (operands[0]) == DImode)
6256 strcpy (buf, "bb,*");
6257 if ((which == 0 && negated)
6258 || (which == 1 && ! negated))
6259 strcat (buf, ">=");
6260 else
6261 strcat (buf, "<");
6262 if (useskip)
6263 strcat (buf, " %0,%1,1,%%r0");
6264 else if (nullify && negated)
6265 strcat (buf, ",n %0,%1,%3");
6266 else if (nullify && ! negated)
6267 strcat (buf, ",n %0,%1,%2");
6268 else if (! nullify && negated)
6269 strcat (buf, "%0,%1,%3");
6270 else if (! nullify && ! negated)
6271 strcat (buf, " %0,%1,%2");
6272 break;
6274 /* All long conditionals. Note a short backward branch with an
6275 unfilled delay slot is treated just like a long backward branch
6276 with an unfilled delay slot. */
6277 case 8:
6278 /* Handle weird backwards branch with a filled delay slot
6279 which is nullified. */
6280 if (dbr_sequence_length () != 0
6281 && ! forward_branch_p (insn)
6282 && nullify)
6284 strcpy (buf, "bb,");
6285 if (GET_MODE (operands[0]) == DImode)
6286 strcat (buf, "*");
6287 if ((which == 0 && negated)
6288 || (which == 1 && ! negated))
6289 strcat (buf, "<");
6290 else
6291 strcat (buf, ">=");
6292 if (negated)
6293 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6294 else
6295 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6297 /* Handle short backwards branch with an unfilled delay slot.
6298 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6299 taken and untaken branches. */
6300 else if (dbr_sequence_length () == 0
6301 && ! forward_branch_p (insn)
6302 && INSN_ADDRESSES_SET_P ()
6303 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6304 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6306 strcpy (buf, "bb,");
6307 if (GET_MODE (operands[0]) == DImode)
6308 strcat (buf, "*");
6309 if ((which == 0 && negated)
6310 || (which == 1 && ! negated))
6311 strcat (buf, ">=");
6312 else
6313 strcat (buf, "<");
6314 if (negated)
6315 strcat (buf, " %0,%1,%3%#");
6316 else
6317 strcat (buf, " %0,%1,%2%#");
6319 else
6321 strcpy (buf, "{extrs,|extrw,s,}");
6322 if (GET_MODE (operands[0]) == DImode)
6323 strcpy (buf, "extrd,s,*");
6324 if ((which == 0 && negated)
6325 || (which == 1 && ! negated))
6326 strcat (buf, "<");
6327 else
6328 strcat (buf, ">=");
6329 if (nullify && negated)
6330 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6331 else if (nullify && ! negated)
6332 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6333 else if (negated)
6334 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6335 else
6336 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6338 break;
6340 default:
6341 gcc_unreachable ();
6343 return buf;
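/* For example (a sketch), a short branch taken when bit 5 of %r4 is
   set might be emitted as

       bb,< %r4,5,L$0034

   with the ">=" condition substituted when the sense of the test is
   negated as described above.  */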
6346 /* This routine handles all the branch-on-variable-bit conditional branch
6347 sequences we might need to generate. It handles nullification of delay
6348 slots, varying length branches, negated branches and all combinations
6349 of the above. It returns the appropriate output template to emit the
6350 branch. */
6352 const char *
6353 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6354 int negated, rtx insn, int which)
6356 static char buf[100];
6357 int useskip = 0;
6359 /* A conditional branch to the following instruction (e.g. the delay slot) is
6360 asking for a disaster. I do not think this can happen as this pattern
6361 is only used when optimizing; jump optimization should eliminate the
6362 jump. But be prepared just in case. */
6364 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6365 return "nop";
6367 /* If this is a long branch with its delay slot unfilled, set `nullify'
6368 as it can nullify the delay slot and save a nop. */
6369 if (length == 8 && dbr_sequence_length () == 0)
6370 nullify = 1;
6372 /* If this is a short forward conditional branch which did not get
6373 its delay slot filled, the delay slot can still be nullified. */
6374 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6375 nullify = forward_branch_p (insn);
6377 /* A forward branch over a single nullified insn can be done with an
6378 extrs instruction. This avoids a single cycle penalty due to a
6379 mis-predicted branch if we fall through (branch not taken). */
6381 if (length == 4
6382 && next_real_insn (insn) != 0
6383 && get_attr_length (next_real_insn (insn)) == 4
6384 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6385 && nullify)
6386 useskip = 1;
6388 switch (length)
6391 /* All short conditional branches except backwards with an unfilled
6392 delay slot. */
6393 case 4:
6394 if (useskip)
6395 strcpy (buf, "{vextrs,|extrw,s,}");
6396 else
6397 strcpy (buf, "{bvb,|bb,}");
6398 if (useskip && GET_MODE (operands[0]) == DImode)
6399 strcpy (buf, "extrd,s,*");
6400 else if (GET_MODE (operands[0]) == DImode)
6401 strcpy (buf, "bb,*");
6402 if ((which == 0 && negated)
6403 || (which == 1 && ! negated))
6404 strcat (buf, ">=");
6405 else
6406 strcat (buf, "<");
6407 if (useskip)
6408 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6409 else if (nullify && negated)
6410 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6411 else if (nullify && ! negated)
6412 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6413 else if (! nullify && negated)
6414 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6415 else if (! nullify && ! negated)
6416 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6417 break;
6419 /* All long conditionals. Note a short backward branch with an
6420 unfilled delay slot is treated just like a long backward branch
6421 with an unfilled delay slot. */
6422 case 8:
6423 /* Handle weird backwards branch with a filled delay slot
6424 which is nullified. */
6425 if (dbr_sequence_length () != 0
6426 && ! forward_branch_p (insn)
6427 && nullify)
6429 strcpy (buf, "{bvb,|bb,}");
6430 if (GET_MODE (operands[0]) == DImode)
6431 strcat (buf, "*");
6432 if ((which == 0 && negated)
6433 || (which == 1 && ! negated))
6434 strcat (buf, "<");
6435 else
6436 strcat (buf, ">=");
6437 if (negated)
6438 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6439 else
6440 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6442 /* Handle short backwards branch with an unfilled delay slot.
6443 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6444 taken and untaken branches. */
6445 else if (dbr_sequence_length () == 0
6446 && ! forward_branch_p (insn)
6447 && INSN_ADDRESSES_SET_P ()
6448 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6449 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6451 strcpy (buf, "{bvb,|bb,}");
6452 if (GET_MODE (operands[0]) == DImode)
6453 strcat (buf, "*");
6454 if ((which == 0 && negated)
6455 || (which == 1 && ! negated))
6456 strcat (buf, ">=");
6457 else
6458 strcat (buf, "<");
6459 if (negated)
6460 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6461 else
6462 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6464 else
6466 strcpy (buf, "{vextrs,|extrw,s,}");
6467 if (GET_MODE (operands[0]) == DImode)
6468 strcpy (buf, "extrd,s,*");
6469 if ((which == 0 && negated)
6470 || (which == 1 && ! negated))
6471 strcat (buf, "<");
6472 else
6473 strcat (buf, ">=");
6474 if (nullify && negated)
6475 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6476 else if (nullify && ! negated)
6477 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6478 else if (negated)
6479 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6480 else
6481 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6483 break;
6485 default:
6486 gcc_unreachable ();
6488 return buf;
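/* The variable-bit case is analogous (a sketch): the older assembler
   syntax "bvb,< %r4,L$0036" tests the bit selected by %sar implicitly,
   while the newer syntax spells the same test "bb,< %r4,%sar,L$0036"
   with %sar as an explicit operand, exactly as the alternatives in the
   templates above show.  */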
6491 /* Return the output template for emitting a dbra type insn.
6493 Note it may perform some output operations on its own before
6494 returning the final output string. */
6495 const char *
6496 output_dbra (rtx *operands, rtx insn, int which_alternative)
6499 /* A conditional branch to the following instruction (e.g. the delay slot) is
6500 asking for a disaster. Be prepared! */
6502 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6504 if (which_alternative == 0)
6505 return "ldo %1(%0),%0";
6506 else if (which_alternative == 1)
6508 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6509 output_asm_insn ("ldw -16(%%r30),%4", operands);
6510 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6511 return "{fldws|fldw} -16(%%r30),%0";
6513 else
6515 output_asm_insn ("ldw %0,%4", operands);
6516 return "ldo %1(%4),%4\n\tstw %4,%0";
6520 if (which_alternative == 0)
6522 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6523 int length = get_attr_length (insn);
6525 /* If this is a long branch with its delay slot unfilled, set `nullify'
6526 as it can nullify the delay slot and save a nop. */
6527 if (length == 8 && dbr_sequence_length () == 0)
6528 nullify = 1;
6530 /* If this is a short forward conditional branch which did not get
6531 its delay slot filled, the delay slot can still be nullified. */
6532 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6533 nullify = forward_branch_p (insn);
6535 switch (length)
6537 case 4:
6538 if (nullify)
6539 return "addib,%C2,n %1,%0,%3";
6540 else
6541 return "addib,%C2 %1,%0,%3";
6543 case 8:
6544 /* Handle weird backwards branch with a filled delay slot
6545 which is nullified. */
6546 if (dbr_sequence_length () != 0
6547 && ! forward_branch_p (insn)
6548 && nullify)
6549 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6550 /* Handle short backwards branch with an unfilled delay slot.
6551 Using an addb;nop rather than addi;bl saves 1 cycle for both
6552 taken and untaken branches. */
6553 else if (dbr_sequence_length () == 0
6554 && ! forward_branch_p (insn)
6555 && INSN_ADDRESSES_SET_P ()
6556 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6557 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6558 return "addib,%C2 %1,%0,%3%#";
6560 /* Handle normal cases. */
6561 if (nullify)
6562 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6563 else
6564 return "addi,%N2 %1,%0,%0\n\tb %3";
6566 default:
6567 gcc_unreachable ();
6571 /* Deal with gross reload from FP register case. */
6572 else if (which_alternative == 1)
6574 /* Move loop counter from FP register to MEM then into a GR,
6575 increment the GR, store the GR into MEM, and finally reload
6576 the FP register from MEM from within the branch's delay slot. */
6577 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6578 operands);
6579 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6580 if (get_attr_length (insn) == 24)
6581 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6582 else
6583 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6585 /* Deal with gross reload from memory case. */
6586 else
6588 /* Reload loop counter from memory, the store back to memory
6589 happens in the branch's delay slot. */
6590 output_asm_insn ("ldw %0,%4", operands);
6591 if (get_attr_length (insn) == 12)
6592 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6593 else
6594 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
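/* Example of the short form above (a sketch): a decrement-and-branch
   loop closing might come out as

       addib,> -1,%r3,L$loop

   which adds -1 to %r3 and branches back while the result is still
   positive.  */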
6598 /* Return the output template for emitting a movb type insn.
6600 Note it may perform some output operations on its own before
6601 returning the final output string. */
6602 const char *
6603 output_movb (rtx *operands, rtx insn, int which_alternative,
6604 int reverse_comparison)
6607 /* A conditional branch to the following instruction (e.g. the delay slot) is
6608 asking for a disaster. Be prepared! */
6610 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6612 if (which_alternative == 0)
6613 return "copy %1,%0";
6614 else if (which_alternative == 1)
6616 output_asm_insn ("stw %1,-16(%%r30)", operands);
6617 return "{fldws|fldw} -16(%%r30),%0";
6619 else if (which_alternative == 2)
6620 return "stw %1,%0";
6621 else
6622 return "mtsar %r1";
6625 /* Support the second variant. */
6626 if (reverse_comparison)
6627 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6629 if (which_alternative == 0)
6631 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6632 int length = get_attr_length (insn);
6634 /* If this is a long branch with its delay slot unfilled, set `nullify'
6635 as it can nullify the delay slot and save a nop. */
6636 if (length == 8 && dbr_sequence_length () == 0)
6637 nullify = 1;
6639 /* If this is a short forward conditional branch which did not get
6640 its delay slot filled, the delay slot can still be nullified. */
6641 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6642 nullify = forward_branch_p (insn);
6644 switch (length)
6646 case 4:
6647 if (nullify)
6648 return "movb,%C2,n %1,%0,%3";
6649 else
6650 return "movb,%C2 %1,%0,%3";
6652 case 8:
6653 /* Handle weird backwards branch with a filled delay slot
6654 which is nullified. */
6655 if (dbr_sequence_length () != 0
6656 && ! forward_branch_p (insn)
6657 && nullify)
6658 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6660 /* Handle short backwards branch with an unfilled delay slot.
6661 Using a movb;nop rather than or;bl saves 1 cycle for both
6662 taken and untaken branches. */
6663 else if (dbr_sequence_length () == 0
6664 && ! forward_branch_p (insn)
6665 && INSN_ADDRESSES_SET_P ()
6666 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6667 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6668 return "movb,%C2 %1,%0,%3%#";
6669 /* Handle normal cases. */
6670 if (nullify)
6671 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6672 else
6673 return "or,%N2 %1,%%r0,%0\n\tb %3";
6675 default:
6676 gcc_unreachable ();
6679 /* Deal with gross reload from FP register case. */
6680 else if (which_alternative == 1)
6682 /* Move loop counter from FP register to MEM then into a GR,
6683 increment the GR, store the GR into MEM, and finally reload
6684 the FP register from MEM from within the branch's delay slot. */
6685 output_asm_insn ("stw %1,-16(%%r30)", operands);
6686 if (get_attr_length (insn) == 12)
6687 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6688 else
6689 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6691 /* Deal with gross reload from memory case. */
6692 else if (which_alternative == 2)
6694 /* Reload loop counter from memory, the store back to memory
6695 happens in the branch's delay slot. */
6696 if (get_attr_length (insn) == 8)
6697 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6698 else
6699 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6701 /* Handle SAR as a destination. */
6702 else
6704 if (get_attr_length (insn) == 8)
6705 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6706 else
6707 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
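/* Example of the first alternative above (a sketch): a copy that also
   branches when the moved value is zero could be emitted as

       movb,= %r26,%r3,L$0040

   performing the register copy and the conditional branch in a single
   instruction.  */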
6711 /* Copy any FP arguments in INSN into integer registers. */
6712 static void
6713 copy_fp_args (rtx insn)
6715 rtx link;
6716 rtx xoperands[2];
6718 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6720 int arg_mode, regno;
6721 rtx use = XEXP (link, 0);
6723 if (! (GET_CODE (use) == USE
6724 && GET_CODE (XEXP (use, 0)) == REG
6725 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6726 continue;
6728 arg_mode = GET_MODE (XEXP (use, 0));
6729 regno = REGNO (XEXP (use, 0));
6731 /* Is it a floating point register? */
6732 if (regno >= 32 && regno <= 39)
6734 /* Copy the FP register into an integer register via memory. */
6735 if (arg_mode == SFmode)
6737 xoperands[0] = XEXP (use, 0);
6738 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6739 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6740 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6742 else
6744 xoperands[0] = XEXP (use, 0);
6745 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6746 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6747 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6748 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
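/* A sketch of the copy emitted above for an SFmode argument, assuming
   the value lives in the first FP argument register (%fr4) so that the
   mapping above selects %r26:

       fstws %fr4,-16(%sr0,%r30)
       ldw -16(%sr0,%r30),%r26

   The value is bounced through a stack temporary; this port always
   performs the FP-to-GR copy through memory.  */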
6754 /* Compute the length of the FP argument copy sequence for INSN. */
6755 static int
6756 length_fp_args (rtx insn)
6758 int length = 0;
6759 rtx link;
6761 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6763 int arg_mode, regno;
6764 rtx use = XEXP (link, 0);
6766 if (! (GET_CODE (use) == USE
6767 && GET_CODE (XEXP (use, 0)) == REG
6768 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6769 continue;
6771 arg_mode = GET_MODE (XEXP (use, 0));
6772 regno = REGNO (XEXP (use, 0));
6774 /* Is it a floating point register? */
6775 if (regno >= 32 && regno <= 39)
6777 if (arg_mode == SFmode)
6778 length += 8;
6779 else
6780 length += 12;
6784 return length;
6787 /* Return the attribute length for the millicode call instruction INSN.
6788 The length must match the code generated by output_millicode_call.
6789 We include the delay slot in the returned length as it is better to
6790 overestimate the length than to underestimate it. */
6792 int
6793 attr_length_millicode_call (rtx insn)
6795 unsigned long distance = -1;
6796 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
6798 if (INSN_ADDRESSES_SET_P ())
6800 distance = (total + insn_current_reference_address (insn));
6801 if (distance < total)
6802 distance = -1;
6805 if (TARGET_64BIT)
6807 if (!TARGET_LONG_CALLS && distance < 7600000)
6808 return 8;
6810 return 20;
6812 else if (TARGET_PORTABLE_RUNTIME)
6813 return 24;
6814 else
6816 if (!TARGET_LONG_CALLS && distance < 240000)
6817 return 8;
6819 if (TARGET_LONG_ABS_CALL && !flag_pic)
6820 return 12;
6822 return 24;
6826 /* INSN is a function call. It may have an unconditional jump
6827 in its delay slot.
6829 CALL_DEST is the routine we are calling. */
6831 const char *
6832 output_millicode_call (rtx insn, rtx call_dest)
6834 int attr_length = get_attr_length (insn);
6835 int seq_length = dbr_sequence_length ();
6836 int distance;
6837 rtx seq_insn;
6838 rtx xoperands[3];
6840 xoperands[0] = call_dest;
6841 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
6843 /* Handle the common case where we are sure that the branch will
6844 reach the beginning of the $CODE$ subspace. The within-reach
6845 form of the $$sh_func_adrs call has a length of 28. Because
6846 it has an attribute type of multi, it never has a nonzero
6847 sequence length. The length of the $$sh_func_adrs is the same
6848 as certain out of reach PIC calls to other routines. */
6849 if (!TARGET_LONG_CALLS
6850 && ((seq_length == 0
6851 && (attr_length == 12
6852 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
6853 || (seq_length != 0 && attr_length == 8)))
6855 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
6857 else
6859 if (TARGET_64BIT)
6861 /* It might seem that one insn could be saved by accessing
6862 the millicode function using the linkage table. However,
6863 this doesn't work in shared libraries and other dynamically
6864 loaded objects. Using a pc-relative sequence also avoids
6865 problems related to the implicit use of the gp register. */
6866 output_asm_insn ("b,l .+8,%%r1", xoperands);
6868 if (TARGET_GAS)
6870 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
6871 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6873 else
6875 xoperands[1] = gen_label_rtx ();
6876 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6877 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6878 CODE_LABEL_NUMBER (xoperands[1]));
6879 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6882 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
6884 else if (TARGET_PORTABLE_RUNTIME)
6886 /* Pure portable runtime doesn't allow be/ble; we also don't
6887 have PIC support in the assembler/linker, so this sequence
6888 is needed. */
6890 /* Get the address of our target into %r1. */
6891 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6892 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6894 /* Get our return address into %r31. */
6895 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
6896 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
6898 /* Jump to our target address in %r1. */
6899 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6901 else if (!flag_pic)
6903 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6904 if (TARGET_PA_20)
6905 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
6906 else
6907 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
6909 else
6911 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6912 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
6914 if (TARGET_SOM || !TARGET_GAS)
6916 /* The HP assembler can generate relocations for the
6917 difference of two symbols. GAS can do this for a
6918 millicode symbol but not an arbitrary external
6919 symbol when generating SOM output. */
6920 xoperands[1] = gen_label_rtx ();
6921 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6922 CODE_LABEL_NUMBER (xoperands[1]));
6923 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6924 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6926 else
6928 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
6929 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
6930 xoperands);
6933 /* Jump to our target address in %r1. */
6934 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6938 if (seq_length == 0)
6939 output_asm_insn ("nop", xoperands);
6941 /* We are done if there isn't a jump in the delay slot. */
6942 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
6943 return "";
6945 /* This call has an unconditional jump in its delay slot. */
6946 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
6948 /* See if the return address can be adjusted. Use the containing
6949 sequence insn's address. */
6950 if (INSN_ADDRESSES_SET_P ())
6952 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
6953 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
6954 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
6956 if (VAL_14_BITS_P (distance))
6958 xoperands[1] = gen_label_rtx ();
6959 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
6960 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6961 CODE_LABEL_NUMBER (xoperands[1]));
6963 else
6964 /* ??? This branch may not reach its target. */
6965 output_asm_insn ("nop\n\tb,n %0", xoperands);
6967 else
6968 /* ??? This branch may not reach its target. */
6969 output_asm_insn ("nop\n\tb,n %0", xoperands);
6971 /* Delete the jump. */
6972 PUT_CODE (NEXT_INSN (insn), NOTE);
6973 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6974 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6976 return "";
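/* In the common in-range case above, a millicode call is a single
   branch, e.g. "bl $$remU,%r31" (or "b,l ...,%r2" on the 64-bit
   runtime): the 32-bit runtime uses %r31 rather than %r2 as the
   millicode link register, which is why xoperands[2] is selected the
   way it is.  The routine name is only an example.  */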
6979 /* Return the attribute length of the call instruction INSN. The SIBCALL
6980 flag indicates whether INSN is a regular call or a sibling call. The
6981 length returned must be at least as long as the code actually generated by
6982 output_call. Since branch shortening is done before delay branch
6983 sequencing, there is no way to determine whether or not the delay
6984 slot will be filled during branch shortening. Even when the delay
6985 slot is filled, we may have to add a nop if the delay slot contains
6986 a branch that can't reach its target. Thus, we always have to include
6987 the delay slot in the length estimate. This used to be done in
6988 pa_adjust_insn_length but we do it here now as some sequences always
6989 fill the delay slot and we can save four bytes in the estimate for
6990 these sequences. */
6992 int
6993 attr_length_call (rtx insn, int sibcall)
6995 int local_call;
6996 rtx call_dest;
6997 tree call_decl;
6998 int length = 0;
6999 rtx pat = PATTERN (insn);
7000 unsigned long distance = -1;
7002 if (INSN_ADDRESSES_SET_P ())
7004 unsigned long total;
7006 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7007 distance = (total + insn_current_reference_address (insn));
7008 if (distance < total)
7009 distance = -1;
7012 /* Determine if this is a local call. */
7013 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7014 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7015 else
7016 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7018 call_decl = SYMBOL_REF_DECL (call_dest);
7019 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7021 /* pc-relative branch. */
7022 if (!TARGET_LONG_CALLS
7023 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7024 || distance < 240000))
7025 length += 8;
7027 /* 64-bit plabel sequence. */
7028 else if (TARGET_64BIT && !local_call)
7029 length += sibcall ? 28 : 24;
7031 /* non-pic long absolute branch sequence. */
7032 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7033 length += 12;
7035 /* long pc-relative branch sequence. */
7036 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7037 || (TARGET_64BIT && !TARGET_GAS)
7038 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7040 length += 20;
7042 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7043 length += 8;
7046 /* 32-bit plabel sequence. */
7047 else
7049 length += 32;
7051 if (TARGET_SOM)
7052 length += length_fp_args (insn);
7054 if (flag_pic)
7055 length += 4;
7057 if (!TARGET_PA_20)
7059 if (!sibcall)
7060 length += 8;
7062 if (!TARGET_NO_SPACE_REGS)
7063 length += 8;
7067 return length;
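/* Worked example (a sketch): a 32-bit, PIC, non-sibling call that needs
   the plabel sequence on a target with space registers and without
   PA 2.0 instructions is estimated at 32 + 4 (PIC) + 8 (return address
   adjustment) + 8 (space register juggling) = 52 bytes, plus the FP
   argument copies when generating SOM output.  */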
7070 /* INSN is a function call. It may have an unconditional jump
7071 in its delay slot.
7073 CALL_DEST is the routine we are calling. */
7075 const char *
7076 output_call (rtx insn, rtx call_dest, int sibcall)
7078 int delay_insn_deleted = 0;
7079 int delay_slot_filled = 0;
7080 int seq_length = dbr_sequence_length ();
7081 tree call_decl = SYMBOL_REF_DECL (call_dest);
7082 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7083 rtx xoperands[2];
7085 xoperands[0] = call_dest;
7087 /* Handle the common case where we're sure that the branch will reach
7088 the beginning of the "$CODE$" subspace. This is the beginning of
7089 the current function if we are in a named section. */
7090 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7092 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7093 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7095 else
7097 if (TARGET_64BIT && !local_call)
7099 /* ??? As far as I can tell, the HP linker doesn't support the
7100 long pc-relative sequence described in the 64-bit runtime
7101 architecture. So, we use a slightly longer indirect call. */
7102 struct deferred_plabel *p = get_plabel (call_dest);
7104 xoperands[0] = p->internal_label;
7105 xoperands[1] = gen_label_rtx ();
7107 /* If this isn't a sibcall, we put the load of %r27 into the
7108 delay slot. We can't do this in a sibcall as we don't
7109 have a second call-clobbered scratch register available. */
7110 if (seq_length != 0
7111 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7112 && !sibcall)
7114 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7115 optimize, 0, NULL);
7117 /* Now delete the delay insn. */
7118 PUT_CODE (NEXT_INSN (insn), NOTE);
7119 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7120 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7121 delay_insn_deleted = 1;
7124 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7125 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7126 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7128 if (sibcall)
7130 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7131 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7132 output_asm_insn ("bve (%%r1)", xoperands);
7134 else
7136 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7137 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7138 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7139 delay_slot_filled = 1;
7142 else
7144 int indirect_call = 0;
7146 /* Emit a long call. There are several different sequences
7147 of increasing length and complexity. In most cases,
7148 they don't allow an instruction in the delay slot. */
7149 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7150 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7151 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7152 && !TARGET_64BIT)
7153 indirect_call = 1;
7155 if (seq_length != 0
7156 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7157 && !sibcall
7158 && (!TARGET_PA_20 || indirect_call))
7160 /* A non-jump insn in the delay slot. By definition we can
7161 emit this insn before the call (and in fact before argument
7162 relocating). */
7163 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7164 NULL);
7166 /* Now delete the delay insn. */
7167 PUT_CODE (NEXT_INSN (insn), NOTE);
7168 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7169 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7170 delay_insn_deleted = 1;
7173 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7175 /* This is the best sequence for making long calls in
7176 non-pic code. Unfortunately, GNU ld doesn't provide
7177 the stub needed for external calls, and GAS's support
7178 for this with the SOM linker is buggy. It is safe
7179 to use this for local calls. */
7180 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7181 if (sibcall)
7182 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7183 else
7185 if (TARGET_PA_20)
7186 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7187 xoperands);
7188 else
7189 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7191 output_asm_insn ("copy %%r31,%%r2", xoperands);
7192 delay_slot_filled = 1;
7195 else
7197 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7198 || (TARGET_64BIT && !TARGET_GAS))
7200 /* The HP assembler and linker can handle relocations
7201 for the difference of two symbols. GAS and the HP
7202 linker can't do this when one of the symbols is
7203 external. */
7204 xoperands[1] = gen_label_rtx ();
7205 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7206 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7207 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7208 CODE_LABEL_NUMBER (xoperands[1]));
7209 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7211 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7213 /* GAS currently can't generate the relocations that
7214 are needed for the SOM linker under HP-UX using this
7215 sequence. The GNU linker doesn't generate the stubs
7216 that are needed for external calls on TARGET_ELF32
7217 with this sequence. For now, we have to use a
7218 longer plabel sequence when using GAS. */
7219 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7220 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7221 xoperands);
7222 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7223 xoperands);
7225 else
7227 /* Emit a long plabel-based call sequence. This is
7228 essentially an inline implementation of $$dyncall.
7229 We don't actually try to call $$dyncall as this is
7230 as difficult as calling the function itself. */
7231 struct deferred_plabel *p = get_plabel (call_dest);
7233 xoperands[0] = p->internal_label;
7234 xoperands[1] = gen_label_rtx ();
7236 /* Since the call is indirect, FP arguments in registers
7237 need to be copied to the general registers. Then, the
7238 argument relocation stub will copy them back. */
7239 if (TARGET_SOM)
7240 copy_fp_args (insn);
7242 if (flag_pic)
7244 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7245 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7246 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7248 else
7250 output_asm_insn ("addil LR'%0-$global$,%%r27",
7251 xoperands);
7252 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7253 xoperands);
7256 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7257 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7258 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7259 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7261 if (!sibcall && !TARGET_PA_20)
7263 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7264 if (TARGET_NO_SPACE_REGS)
7265 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7266 else
7267 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7271 if (TARGET_PA_20)
7273 if (sibcall)
7274 output_asm_insn ("bve (%%r1)", xoperands);
7275 else
7277 if (indirect_call)
7279 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7280 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7281 delay_slot_filled = 1;
7283 else
7284 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7287 else
7289 if (!TARGET_NO_SPACE_REGS)
7290 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7291 xoperands);
7293 if (sibcall)
7295 if (TARGET_NO_SPACE_REGS)
7296 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7297 else
7298 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7300 else
7302 if (TARGET_NO_SPACE_REGS)
7303 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7304 else
7305 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7307 if (indirect_call)
7308 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7309 else
7310 output_asm_insn ("copy %%r31,%%r2", xoperands);
7311 delay_slot_filled = 1;
7318 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7319 output_asm_insn ("nop", xoperands);
7321 /* We are done if there isn't a jump in the delay slot. */
7322 if (seq_length == 0
7323 || delay_insn_deleted
7324 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7325 return "";
7327 /* A sibcall should never have a branch in the delay slot. */
7328 gcc_assert (!sibcall);
7330 /* This call has an unconditional jump in its delay slot. */
7331 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7333 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7335 /* See if the return address can be adjusted. Use the containing
7336 sequence insn's address. */
7337 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7338 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7339 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7341 if (VAL_14_BITS_P (distance))
7343 xoperands[1] = gen_label_rtx ();
7344 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7345 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7346 CODE_LABEL_NUMBER (xoperands[1]));
7348 else
7349 output_asm_insn ("nop\n\tb,n %0", xoperands);
7351 else
7352 output_asm_insn ("b,n %0", xoperands);
7354 /* Delete the jump. */
7355 PUT_CODE (NEXT_INSN (insn), NOTE);
7356 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7357 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7359 return "";
7362 /* Return the attribute length of the indirect call instruction INSN.
7363 The length must match the code generated by output_indirect_call.
7364 The returned length includes the delay slot. Currently, the delay
7365 slot of an indirect call sequence is not exposed and it is used by
7366 the sequence itself. */
7368 int
7369 attr_length_indirect_call (rtx insn)
7371 unsigned long distance = -1;
7372 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7374 if (INSN_ADDRESSES_SET_P ())
7376 distance = (total + insn_current_reference_address (insn));
7377 if (distance < total)
7378 distance = -1;
7381 if (TARGET_64BIT)
7382 return 12;
7384 if (TARGET_FAST_INDIRECT_CALLS
7385 || (!TARGET_PORTABLE_RUNTIME
7386 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7387 return 8;
7389 if (flag_pic)
7390 return 24;
7392 if (TARGET_PORTABLE_RUNTIME)
7393 return 20;
7395 /* Out of reach, can use ble. */
7396 return 12;
7399 const char *
7400 output_indirect_call (rtx insn, rtx call_dest)
7402 rtx xoperands[1];
7404 if (TARGET_64BIT)
7406 xoperands[0] = call_dest;
7407 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7408 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7409 return "";
7412 /* First the special case for kernels, level 0 systems, etc. */
7413 if (TARGET_FAST_INDIRECT_CALLS)
7414 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7416 /* Now the normal case -- we can reach $$dyncall directly or
7417 we're sure that we can get there via a long-branch stub.
7419 No need to check target flags as the length uniquely identifies
7420 the remaining cases. */
7421 if (attr_length_indirect_call (insn) == 8)
7423 /* The HP linker substitutes a BLE for millicode calls using
7424 the short PIC PCREL form. Thus, we must use %r31 as the
7425 link register when generating PA 1.x code. */
7426 if (TARGET_PA_20)
7427 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7428 else
7429 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7432 /* Long millicode call, but we are not generating PIC or portable runtime
7433 code. */
7434 if (attr_length_indirect_call (insn) == 12)
7435 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7437 /* Long millicode call for portable runtime. */
7438 if (attr_length_indirect_call (insn) == 20)
7439 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7441 /* We need a long PIC call to $$dyncall. */
7442 xoperands[0] = NULL_RTX;
7443 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7444 if (TARGET_SOM || !TARGET_GAS)
7446 xoperands[0] = gen_label_rtx ();
7447 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7448 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7449 CODE_LABEL_NUMBER (xoperands[0]));
7450 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7452 else
7454 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7455 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7456 xoperands);
7458 output_asm_insn ("blr %%r0,%%r2", xoperands);
7459 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7460 return "";
7463 /* Return the total length of the save and restore instructions needed for
7464 the data linkage table pointer (i.e., the PIC register) across the call
7465 instruction INSN. No-return calls do not require a save and restore.
7466 In addition, we may be able to avoid the save and restore for calls
7467 within the same translation unit. */
7469 int
7470 attr_length_save_restore_dltp (rtx insn)
7472 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7473 return 0;
7475 return 8;
7478 /* In HP-UX 8.0's shared library scheme, special relocations are needed
7479 for function labels if they might be passed to a function
7480 in a shared library (because shared libraries don't live in code
7481 space), and special magic is needed to construct their address. */
7483 void
7484 hppa_encode_label (rtx sym)
7486 const char *str = XSTR (sym, 0);
7487 int len = strlen (str) + 1;
7488 char *newstr, *p;
7490 p = newstr = alloca (len + 1);
7491 *p++ = '@';
7492 strcpy (p, str);
7494 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
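/* For example, hppa_encode_label rewrites the symbol name "foo" as
   "@foo"; pa_strip_name_encoding below undoes this by skipping the '@'
   (and any '*' user-label prefix) when the plain name is needed.  */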
7497 static void
7498 pa_encode_section_info (tree decl, rtx rtl, int first)
7500 default_encode_section_info (decl, rtl, first);
7502 if (first && TEXT_SPACE_P (decl))
7504 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7505 if (TREE_CODE (decl) == FUNCTION_DECL)
7506 hppa_encode_label (XEXP (rtl, 0));
7510 /* This is sort of the inverse of pa_encode_section_info. */
7512 static const char *
7513 pa_strip_name_encoding (const char *str)
7515 str += (*str == '@');
7516 str += (*str == '*');
7517 return str;
7520 int
7521 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7523 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7526 /* Returns 1 if OP is a function label involved in a simple addition
7527 with a constant. Used to keep certain patterns from matching
7528 during instruction combination. */
7529 int
7530 is_function_label_plus_const (rtx op)
7532 /* Strip off any CONST. */
7533 if (GET_CODE (op) == CONST)
7534 op = XEXP (op, 0);
7536 return (GET_CODE (op) == PLUS
7537 && function_label_operand (XEXP (op, 0), Pmode)
7538 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7541 /* Output assembly code for a thunk to FUNCTION. */
7543 static void
7544 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7545 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7546 tree function)
7548 static unsigned int current_thunk_number;
7549 int val_14 = VAL_14_BITS_P (delta);
7550 int nbytes = 0;
7551 char label[16];
7552 rtx xoperands[4];
7554 xoperands[0] = XEXP (DECL_RTL (function), 0);
7555 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7556 xoperands[2] = GEN_INT (delta);
7558 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7559 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7561 /* Output the thunk. We know that the function is in the same
7562 translation unit (i.e., the same space) as the thunk, and that
7563 thunks are output after their method. Thus, we don't need an
7564 external branch to reach the function. With SOM and GAS,
7565 functions and thunks are effectively in different sections.
7566 Thus, we can always use an IA-relative branch and the linker
7567 will add a long branch stub if necessary.
7569 However, we have to be careful when generating PIC code on the
7570 SOM port to ensure that the sequence does not transfer to an
7571 import stub for the target function as this could clobber the
7572 return value saved at SP-24. This would also apply to the
7573 32-bit linux port if the multi-space model is implemented. */
7574 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7575 && !(flag_pic && TREE_PUBLIC (function))
7576 && (TARGET_GAS || last_address < 262132))
7577 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7578 && ((targetm.have_named_sections
7579 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7580 /* The GNU 64-bit linker has rather poor stub management.
7581 So, we use a long branch from thunks that aren't in
7582 the same section as the target function. */
7583 && ((!TARGET_64BIT
7584 && (DECL_SECTION_NAME (thunk_fndecl)
7585 != DECL_SECTION_NAME (function)))
7586 || ((DECL_SECTION_NAME (thunk_fndecl)
7587 == DECL_SECTION_NAME (function))
7588 && last_address < 262132)))
7589 || (!targetm.have_named_sections && last_address < 262132))))
7591 if (!val_14)
7592 output_asm_insn ("addil L'%2,%%r26", xoperands);
7594 output_asm_insn ("b %0", xoperands);
7596 if (val_14)
7598 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7599 nbytes += 8;
7601 else
7603 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7604 nbytes += 12;
7607 else if (TARGET_64BIT)
7609 /* We only have one call-clobbered scratch register, so we can't
7610 make use of the delay slot if delta doesn't fit in 14 bits. */
7611 if (!val_14)
7613 output_asm_insn ("addil L'%2,%%r26", xoperands);
7614 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7617 output_asm_insn ("b,l .+8,%%r1", xoperands);
7619 if (TARGET_GAS)
7621 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7622 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7624 else
7626 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7627 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7628 output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
7630 if (val_14)
7632 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7633 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7634 nbytes += 20;
7636 else
7638 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7639 nbytes += 24;
7642 else if (TARGET_PORTABLE_RUNTIME)
7644 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7645 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7647 if (!val_14)
7648 output_asm_insn ("addil L'%2,%%r26", xoperands);
7650 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7652 if (val_14)
7654 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7655 nbytes += 16;
7657 else
7659 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7660 nbytes += 20;
7663 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7665 /* The function is accessible from outside this module. The only
7666 way to avoid an import stub between the thunk and function is to
7667 call the function directly with an indirect sequence similar to
7668 that used by $$dyncall. This is possible because $$dyncall acts
7669 as the import stub in an indirect call. */
7670 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7671 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7672 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7673 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7674 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7675 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7676 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7677 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7678 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7680 if (!val_14)
7682 output_asm_insn ("addil L'%2,%%r26", xoperands);
7683 nbytes += 4;
7686 if (TARGET_PA_20)
7688 output_asm_insn ("bve (%%r22)", xoperands);
7689 nbytes += 36;
7691 else if (TARGET_NO_SPACE_REGS)
7693 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7694 nbytes += 36;
7696 else
7698 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7699 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7700 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7701 nbytes += 44;
7704 if (val_14)
7705 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7706 else
7707 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7709 else if (flag_pic)
7711 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7713 if (TARGET_SOM || !TARGET_GAS)
7715 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7716 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7718 else
7720 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7721 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7724 if (!val_14)
7725 output_asm_insn ("addil L'%2,%%r26", xoperands);
7727 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7729 if (val_14)
7731 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7732 nbytes += 20;
7734 else
7736 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7737 nbytes += 24;
7740 else
7742 if (!val_14)
7743 output_asm_insn ("addil L'%2,%%r26", xoperands);
7745 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7746 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
7748 if (val_14)
7750 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7751 nbytes += 12;
7753 else
7755 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7756 nbytes += 16;
7760 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7762 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7764 data_section ();
7765 output_asm_insn (".align 4", xoperands);
7766 ASM_OUTPUT_LABEL (file, label);
7767 output_asm_insn (".word P'%0", xoperands);
7769 else if (TARGET_SOM && TARGET_GAS)
7770 forget_section ();
7772 current_thunk_number++;
7773 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
7774 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
7775 last_address += nbytes;
7776 update_total_code_bytes (nbytes);
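/* A worked example of the rounding above (illustrative numbers): with a
   32-bit FUNCTION_BOUNDARY, FUNCTION_BOUNDARY / BITS_PER_UNIT is 4, so
   nbytes = (nbytes + 3) & ~3, and a 22-byte thunk is accounted as 24
   bytes before being added to last_address. */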
7779 /* Only direct calls to static functions are allowed to be sibling (tail)
7780 call optimized.
7782 This restriction is necessary because some linker-generated stubs will
7783 store return pointers into "rp" in some cases, which might clobber a
7784 live value already in "rp".
7786 In a sibcall the current function and the target function share stack
7787 space. Thus if the path to the current function and the path to the
7788 target function save a value in "rp", they save the value into the
7789 same stack slot, which has undesirable consequences.
7791 Because of the deferred binding nature of shared libraries, any function
7792 with external scope could be in a different load module and thus require
7793 "rp" to be saved when calling that function. So sibcall optimizations
7794 can only be safe for static functions.
7796 Note that GCC never needs return value relocations, so we don't have to
7797 worry about static calls with return value relocations (which require
7798 saving "rp").
7800 It is safe to perform a sibcall optimization when the target function
7801 will never return. */
7802 static bool
7803 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7805 if (TARGET_PORTABLE_RUNTIME)
7806 return false;
7808 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
7809 single subspace mode and the call is not indirect. As far as I know,
7810 there is no operating system support for the multiple subspace mode.
7811 It might be possible to support indirect calls if we didn't use
7812 $$dyncall (see the indirect sequence generated in output_call). */
7813 if (TARGET_ELF32)
7814 return (decl != NULL_TREE);
7816 /* Sibcalls are not ok because the arg pointer register is not a fixed
7817 register. This prevents the sibcall optimization from occurring. In
7818 addition, there are problems with stub placement using GNU ld. This
7819 is because a normal sibcall branch uses a 17-bit relocation while
7820 a regular call branch uses a 22-bit relocation. As a result, more
7821 care needs to be taken in the placement of long-branch stubs. */
7822 if (TARGET_64BIT)
7823 return false;
7825 /* Sibcalls are only ok within a translation unit. */
7826 return (decl && !TREE_PUBLIC (decl));
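/* So, for example, a direct call to a file-scope "static" helper may be
   turned into a sibcall, while a call to any externally visible function
   must use the normal call path, where "rp" can be saved and restored by
   a stub if necessary. */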
7829 /* ??? Addition is not commutative on the PA due to the weird implicit
7830 space register selection rules for memory addresses. Therefore, we
7831 don't consider a + b == b + a, as this might be inside a MEM. */
7832 static bool
7833 pa_commutative_p (rtx x, int outer_code)
7835 return (COMMUTATIVE_P (x)
7836 && (TARGET_NO_SPACE_REGS
7837 || (outer_code != UNKNOWN && outer_code != MEM)
7838 || GET_CODE (x) != PLUS));
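/* Example of the hazard (hypothetical RTL): in

	(mem:SI (plus:SI (reg:SI %r25) (reg:SI %r26)))

   the implicit space register is selected from a particular operand of
   the PLUS, so rewriting it as (plus (reg %r26) (reg %r25)) inside a MEM
   could change which space register the hardware uses. */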
7841 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7842 use in fmpyadd instructions. */
7843 int
7844 fmpyaddoperands (rtx *operands)
7846 enum machine_mode mode = GET_MODE (operands[0]);
7848 /* Must be a floating point mode. */
7849 if (mode != SFmode && mode != DFmode)
7850 return 0;
7852 /* All modes must be the same. */
7853 if (! (mode == GET_MODE (operands[1])
7854 && mode == GET_MODE (operands[2])
7855 && mode == GET_MODE (operands[3])
7856 && mode == GET_MODE (operands[4])
7857 && mode == GET_MODE (operands[5])))
7858 return 0;
7860 /* All operands must be registers. */
7861 if (! (GET_CODE (operands[1]) == REG
7862 && GET_CODE (operands[2]) == REG
7863 && GET_CODE (operands[3]) == REG
7864 && GET_CODE (operands[4]) == REG
7865 && GET_CODE (operands[5]) == REG))
7866 return 0;
7868 /* Only 2 real operands to the addition. One of the input operands must
7869 be the same as the output operand. */
7870 if (! rtx_equal_p (operands[3], operands[4])
7871 && ! rtx_equal_p (operands[3], operands[5]))
7872 return 0;
7874 /* Inout operand of add cannot conflict with any operands from multiply. */
7875 if (rtx_equal_p (operands[3], operands[0])
7876 || rtx_equal_p (operands[3], operands[1])
7877 || rtx_equal_p (operands[3], operands[2]))
7878 return 0;
7880 /* multiply cannot feed into addition operands. */
7881 if (rtx_equal_p (operands[4], operands[0])
7882 || rtx_equal_p (operands[5], operands[0]))
7883 return 0;
7885 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
7886 if (mode == SFmode
7887 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7888 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7889 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7890 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7891 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
7892 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
7893 return 0;
7895 /* Passed. Operands are suitable for fmpyadd. */
7896 return 1;
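/* For illustration (assumed register assignments, DFmode so the
   FPUPPER_REGS restriction does not apply), the operand set

	operands[0..2]: fr8 = fr9 * fr10
	operands[3..5]: fr11 = fr11 + fr12

   qualifies: the add reuses operands[3] as its destination, and neither
   addition input depends on the multiply result fr8. */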
7899 #if !defined(USE_COLLECT2)
7900 static void
7901 pa_asm_out_constructor (rtx symbol, int priority)
7903 if (!function_label_operand (symbol, VOIDmode))
7904 hppa_encode_label (symbol);
7906 #ifdef CTORS_SECTION_ASM_OP
7907 default_ctor_section_asm_out_constructor (symbol, priority);
7908 #else
7909 # ifdef TARGET_ASM_NAMED_SECTION
7910 default_named_section_asm_out_constructor (symbol, priority);
7911 # else
7912 default_stabs_asm_out_constructor (symbol, priority);
7913 # endif
7914 #endif
7917 static void
7918 pa_asm_out_destructor (rtx symbol, int priority)
7920 if (!function_label_operand (symbol, VOIDmode))
7921 hppa_encode_label (symbol);
7923 #ifdef DTORS_SECTION_ASM_OP
7924 default_dtor_section_asm_out_destructor (symbol, priority);
7925 #else
7926 # ifdef TARGET_ASM_NAMED_SECTION
7927 default_named_section_asm_out_destructor (symbol, priority);
7928 # else
7929 default_stabs_asm_out_destructor (symbol, priority);
7930 # endif
7931 #endif
7933 #endif
7935 /* This function places uninitialized global data in the bss section.
7936 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
7937 function on the SOM port to prevent uninitialized global data from
7938 being placed in the data section. */
7940 void
7941 pa_asm_output_aligned_bss (FILE *stream,
7942 const char *name,
7943 unsigned HOST_WIDE_INT size,
7944 unsigned int align)
7946 bss_section ();
7947 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7949 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7950 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7951 #endif
7953 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7954 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7955 #endif
7957 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7958 ASM_OUTPUT_LABEL (stream, name);
7959 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
7962 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
7963 that doesn't allow the alignment of global common storage to be directly
7964 specified. The SOM linker aligns common storage based on the rounded
7965 value of the NUM_BYTES parameter in the .comm directive. It's not
7966 possible to use the .align directive as it doesn't affect the alignment
7967 of the label associated with a .comm directive. */
7969 void
7970 pa_asm_output_aligned_common (FILE *stream,
7971 const char *name,
7972 unsigned HOST_WIDE_INT size,
7973 unsigned int align)
7975 unsigned int max_common_align;
7977 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
7978 if (align > max_common_align)
7980 warning (0, "alignment (%u) for %s exceeds maximum alignment "
7981 "for global common data. Using %u",
7982 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
7983 align = max_common_align;
7986 bss_section ();
7988 assemble_name (stream, name);
7989 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
7990 MAX (size, align / BITS_PER_UNIT));
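/* Worked example (illustrative symbol): for a 2-byte common symbol "buf"
   requesting 64-bit (8-byte) alignment, MAX (size, align / BITS_PER_UNIT)
   yields

	buf	.comm 8

   and the SOM linker, which aligns common storage on the rounded size,
   then gives "buf" the requested 8-byte alignment at the cost of some
   padding. */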
7993 /* We can't use .comm for local common storage as the SOM linker effectively
7994 treats the symbol as universal and uses the same storage for local symbols
7995 with the same name in different object files. The .block directive
7996 reserves an uninitialized block of storage. However, it's not common
7997 storage. Fortunately, GCC never requests common storage with the same
7998 name in any given translation unit. */
8000 void
8001 pa_asm_output_aligned_local (FILE *stream,
8002 const char *name,
8003 unsigned HOST_WIDE_INT size,
8004 unsigned int align)
8006 bss_section ();
8007 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8009 #ifdef LOCAL_ASM_OP
8010 fprintf (stream, "%s", LOCAL_ASM_OP);
8011 assemble_name (stream, name);
8012 fprintf (stream, "\n");
8013 #endif
8015 ASM_OUTPUT_LABEL (stream, name);
8016 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8019 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8020 use in fmpysub instructions. */
8021 int
8022 fmpysuboperands (rtx *operands)
8024 enum machine_mode mode = GET_MODE (operands[0]);
8026 /* Must be a floating point mode. */
8027 if (mode != SFmode && mode != DFmode)
8028 return 0;
8030 /* All modes must be the same. */
8031 if (! (mode == GET_MODE (operands[1])
8032 && mode == GET_MODE (operands[2])
8033 && mode == GET_MODE (operands[3])
8034 && mode == GET_MODE (operands[4])
8035 && mode == GET_MODE (operands[5])))
8036 return 0;
8038 /* All operands must be registers. */
8039 if (! (GET_CODE (operands[1]) == REG
8040 && GET_CODE (operands[2]) == REG
8041 && GET_CODE (operands[3]) == REG
8042 && GET_CODE (operands[4]) == REG
8043 && GET_CODE (operands[5]) == REG))
8044 return 0;
8046 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8047 operation, so operands[4] must be the same as operands[3]. */
8048 if (! rtx_equal_p (operands[3], operands[4]))
8049 return 0;
8051 /* multiply cannot feed into subtraction. */
8052 if (rtx_equal_p (operands[5], operands[0]))
8053 return 0;
8055 /* Inout operand of sub cannot conflict with any operands from multiply. */
8056 if (rtx_equal_p (operands[3], operands[0])
8057 || rtx_equal_p (operands[3], operands[1])
8058 || rtx_equal_p (operands[3], operands[2]))
8059 return 0;
8061 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8062 if (mode == SFmode
8063 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8064 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8065 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8066 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8067 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8068 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8069 return 0;
8071 /* Passed. Operands are suitable for fmpysub. */
8072 return 1;
8075 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8076 constants for shadd instructions. */
8077 int
8078 shadd_constant_p (int val)
8080 if (val == 2 || val == 4 || val == 8)
8081 return 1;
8082 else
8083 return 0;
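/* These values correspond to the sh1add, sh2add and sh3add instructions,
   which shift their first operand left by 1, 2 or 3 bits (a multiply by
   2, 4 or 8) before adding. */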
8086 /* Return 1 if OP is valid as a base or index register in a
8087 REG+REG address. */
8089 int
8090 borx_reg_operand (rtx op, enum machine_mode mode)
8092 if (GET_CODE (op) != REG)
8093 return 0;
8095 /* We must reject virtual registers as the only expressions that
8096 can be instantiated are REG and REG+CONST. */
8097 if (op == virtual_incoming_args_rtx
8098 || op == virtual_stack_vars_rtx
8099 || op == virtual_stack_dynamic_rtx
8100 || op == virtual_outgoing_args_rtx
8101 || op == virtual_cfa_rtx)
8102 return 0;
8104 /* While it's always safe to index off the frame pointer, it's not
8105 profitable to do so when the frame pointer is being eliminated. */
8106 if (!reload_completed
8107 && flag_omit_frame_pointer
8108 && !current_function_calls_alloca
8109 && op == frame_pointer_rtx)
8110 return 0;
8112 return register_operand (op, mode);
8115 /* Return 1 if this operand is anything other than a hard register. */
8117 int
8118 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8120 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8123 /* Return 1 if INSN branches forward. Should be using insn_addresses
8124 to avoid walking through all the insns... */
8125 static int
8126 forward_branch_p (rtx insn)
8128 rtx label = JUMP_LABEL (insn);
8130 while (insn)
8132 if (insn == label)
8133 break;
8134 else
8135 insn = NEXT_INSN (insn);
8138 return (insn == label);
8141 /* Return 1 if OP is an equality comparison, else return 0. */
8142 int
8143 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8145 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8148 /* Return 1 if INSN is in the delay slot of a call instruction. */
8149 int
8150 jump_in_call_delay (rtx insn)
8153 if (GET_CODE (insn) != JUMP_INSN)
8154 return 0;
8156 if (PREV_INSN (insn)
8157 && PREV_INSN (PREV_INSN (insn))
8158 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8160 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8162 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8163 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8166 else
8167 return 0;
8170 /* Output an unconditional move and branch insn. */
8172 const char *
8173 output_parallel_movb (rtx *operands, int length)
8175 /* These are the cases in which we win. */
8176 if (length == 4)
8177 return "mov%I1b,tr %1,%0,%2";
8179 /* None of these cases wins, but they don't lose either. */
8180 if (dbr_sequence_length () == 0)
8182 /* Nothing in the delay slot, fake it by putting the combined
8183 insn (the copy or add) in the delay slot of a bl. */
8184 if (GET_CODE (operands[1]) == CONST_INT)
8185 return "b %2\n\tldi %1,%0";
8186 else
8187 return "b %2\n\tcopy %1,%0";
8189 else
8191 /* Something in the delay slot, but we've got a long branch. */
8192 if (GET_CODE (operands[1]) == CONST_INT)
8193 return "ldi %1,%0\n\tb %2";
8194 else
8195 return "copy %1,%0\n\tb %2";
8199 /* Output an unconditional add and branch insn. */
8201 const char *
8202 output_parallel_addb (rtx *operands, int length)
8204 /* To make life easy we want operand0 to be the shared input/output
8205 operand and operand1 to be the readonly operand. */
8206 if (operands[0] == operands[1])
8207 operands[1] = operands[2];
8209 /* These are the cases in which we win. */
8210 if (length == 4)
8211 return "add%I1b,tr %1,%0,%3";
8213 /* None of these cases wins, but they don't lose either. */
8214 if (dbr_sequence_length () == 0)
8216 /* Nothing in the delay slot, fake it by putting the combined
8217 insn (the copy or add) in the delay slot of a bl. */
8218 return "b %3\n\tadd%I1 %1,%0,%0";
8220 else
8222 /* Something in the delay slot, but we've got a long branch. */
8223 return "add%I1 %1,%0,%0\n\tb %3";
8227 /* Return nonzero if INSN (a jump insn) immediately follows a call
8228 to a named function. This is used to avoid filling the delay slot
8229 of the jump since it can usually be eliminated by modifying RP in
8230 the delay slot of the call. */
8232 int
8233 following_call (rtx insn)
8235 if (! TARGET_JUMP_IN_DELAY)
8236 return 0;
8238 /* Find the previous real insn, skipping NOTEs. */
8239 insn = PREV_INSN (insn);
8240 while (insn && GET_CODE (insn) == NOTE)
8241 insn = PREV_INSN (insn);
8243 /* Check for CALL_INSNs and millicode calls. */
8244 if (insn
8245 && ((GET_CODE (insn) == CALL_INSN
8246 && get_attr_type (insn) != TYPE_DYNCALL)
8247 || (GET_CODE (insn) == INSN
8248 && GET_CODE (PATTERN (insn)) != SEQUENCE
8249 && GET_CODE (PATTERN (insn)) != USE
8250 && GET_CODE (PATTERN (insn)) != CLOBBER
8251 && get_attr_type (insn) == TYPE_MILLI)))
8252 return 1;
8254 return 0;
8257 /* We use this hook to perform a PA specific optimization which is difficult
8258 to do in earlier passes.
8260 We want the delay slots of branches within jump tables to be filled.
8261 None of the compiler passes at the moment even has the notion that a
8262 PA jump table doesn't contain addresses, but instead contains actual
8263 instructions!
8265 Because we actually jump into the table, the addresses of each entry
8266 must stay constant in relation to the beginning of the table (which
8267 itself must stay constant relative to the instruction to jump into
8268 it). I don't believe we can guarantee earlier passes of the compiler
8269 will adhere to those rules.
8271 So, late in the compilation process we find all the jump tables, and
8272 expand them into real code -- e.g. each entry in the jump table vector
8273 will get an appropriate label followed by a jump to the final target.
8275 Reorg and the final jump pass can then optimize these branches and
8276 fill their delay slots. We end up with smaller, more efficient code.
8278 The jump instructions within the table are special; we must be able
8279 to identify them during assembly output (if the jumps don't get filled
8280 we need to emit a nop rather than nullifying the delay slot). We
8281 identify jumps in switch tables by using insns with the attribute
8282 type TYPE_BTABLE_BRANCH.
8284 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8285 insns. This serves two purposes, first it prevents jump.c from
8286 noticing that the last N entries in the table jump to the instruction
8287 immediately after the table and deleting the jumps. Second, those
8288 insns mark where we should emit .begin_brtab and .end_brtab directives
8289 when using GAS (allows for better link time optimizations). */
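/* Sketch of the expansion (illustrative label names): a two-entry
   ADDR_VEC referring to L$10 and L$20 becomes

	L$100:	b L$10
		nop		; delay slot, filled later if possible
	L$101:	b L$20
		nop

   bracketed by begin_brtab and end_brtab marker insns, so that every
   table slot remains a fixed-size branch which reorg can optimize. */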
8291 static void
8292 pa_reorg (void)
8294 rtx insn;
8296 remove_useless_addtr_insns (1);
8298 if (pa_cpu < PROCESSOR_8000)
8299 pa_combine_instructions ();
8302 /* This is fairly cheap, so always run it if optimizing. */
8303 if (optimize > 0 && !TARGET_BIG_SWITCH)
8305 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8306 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8308 rtx pattern, tmp, location, label;
8309 unsigned int length, i;
8311 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8312 if (GET_CODE (insn) != JUMP_INSN
8313 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8314 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8315 continue;
8317 /* Emit marker for the beginning of the branch table. */
8318 emit_insn_before (gen_begin_brtab (), insn);
8320 pattern = PATTERN (insn);
8321 location = PREV_INSN (insn);
8322 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8324 for (i = 0; i < length; i++)
8326 /* Emit a label before each jump to keep jump.c from
8327 removing this code. */
8328 tmp = gen_label_rtx ();
8329 LABEL_NUSES (tmp) = 1;
8330 emit_label_after (tmp, location);
8331 location = NEXT_INSN (location);
8333 if (GET_CODE (pattern) == ADDR_VEC)
8334 label = XEXP (XVECEXP (pattern, 0, i), 0);
8335 else
8336 label = XEXP (XVECEXP (pattern, 1, i), 0);
8338 tmp = gen_short_jump (label);
8340 /* Emit the jump itself. */
8341 tmp = emit_jump_insn_after (tmp, location);
8342 JUMP_LABEL (tmp) = label;
8343 LABEL_NUSES (label)++;
8344 location = NEXT_INSN (location);
8346 /* Emit a BARRIER after the jump. */
8347 emit_barrier_after (location);
8348 location = NEXT_INSN (location);
8351 /* Emit marker for the end of the branch table. */
8352 emit_insn_before (gen_end_brtab (), location);
8353 location = NEXT_INSN (location);
8354 emit_barrier_after (location);
8356 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8357 delete_insn (insn);
8360 else
8362 /* Still need brtab marker insns. FIXME: the presence of these
8363 markers disables output of the branch table to readonly memory,
8364 and any alignment directives that might be needed. Possibly,
8365 the begin_brtab insn should be output before the label for the
8366 table. This doesn't matter at the moment since the tables are
8367 always output in the text section. */
8368 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8370 /* Find an ADDR_VEC insn. */
8371 if (GET_CODE (insn) != JUMP_INSN
8372 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8373 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8374 continue;
8376 /* Now generate markers for the beginning and end of the
8377 branch table. */
8378 emit_insn_before (gen_begin_brtab (), insn);
8379 emit_insn_after (gen_end_brtab (), insn);
8384 /* The PA has a number of odd instructions which can perform multiple
8385 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8386 it may be profitable to combine two instructions into one instruction
8387 with two outputs. It's not profitable on PA2.0 machines because the
8388 two outputs would take two slots in the reorder buffers.
8390 This routine finds instructions which can be combined and combines
8391 them. We only support some of the potential combinations, and we
8392 only try common ways to find suitable instructions.
8394 * addb can add two registers or a register and a small integer
8395 and jump to a nearby (+-8k) location. Normally the jump to the
8396 nearby location is conditional on the result of the add, but by
8397 using the "true" condition we can make the jump unconditional.
8398 Thus addb can perform two independent operations in one insn.
8400 * movb is similar to addb in that it can perform a reg->reg
8401 or small immediate->reg copy and jump to a nearby (+-8k) location.
8403 * fmpyadd and fmpysub can perform a FP multiply and either an
8404 FP add or FP sub if the operands of the multiply and add/sub are
8405 independent (there are other minor restrictions). Note both
8406 the fmpy and fadd/fsub can in theory move to better spots according
8407 to data dependencies, but for now we require the fmpy stay at a
8408 fixed location.
8410 * Many of the memory operations can perform pre & post updates
8411 of index registers. GCC's pre/post increment/decrement addressing
8412 is far too simple to take advantage of all the possibilities. This
8413 pass may not be suitable since those insns may not be independent.
8415 * comclr can compare two ints or an int and a register, nullify
8416 the following instruction and zero some other register. This
8417 is more difficult to use as it's harder to find an insn which
8418 will generate a comclr than finding something like an unconditional
8419 branch. (conditional moves & long branches create comclr insns).
8421 * Most arithmetic operations can conditionally skip the next
8422 instruction. They can be viewed as "perform this operation
8423 and conditionally jump to this nearby location" (where nearby
8424 is an insn away). These are difficult to use due to the
8425 branch length restrictions. */
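/* Example of the movb case (assumed registers and label): the pair

	copy %r4,%r19
	b,n L$5

   can be rewritten as the single instruction

	movb,tr %r4,%r19,L$5

   which performs the copy and the unconditional branch at once. */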
8427 static void
8428 pa_combine_instructions (void)
8430 rtx anchor, new;
8432 /* This can get expensive since the basic algorithm is on the
8433 order of O(n^2) (or worse). Only do it for -O2 or higher
8434 levels of optimization. */
8435 if (optimize < 2)
8436 return;
8438 /* Walk down the list of insns looking for "anchor" insns which
8439 may be combined with "floating" insns. As the name implies,
8440 "anchor" instructions don't move, while "floating" insns may
8441 move around. */
8442 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8443 new = make_insn_raw (new);
8445 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8447 enum attr_pa_combine_type anchor_attr;
8448 enum attr_pa_combine_type floater_attr;
8450 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8451 Also ignore any special USE insns. */
8452 if ((GET_CODE (anchor) != INSN
8453 && GET_CODE (anchor) != JUMP_INSN
8454 && GET_CODE (anchor) != CALL_INSN)
8455 || GET_CODE (PATTERN (anchor)) == USE
8456 || GET_CODE (PATTERN (anchor)) == CLOBBER
8457 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8458 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8459 continue;
8461 anchor_attr = get_attr_pa_combine_type (anchor);
8462 /* See if anchor is an insn suitable for combination. */
8463 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8464 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8465 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8466 && ! forward_branch_p (anchor)))
8468 rtx floater;
8470 for (floater = PREV_INSN (anchor);
8471 floater;
8472 floater = PREV_INSN (floater))
8474 if (GET_CODE (floater) == NOTE
8475 || (GET_CODE (floater) == INSN
8476 && (GET_CODE (PATTERN (floater)) == USE
8477 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8478 continue;
8480 /* Anything except a regular INSN will stop our search. */
8481 if (GET_CODE (floater) != INSN
8482 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8483 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8485 floater = NULL_RTX;
8486 break;
8489 /* See if FLOATER is suitable for combination with the
8490 anchor. */
8491 floater_attr = get_attr_pa_combine_type (floater);
8492 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8493 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8494 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8495 && floater_attr == PA_COMBINE_TYPE_FMPY))
8497 /* If ANCHOR and FLOATER can be combined, then we're
8498 done with this pass. */
8499 if (pa_can_combine_p (new, anchor, floater, 0,
8500 SET_DEST (PATTERN (floater)),
8501 XEXP (SET_SRC (PATTERN (floater)), 0),
8502 XEXP (SET_SRC (PATTERN (floater)), 1)))
8503 break;
8506 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8507 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8509 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8511 if (pa_can_combine_p (new, anchor, floater, 0,
8512 SET_DEST (PATTERN (floater)),
8513 XEXP (SET_SRC (PATTERN (floater)), 0),
8514 XEXP (SET_SRC (PATTERN (floater)), 1)))
8515 break;
8517 else
8519 if (pa_can_combine_p (new, anchor, floater, 0,
8520 SET_DEST (PATTERN (floater)),
8521 SET_SRC (PATTERN (floater)),
8522 SET_SRC (PATTERN (floater))))
8523 break;
8528 /* If we didn't find anything on the backwards scan try forwards. */
8529 if (!floater
8530 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8531 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8533 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8535 if (GET_CODE (floater) == NOTE
8536 || (GET_CODE (floater) == INSN
8537 && (GET_CODE (PATTERN (floater)) == USE
8538 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8540 continue;
8542 /* Anything except a regular INSN will stop our search. */
8543 if (GET_CODE (floater) != INSN
8544 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8545 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8547 floater = NULL_RTX;
8548 break;
8551 /* See if FLOATER is suitable for combination with the
8552 anchor. */
8553 floater_attr = get_attr_pa_combine_type (floater);
8554 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8555 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8556 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8557 && floater_attr == PA_COMBINE_TYPE_FMPY))
8559 /* If ANCHOR and FLOATER can be combined, then we're
8560 done with this pass. */
8561 if (pa_can_combine_p (new, anchor, floater, 1,
8562 SET_DEST (PATTERN (floater)),
8563 XEXP (SET_SRC (PATTERN (floater)),
8564 0),
8565 XEXP (SET_SRC (PATTERN (floater)),
8566 1)))
8567 break;
8572 /* FLOATER will be nonzero if we found a suitable floating
8573 insn for combination with ANCHOR. */
8574 if (floater
8575 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8576 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8578 /* Emit the new instruction and delete the old anchor. */
8579 emit_insn_before (gen_rtx_PARALLEL
8580 (VOIDmode,
8581 gen_rtvec (2, PATTERN (anchor),
8582 PATTERN (floater))),
8583 anchor);
8585 PUT_CODE (anchor, NOTE);
8586 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8587 NOTE_SOURCE_FILE (anchor) = 0;
8589 /* Emit a special USE insn for FLOATER, then delete
8590 the floating insn. */
8591 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8592 delete_insn (floater);
8594 continue;
8596 else if (floater
8597 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8599 rtx temp;
8600 /* Emit the new_jump instruction and delete the old anchor. */
8601 temp
8602 = emit_jump_insn_before (gen_rtx_PARALLEL
8603 (VOIDmode,
8604 gen_rtvec (2, PATTERN (anchor),
8605 PATTERN (floater))),
8606 anchor);
8608 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8609 PUT_CODE (anchor, NOTE);
8610 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8611 NOTE_SOURCE_FILE (anchor) = 0;
8613 /* Emit a special USE insn for FLOATER, then delete
8614 the floating insn. */
8615 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8616 delete_insn (floater);
8617 continue;
8623 static int
8624 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8625 rtx src1, rtx src2)
8627 int insn_code_number;
8628 rtx start, end;
8630 /* Create a PARALLEL with the patterns of ANCHOR and
8631 FLOATER, try to recognize it, then test constraints
8632 for the resulting pattern.
8634 If the pattern doesn't match or the constraints
8635 aren't met keep searching for a suitable floater
8636 insn. */
8637 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8638 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8639 INSN_CODE (new) = -1;
8640 insn_code_number = recog_memoized (new);
8641 if (insn_code_number < 0
8642 || (extract_insn (new), ! constrain_operands (1)))
8643 return 0;
8645 if (reversed)
8647 start = anchor;
8648 end = floater;
8650 else
8652 start = floater;
8653 end = anchor;
8656 /* There are up to three operands to consider: one
8657 output and two inputs.
8659 The output must not be used between FLOATER & ANCHOR
8660 exclusive. The inputs must not be set between
8661 FLOATER and ANCHOR exclusive. */
8663 if (reg_used_between_p (dest, start, end))
8664 return 0;
8666 if (reg_set_between_p (src1, start, end))
8667 return 0;
8669 if (reg_set_between_p (src2, start, end))
8670 return 0;
8672 /* If we get here, then everything is good. */
8673 return 1;
8676 /* Return nonzero if references for INSN are delayed.
8678 Millicode insns are actually function calls with some special
8679 constraints on arguments and register usage.
8681 Millicode calls always expect their arguments in the integer argument
8682 registers, and always return their result in %r29 (ret1). They
8683 are expected to clobber their arguments, %r1, %r29, and the return
8684 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8686 This function tells reorg that the references to arguments and
8687 millicode calls do not appear to happen until after the millicode call.
8688 This allows reorg to put insns which set the argument registers into the
8689 delay slot of the millicode call -- thus they act more like traditional
8690 CALL_INSNs.
8692 Note we cannot consider side effects of the insn to be delayed because
8693 the branch and link insn will clobber the return pointer. If we happened
8694 to use the return pointer in the delay slot of the call, then we lose.
8696 get_attr_type will try to recognize the given insn, so make sure to
8697 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8698 in particular. */
8699 int
8700 insn_refs_are_delayed (rtx insn)
8702 return ((GET_CODE (insn) == INSN
8703 && GET_CODE (PATTERN (insn)) != SEQUENCE
8704 && GET_CODE (PATTERN (insn)) != USE
8705 && GET_CODE (PATTERN (insn)) != CLOBBER
8706 && get_attr_type (insn) == TYPE_MILLI));
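/* For example (illustrative sequence), treating the references as
   delayed lets reorg turn

	ldi 10,%r26
	bl $$mulI,%r31

   into

	bl $$mulI,%r31
	ldi 10,%r26		; argument loaded in the delay slot

   which is safe because the argument is not actually referenced until
   control reaches $$mulI, after the delay slot insn has executed. */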
8709 /* On the HP-PA the value is found in register(s) 28(-29), unless
8710 the mode is SF or DF. Then the value is returned in fr4 (32).
8712 This must perform the same promotions as PROMOTE_MODE, else
8713 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8715 Small structures must be returned in a PARALLEL on PA64 in order
8716 to match the HP Compiler ABI. */
8718 rtx
8719 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8721 enum machine_mode valmode;
8723 if (AGGREGATE_TYPE_P (valtype))
8725 if (TARGET_64BIT)
8727 /* Aggregates with a size less than or equal to 128 bits are
8728 returned in GR 28(-29). They are left justified. The pad
8729 bits are undefined. Larger aggregates are returned in
8730 memory. */
8731 rtx loc[2];
8732 int i, offset = 0;
8733 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8735 for (i = 0; i < ub; i++)
8737 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8738 gen_rtx_REG (DImode, 28 + i),
8739 GEN_INT (offset));
8740 offset += 8;
8743 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8745 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
8747 /* Aggregates 5 to 8 bytes in size are returned in general
8748 registers r28-r29 in the same manner as other non
8749 floating-point objects. The data is right-justified and
8750 zero-extended to 64 bits. This is opposite to the normal
8751 justification used on big endian targets and requires
8752 special treatment. */
8753 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8754 gen_rtx_REG (DImode, 28), const0_rtx);
8755 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
8759 if ((INTEGRAL_TYPE_P (valtype)
8760 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8761 || POINTER_TYPE_P (valtype))
8762 valmode = word_mode;
8763 else
8764 valmode = TYPE_MODE (valtype);
8766 if (TREE_CODE (valtype) == REAL_TYPE
8767 && !AGGREGATE_TYPE_P (valtype)
8768 && TYPE_MODE (valtype) != TFmode
8769 && !TARGET_SOFT_FLOAT)
8770 return gen_rtx_REG (valmode, 32);
8772 return gen_rtx_REG (valmode, 28);
8775 /* Return the location of a parameter that is passed in a register or NULL
8776 if the parameter has any component that is passed in memory.
8778 This is new code and will be pushed into the net sources
8779 further testing.
8781 ??? We might want to restructure this so that it looks more like other
8782 ports. */
8783 rtx
8784 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8785 int named ATTRIBUTE_UNUSED)
8787 int max_arg_words = (TARGET_64BIT ? 8 : 4);
8788 int alignment = 0;
8789 int arg_size;
8790 int fpr_reg_base;
8791 int gpr_reg_base;
8792 rtx retval;
8794 if (mode == VOIDmode)
8795 return NULL_RTX;
8797 arg_size = FUNCTION_ARG_SIZE (mode, type);
8799 /* If this arg would be passed partially or totally on the stack, then
8800 this routine should return zero. pa_arg_partial_bytes will
8801 handle arguments which are split between regs and stack slots if
8802 the ABI mandates split arguments. */
8803 if (! TARGET_64BIT)
8805 /* The 32-bit ABI does not split arguments. */
8806 if (cum->words + arg_size > max_arg_words)
8807 return NULL_RTX;
8809 else
8811 if (arg_size > 1)
8812 alignment = cum->words & 1;
8813 if (cum->words + alignment >= max_arg_words)
8814 return NULL_RTX;
8817 /* The 32bit ABIs and the 64bit ABIs are rather different,
8818 particularly in their handling of FP registers. We might
8819 be able to cleverly share code between them, but I'm not
8820 going to bother in the hope that splitting them up results
8821 in code that is more easily understood. */
8823 if (TARGET_64BIT)
8825 /* Advance the base registers to their current locations.
8827 Remember, gprs grow towards smaller register numbers while
8828 fprs grow to higher register numbers. Also remember that
8829 although FP regs are 32-bit addressable, we pretend that
8830 the registers are 64-bits wide. */
8831 gpr_reg_base = 26 - cum->words;
8832 fpr_reg_base = 32 + cum->words;
8834 /* Arguments wider than one word and small aggregates need special
8835 treatment. */
8836 if (arg_size > 1
8837 || mode == BLKmode
8838 || (type && AGGREGATE_TYPE_P (type)))
8840 /* Double-extended precision (80-bit), quad-precision (128-bit)
8841 and aggregates including complex numbers are aligned on
8842 128-bit boundaries. The first eight 64-bit argument slots
8843 are associated one-to-one, with general registers r26
8844 through r19, and also with floating-point registers fr4
8845 through fr11. Arguments larger than one word are always
8846 passed in general registers.
8848 Using a PARALLEL with a word mode register results in left
8849 justified data on a big-endian target. */
8851 rtx loc[8];
8852 int i, offset = 0, ub = arg_size;
8854 /* Align the base register. */
8855 gpr_reg_base -= alignment;
8857 ub = MIN (ub, max_arg_words - cum->words - alignment);
8858 for (i = 0; i < ub; i++)
8860 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8861 gen_rtx_REG (DImode, gpr_reg_base),
8862 GEN_INT (offset));
8863 gpr_reg_base -= 1;
8864 offset += 8;
8867 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
8870 else
8872 /* If the argument is larger than a word, then we know precisely
8873 which registers we must use. */
8874 if (arg_size > 1)
8876 if (cum->words)
8878 gpr_reg_base = 23;
8879 fpr_reg_base = 38;
8881 else
8883 gpr_reg_base = 25;
8884 fpr_reg_base = 34;
8887 /* Structures 5 to 8 bytes in size are passed in the general
8888 registers in the same manner as other non floating-point
8889 objects. The data is right-justified and zero-extended
8890 to 64 bits. This is opposite to the normal justification
8891 used on big endian targets and requires special treatment.
8892 We now define BLOCK_REG_PADDING to pad these objects. */
8893 if (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
8895 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8896 gen_rtx_REG (DImode, gpr_reg_base),
8897 const0_rtx);
8898 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
8901 else
8903 /* We have a single word (32 bits). A simple computation
8904 will get us the register #s we need. */
8905 gpr_reg_base = 26 - cum->words;
8906 fpr_reg_base = 32 + 2 * cum->words;
8910 /* Determine if the argument needs to be passed in both general and
8911 floating point registers. */
8912 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
8913 /* If we are doing soft-float with portable runtime, then there
8914 is no need to worry about FP regs. */
8915 && !TARGET_SOFT_FLOAT
8916 /* The parameter must be some kind of float, else we can just
8917 pass it in integer registers. */
8918 && FLOAT_MODE_P (mode)
8919 /* The target function must not have a prototype. */
8920 && cum->nargs_prototype <= 0
8921 /* libcalls do not need to pass items in both FP and general
8922 registers. */
8923 && type != NULL_TREE
8924 /* All this hair applies to "outgoing" args only. This includes
8925 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
8926 && !cum->incoming)
8927 /* Also pass outgoing floating arguments in both registers in indirect
8928 calls with the 32 bit ABI and the HP assembler since there is no
8929 way to specify the argument locations in static functions.
8930 || (!TARGET_64BIT
8931 && !TARGET_GAS
8932 && !cum->incoming
8933 && cum->indirect
8934 && FLOAT_MODE_P (mode)))
8936 retval
8937 = gen_rtx_PARALLEL
8938 (mode,
8939 gen_rtvec (2,
8940 gen_rtx_EXPR_LIST (VOIDmode,
8941 gen_rtx_REG (mode, fpr_reg_base),
8942 const0_rtx),
8943 gen_rtx_EXPR_LIST (VOIDmode,
8944 gen_rtx_REG (mode, gpr_reg_base),
8945 const0_rtx)));
8947 else
8949 /* See if we should pass this parameter in a general register. */
8950 if (TARGET_SOFT_FLOAT
8951 /* Indirect calls in the normal 32bit ABI require all arguments
8952 to be passed in general registers. */
8953 || (!TARGET_PORTABLE_RUNTIME
8954 && !TARGET_64BIT
8955 && !TARGET_ELF32
8956 && cum->indirect)
8957 /* If the parameter is not a floating point parameter, then
8958 it belongs in GPRs. */
8959 || !FLOAT_MODE_P (mode)
8960 /* Structure with single SFmode field belongs in GPR. */
8961 || (type && AGGREGATE_TYPE_P (type)))
8962 retval = gen_rtx_REG (mode, gpr_reg_base);
8963 else
8964 retval = gen_rtx_REG (mode, fpr_reg_base);
8966 return retval;
8970 /* If this arg would be passed totally in registers or totally on the stack,
8971 then this routine should return zero. */
8973 static int
8974 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8975 tree type, bool named ATTRIBUTE_UNUSED)
8977 unsigned int max_arg_words = 8;
8978 unsigned int offset = 0;
8980 if (!TARGET_64BIT)
8981 return 0;
8983 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
8984 offset = 1;
8986 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
8987 /* Arg fits fully into registers. */
8988 return 0;
8989 else if (cum->words + offset >= max_arg_words)
8990 /* Arg fully on the stack. */
8991 return 0;
8992 else
8993 /* Arg is split. */
8994 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
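/* Worked example (hypothetical call site): on TARGET_64BIT, passing a
   24-byte aggregate (three words) when cum->words is 6 gives offset 0;
   6 + 0 + 3 > 8 but 6 + 0 < 8, so the argument is split and we return
   (8 - 6 - 0) * UNITS_PER_WORD = 16 bytes passed in registers, with the
   last 8 bytes going to the stack. */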
8998 /* Return a string to output before text in the current function.
9000 This function is only used with SOM. Because we don't support
9001 named subspaces, we can only create a new subspace or switch back
9002 to the default text subspace. */
9003 const char *
9004 som_text_section_asm_op (void)
9006 if (!TARGET_SOM)
9007 return "";
9009 if (TARGET_GAS)
9011 if (cfun && !cfun->machine->in_nsubspa)
9013 /* We only want to emit a .nsubspa directive once at the
9014 start of the function. */
9015 cfun->machine->in_nsubspa = 1;
9017 /* Create a new subspace for the text. This provides
9018 better stub placement and one-only functions. */
9019 if (cfun->decl
9020 && DECL_ONE_ONLY (cfun->decl)
9021 && !DECL_WEAK (cfun->decl))
9022 return
9023 "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=24,COMDAT";
9025 return "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$";
9027 else
9029 /* There isn't a current function or the body of the current
9030 function has been completed. So, we are changing to the
9031 text section to output debugging information. Do this in
9032 the default text section. We need to forget that we are
9033 in the text section so that the function text_section in
9034 varasm.c will call us the next time around. */
9035 forget_section ();
9039 return "\t.SPACE $TEXT$\n\t.SUBSPA $CODE$";
9042 /* On hpux10, the linker will give an error if we have a reference
9043 in the read-only data section to a symbol defined in a shared
9044 library. Therefore, expressions that might require a reloc cannot
9045 be placed in the read-only data section.
9047 static void
9048 pa_select_section (tree exp, int reloc,
9049 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9051 if (TREE_CODE (exp) == VAR_DECL
9052 && TREE_READONLY (exp)
9053 && !TREE_THIS_VOLATILE (exp)
9054 && DECL_INITIAL (exp)
9055 && (DECL_INITIAL (exp) == error_mark_node
9056 || TREE_CONSTANT (DECL_INITIAL (exp)))
9057 && !reloc)
9059 if (TARGET_SOM
9060 && DECL_ONE_ONLY (exp)
9061 && !DECL_WEAK (exp))
9062 som_one_only_readonly_data_section ();
9063 else
9064 readonly_data_section ();
9066 else if (CONSTANT_CLASS_P (exp) && !reloc)
9067 readonly_data_section ();
9068 else if (TARGET_SOM
9069 && TREE_CODE (exp) == VAR_DECL
9070 && DECL_ONE_ONLY (exp)
9071 && !DECL_WEAK (exp))
9072 som_one_only_data_section ();
9073 else
9074 data_section ();
9077 static void
9078 pa_globalize_label (FILE *stream, const char *name)
9080 /* We only handle DATA objects here, functions are globalized in
9081 ASM_DECLARE_FUNCTION_NAME. */
9082 if (! FUNCTION_NAME_P (name))
9084 fputs ("\t.EXPORT ", stream);
9085 assemble_name (stream, name);
9086 fputs (",DATA\n", stream);
9090 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9092 static rtx
9093 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9094 int incoming ATTRIBUTE_UNUSED)
9096 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9099 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9101 bool
9102 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
9104 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9105 PA64 ABI says that objects larger than 128 bits are returned in memory.
9106 Note, int_size_in_bytes can return -1 if the size of the object is
9107 variable or larger than the maximum value that can be expressed as
9108 a HOST_WIDE_INT. It can also return zero for an empty type. The
9109 simplest way to handle variable and empty types is to pass them in
9110 memory. This avoids problems in defining the boundaries of argument
9111 slots, allocating registers, etc. */
9112 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9113 || int_size_in_bytes (type) <= 0);
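/* For instance (illustrative sizes): a 12-byte structure is returned in
   memory on the 32-bit SOM ABI (12 > 8) but in registers on PA64
   (12 <= 16), while a variable-sized or empty type is always returned
   in memory. */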
9116 /* Structure to hold declaration and name of external symbols that are
9117 emitted by GCC. We generate a vector of these symbols and output them
9118 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9119 This avoids putting out names that are never really used. */
9121 typedef struct extern_symbol GTY(())
9123 tree decl;
9124 const char *name;
9125 } extern_symbol;
9127 /* Define gc'd vector type for extern_symbol. */
9128 DEF_VEC_O(extern_symbol);
9129 DEF_VEC_ALLOC_O(extern_symbol,gc);
9131 /* Vector of extern_symbol pointers. */
9132 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9134 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9135 /* Mark DECL (name NAME) as an external reference (assembler output
9136 file FILE). This saves the names to output at the end of the file
9137 if actually referenced. */
9139 void
9140 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9142 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9144 gcc_assert (file == asm_out_file);
9145 p->decl = decl;
9146 p->name = name;
9149 /* Output text required at the end of an assembler file.
9150 This includes deferred plabels and .import directives for
9151 all external symbols that were actually referenced. */
9153 static void
9154 pa_hpux_file_end (void)
9156 unsigned int i;
9157 extern_symbol *p;
9159 output_deferred_plabels ();
9161 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9163 tree decl = p->decl;
9165 if (!TREE_ASM_WRITTEN (decl)
9166 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9167 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9170 VEC_free (extern_symbol, gc, extern_symbols);
9172 #endif
9174 #include "gt-pa.h"