/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
                                           enum machine_mode,
                                           secondary_reload_info *);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}, where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
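
/* Illustrative note (added, not in the original source): with an option
   such as -mfixed-range=fr20-fr25, decode_reg_name maps "fr20" and
   "fr25" to hard register numbers and the loop above marks fr20 through
   fr25 both fixed and call-used, so the register allocator never uses
   them.  The exact register spellings accepted depend on
   decode_reg_name.  */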
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
        pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
        pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
        pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
        pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
        pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
        pa_cpu = PROCESSOR_7300;
      else
        return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}
void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
          || CONST_OK_FOR_LETTER_P (intval, 'N')
          || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
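
/* Illustrative note (added, not in the original source): per the
   constraint letters defined in pa.h, 'J' is a 14-bit signed constant
   (reachable with ldo), 'N' is roughly a constant whose low 11 bits
   are zero so that ldil can load it, and 'K' is any value that
   zdepi_cint_p below accepts.  For instance, 5 satisfies 'J' and
   0x12345000 satisfies 'N'.  */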
/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5 bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
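
/* Worked example (added for illustration, not in the original source):
   x = 0x1f0 is five 1-bits shifted left by 4, i.e. the 5-bit value -1
   sign extended and deposited at position 4, so zdepi can build it.
   Here lsb_mask = 0x10, (x >> 4) + lsb_mask = 0x1f + 0x10 = 0x2f, and
   masking with ~(lsb_mask - 1) = ~0xf leaves t = 0x20, a power of two,
   so the test succeeds.  A scattered value such as x = 0x505 yields
   t = 0x51, which is not a power of two, and the test fails.  */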
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
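
/* Worked example (added for illustration, not in the original source):
   for mask = ~0xf (the pattern 1....10000), ~mask = 0xf; adding its
   lowest set bit gives 0x10, a power of two, so a single extru/depi
   can implement the AND.  For mask = 0x5, the complement ends in
   ...1010; adding its lowest set bit (2) still leaves higher bits
   set, so the test fails.  */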
/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
                      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
        = gen_const_mem (Pmode,
                         gen_rtx_LO_SUM (Pmode, tmp_reg,
                                         gen_rtx_UNSPEC (Pmode,
                                                         gen_rtvec (1, orig),
                                                         UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
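
/* Worked example (added for illustration, not in the original source):
   for an SImode reference to X + 0x4010, the mask is 0x3fff and
   0x4010 & 0x3fff = 0x10, below the halfway point 0x2000, so we round
   down: Y = 0x4000, Z = X + 0x4000, and the reference becomes
   memory (Z + 0x10), whose displacement easily fits in 14 bits.
   Nearby offsets round to the same Z, letting CSE share it.  */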
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (ptr_reg, offset - newoffset);
    }
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }
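
  /* Illustrative note (added, not in the original source): keeping the
     (plus (mult a 8) b) shape in a register lets later passes select a
     single sh3add instruction; shadd_constant_p accepts the scale
     factors 2, 4 and 8, corresponding to sh1add, sh2add and sh3add.  */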
  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          orig_base = XEXP (XEXP (x, 0), 1);
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          orig_base = XEXP (x, 1);
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */
  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}
/* For the HPPA, REG, REG+CONST and LO_SUM addresses are cost 1, a HIGH
   is cost 2, and addresses involving symbolic constants are cost 4.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }
  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
        the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
           && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
                                 XEXP (operand1, 0)))
          || ((GET_CODE (operand1) == SUBREG
               && GET_CODE (XEXP (operand1, 0)) == MEM
               && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                      ? SFmode : DFmode),
                                     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && ((GET_CODE (operand0) == MEM
                && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                       ? SFmode : DFmode),
                                      XEXP (operand0, 0)))
               || ((GET_CODE (operand0) == SUBREG)
                   && GET_CODE (XEXP (operand0, 0)) == MEM
                   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                          ? SFmode : DFmode),
                                         XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand0, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
                              replace_equiv_address (operand0, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.  Fix this for 2.5.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM
               || GET_CODE (operand1) == CONST_INT
               || (GET_CODE (operand1) == REG
                   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             OPERAND0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg,
                          replace_equiv_address (operand1, scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
          || (GET_CODE (operand1) == CONST_INT
              && cint_ok_for_move (INTVAL (operand1)))
          || (operand1 == CONST0_RTX (mode))
          || (GET_CODE (operand1) == HIGH
              && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
          /* Only `general_operands' can come here, so MEM is ok.  */
          || GET_CODE (operand1) == MEM)
        {
          /* Various sets are created during RTL generation which don't
             have the REG_POINTER flag correctly set.  After the CSE pass,
             instruction recognition can fail if we don't consistently
             set this flag when performing register copies.  This should
             also improve the opportunities for creating insns that use
             unscaled indexing.  */
          if (REG_P (operand0) && REG_P (operand1))
            {
              if (REG_POINTER (operand1)
                  && !REG_POINTER (operand0)
                  && !HARD_REGISTER_P (operand0))
                copy_reg_pointer (operand0, operand1);
              else if (REG_POINTER (operand0)
                       && !REG_POINTER (operand1)
                       && !HARD_REGISTER_P (operand1))
                copy_reg_pointer (operand1, operand0);
            }

          /* When MEMs are broken out, the REG_POINTER flag doesn't
             get set.  In some cases, we can set the REG_POINTER flag
             from the declaration for the MEM.  */
          if (REG_P (operand0)
              && GET_CODE (operand1) == MEM
              && !REG_POINTER (operand0))
            {
              tree decl = MEM_EXPR (operand1);

              /* Set the register pointer flag and register alignment
                 if the declaration for this memory reference is a
                 pointer type.  Fortran indirect argument references
                 are ignored.  */
              if (decl
                  && !(flag_argument_noalias > 1
                       && TREE_CODE (decl) == INDIRECT_REF
                       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
                {
                  tree type;

                  /* If this is a COMPONENT_REF, use the FIELD_DECL from
                     tree operand 1.  */
                  if (TREE_CODE (decl) == COMPONENT_REF)
                    decl = TREE_OPERAND (decl, 1);

                  type = TREE_TYPE (decl);
                  if (TREE_CODE (type) == ARRAY_TYPE)
                    type = get_inner_array_type (type);

                  if (POINTER_TYPE_P (type))
                    {
                      int align;

                      type = TREE_TYPE (type);
                      /* Using TYPE_ALIGN_OK is rather conservative as
                         only the ada frontend actually sets it.  */
                      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
                               : BITS_PER_UNIT);
                      mark_reg_pointer (operand0, align);
                    }
                }
            }

          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
          && !(reload_in_progress || reload_completed))
        {
          rtx temp = gen_reg_rtx (DFmode);

          emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
          emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
          return 1;
        }
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
        {
          /* Run this case quickly.  */
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
      if (! (reload_in_progress || reload_completed))
        {
          operands[0] = validize_mem (operand0);
          operands[1] = operand1 = force_reg (mode, operand1);
        }
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
          && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
        {
          ishighonly = 1;
          operand1 = XEXP (operand1, 0);
        }
      if (symbolic_operand (operand1, mode))
        {
          /* Argh.  The assembler and linker can't handle arithmetic
             involving plabels.

             So we force the plabel into memory, load operand0 from
             the memory location, then add in the constant part.  */
          if ((GET_CODE (operand1) == CONST
               && GET_CODE (XEXP (operand1, 0)) == PLUS
               && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
              || function_label_operand (operand1, mode))
            {
              rtx temp, const_part;

              /* Figure out what (if any) scratch register to use.  */
              if (reload_in_progress || reload_completed)
                {
                  scratch_reg = scratch_reg ? scratch_reg : operand0;
                  /* SCRATCH_REG will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  scratch_reg = force_mode (word_mode, scratch_reg);
                }
              else if (flag_pic)
                scratch_reg = gen_reg_rtx (Pmode);

              if (GET_CODE (operand1) == CONST)
                {
                  /* Save away the constant part of the expression.  */
                  const_part = XEXP (XEXP (operand1, 0), 1);
                  gcc_assert (GET_CODE (const_part) == CONST_INT);

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
                }
              else
                {
                  /* No constant part.  */
                  const_part = NULL_RTX;

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, operand1);
                }

              /* Get the address of the memory location.  PIC-ify it if
                 necessary.  */
              temp = XEXP (temp, 0);
              if (flag_pic)
                temp = legitimize_pic_address (temp, mode, scratch_reg);

              /* Put the address of the memory location into our destination
                 register.  */
              operands[1] = temp;
              emit_move_sequence (operands, mode, scratch_reg);

              /* Now load from the memory location into our destination
                 register.  */
              operands[1] = gen_rtx_MEM (Pmode, operands[0]);
              emit_move_sequence (operands, mode, scratch_reg);

              /* And add back in the constant part.  */
              if (const_part != NULL_RTX)
                expand_inc (operand0, const_part);

              return 1;
            }

          if (flag_pic)
            {
              rtx temp;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (Pmode);

              /* (const (plus (symbol) (const_int))) must be forced to
                 memory during/after reload if the const_int will not fit
                 in 14 bits.  */
              if (GET_CODE (operand1) == CONST
                  && GET_CODE (XEXP (operand1, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
                  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
                  && (reload_completed || reload_in_progress)
                  && flag_pic)
                {
                  rtx const_mem = force_const_mem (mode, operand1);
                  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
                                                        mode, temp);
                  operands[1] = replace_equiv_address (const_mem, operands[1]);
                  emit_move_sequence (operands, mode, temp);
                }
              else
                {
                  operands[1] = legitimize_pic_address (operand1, mode, temp);
                  if (REG_P (operand0) && REG_P (operands[1]))
                    copy_reg_pointer (operand0, operands[1]);
                  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
                }
            }
          /* On the HPPA, references to data space are supposed to use dp,
             register 27, but showing it in the RTL inhibits various cse
             and loop optimizations.  */
          else
            {
              rtx temp, set;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (mode);

              /* Loading a SYMBOL_REF into a register makes that register
                 safe to be used as the base in an indexed address.

                 Don't mark hard registers though.  That loses.  */
              if (GET_CODE (operand0) == REG
                  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (operand0, BITS_PER_UNIT);
              if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (temp, BITS_PER_UNIT);

              if (ishighonly)
                set = gen_rtx_SET (mode, operand0, temp);
              else
                set = gen_rtx_SET (VOIDmode,
                                   operand0,
                                   gen_rtx_LO_SUM (mode, temp, operand1));

              emit_insn (gen_rtx_SET (VOIDmode,
                                      temp,
                                      gen_rtx_HIGH (mode, operand1)));
              emit_insn (set);
            }

          return 1;
        }
      else if (pa_tls_referenced_p (operand1))
        {
          rtx tmp = operand1;
          rtx addend = NULL;

          if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
            {
              addend = XEXP (XEXP (tmp, 0), 1);
              tmp = XEXP (XEXP (tmp, 0), 0);
            }

          gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
          tmp = legitimize_tls_address (tmp);
          if (addend)
            {
              tmp = gen_rtx_PLUS (mode, tmp, addend);
              tmp = force_operand (tmp, operands[0]);
            }
          operands[1] = tmp;
        }
1850 else if (GET_CODE (operand1) != CONST_INT
1851 || !cint_ok_for_move (INTVAL (operand1)))
1853 rtx insn, temp;
1854 rtx op1 = operand1;
1855 HOST_WIDE_INT value = 0;
1856 HOST_WIDE_INT insv = 0;
1857 int insert = 0;
1859 if (GET_CODE (operand1) == CONST_INT)
1860 value = INTVAL (operand1);
1862 if (TARGET_64BIT
1863 && GET_CODE (operand1) == CONST_INT
1864 && HOST_BITS_PER_WIDE_INT > 32
1865 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1867 HOST_WIDE_INT nval;
1869 /* Extract the low order 32 bits of the value and sign extend.
1870 If the new value is the same as the original value, we can
1871 use the original value as-is. If the new value is
1872 different, we use it and insert the most-significant 32-bits
1873 of the original value into the final result. */
1874 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1875 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
1876 if (value != nval)
1878 #if HOST_BITS_PER_WIDE_INT > 32
1879 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1880 #endif
1881 insert = 1;
1882 value = nval;
1883 operand1 = GEN_INT (nval);
1887 if (reload_in_progress || reload_completed)
1888 temp = scratch_reg ? scratch_reg : operand0;
1889 else
1890 temp = gen_reg_rtx (mode);
1892 /* We don't directly split DImode constants on 32-bit targets
1893 because PLUS uses an 11-bit immediate and the insn sequence
1894 generated is not as efficient as the one using HIGH/LO_SUM. */
1895 if (GET_CODE (operand1) == CONST_INT
1896 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1897 && !insert)
1899 /* Directly break constant into high and low parts. This
1900 provides better optimization opportunities because various
1901 passes recognize constants split with PLUS but not LO_SUM.
1902 We use a 14-bit signed low part except when the addition
1903 of 0x4000 to the high part might change the sign of the
1904 high part. */
1905 HOST_WIDE_INT low = value & 0x3fff;
1906 HOST_WIDE_INT high = value & ~ 0x3fff;
1908 if (low >= 0x2000)
1910 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
1911 high += 0x2000;
1912 else
1913 high += 0x4000;
1916 low = value - high;
1918 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
1919 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1921 else
1923 emit_insn (gen_rtx_SET (VOIDmode, temp,
1924 gen_rtx_HIGH (mode, operand1)));
1925 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
1928 insn = emit_move_insn (operands[0], operands[1]);
1930 /* Now insert the most significant 32 bits of the value
1931 into the register. When we don't have a second register
1932 available, it could take up to nine instructions to load
1933 a 64-bit integer constant. Prior to reload, we force
1934 constants that would take more than three instructions
1935 to load to the constant pool. During and after reload,
1936 we have to handle all possible values. */
1937 if (insert)
1939 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
1940 register and the value to be inserted is outside the
1941 range that can be loaded with three depdi instructions. */
1942 if (temp != operand0 && (insv >= 16384 || insv < -16384))
1944 operand1 = GEN_INT (insv);
1946 emit_insn (gen_rtx_SET (VOIDmode, temp,
1947 gen_rtx_HIGH (mode, operand1)));
1948 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
1949 emit_insn (gen_insv (operand0, GEN_INT (32),
1950 const0_rtx, temp));
1952 else
1954 int len = 5, pos = 27;
1956 /* Insert the bits using the depdi instruction. */
1957 while (pos >= 0)
1959 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
1960 HOST_WIDE_INT sign = v5 < 0;
1962 /* Left extend the insertion. */
1963 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
1964 while (pos > 0 && (insv & 1) == sign)
1966 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
1967 len += 1;
1968 pos -= 1;
1971 emit_insn (gen_insv (operand0, GEN_INT (len),
1972 GEN_INT (pos), GEN_INT (v5)));
1974 len = pos > 0 && pos < 5 ? pos : 5;
1975 pos -= len;
1980 REG_NOTES (insn)
1981 = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));
1983 return 1;
1986 /* Now have insn-emit do whatever it normally does. */
1987 return 0;
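/* Illustrative sketch of the constant split used above, assuming the
   0x7fffc000 and HImode corner cases do not apply. Given VALUE, it
   produces HIGH + LOW == VALUE with LOW in [-0x2000, 0x1fff], e.g.
   0x12345678 -> 0x12344000 + 0x1678, and 0x12347777 -> 0x12348000 +
   (-0x889) since the raw low part 0x3777 exceeds 0x1fff. Not built;
   for exposition only. */
#if 0
static void
split_hi_lo14 (HOST_WIDE_INT value, HOST_WIDE_INT *high, HOST_WIDE_INT *low)
{
  *low = value & 0x3fff;
  *high = value & ~0x3fff;
  if (*low >= 0x2000)
    /* Round the high part up so the low part fits in 14 signed bits.  */
    *high += 0x4000;
  *low = value - *high;
}
#endif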
1990 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
1991 it will need a link/runtime reloc). */
1994 reloc_needed (tree exp)
1996 int reloc = 0;
1998 switch (TREE_CODE (exp))
2000 case ADDR_EXPR:
2001 return 1;
2003 case PLUS_EXPR:
2004 case MINUS_EXPR:
2005 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2006 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2007 break;
2009 case NOP_EXPR:
2010 case CONVERT_EXPR:
2011 case NON_LVALUE_EXPR:
2012 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2013 break;
2015 case CONSTRUCTOR:
2017 tree value;
2018 unsigned HOST_WIDE_INT ix;
2020 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2021 if (value)
2022 reloc |= reloc_needed (value);
2024 break;
2026 case ERROR_MARK:
2027 break;
2029 default:
2030 break;
2032 return reloc;
2035 /* Does operand (which is a symbolic_operand) live in text space?
2036 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2037 will be true. */
2040 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2042 if (GET_CODE (operand) == CONST)
2043 operand = XEXP (XEXP (operand, 0), 0);
2044 if (flag_pic)
2046 if (GET_CODE (operand) == SYMBOL_REF)
2047 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2049 else
2051 if (GET_CODE (operand) == SYMBOL_REF)
2052 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2054 return 1;
2058 /* Return the best assembler insn template
2059 for moving operands[1] into operands[0] as a fullword. */
2060 const char *
2061 singlemove_string (rtx *operands)
2063 HOST_WIDE_INT intval;
2065 if (GET_CODE (operands[0]) == MEM)
2066 return "stw %r1,%0";
2067 if (GET_CODE (operands[1]) == MEM)
2068 return "ldw %1,%0";
2069 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2071 long i;
2072 REAL_VALUE_TYPE d;
2074 gcc_assert (GET_MODE (operands[1]) == SFmode);
2076 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2077 bit pattern. */
2078 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2079 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2081 operands[1] = GEN_INT (i);
2082 /* Fall through to CONST_INT case. */
2084 if (GET_CODE (operands[1]) == CONST_INT)
2086 intval = INTVAL (operands[1]);
2088 if (VAL_14_BITS_P (intval))
2089 return "ldi %1,%0";
2090 else if ((intval & 0x7ff) == 0)
2091 return "ldil L'%1,%0";
2092 else if (zdepi_cint_p (intval))
2093 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2094 else
2095 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2097 return "copy %1,%0";
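/* Illustrative mappings for the cases above (hypothetical constants):
   5 satisfies VAL_14_BITS_P and yields "ldi 5,%0"; 0x12345000 has its
   low 11 bits clear and yields "ldil L'0x12345000,%0"; 0x12345678
   falls through to the two-insn "ldil L'%1,%0" / "ldo R'%1(%0),%0"
   sequence. The zdepi form covers constants that a single deposit of
   a sign-extended 5-bit immediate can produce. */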
2101 /* Compute position (in OP[1]) and width (in OP[2])
2102 useful for copying IMM to a register using the zdepi
2103 instruction. Store the immediate value to insert in OP[0]. */
2104 static void
2105 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2107 int lsb, len;
2109 /* Find the least significant set bit in IMM. */
2110 for (lsb = 0; lsb < 32; lsb++)
2112 if ((imm & 1) != 0)
2113 break;
2114 imm >>= 1;
2117 /* Choose variants based on *sign* of the 5-bit field. */
2118 if ((imm & 0x10) == 0)
2119 len = (lsb <= 28) ? 4 : 32 - lsb;
2120 else
2122 /* Find the width of the bitstring in IMM. */
2123 for (len = 5; len < 32; len++)
2125 if ((imm & (1 << len)) == 0)
2126 break;
2129 /* Sign extend IMM as a 5-bit value. */
2130 imm = (imm & 0xf) - 0x10;
2133 op[0] = imm;
2134 op[1] = 31 - lsb;
2135 op[2] = len;
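/* Worked example: IMM = 0x3e0 (one bits in positions 5..9). The scan
   finds lsb = 5 and shifts IMM down to 0x1f; bit 4 of the shifted
   value is set, the run of ones is 5 bits wide, and sign extension
   gives -1. Hence OP = { -1, 26, 5 }, i.e. "zdepi -1,26,5", which
   deposits five one bits ending at big-endian bit 26 and reconstructs
   0x3e0. */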
2138 /* Compute position (in OP[1]) and width (in OP[2])
2139 useful for copying IMM to a register using the depdi,z
2140 instruction. Store the immediate value to insert in OP[0]. */
2141 void
2142 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2144 HOST_WIDE_INT lsb, len;
2146 /* Find the least significant set bit in IMM. */
2147 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2149 if ((imm & 1) != 0)
2150 break;
2151 imm >>= 1;
2154 /* Choose variants based on *sign* of the 5-bit field. */
2155 if ((imm & 0x10) == 0)
2156 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2157 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2158 else
2160 /* Find the width of the bitstring in IMM. */
2161 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2163 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2164 break;
2167 /* Sign extend IMM as a 5-bit value. */
2168 imm = (imm & 0xf) - 0x10;
2171 op[0] = imm;
2172 op[1] = 63 - lsb;
2173 op[2] = len;
2176 /* Output assembler code to perform a doubleword move insn
2177 with operands OPERANDS. */
2179 const char *
2180 output_move_double (rtx *operands)
2182 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2183 rtx latehalf[2];
2184 rtx addreg0 = 0, addreg1 = 0;
2186 /* First classify both operands. */
2188 if (REG_P (operands[0]))
2189 optype0 = REGOP;
2190 else if (offsettable_memref_p (operands[0]))
2191 optype0 = OFFSOP;
2192 else if (GET_CODE (operands[0]) == MEM)
2193 optype0 = MEMOP;
2194 else
2195 optype0 = RNDOP;
2197 if (REG_P (operands[1]))
2198 optype1 = REGOP;
2199 else if (CONSTANT_P (operands[1]))
2200 optype1 = CNSTOP;
2201 else if (offsettable_memref_p (operands[1]))
2202 optype1 = OFFSOP;
2203 else if (GET_CODE (operands[1]) == MEM)
2204 optype1 = MEMOP;
2205 else
2206 optype1 = RNDOP;
2208 /* Check for the cases that the operand constraints are not
2209 supposed to allow to happen. */
2210 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2212 /* Handle copies between general and floating registers. */
2214 if (optype0 == REGOP && optype1 == REGOP
2215 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2217 if (FP_REG_P (operands[0]))
2219 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2220 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2221 return "{fldds|fldd} -16(%%sp),%0";
2223 else
2225 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2226 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2227 return "{ldws|ldw} -12(%%sp),%R0";
2231 /* Handle auto-decrementing and auto-incrementing loads and stores
2232 specially, since the structure of the function doesn't work
2233 for them without major modification. Do this better once the
2234 port understands the PA's general inc/dec addressing.
2235 (This was written by tege. Chide him if it doesn't work.) */
2237 if (optype0 == MEMOP)
2239 /* We have to output the address syntax ourselves, since print_operand
2240 doesn't deal with the addresses we want to use. Fix this later. */
2242 rtx addr = XEXP (operands[0], 0);
2243 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2245 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2247 operands[0] = XEXP (addr, 0);
2248 gcc_assert (GET_CODE (operands[1]) == REG
2249 && GET_CODE (operands[0]) == REG);
2251 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2253 /* No overlap between high target register and address
2254 register. (We do this in a non-obvious way to
2255 save a register file writeback) */
2256 if (GET_CODE (addr) == POST_INC)
2257 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2258 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2260 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2262 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2264 operands[0] = XEXP (addr, 0);
2265 gcc_assert (GET_CODE (operands[1]) == REG
2266 && GET_CODE (operands[0]) == REG);
2268 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2269 /* No overlap between high target register and address
2270 register. (We do this in a non-obvious way to save a
2271 register file writeback) */
2272 if (GET_CODE (addr) == PRE_INC)
2273 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2274 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2277 if (optype1 == MEMOP)
2279 /* We have to output the address syntax ourselves, since print_operand
2280 doesn't deal with the addresses we want to use. Fix this later. */
2282 rtx addr = XEXP (operands[1], 0);
2283 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2285 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2287 operands[1] = XEXP (addr, 0);
2288 gcc_assert (GET_CODE (operands[0]) == REG
2289 && GET_CODE (operands[1]) == REG);
2291 if (!reg_overlap_mentioned_p (high_reg, addr))
2293 /* No overlap between high target register and address
2294 register. (We do this in a non-obvious way to
2295 save a register file writeback) */
2296 if (GET_CODE (addr) == POST_INC)
2297 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2298 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2300 else
2302 /* This is an undefined situation. We should load into the
2303 address register *and* update that register. Probably
2304 we don't need to handle this at all. */
2305 if (GET_CODE (addr) == POST_INC)
2306 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2307 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2310 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2312 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2314 operands[1] = XEXP (addr, 0);
2315 gcc_assert (GET_CODE (operands[0]) == REG
2316 && GET_CODE (operands[1]) == REG);
2318 if (!reg_overlap_mentioned_p (high_reg, addr))
2320 /* No overlap between high target register and address
2321 register. (We do this in a non-obvious way to
2322 save a register file writeback) */
2323 if (GET_CODE (addr) == PRE_INC)
2324 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2325 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2327 else
2329 /* This is an undefined situation. We should load into the
2330 address register *and* update that register. Probably
2331 we don't need to handle this at all. */
2332 if (GET_CODE (addr) == PRE_INC)
2333 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2334 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2337 else if (GET_CODE (addr) == PLUS
2338 && GET_CODE (XEXP (addr, 0)) == MULT)
2340 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2342 if (!reg_overlap_mentioned_p (high_reg, addr))
2344 rtx xoperands[4];
2346 xoperands[0] = high_reg;
2347 xoperands[1] = XEXP (addr, 1);
2348 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2349 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2350 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2351 xoperands);
2352 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2354 else
2356 rtx xoperands[4];
2358 xoperands[0] = high_reg;
2359 xoperands[1] = XEXP (addr, 1);
2360 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2361 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2362 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2363 xoperands);
2364 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2369 /* If an operand is an unoffsettable memory ref, find a register
2370 we can increment temporarily to make it refer to the second word. */
2372 if (optype0 == MEMOP)
2373 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2375 if (optype1 == MEMOP)
2376 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2378 /* Ok, we can do one word at a time.
2379 Normally we do the low-numbered word first.
2381 In either case, set up in LATEHALF the operands to use
2382 for the high-numbered word and in some cases alter the
2383 operands in OPERANDS to be suitable for the low-numbered word. */
2385 if (optype0 == REGOP)
2386 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2387 else if (optype0 == OFFSOP)
2388 latehalf[0] = adjust_address (operands[0], SImode, 4);
2389 else
2390 latehalf[0] = operands[0];
2392 if (optype1 == REGOP)
2393 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2394 else if (optype1 == OFFSOP)
2395 latehalf[1] = adjust_address (operands[1], SImode, 4);
2396 else if (optype1 == CNSTOP)
2397 split_double (operands[1], &operands[1], &latehalf[1]);
2398 else
2399 latehalf[1] = operands[1];
2401 /* If the first move would clobber the source of the second one,
2402 do them in the other order.
2404 This can happen in two cases:
2406 mem -> register where the first half of the destination register
2407 is the same register used in the memory's address. Reload
2408 can create such insns.
2410 mem in this case will be either register indirect or register
2411 indirect plus a valid offset.
2413 register -> register move where REGNO(dst) == REGNO(src + 1)
2414 someone (Tim/Tege?) claimed this can happen for parameter loads.
2416 Handle mem -> register case first. */
2417 if (optype0 == REGOP
2418 && (optype1 == MEMOP || optype1 == OFFSOP)
2419 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2420 operands[1], 0))
2422 /* Do the late half first. */
2423 if (addreg1)
2424 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2425 output_asm_insn (singlemove_string (latehalf), latehalf);
2427 /* Then clobber. */
2428 if (addreg1)
2429 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2430 return singlemove_string (operands);
2433 /* Now handle register -> register case. */
2434 if (optype0 == REGOP && optype1 == REGOP
2435 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2437 output_asm_insn (singlemove_string (latehalf), latehalf);
2438 return singlemove_string (operands);
2441 /* Normal case: do the two words, low-numbered first. */
2443 output_asm_insn (singlemove_string (operands), operands);
2445 /* Make any unoffsettable addresses point at high-numbered word. */
2446 if (addreg0)
2447 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2448 if (addreg1)
2449 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2451 /* Do that word. */
2452 output_asm_insn (singlemove_string (latehalf), latehalf);
2454 /* Undo the adds we just did. */
2455 if (addreg0)
2456 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2457 if (addreg1)
2458 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2460 return "";
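/* Illustrative case of the ordering logic above (hypothetical
   operands): a DImode load into %r4/%r5 from memory addressed by %r4
   must emit "ldw 4(%r4),%r5" before "ldw 0(%r4),%r4"; doing the
   low-numbered word first would clobber the address register. */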
2463 const char *
2464 output_fp_move_double (rtx *operands)
2466 if (FP_REG_P (operands[0]))
2468 if (FP_REG_P (operands[1])
2469 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2470 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2471 else
2472 output_asm_insn ("fldd%F1 %1,%0", operands);
2474 else if (FP_REG_P (operands[1]))
2476 output_asm_insn ("fstd%F0 %1,%0", operands);
2478 else
2480 rtx xoperands[2];
2482 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2484 /* This is a pain. You have to be prepared to deal with an
2485 arbitrary address here, including pre/post increment/decrement,
2487 so avoid this in the MD. */
2488 gcc_assert (GET_CODE (operands[0]) == REG);
2490 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2491 xoperands[0] = operands[0];
2492 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2494 return "";
2497 /* Return a REG that occurs in ADDR with coefficient 1.
2498 ADDR can be effectively incremented by incrementing REG. */
2500 static rtx
2501 find_addr_reg (rtx addr)
2503 while (GET_CODE (addr) == PLUS)
2505 if (GET_CODE (XEXP (addr, 0)) == REG)
2506 addr = XEXP (addr, 0);
2507 else if (GET_CODE (XEXP (addr, 1)) == REG)
2508 addr = XEXP (addr, 1);
2509 else if (CONSTANT_P (XEXP (addr, 0)))
2510 addr = XEXP (addr, 1);
2511 else if (CONSTANT_P (XEXP (addr, 1)))
2512 addr = XEXP (addr, 0);
2513 else
2514 gcc_unreachable ();
2516 gcc_assert (GET_CODE (addr) == REG);
2517 return addr;
2520 /* Emit code to perform a block move.
2522 OPERANDS[0] is the destination pointer as a REG, clobbered.
2523 OPERANDS[1] is the source pointer as a REG, clobbered.
2524 OPERANDS[2] is a register for temporary storage.
2525 OPERANDS[3] is a register for temporary storage.
2526 OPERANDS[4] is the size as a CONST_INT
2527 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2528 OPERANDS[6] is another temporary register. */
2530 const char *
2531 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2533 int align = INTVAL (operands[5]);
2534 unsigned long n_bytes = INTVAL (operands[4]);
2536 /* We can't move more than a word at a time because the PA
2537 has no integer move insns longer than a word. (Could use fp mem ops?) */
2538 if (align > (TARGET_64BIT ? 8 : 4))
2539 align = (TARGET_64BIT ? 8 : 4);
2541 /* Note that we know each loop below will execute at least twice
2542 (else we would have open-coded the copy). */
2543 switch (align)
2545 case 8:
2546 /* Pre-adjust the loop counter. */
2547 operands[4] = GEN_INT (n_bytes - 16);
2548 output_asm_insn ("ldi %4,%2", operands);
2550 /* Copying loop. */
2551 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2552 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2553 output_asm_insn ("std,ma %3,8(%0)", operands);
2554 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2555 output_asm_insn ("std,ma %6,8(%0)", operands);
2557 /* Handle the residual. There could be up to 15 bytes of
2558 residual to copy! */
2559 if (n_bytes % 16 != 0)
2561 operands[4] = GEN_INT (n_bytes % 8);
2562 if (n_bytes % 16 >= 8)
2563 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2564 if (n_bytes % 8 != 0)
2565 output_asm_insn ("ldd 0(%1),%6", operands);
2566 if (n_bytes % 16 >= 8)
2567 output_asm_insn ("std,ma %3,8(%0)", operands);
2568 if (n_bytes % 8 != 0)
2569 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2571 return "";
2573 case 4:
2574 /* Pre-adjust the loop counter. */
2575 operands[4] = GEN_INT (n_bytes - 8);
2576 output_asm_insn ("ldi %4,%2", operands);
2578 /* Copying loop. */
2579 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2580 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2581 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2582 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2583 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2585 /* Handle the residual. There could be up to 7 bytes of
2586 residual to copy! */
2587 if (n_bytes % 8 != 0)
2589 operands[4] = GEN_INT (n_bytes % 4);
2590 if (n_bytes % 8 >= 4)
2591 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2592 if (n_bytes % 4 != 0)
2593 output_asm_insn ("ldw 0(%1),%6", operands);
2594 if (n_bytes % 8 >= 4)
2595 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2596 if (n_bytes % 4 != 0)
2597 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2599 return "";
2601 case 2:
2602 /* Pre-adjust the loop counter. */
2603 operands[4] = GEN_INT (n_bytes - 4);
2604 output_asm_insn ("ldi %4,%2", operands);
2606 /* Copying loop. */
2607 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2608 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2609 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2610 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2611 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2613 /* Handle the residual. */
2614 if (n_bytes % 4 != 0)
2616 if (n_bytes % 4 >= 2)
2617 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2618 if (n_bytes % 2 != 0)
2619 output_asm_insn ("ldb 0(%1),%6", operands);
2620 if (n_bytes % 4 >= 2)
2621 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2622 if (n_bytes % 2 != 0)
2623 output_asm_insn ("stb %6,0(%0)", operands);
2625 return "";
2627 case 1:
2628 /* Pre-adjust the loop counter. */
2629 operands[4] = GEN_INT (n_bytes - 2);
2630 output_asm_insn ("ldi %4,%2", operands);
2632 /* Copying loop. */
2633 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2634 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2635 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2636 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2637 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2639 /* Handle the residual. */
2640 if (n_bytes % 2 != 0)
2642 output_asm_insn ("ldb 0(%1),%3", operands);
2643 output_asm_insn ("stb %3,0(%0)", operands);
2645 return "";
2647 default:
2648 gcc_unreachable ();
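/* Worked example for the word-aligned case (hypothetical operands):
   n_bytes = 11, align = 4. The counter starts at 3, the loop copies
   8 bytes in one pass, and the residual code loads one more word and
   stores only the final 3 bytes with "{stbys|stby},e %6,3(%0)",
   totaling 11 bytes. */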
2652 /* Count the number of insns necessary to handle this block move.
2654 Basic structure is the same as output_block_move, except that we
2655 count insns rather than emit them. */
2657 static int
2658 compute_movmem_length (rtx insn)
2660 rtx pat = PATTERN (insn);
2661 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2662 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2663 unsigned int n_insns = 0;
2665 /* We can't move more than a word at a time because the PA
2666 has no integer move insns longer than a word. (Could use fp mem ops?) */
2667 if (align > (TARGET_64BIT ? 8 : 4))
2668 align = (TARGET_64BIT ? 8 : 4);
2670 /* The basic copying loop. */
2671 n_insns = 6;
2673 /* Residuals. */
2674 if (n_bytes % (2 * align) != 0)
2676 if ((n_bytes % (2 * align)) >= align)
2677 n_insns += 2;
2679 if ((n_bytes % align) != 0)
2680 n_insns += 2;
2683 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2684 return n_insns * 4;
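/* Worked example: n_bytes = 11 with align = 4 gives the 6-insn loop
   plus 2 residual insns (11 % 8 = 3 is below the alignment, so only
   the trailing partial-word load/store pair is counted), i.e. 8 insns
   = 32 bytes -- matching what output_block_move emits for the same
   operands. */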
2687 /* Emit code to perform a block clear.
2689 OPERANDS[0] is the destination pointer as a REG, clobbered.
2690 OPERANDS[1] is a register for temporary storage.
2691 OPERANDS[2] is the size as a CONST_INT
2692 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2694 const char *
2695 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2697 int align = INTVAL (operands[3]);
2698 unsigned long n_bytes = INTVAL (operands[2]);
2700 /* We can't clear more than a word at a time because the PA
2701 has no integer move insns longer than a word. */
2702 if (align > (TARGET_64BIT ? 8 : 4))
2703 align = (TARGET_64BIT ? 8 : 4);
2705 /* Note that we know each loop below will execute at least twice
2706 (else we would have open-coded the copy). */
2707 switch (align)
2709 case 8:
2710 /* Pre-adjust the loop counter. */
2711 operands[2] = GEN_INT (n_bytes - 16);
2712 output_asm_insn ("ldi %2,%1", operands);
2714 /* Loop. */
2715 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2716 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2717 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2719 /* Handle the residual. There could be up to 15 bytes of
2720 residual to clear! */
2721 if (n_bytes % 16 != 0)
2723 operands[2] = GEN_INT (n_bytes % 8);
2724 if (n_bytes % 16 >= 8)
2725 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2726 if (n_bytes % 8 != 0)
2727 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2729 return "";
2731 case 4:
2732 /* Pre-adjust the loop counter. */
2733 operands[2] = GEN_INT (n_bytes - 8);
2734 output_asm_insn ("ldi %2,%1", operands);
2736 /* Loop. */
2737 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2738 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2739 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2741 /* Handle the residual. There could be up to 7 bytes of
2742 residual to clear! */
2743 if (n_bytes % 8 != 0)
2745 operands[2] = GEN_INT (n_bytes % 4);
2746 if (n_bytes % 8 >= 4)
2747 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2748 if (n_bytes % 4 != 0)
2749 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2751 return "";
2753 case 2:
2754 /* Pre-adjust the loop counter. */
2755 operands[2] = GEN_INT (n_bytes - 4);
2756 output_asm_insn ("ldi %2,%1", operands);
2758 /* Loop. */
2759 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2760 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2761 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2763 /* Handle the residual. */
2764 if (n_bytes % 4 != 0)
2766 if (n_bytes % 4 >= 2)
2767 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2768 if (n_bytes % 2 != 0)
2769 output_asm_insn ("stb %%r0,0(%0)", operands);
2771 return "";
2773 case 1:
2774 /* Pre-adjust the loop counter. */
2775 operands[2] = GEN_INT (n_bytes - 2);
2776 output_asm_insn ("ldi %2,%1", operands);
2778 /* Loop. */
2779 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2780 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2781 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2783 /* Handle the residual. */
2784 if (n_bytes % 2 != 0)
2785 output_asm_insn ("stb %%r0,0(%0)", operands);
2787 return "";
2789 default:
2790 gcc_unreachable ();
2794 /* Count the number of insns necessary to handle this block clear.
2796 Basic structure is the same as output_block_clear, except that we
2797 count insns rather than emit them. */
2799 static int
2800 compute_clrmem_length (rtx insn)
2802 rtx pat = PATTERN (insn);
2803 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2804 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2805 unsigned int n_insns = 0;
2807 /* We can't clear more than a word at a time because the PA
2808 has no integer move insns longer than a word. */
2809 if (align > (TARGET_64BIT ? 8 : 4))
2810 align = (TARGET_64BIT ? 8 : 4);
2812 /* The basic loop. */
2813 n_insns = 4;
2815 /* Residuals. */
2816 if (n_bytes % (2 * align) != 0)
2818 if ((n_bytes % (2 * align)) >= align)
2819 n_insns++;
2821 if ((n_bytes % align) != 0)
2822 n_insns++;
2825 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2826 return n_insns * 4;
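/* Return a string to perform a bitwise-and of operands[1] with
   operands[2], storing the result in operands[0]. */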
2830 const char *
2831 output_and (rtx *operands)
2833 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2835 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2836 int ls0, ls1, ms0, p, len;
2838 for (ls0 = 0; ls0 < 32; ls0++)
2839 if ((mask & (1 << ls0)) == 0)
2840 break;
2842 for (ls1 = ls0; ls1 < 32; ls1++)
2843 if ((mask & (1 << ls1)) != 0)
2844 break;
2846 for (ms0 = ls1; ms0 < 32; ms0++)
2847 if ((mask & (1 << ms0)) == 0)
2848 break;
2850 gcc_assert (ms0 == 32);
2852 if (ls1 == 32)
2854 len = ls0;
2856 gcc_assert (len);
2858 operands[2] = GEN_INT (len);
2859 return "{extru|extrw,u} %1,31,%2,%0";
2861 else
2863 /* We could use this `depi' for the case above as well, but `depi'
2864 requires one more register file access than an `extru'. */
2866 p = 31 - ls0;
2867 len = ls1 - ls0;
2869 operands[2] = GEN_INT (p);
2870 operands[3] = GEN_INT (len);
2871 return "{depi|depwi} 0,%2,%3,%0";
2874 else
2875 return "and %1,%2,%0";
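/* Illustrative masks for the cases above: 0x000000ff gives ls0 = 8,
   ls1 = 32 and thus "{extru|extrw,u} %1,31,8,%0" (keep the low eight
   bits); 0xffff00ff gives ls0 = 8, ls1 = 16, ms0 = 32 and thus
   "{depi|depwi} 0,23,8,%0" (clear bits 8..15). Masks whose zero bits
   are not one contiguous run would trip the ms0 assertion and are
   assumed to be rejected before this point. */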
2878 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2879 storing the result in operands[0]. */
2880 const char *
2881 output_64bit_and (rtx *operands)
2883 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2885 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2886 int ls0, ls1, ms0, p, len;
2888 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2889 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2890 break;
2892 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2893 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2894 break;
2896 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2897 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2898 break;
2900 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2902 if (ls1 == HOST_BITS_PER_WIDE_INT)
2904 len = ls0;
2906 gcc_assert (len);
2908 operands[2] = GEN_INT (len);
2909 return "extrd,u %1,63,%2,%0";
2911 else
2913 /* We could use this `depdi' for the case above as well, but `depdi'
2914 requires one more register file access than an `extrd,u'. */
2916 p = 63 - ls0;
2917 len = ls1 - ls0;
2919 operands[2] = GEN_INT (p);
2920 operands[3] = GEN_INT (len);
2921 return "depdi 0,%2,%3,%0";
2924 else
2925 return "and %1,%2,%0";
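/* Return a string to perform a bitwise-or of operands[1] with
   operands[2], storing the result in operands[0]. operands[2] must
   be a constant whose one bits form a single contiguous run. */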
2928 const char *
2929 output_ior (rtx *operands)
2931 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2932 int bs0, bs1, p, len;
2934 if (INTVAL (operands[2]) == 0)
2935 return "copy %1,%0";
2937 for (bs0 = 0; bs0 < 32; bs0++)
2938 if ((mask & (1 << bs0)) != 0)
2939 break;
2941 for (bs1 = bs0; bs1 < 32; bs1++)
2942 if ((mask & (1 << bs1)) == 0)
2943 break;
2945 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2947 p = 31 - bs0;
2948 len = bs1 - bs0;
2950 operands[2] = GEN_INT (p);
2951 operands[3] = GEN_INT (len);
2952 return "{depi|depwi} -1,%2,%3,%0";
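/* Illustrative mask: 0x00000ff0 gives bs0 = 4, bs1 = 12 and thus
   "{depi|depwi} -1,27,8,%0", depositing eight one bits into bit
   positions 4..11 -- equivalent to the requested inclusive-or. */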
2955 /* Return a string to perform a bitwise-or of operands[1] with operands[2],
2956 storing the result in operands[0]. */
2957 const char *
2958 output_64bit_ior (rtx *operands)
2960 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2961 int bs0, bs1, p, len;
2963 if (INTVAL (operands[2]) == 0)
2964 return "copy %1,%0";
2966 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
2967 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
2968 break;
2970 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
2971 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
2972 break;
2974 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
2975 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2977 p = 63 - bs0;
2978 len = bs1 - bs0;
2980 operands[2] = GEN_INT (p);
2981 operands[3] = GEN_INT (len);
2982 return "depdi -1,%2,%3,%0";
2985 /* Target hook for assembling integer objects. This code handles
2986 aligned SI and DI integers specially since function references
2987 must be preceded by P%. */
2989 static bool
2990 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
2992 if (size == UNITS_PER_WORD
2993 && aligned_p
2994 && function_label_operand (x, VOIDmode))
2996 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
2997 output_addr_const (asm_out_file, x);
2998 fputc ('\n', asm_out_file);
2999 return true;
3001 return default_assemble_integer (x, size, aligned_p);
3004 /* Output an ascii string. */
3005 void
3006 output_ascii (FILE *file, const char *p, int size)
3008 int i;
3009 int chars_output;
3010 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3012 /* The HP assembler can only take strings of 256 characters at one
3013 time. This is a limitation on input line length, *not* the
3014 length of the string. Sigh. Even worse, it seems that the
3015 restriction is in number of input characters (see \xnn &
3016 \whatever). So we have to do this very carefully. */
3018 fputs ("\t.STRING \"", file);
3020 chars_output = 0;
3021 for (i = 0; i < size; i += 4)
3023 int co = 0;
3024 int io = 0;
3025 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3027 register unsigned int c = (unsigned char) p[i + io];
3029 if (c == '\"' || c == '\\')
3030 partial_output[co++] = '\\';
3031 if (c >= ' ' && c < 0177)
3032 partial_output[co++] = c;
3033 else
3035 unsigned int hexd;
3036 partial_output[co++] = '\\';
3037 partial_output[co++] = 'x';
3038 hexd = c / 16 + '0';
3039 if (hexd > '9')
3040 hexd -= '9' - 'a' + 1;
3041 partial_output[co++] = hexd;
3042 hexd = c % 16 + '0';
3043 if (hexd > '9')
3044 hexd -= '9' - 'a' + 1;
3045 partial_output[co++] = hexd;
3048 if (chars_output + co > 243)
3050 fputs ("\"\n\t.STRING \"", file);
3051 chars_output = 0;
3053 fwrite (partial_output, 1, (size_t) co, file);
3054 chars_output += co;
3055 co = 0;
3057 fputs ("\"\n", file);
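/* Illustrative output (hypothetical input): the five bytes
   a b " \ 0x01 would be emitted as

	.STRING "ab\"\\\x01"

   with a fresh ".STRING" started whenever the 243-character budget
   above would otherwise be exceeded. */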
3060 /* Try to rewrite floating point comparisons & branches to avoid
3061 useless add,tr insns.
3063 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3064 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3065 first attempt to remove useless add,tr insns. It is zero
3066 for the second pass as reorg sometimes leaves bogus REG_DEAD
3067 notes lying around.
3069 When CHECK_NOTES is zero we can only eliminate add,tr insns
3070 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3071 instructions. */
3072 static void
3073 remove_useless_addtr_insns (int check_notes)
3075 rtx insn;
3076 static int pass = 0;
3078 /* This is fairly cheap, so always run it when optimizing. */
3079 if (optimize > 0)
3081 int fcmp_count = 0;
3082 int fbranch_count = 0;
3084 /* Walk all the insns in this function looking for fcmp & fbranch
3085 instructions. Keep track of how many of each we find. */
3086 for (insn = get_insns (); insn; insn = next_insn (insn))
3088 rtx tmp;
3090 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3091 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3092 continue;
3094 tmp = PATTERN (insn);
3096 /* It must be a set. */
3097 if (GET_CODE (tmp) != SET)
3098 continue;
3100 /* If the destination is CCFP, then we've found an fcmp insn. */
3101 tmp = SET_DEST (tmp);
3102 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3104 fcmp_count++;
3105 continue;
3108 tmp = PATTERN (insn);
3109 /* If this is an fbranch instruction, bump the fbranch counter. */
3110 if (GET_CODE (tmp) == SET
3111 && SET_DEST (tmp) == pc_rtx
3112 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3113 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3114 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3115 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3117 fbranch_count++;
3118 continue;
3123 /* Find all floating point compare + branch insns. If possible,
3124 reverse the comparison & the branch to avoid add,tr insns. */
3125 for (insn = get_insns (); insn; insn = next_insn (insn))
3127 rtx tmp, next;
3129 /* Ignore anything that isn't an INSN. */
3130 if (GET_CODE (insn) != INSN)
3131 continue;
3133 tmp = PATTERN (insn);
3135 /* It must be a set. */
3136 if (GET_CODE (tmp) != SET)
3137 continue;
3139 /* The destination must be CCFP, which is register zero. */
3140 tmp = SET_DEST (tmp);
3141 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3142 continue;
3144 /* INSN should be a set of CCFP.
3146 See if the result of this insn is used in a reversed FP
3147 conditional branch. If so, reverse our condition and
3148 the branch. Doing so avoids useless add,tr insns. */
3149 next = next_insn (insn);
3150 while (next)
3152 /* Jumps, calls and labels stop our search. */
3153 if (GET_CODE (next) == JUMP_INSN
3154 || GET_CODE (next) == CALL_INSN
3155 || GET_CODE (next) == CODE_LABEL)
3156 break;
3158 /* As does another fcmp insn. */
3159 if (GET_CODE (next) == INSN
3160 && GET_CODE (PATTERN (next)) == SET
3161 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3162 && REGNO (SET_DEST (PATTERN (next))) == 0)
3163 break;
3165 next = next_insn (next);
3168 /* Is NEXT a branch? */
3169 if (next
3170 && GET_CODE (next) == JUMP_INSN)
3172 rtx pattern = PATTERN (next);
3174 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3175 and CCFP dies, then reverse our conditional and the branch
3176 to avoid the add,tr. */
3177 if (GET_CODE (pattern) == SET
3178 && SET_DEST (pattern) == pc_rtx
3179 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3180 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3181 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3182 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3183 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3184 && (fcmp_count == fbranch_count
3185 || (check_notes
3186 && find_regno_note (next, REG_DEAD, 0))))
3188 /* Reverse the branch. */
3189 tmp = XEXP (SET_SRC (pattern), 1);
3190 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3191 XEXP (SET_SRC (pattern), 2) = tmp;
3192 INSN_CODE (next) = -1;
3194 /* Reverse our condition. */
3195 tmp = PATTERN (insn);
3196 PUT_CODE (XEXP (tmp, 1),
3197 (reverse_condition_maybe_unordered
3198 (GET_CODE (XEXP (tmp, 1)))));
3204 pass = !pass;
3208 /* You may have trouble believing this, but this is the 32 bit HP-PA
3209 stack layout. Wow.
3211 Offset Contents
3213 Variable arguments (optional; any number may be allocated)
3215 SP-(4*(N+9)) arg word N
3217 SP-56 arg word 5
3218 SP-52 arg word 4
3220 Fixed arguments (must be allocated; may remain unused)
3222 SP-48 arg word 3
3223 SP-44 arg word 2
3224 SP-40 arg word 1
3225 SP-36 arg word 0
3227 Frame Marker
3229 SP-32 External Data Pointer (DP)
3230 SP-28 External sr4
3231 SP-24 External/stub RP (RP')
3232 SP-20 Current RP
3233 SP-16 Static Link
3234 SP-12 Clean up
3235 SP-8 Calling Stub RP (RP'')
3236 SP-4 Previous SP
3238 Top of Frame
3240 SP-0 Stack Pointer (points to next available address)
3244 /* This function saves registers as follows. Registers marked with ' are
3245 this function's registers (as opposed to the previous function's).
3246 If a frame_pointer isn't needed, r4 is saved as a general register;
3247 the space for the frame pointer is still allocated, though, to keep
3248 things simple.
3251 Top of Frame
3253 SP (FP') Previous FP
3254 SP + 4 Alignment filler (sigh)
3255 SP + 8 Space for locals reserved here.
3259 SP + n All call saved registers used.
3263 SP + o All call saved fp registers used.
3267 SP + p (SP') points to next available address.
3271 /* Global variables set by output_function_prologue(). */
3272 /* Size of frame. Need to know this to emit return insns from
3273 leaf procedures. */
3274 static HOST_WIDE_INT actual_fsize, local_fsize;
3275 static int save_fregs;
3277 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3278 Handle case where DISP > 8k by using the add_high_const patterns.
3280 Note in DISP > 8k case, we will leave the high part of the address
3281 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this.*/
3283 static void
3284 store_reg (int reg, HOST_WIDE_INT disp, int base)
3286 rtx insn, dest, src, basereg;
3288 src = gen_rtx_REG (word_mode, reg);
3289 basereg = gen_rtx_REG (Pmode, base);
3290 if (VAL_14_BITS_P (disp))
3292 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3293 insn = emit_move_insn (dest, src);
3295 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3297 rtx delta = GEN_INT (disp);
3298 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3300 emit_move_insn (tmpreg, delta);
3301 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3302 if (DO_FRAME_NOTES)
3304 REG_NOTES (insn)
3305 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3306 gen_rtx_SET (VOIDmode, tmpreg,
3307 gen_rtx_PLUS (Pmode, basereg, delta)),
3308 REG_NOTES (insn));
3309 RTX_FRAME_RELATED_P (insn) = 1;
3311 dest = gen_rtx_MEM (word_mode, tmpreg);
3312 insn = emit_move_insn (dest, src);
3314 else
3316 rtx delta = GEN_INT (disp);
3317 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3318 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3320 emit_move_insn (tmpreg, high);
3321 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3322 insn = emit_move_insn (dest, src);
3323 if (DO_FRAME_NOTES)
3325 REG_NOTES (insn)
3326 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3327 gen_rtx_SET (VOIDmode,
3328 gen_rtx_MEM (word_mode,
3329 gen_rtx_PLUS (word_mode, basereg,
3330 delta)),
3331 src),
3332 REG_NOTES (insn));
3336 if (DO_FRAME_NOTES)
3337 RTX_FRAME_RELATED_P (insn) = 1;
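/* Illustrative sketch of the small and large displacement cases above
   (hypothetical operands): store_reg (4, 64, STACK_POINTER_REGNUM)
   folds the 14-bit displacement directly into "stw %r4,64(%sp)",
   while a displacement beyond 8k goes through %r1, roughly
   "addil L'disp,%sp" followed by "stw %r4,R'disp(%r1)", leaving the
   high part of the address in %r1 as noted above. */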
3340 /* Emit RTL to store REG at the memory location specified by BASE and then
3341 add MOD to BASE. MOD must be <= 8k. */
3343 static void
3344 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3346 rtx insn, basereg, srcreg, delta;
3348 gcc_assert (VAL_14_BITS_P (mod));
3350 basereg = gen_rtx_REG (Pmode, base);
3351 srcreg = gen_rtx_REG (word_mode, reg);
3352 delta = GEN_INT (mod);
3354 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3355 if (DO_FRAME_NOTES)
3357 RTX_FRAME_RELATED_P (insn) = 1;
3359 /* RTX_FRAME_RELATED_P must be set on each frame related set
3360 in a parallel with more than one element. */
3361 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3362 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3366 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3367 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3368 whether to add a frame note or not.
3370 In the DISP > 8k case, we leave the high part of the address in %r1.
3371 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3373 static void
3374 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3376 rtx insn;
3378 if (VAL_14_BITS_P (disp))
3380 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3381 plus_constant (gen_rtx_REG (Pmode, base), disp));
3383 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3385 rtx basereg = gen_rtx_REG (Pmode, base);
3386 rtx delta = GEN_INT (disp);
3387 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3389 emit_move_insn (tmpreg, delta);
3390 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3391 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3392 if (DO_FRAME_NOTES)
3393 REG_NOTES (insn)
3394 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3395 gen_rtx_SET (VOIDmode, tmpreg,
3396 gen_rtx_PLUS (Pmode, basereg, delta)),
3397 REG_NOTES (insn));
3399 else
3401 rtx basereg = gen_rtx_REG (Pmode, base);
3402 rtx delta = GEN_INT (disp);
3403 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3405 emit_move_insn (tmpreg,
3406 gen_rtx_PLUS (Pmode, basereg,
3407 gen_rtx_HIGH (Pmode, delta)));
3408 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3409 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3412 if (DO_FRAME_NOTES && note)
3413 RTX_FRAME_RELATED_P (insn) = 1;
3416 HOST_WIDE_INT
3417 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3419 int freg_saved = 0;
3420 int i, j;
3422 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3423 be consistent with the rounding and size calculation done here.
3424 Change them at the same time. */
3426 /* We do our own stack alignment. First, round the size of the
3427 stack locals up to a word boundary. */
3428 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3430 /* Space for previous frame pointer + filler. If any frame is
3431 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3432 waste some space here for the sake of HP compatibility. The
3433 first slot is only used when the frame pointer is needed. */
3434 if (size || frame_pointer_needed)
3435 size += STARTING_FRAME_OFFSET;
3437 /* If the current function calls __builtin_eh_return, then we need
3438 to allocate stack space for registers that will hold data for
3439 the exception handler. */
3440 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3442 unsigned int i;
3444 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3445 continue;
3446 size += i * UNITS_PER_WORD;
3449 /* Account for space used by the callee general register saves. */
3450 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3451 if (regs_ever_live[i])
3452 size += UNITS_PER_WORD;
3454 /* Account for space used by the callee floating point register saves. */
3455 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3456 if (regs_ever_live[i]
3457 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3459 freg_saved = 1;
3461 /* We always save both halves of the FP register, so always
3462 increment the frame size by 8 bytes. */
3463 size += 8;
3466 /* If any of the floating registers are saved, account for the
3467 alignment needed for the floating point register save block. */
3468 if (freg_saved)
3470 size = (size + 7) & ~7;
3471 if (fregs_live)
3472 *fregs_live = 1;
3475 /* The various ABIs include space for the outgoing parameters in the
3476 size of the current function's stack frame. We don't need to align
3477 for the outgoing arguments as their alignment is set by the final
3478 rounding for the frame as a whole. */
3479 size += current_function_outgoing_args_size;
3481 /* Allocate space for the fixed frame marker. This space must be
3482 allocated for any function that makes calls or allocates
3483 stack space. */
3484 if (!current_function_is_leaf || size)
3485 size += TARGET_64BIT ? 48 : 32;
3487 /* Finally, round to the preferred stack boundary. */
3488 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3489 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
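/* Worked example (illustrative, assuming a 32-bit target where
   STARTING_FRAME_OFFSET is 8 and PREFERRED_STACK_BOUNDARY is 512
   bits): a non-leaf function with 40 bytes of locals, no callee saves
   and 16 bytes of outgoing arguments accumulates 40 + 8 + 16 + 32
   = 96 bytes, which the final rounding raises to 128. */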
3492 /* Generate the assembly code for function entry. FILE is a stdio
3493 stream to output the code to. SIZE is an int: how many units of
3494 temporary storage to allocate.
3496 Refer to the array `regs_ever_live' to determine which registers to
3497 save; `regs_ever_live[I]' is nonzero if register number I is ever
3498 used in the function. This function is responsible for knowing
3499 which registers should not be saved even if used. */
3501 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3502 of memory. If any fpu reg is used in the function, we allocate
3503 such a block here, at the bottom of the frame, just in case it's needed.
3505 If this function is a leaf procedure, then we may choose not
3506 to do a "save" insn. The decision about whether or not
3507 to do this is made in regclass.c. */
3509 static void
3510 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3512 /* The function's label and associated .PROC must never be
3513 separated and must be output *after* any profiling declarations
3514 to avoid changing spaces/subspaces within a procedure. */
3515 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3516 fputs ("\t.PROC\n", file);
3518 /* hppa_expand_prologue does the dirty work now. We just need
3519 to output the assembler directives which denote the start
3520 of a function. */
3521 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3522 if (regs_ever_live[2])
3523 fputs (",CALLS,SAVE_RP", file);
3524 else
3525 fputs (",NO_CALLS", file);
3527 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3528 at the beginning of the frame and that it is used as the frame
3529 pointer for the frame. We do this because our current frame
3530 layout doesn't conform to that specified in the HP runtime
3531 documentation and we need a way to indicate to programs such as
3532 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3533 isn't used by HP compilers but is supported by the assembler.
3534 However, SAVE_SP is supposed to indicate that the previous stack
3535 pointer has been saved in the frame marker. */
3536 if (frame_pointer_needed)
3537 fputs (",SAVE_SP", file);
3539 /* Pass on information about the number of callee register saves
3540 performed in the prologue.
3542 The compiler is supposed to pass the highest register number
3543 saved, the assembler then has to adjust that number before
3544 entering it into the unwind descriptor (to account for any
3545 caller saved registers with lower register numbers than the
3546 first callee saved register). */
3547 if (gr_saved)
3548 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3550 if (fr_saved)
3551 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3553 fputs ("\n\t.ENTRY\n", file);
3555 remove_useless_addtr_insns (0);
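/* Illustrative directives (hypothetical function): a 128-byte frame
   that makes calls and saves two callee general registers would be
   announced roughly as

	.PROC
	.CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=4
	.ENTRY
 */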
3558 void
3559 hppa_expand_prologue (void)
3561 int merge_sp_adjust_with_store = 0;
3562 HOST_WIDE_INT size = get_frame_size ();
3563 HOST_WIDE_INT offset;
3564 int i;
3565 rtx insn, tmpreg;
3567 gr_saved = 0;
3568 fr_saved = 0;
3569 save_fregs = 0;
3571 /* Compute total size for frame pointer, filler, locals and rounding to
3572 the next word boundary. Similar code appears in compute_frame_size
3573 and must be changed in tandem with this code. */
3574 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3575 if (local_fsize || frame_pointer_needed)
3576 local_fsize += STARTING_FRAME_OFFSET;
3578 actual_fsize = compute_frame_size (size, &save_fregs);
3580 /* Compute a few things we will use often. */
3581 tmpreg = gen_rtx_REG (word_mode, 1);
3583 /* Save RP first. The calling conventions manual states RP will
3584 always be stored into the caller's frame at sp - 20 or sp - 16
3585 depending on which ABI is in use. */
3586 if (regs_ever_live[2] || current_function_calls_eh_return)
3587 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3589 /* Allocate the local frame and set up the frame pointer if needed. */
3590 if (actual_fsize != 0)
3592 if (frame_pointer_needed)
3594 /* Copy the old frame pointer temporarily into %r1. Set up the
3595 new stack pointer, then store away the saved old frame pointer
3596 into the stack at sp and at the same time update the stack
3597 pointer by actual_fsize bytes. Two versions, first
3598 handles small (<8k) frames. The second handles large (>=8k)
3599 frames. */
3600 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3601 if (DO_FRAME_NOTES)
3602 RTX_FRAME_RELATED_P (insn) = 1;
3604 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3605 if (DO_FRAME_NOTES)
3606 RTX_FRAME_RELATED_P (insn) = 1;
3608 if (VAL_14_BITS_P (actual_fsize))
3609 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3610 else
3612 /* It is incorrect to store the saved frame pointer at *sp,
3613 then increment sp (writes beyond the current stack boundary).
3615 So instead use stwm to store at *sp and post-increment the
3616 stack pointer as an atomic operation. Then increment sp to
3617 finish allocating the new frame. */
3618 HOST_WIDE_INT adjust1 = 8192 - 64;
3619 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3621 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3622 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3623 adjust2, 1);
3626 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3627 we need to store the previous stack pointer (frame pointer)
3628 into the frame marker on targets that use the HP unwind
3629 library. This allows the HP unwind library to be used to
3630 unwind GCC frames. However, we are not fully compatible
3631 with the HP library because our frame layout differs from
3632 that specified in the HP runtime specification.
3634 We don't want a frame note on this instruction as the frame
3635 marker moves during dynamic stack allocation.
3637 This instruction also serves as a blockage to prevent
3638 register spills from being scheduled before the stack
3639 pointer is raised. This is necessary as we store
3640 registers using the frame pointer as a base register,
3641 and the frame pointer is set before sp is raised. */
3642 if (TARGET_HPUX_UNWIND_LIBRARY)
3644 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3645 GEN_INT (TARGET_64BIT ? -8 : -4));
3647 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3648 frame_pointer_rtx);
3650 else
3651 emit_insn (gen_blockage ());
3653 /* No frame pointer needed. */
3654 else
3656 /* In some cases we can perform the first callee register save
3657 and allocate the stack frame at the same time. If so, just
3658 make a note of it and defer allocating the frame until saving
3659 the callee registers. */
3660 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3661 merge_sp_adjust_with_store = 1;
3662 /* Cannot optimize. Adjust the stack frame by actual_fsize
3663 bytes. */
3664 else
3665 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3666 actual_fsize, 1);
3670 /* Normal register save.
3672 Do not save the frame pointer in the frame_pointer_needed case. It
3673 was done earlier. */
3674 if (frame_pointer_needed)
3676 offset = local_fsize;
3678 /* Saving the EH return data registers in the frame is the simplest
3679 way to get the frame unwind information emitted. We put them
3680 just before the general registers. */
3681 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3683 unsigned int i, regno;
3685 for (i = 0; ; ++i)
3687 regno = EH_RETURN_DATA_REGNO (i);
3688 if (regno == INVALID_REGNUM)
3689 break;
3691 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3692 offset += UNITS_PER_WORD;
3696 for (i = 18; i >= 4; i--)
3697 if (regs_ever_live[i] && ! call_used_regs[i])
3699 store_reg (i, offset, FRAME_POINTER_REGNUM);
3700 offset += UNITS_PER_WORD;
3701 gr_saved++;
3703 /* Account for %r3 which is saved in a special place. */
3704 gr_saved++;
3706 /* No frame pointer needed. */
3707 else
3709 offset = local_fsize - actual_fsize;
3711 /* Saving the EH return data registers in the frame is the simplest
3712 way to get the frame unwind information emitted. */
3713 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3715 unsigned int i, regno;
3717 for (i = 0; ; ++i)
3719 regno = EH_RETURN_DATA_REGNO (i);
3720 if (regno == INVALID_REGNUM)
3721 break;
3723 /* If merge_sp_adjust_with_store is nonzero, then we can
3724 optimize the first save. */
3725 if (merge_sp_adjust_with_store)
3727 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3728 merge_sp_adjust_with_store = 0;
3730 else
3731 store_reg (regno, offset, STACK_POINTER_REGNUM);
3732 offset += UNITS_PER_WORD;
3736 for (i = 18; i >= 3; i--)
3737 if (regs_ever_live[i] && ! call_used_regs[i])
3739 /* If merge_sp_adjust_with_store is nonzero, then we can
3740 optimize the first GR save. */
3741 if (merge_sp_adjust_with_store)
3743 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3744 merge_sp_adjust_with_store = 0;
3746 else
3747 store_reg (i, offset, STACK_POINTER_REGNUM);
3748 offset += UNITS_PER_WORD;
3749 gr_saved++;
3752 /* If we wanted to merge the SP adjustment with a GR save, but we never
3753 did any GR saves, then just emit the adjustment here. */
3754 if (merge_sp_adjust_with_store)
3755 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3756 actual_fsize, 1);
3759 /* The hppa calling conventions say that %r19, the pic offset
3760 register, is saved at sp - 32 (in this function's frame)
3761 when generating PIC code. FIXME: What is the correct thing
3762 to do for functions which make no calls and allocate no
3763 frame? Do we need to allocate a frame, or can we just omit
3764 the save? For now we'll just omit the save.
3766 We don't want a note on this insn as the frame marker can
3767 move if there is a dynamic stack allocation. */
3768 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3770 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3772 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3776 /* Align pointer properly (doubleword boundary). */
3777 offset = (offset + 7) & ~7;
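/* Worked example (illustrative): (offset + 7) & ~7 rounds OFFSET up to
   the next multiple of 8, so 13 -> 16 and 17 -> 24, while multiples of
   8 such as 16 are unchanged.  */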
3779 /* Floating point register store. */
3780 if (save_fregs)
3782 rtx base;
3784 /* First get the frame or stack pointer to the start of the FP register
3785 save area. */
3786 if (frame_pointer_needed)
3788 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3789 base = frame_pointer_rtx;
3791 else
3793 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3794 base = stack_pointer_rtx;
3797 /* Now actually save the FP registers. */
3798 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3800 if (regs_ever_live[i]
3801 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3803 rtx addr, insn, reg;
3804 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3805 reg = gen_rtx_REG (DFmode, i);
3806 insn = emit_move_insn (addr, reg);
3807 if (DO_FRAME_NOTES)
3809 RTX_FRAME_RELATED_P (insn) = 1;
3810 if (TARGET_64BIT)
3812 rtx mem = gen_rtx_MEM (DFmode,
3813 plus_constant (base, offset));
3814 REG_NOTES (insn)
3815 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3816 gen_rtx_SET (VOIDmode, mem, reg),
3817 REG_NOTES (insn));
3819 else
3821 rtx meml = gen_rtx_MEM (SFmode,
3822 plus_constant (base, offset));
3823 rtx memr = gen_rtx_MEM (SFmode,
3824 plus_constant (base, offset + 4));
3825 rtx regl = gen_rtx_REG (SFmode, i);
3826 rtx regr = gen_rtx_REG (SFmode, i + 1);
3827 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3828 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3829 rtvec vec;
3831 RTX_FRAME_RELATED_P (setl) = 1;
3832 RTX_FRAME_RELATED_P (setr) = 1;
3833 vec = gen_rtvec (2, setl, setr);
3834 REG_NOTES (insn)
3835 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3836 gen_rtx_SEQUENCE (VOIDmode, vec),
3837 REG_NOTES (insn));
3840 offset += GET_MODE_SIZE (DFmode);
3841 fr_saved++;
3847 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3848 Handle case where DISP > 8k by using the add_high_const patterns. */
3850 static void
3851 load_reg (int reg, HOST_WIDE_INT disp, int base)
3853 rtx dest = gen_rtx_REG (word_mode, reg);
3854 rtx basereg = gen_rtx_REG (Pmode, base);
3855 rtx src;
3857 if (VAL_14_BITS_P (disp))
3858 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3859 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3861 rtx delta = GEN_INT (disp);
3862 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3864 emit_move_insn (tmpreg, delta);
3865 if (TARGET_DISABLE_INDEXING)
3867 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3868 src = gen_rtx_MEM (word_mode, tmpreg);
3870 else
3871 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3873 else
3875 rtx delta = GEN_INT (disp);
3876 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3877 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3879 emit_move_insn (tmpreg, high);
3880 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3883 emit_move_insn (dest, src);
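/* Hedged sketch (hypothetical helper, not part of pa.c): VAL_14_BITS_P
   accepts displacements that fit the 14-bit signed immediate field of
   PA load/store instructions, i.e. -8192 <= disp < 8192.  A plain-C
   equivalent, for illustration only:  */

static int
fits_14_bit_signed (long disp)
{
  return disp >= -8192 && disp < 8192;
}

/* When this holds, load_reg above emits a single load; otherwise it
   must first build the address in %r1.  */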
3886 /* Update the total code bytes output to the text section. */
3888 static void
3889 update_total_code_bytes (int nbytes)
3891 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3892 && !IN_NAMED_SECTION_P (cfun->decl))
3894 if (INSN_ADDRESSES_SET_P ())
3896 unsigned long old_total = total_code_bytes;
3898 total_code_bytes += nbytes;
3900 /* Be prepared to handle overflows. */
3901 if (old_total > total_code_bytes)
3902 total_code_bytes = -1;
3904 else
3905 total_code_bytes = -1;
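/* Standalone sketch (illustrative, not part of pa.c) of the
   saturate-on-overflow pattern used above: if the unsigned counter
   wraps, it is pinned at the sentinel value -1 (all bits set).  */

static unsigned long
add_code_bytes_saturating (unsigned long total, unsigned long nbytes)
{
  unsigned long old_total = total;

  total += nbytes;
  if (old_total > total)	/* The sum wrapped past the maximum.  */
    total = (unsigned long) -1;
  return total;
}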
3909 /* This function generates the assembly code for function exit.
3910 Args are as for output_function_prologue ().
3912 The function epilogue should not depend on the current stack
3913 pointer! It should use the frame pointer only. This is mandatory
3914 because of alloca; we also take advantage of it to omit stack
3915 adjustments before returning. */
3917 static void
3918 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3920 rtx insn = get_last_insn ();
3922 last_address = 0;
3924 /* hppa_expand_epilogue does the dirty work now. We just need
3925 to output the assembler directives which denote the end
3926 of a function.
3928 To make debuggers happy, emit a nop if the epilogue was completely
3929 eliminated due to a volatile call as the last insn in the
3930 current function. That way the return address (in %r2) will
3931 always point to a valid instruction in the current function. */
3933 /* Get the last real insn. */
3934 if (GET_CODE (insn) == NOTE)
3935 insn = prev_real_insn (insn);
3937 /* If it is a sequence, then look inside. */
3938 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
3939 insn = XVECEXP (PATTERN (insn), 0, 0);
3941 /* If insn is a CALL_INSN, then it must be a call to a volatile
3942 function (otherwise there would be epilogue insns). */
3943 if (insn && GET_CODE (insn) == CALL_INSN)
3945 fputs ("\tnop\n", file);
3946 last_address += 4;
3949 fputs ("\t.EXIT\n\t.PROCEND\n", file);
3951 if (TARGET_SOM && TARGET_GAS)
3953 /* We're done with this subspace except possibly for some additional
3954 debug information. Forget that we are in this subspace to ensure
3955 that the next function is output in its own subspace. */
3956 in_section = NULL;
3957 cfun->machine->in_nsubspa = 2;
3960 if (INSN_ADDRESSES_SET_P ())
3962 insn = get_last_nonnote_insn ();
3963 last_address += INSN_ADDRESSES (INSN_UID (insn));
3964 if (INSN_P (insn))
3965 last_address += insn_default_length (insn);
3966 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
3967 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
3970 /* Finally, update the total number of code bytes output so far. */
3971 update_total_code_bytes (last_address);
3974 void
3975 hppa_expand_epilogue (void)
3977 rtx tmpreg;
3978 HOST_WIDE_INT offset;
3979 HOST_WIDE_INT ret_off = 0;
3980 int i;
3981 int merge_sp_adjust_with_load = 0;
3983 /* We will use this often. */
3984 tmpreg = gen_rtx_REG (word_mode, 1);
3986 /* Try to restore RP early to avoid load/use interlocks when
3987 RP gets used in the return (bv) instruction. This appears to still
3988 be necessary even when we schedule the prologue and epilogue. */
3989 if (regs_ever_live [2] || current_function_calls_eh_return)
3991 ret_off = TARGET_64BIT ? -16 : -20;
3992 if (frame_pointer_needed)
3994 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
3995 ret_off = 0;
3997 else
3999 /* No frame pointer, and stack is smaller than 8k. */
4000 if (VAL_14_BITS_P (ret_off - actual_fsize))
4002 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4003 ret_off = 0;
4008 /* General register restores. */
4009 if (frame_pointer_needed)
4011 offset = local_fsize;
4013 /* If the current function calls __builtin_eh_return, then we need
4014 to restore the saved EH data registers. */
4015 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4017 unsigned int i, regno;
4019 for (i = 0; ; ++i)
4021 regno = EH_RETURN_DATA_REGNO (i);
4022 if (regno == INVALID_REGNUM)
4023 break;
4025 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4026 offset += UNITS_PER_WORD;
4030 for (i = 18; i >= 4; i--)
4031 if (regs_ever_live[i] && ! call_used_regs[i])
4033 load_reg (i, offset, FRAME_POINTER_REGNUM);
4034 offset += UNITS_PER_WORD;
4037 else
4039 offset = local_fsize - actual_fsize;
4041 /* If the current function calls __builtin_eh_return, then we need
4042 to restore the saved EH data registers. */
4043 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4045 unsigned int i, regno;
4047 for (i = 0; ; ++i)
4049 regno = EH_RETURN_DATA_REGNO (i);
4050 if (regno == INVALID_REGNUM)
4051 break;
4053 /* Only for the first load.
4054 merge_sp_adjust_with_load holds the register load
4055 with which we will merge the sp adjustment. */
4056 if (merge_sp_adjust_with_load == 0
4057 && local_fsize == 0
4058 && VAL_14_BITS_P (-actual_fsize))
4059 merge_sp_adjust_with_load = regno;
4060 else
4061 load_reg (regno, offset, STACK_POINTER_REGNUM);
4062 offset += UNITS_PER_WORD;
4066 for (i = 18; i >= 3; i--)
4068 if (regs_ever_live[i] && ! call_used_regs[i])
4070 /* Only for the first load.
4071 merge_sp_adjust_with_load holds the register load
4072 with which we will merge the sp adjustment. */
4073 if (merge_sp_adjust_with_load == 0
4074 && local_fsize == 0
4075 && VAL_14_BITS_P (-actual_fsize))
4076 merge_sp_adjust_with_load = i;
4077 else
4078 load_reg (i, offset, STACK_POINTER_REGNUM);
4079 offset += UNITS_PER_WORD;
4084 /* Align pointer properly (doubleword boundary). */
4085 offset = (offset + 7) & ~7;
4087 /* FP register restores. */
4088 if (save_fregs)
4090 /* Adjust the register to index off of. */
4091 if (frame_pointer_needed)
4092 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4093 else
4094 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4096 /* Actually do the restores now. */
4097 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4098 if (regs_ever_live[i]
4099 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4101 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4102 rtx dest = gen_rtx_REG (DFmode, i);
4103 emit_move_insn (dest, src);
4107 /* Emit a blockage insn here to keep these insns from being moved to
4108 an earlier spot in the epilogue, or into the main instruction stream.
4110 This is necessary as we must not cut the stack back before all the
4111 restores are finished. */
4112 emit_insn (gen_blockage ());
4114 /* Reset stack pointer (and possibly frame pointer). The stack
4115 pointer is initially set to fp + 64 to avoid a race condition. */
4116 if (frame_pointer_needed)
4118 rtx delta = GEN_INT (-64);
4120 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4121 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4123 /* If we were deferring a callee register restore, do it now. */
4124 else if (merge_sp_adjust_with_load)
4126 rtx delta = GEN_INT (-actual_fsize);
4127 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4129 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4131 else if (actual_fsize != 0)
4132 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4133 - actual_fsize, 0);
4135 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4136 frame greater than 8k), do so now. */
4137 if (ret_off != 0)
4138 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4140 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4142 rtx sa = EH_RETURN_STACKADJ_RTX;
4144 emit_insn (gen_blockage ());
4145 emit_insn (TARGET_64BIT
4146 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4147 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4152 rtx hppa_pic_save_rtx (void)
4154 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4157 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4158 #define NO_DEFERRED_PROFILE_COUNTERS 0
4159 #endif
4161 /* Define heap vector type for funcdef numbers. */
4162 DEF_VEC_I(int);
4163 DEF_VEC_ALLOC_I(int,heap);
4165 /* Vector of funcdef numbers. */
4166 static VEC(int,heap) *funcdef_nos;
4168 /* Output deferred profile counters. */
4169 static void
4170 output_deferred_profile_counters (void)
4172 unsigned int i;
4173 int align, n;
4175 if (VEC_empty (int, funcdef_nos))
4176 return;
4178 switch_to_section (data_section);
4179 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4180 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4182 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4184 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4185 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4188 VEC_free (int, heap, funcdef_nos);
4191 void
4192 hppa_profile_hook (int label_no)
4194 /* We use SImode for the address of the function in both 32 and
4195 64-bit code to avoid having to provide DImode versions of the
4196 lcla2 and load_offset_label_address insn patterns. */
4197 rtx reg = gen_reg_rtx (SImode);
4198 rtx label_rtx = gen_label_rtx ();
4199 rtx begin_label_rtx, call_insn;
4200 char begin_label_name[16];
4202 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4203 label_no);
4204 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4206 if (TARGET_64BIT)
4207 emit_move_insn (arg_pointer_rtx,
4208 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4209 GEN_INT (64)));
4211 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4213 /* The address of the function is loaded into %r25 with an instruction-
4214 relative sequence that avoids the use of relocations. The sequence
4215 is split so that the load_offset_label_address instruction can
4216 occupy the delay slot of the call to _mcount. */
4217 if (TARGET_PA_20)
4218 emit_insn (gen_lcla2 (reg, label_rtx));
4219 else
4220 emit_insn (gen_lcla1 (reg, label_rtx));
4222 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4223 reg, begin_label_rtx, label_rtx));
4225 #if !NO_DEFERRED_PROFILE_COUNTERS
4227 rtx count_label_rtx, addr, r24;
4228 char count_label_name[16];
4230 VEC_safe_push (int, heap, funcdef_nos, label_no);
4231 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4232 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4234 addr = force_reg (Pmode, count_label_rtx);
4235 r24 = gen_rtx_REG (Pmode, 24);
4236 emit_move_insn (r24, addr);
4238 call_insn =
4239 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4240 gen_rtx_SYMBOL_REF (Pmode,
4241 "_mcount")),
4242 GEN_INT (TARGET_64BIT ? 24 : 12)));
4244 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4246 #else
4248 call_insn =
4249 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4250 gen_rtx_SYMBOL_REF (Pmode,
4251 "_mcount")),
4252 GEN_INT (TARGET_64BIT ? 16 : 8)));
4254 #endif
4256 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4257 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4259 /* Indicate the _mcount call cannot throw, nor will it execute a
4260 non-local goto. */
4261 REG_NOTES (call_insn)
4262 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4265 /* Fetch the return address for the frame COUNT steps up from
4266 the current frame, after the prologue. FRAMEADDR is the
4267 frame pointer of the COUNT frame.
4269 We want to ignore any export stub remnants here. To handle this,
4270 we examine the code at the return address, and if it is an export
4271 stub, we return a memory rtx for the stub return address stored
4272 at frame-24.
4274 The value returned is used in two different ways:
4276 1. To find a function's caller.
4278 2. To change the return address for a function.
4280 This function handles most instances of case 1; however, it will
4281 fail if there are two levels of stubs to execute on the return
4282 path. The only way I believe that can happen is if the return value
4283 needs a parameter relocation, which never happens for C code.
4285 This function handles most instances of case 2; however, it will
4286 fail if we did not originally have stub code on the return path
4287 but will need stub code on the new return path. This can happen if
4288 the caller & callee are both in the main program, but the new
4289 return location is in a shared library. */
4292 rtx return_addr_rtx (int count, rtx frameaddr)
4294 rtx label;
4295 rtx rp;
4296 rtx saved_rp;
4297 rtx ins;
4299 if (count != 0)
4300 return NULL_RTX;
4302 rp = get_hard_reg_initial_val (Pmode, 2);
4304 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4305 return rp;
4307 saved_rp = gen_reg_rtx (Pmode);
4308 emit_move_insn (saved_rp, rp);
4310 /* Get pointer to the instruction stream. We have to mask out the
4311 privilege level from the two low order bits of the return address
4312 pointer here so that ins will point to the start of the first
4313 instruction that would have been executed if we returned. */
4314 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
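/* Illustrative (assuming MASK_RETURN_ADDR clears the two low-order
   bits, as the comment above describes): a return address of
   0x40001003, carrying privilege level 3, masks to 0x40001000, the
   first instruction that would execute after the return.  */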
4315 label = gen_label_rtx ();
4317 /* Check the instruction stream at the normal return address for the
4318 export stub:
4320 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4321 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4322 0x00011820 | stub+16: mtsp r1,sr0
4323 0xe0400002 | stub+20: be,n 0(sr0,rp)
4325 If it is an export stub, then our return address is really in
4326 -24[frameaddr]. */
4328 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4329 NULL_RTX, SImode, 1);
4330 emit_jump_insn (gen_bne (label));
4332 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4333 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4334 emit_jump_insn (gen_bne (label));
4336 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4337 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4338 emit_jump_insn (gen_bne (label));
4340 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4341 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4343 /* If there is no export stub then just use the value saved from
4344 the return pointer register. */
4346 emit_jump_insn (gen_bne (label));
4348 /* Here we know that our return address points to an export
4349 stub. We don't want to return the address of the export stub,
4350 but rather the return address of the export stub. That return
4351 address is stored at -24[frameaddr]. */
4353 emit_move_insn (saved_rp,
4354 gen_rtx_MEM (Pmode,
4355 memory_address (Pmode,
4356 plus_constant (frameaddr,
4357 -24))));
4359 emit_label (label);
4360 return saved_rp;
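/* Hypothetical plain-C analogue (not part of pa.c) of the four
   emit_cmp_insn/gen_bne checks above; the words are the export-stub
   instructions quoted in the comment inside return_addr_rtx.  */

static int
looks_like_export_stub (const unsigned int *ins)
{
  static const unsigned int stub[4] =
    { 0x4bc23fd1, 0x004010a1, 0x00011820, 0xe0400002 };
  int i;

  for (i = 0; i < 4; i++)
    if (ins[i] != stub[i])
      return 0;
  return 1;
}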
4363 /* This is only valid once reload has completed because it depends on
4364 knowing exactly how much (if any) frame there is and...
4366 It's only valid if there is no frame marker to de-allocate and...
4368 It's only valid if %r2 hasn't been saved into the caller's frame
4369 (we're not profiling and %r2 isn't live anywhere). */
4371 int hppa_can_use_return_insn_p (void)
4373 return (reload_completed
4374 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4375 && ! regs_ever_live[2]
4376 && ! frame_pointer_needed);
4379 void
4380 emit_bcond_fp (enum rtx_code code, rtx operand0)
4382 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4383 gen_rtx_IF_THEN_ELSE (VOIDmode,
4384 gen_rtx_fmt_ee (code,
4385 VOIDmode,
4386 gen_rtx_REG (CCFPmode, 0),
4387 const0_rtx),
4388 gen_rtx_LABEL_REF (VOIDmode, operand0),
4389 pc_rtx)));
4394 rtx gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4396 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4397 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4400 /* Adjust the cost of a scheduling dependency. Return the new cost of
4401 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4403 static int
4404 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4406 enum attr_type attr_type;
4408 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4409 true dependencies as they are described with bypasses now. */
4410 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4411 return cost;
4413 if (! recog_memoized (insn))
4414 return 0;
4416 attr_type = get_attr_type (insn);
4418 switch (REG_NOTE_KIND (link))
4420 case REG_DEP_ANTI:
4421 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4422 cycles later. */
4424 if (attr_type == TYPE_FPLOAD)
4426 rtx pat = PATTERN (insn);
4427 rtx dep_pat = PATTERN (dep_insn);
4428 if (GET_CODE (pat) == PARALLEL)
4430 /* This happens for the fldXs,mb patterns. */
4431 pat = XVECEXP (pat, 0, 0);
4433 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4434 /* If this happens, we have to extend this to schedule
4435 optimally. Return 0 for now. */
4436 return 0;
4438 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4440 if (! recog_memoized (dep_insn))
4441 return 0;
4442 switch (get_attr_type (dep_insn))
4444 case TYPE_FPALU:
4445 case TYPE_FPMULSGL:
4446 case TYPE_FPMULDBL:
4447 case TYPE_FPDIVSGL:
4448 case TYPE_FPDIVDBL:
4449 case TYPE_FPSQRTSGL:
4450 case TYPE_FPSQRTDBL:
4451 /* An fpload can't be issued until one cycle before a
4452 preceding arithmetic operation has finished if
4453 the target of the fpload is any of the sources
4454 (or destination) of the arithmetic operation. */
4455 return insn_default_latency (dep_insn) - 1;
4457 default:
4458 return 0;
4462 else if (attr_type == TYPE_FPALU)
4464 rtx pat = PATTERN (insn);
4465 rtx dep_pat = PATTERN (dep_insn);
4466 if (GET_CODE (pat) == PARALLEL)
4468 /* This happens for the fldXs,mb patterns. */
4469 pat = XVECEXP (pat, 0, 0);
4471 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4472 /* If this happens, we have to extend this to schedule
4473 optimally. Return 0 for now. */
4474 return 0;
4476 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4478 if (! recog_memoized (dep_insn))
4479 return 0;
4480 switch (get_attr_type (dep_insn))
4482 case TYPE_FPDIVSGL:
4483 case TYPE_FPDIVDBL:
4484 case TYPE_FPSQRTSGL:
4485 case TYPE_FPSQRTDBL:
4486 /* An ALU flop can't be issued until two cycles before a
4487 preceding divide or sqrt operation has finished if
4488 the target of the ALU flop is any of the sources
4489 (or destination) of the divide or sqrt operation. */
4490 return insn_default_latency (dep_insn) - 2;
4492 default:
4493 return 0;
4498 /* For other anti dependencies, the cost is 0. */
4499 return 0;
4501 case REG_DEP_OUTPUT:
4502 /* Output dependency; DEP_INSN writes a register that INSN writes some
4503 cycles later. */
4504 if (attr_type == TYPE_FPLOAD)
4506 rtx pat = PATTERN (insn);
4507 rtx dep_pat = PATTERN (dep_insn);
4508 if (GET_CODE (pat) == PARALLEL)
4510 /* This happens for the fldXs,mb patterns. */
4511 pat = XVECEXP (pat, 0, 0);
4513 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4514 /* If this happens, we have to extend this to schedule
4515 optimally. Return 0 for now. */
4516 return 0;
4518 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4520 if (! recog_memoized (dep_insn))
4521 return 0;
4522 switch (get_attr_type (dep_insn))
4524 case TYPE_FPALU:
4525 case TYPE_FPMULSGL:
4526 case TYPE_FPMULDBL:
4527 case TYPE_FPDIVSGL:
4528 case TYPE_FPDIVDBL:
4529 case TYPE_FPSQRTSGL:
4530 case TYPE_FPSQRTDBL:
4531 /* An fpload can't be issued until one cycle before a
4532 preceding arithmetic operation has finished if
4533 the target of the fpload is the destination of the
4534 arithmetic operation.
4536 Exception: For PA7100LC, PA7200 and PA7300, the cost
4537 is 3 cycles, unless they bundle together. We also
4538 pay the penalty if the second insn is a fpload. */
4539 return insn_default_latency (dep_insn) - 1;
4541 default:
4542 return 0;
4546 else if (attr_type == TYPE_FPALU)
4548 rtx pat = PATTERN (insn);
4549 rtx dep_pat = PATTERN (dep_insn);
4550 if (GET_CODE (pat) == PARALLEL)
4552 /* This happens for the fldXs,mb patterns. */
4553 pat = XVECEXP (pat, 0, 0);
4555 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4556 /* If this happens, we have to extend this to schedule
4557 optimally. Return 0 for now. */
4558 return 0;
4560 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4562 if (! recog_memoized (dep_insn))
4563 return 0;
4564 switch (get_attr_type (dep_insn))
4566 case TYPE_FPDIVSGL:
4567 case TYPE_FPDIVDBL:
4568 case TYPE_FPSQRTSGL:
4569 case TYPE_FPSQRTDBL:
4570 /* An ALU flop can't be issued until two cycles before a
4571 preceding divide or sqrt operation has finished if
4572 the target of the ALU flop is also the target of
4573 the divide or sqrt operation. */
4574 return insn_default_latency (dep_insn) - 2;
4576 default:
4577 return 0;
4582 /* For other output dependencies, the cost is 0. */
4583 return 0;
4585 default:
4586 gcc_unreachable ();
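/* Worked example (illustrative; the latencies are assumed, not taken
   from the pipeline descriptions): if an anti-dependent fpload follows
   an FP multiply whose default latency is 3 cycles, the cases above
   charge 3 - 1 = 2 cycles; an ALU flop output-dependent on a divide
   with latency 8 is charged 8 - 2 = 6 cycles.  */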
4590 /* Adjust scheduling priorities. We use this to try and keep addil
4591 and the next use of %r1 close together. */
4592 static int
4593 pa_adjust_priority (rtx insn, int priority)
4595 rtx set = single_set (insn);
4596 rtx src, dest;
4597 if (set)
4599 src = SET_SRC (set);
4600 dest = SET_DEST (set);
4601 if (GET_CODE (src) == LO_SUM
4602 && symbolic_operand (XEXP (src, 1), VOIDmode)
4603 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4604 priority >>= 3;
4606 else if (GET_CODE (src) == MEM
4607 && GET_CODE (XEXP (src, 0)) == LO_SUM
4608 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4609 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4610 priority >>= 1;
4612 else if (GET_CODE (dest) == MEM
4613 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4614 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4615 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4616 priority >>= 3;
4618 return priority;
4621 /* The 700 can only issue a single insn at a time.
4622 The 7XXX processors can issue two insns at a time.
4623 The 8000 can issue 4 insns at a time. */
4624 static int
4625 pa_issue_rate (void)
4627 switch (pa_cpu)
4629 case PROCESSOR_700: return 1;
4630 case PROCESSOR_7100: return 2;
4631 case PROCESSOR_7100LC: return 2;
4632 case PROCESSOR_7200: return 2;
4633 case PROCESSOR_7300: return 2;
4634 case PROCESSOR_8000: return 4;
4636 default:
4637 gcc_unreachable ();
4643 /* Return any length adjustment needed by INSN which already has its length
4644 computed as LENGTH. Return zero if no adjustment is necessary.
4646 For the PA: function calls, millicode calls, and backwards short
4647 conditional branches with unfilled delay slots need an adjustment by +1
4648 (to account for the NOP which will be inserted into the instruction stream).
4650 Also compute the length of an inline block move here as it is too
4651 complicated to express as a length attribute in pa.md. */
4653 int pa_adjust_insn_length (rtx insn, int length)
4655 rtx pat = PATTERN (insn);
4657 /* Jumps inside switch tables which have unfilled delay slots need
4658 adjustment. */
4659 if (GET_CODE (insn) == JUMP_INSN
4660 && GET_CODE (pat) == PARALLEL
4661 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4662 return 4;
4663 /* Millicode insn with an unfilled delay slot. */
4664 else if (GET_CODE (insn) == INSN
4665 && GET_CODE (pat) != SEQUENCE
4666 && GET_CODE (pat) != USE
4667 && GET_CODE (pat) != CLOBBER
4668 && get_attr_type (insn) == TYPE_MILLI)
4669 return 4;
4670 /* Block move pattern. */
4671 else if (GET_CODE (insn) == INSN
4672 && GET_CODE (pat) == PARALLEL
4673 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4674 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4675 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4676 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4677 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4678 return compute_movmem_length (insn) - 4;
4679 /* Block clear pattern. */
4680 else if (GET_CODE (insn) == INSN
4681 && GET_CODE (pat) == PARALLEL
4682 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4683 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4684 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4685 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4686 return compute_clrmem_length (insn) - 4;
4687 /* Conditional branch with an unfilled delay slot. */
4688 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4690 /* Adjust a short backwards conditional with an unfilled delay slot. */
4691 if (GET_CODE (pat) == SET
4692 && length == 4
4693 && ! forward_branch_p (insn))
4694 return 4;
4695 else if (GET_CODE (pat) == PARALLEL
4696 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4697 && length == 4)
4698 return 4;
4699 /* Adjust dbra insn with short backwards conditional branch with
4700 unfilled delay slot -- only for case where counter is in a
4701 general register. */
4702 else if (GET_CODE (pat) == PARALLEL
4703 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4704 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4705 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4706 && length == 4
4707 && ! forward_branch_p (insn))
4708 return 4;
4709 else
4710 return 0;
4712 return 0;
4715 /* Print operand X (an rtx) in assembler syntax to file FILE.
4716 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4717 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4719 void
4720 print_operand (FILE *file, rtx x, int code)
4722 switch (code)
4724 case '#':
4725 /* Output a 'nop' if there's nothing for the delay slot. */
4726 if (dbr_sequence_length () == 0)
4727 fputs ("\n\tnop", file);
4728 return;
4729 case '*':
4730 /* Output a nullification completer if there's nothing for the
4731 delay slot or nullification is requested. */
4732 if (dbr_sequence_length () == 0 ||
4733 (final_sequence &&
4734 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4735 fputs (",n", file);
4736 return;
4737 case 'R':
4738 /* Print out the second register name of a register pair.
4739 I.e., R (6) => 7. */
4740 fputs (reg_names[REGNO (x) + 1], file);
4741 return;
4742 case 'r':
4743 /* A register or zero. */
4744 if (x == const0_rtx
4745 || (x == CONST0_RTX (DFmode))
4746 || (x == CONST0_RTX (SFmode)))
4748 fputs ("%r0", file);
4749 return;
4751 else
4752 break;
4753 case 'f':
4754 /* A register or zero (floating point). */
4755 if (x == const0_rtx
4756 || (x == CONST0_RTX (DFmode))
4757 || (x == CONST0_RTX (SFmode)))
4759 fputs ("%fr0", file);
4760 return;
4762 else
4763 break;
4764 case 'A':
4766 rtx xoperands[2];
4768 xoperands[0] = XEXP (XEXP (x, 0), 0);
4769 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4770 output_global_address (file, xoperands[1], 0);
4771 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4772 return;
4775 case 'C': /* Plain (C)ondition */
4776 case 'X':
4777 switch (GET_CODE (x))
4779 case EQ:
4780 fputs ("=", file); break;
4781 case NE:
4782 fputs ("<>", file); break;
4783 case GT:
4784 fputs (">", file); break;
4785 case GE:
4786 fputs (">=", file); break;
4787 case GEU:
4788 fputs (">>=", file); break;
4789 case GTU:
4790 fputs (">>", file); break;
4791 case LT:
4792 fputs ("<", file); break;
4793 case LE:
4794 fputs ("<=", file); break;
4795 case LEU:
4796 fputs ("<<=", file); break;
4797 case LTU:
4798 fputs ("<<", file); break;
4799 default:
4800 gcc_unreachable ();
4802 return;
4803 case 'N': /* Condition, (N)egated */
4804 switch (GET_CODE (x))
4806 case EQ:
4807 fputs ("<>", file); break;
4808 case NE:
4809 fputs ("=", file); break;
4810 case GT:
4811 fputs ("<=", file); break;
4812 case GE:
4813 fputs ("<", file); break;
4814 case GEU:
4815 fputs ("<<", file); break;
4816 case GTU:
4817 fputs ("<<=", file); break;
4818 case LT:
4819 fputs (">=", file); break;
4820 case LE:
4821 fputs (">", file); break;
4822 case LEU:
4823 fputs (">>", file); break;
4824 case LTU:
4825 fputs (">>=", file); break;
4826 default:
4827 gcc_unreachable ();
4829 return;
4830 /* For floating point comparisons. Note that the output
4831 predicates are the complement of the desired mode. The
4832 conditions for GT, GE, LT, LE and LTGT cause an invalid
4833 operation exception if the result is unordered and this
4834 exception is enabled in the floating-point status register. */
4835 case 'Y':
4836 switch (GET_CODE (x))
4838 case EQ:
4839 fputs ("!=", file); break;
4840 case NE:
4841 fputs ("=", file); break;
4842 case GT:
4843 fputs ("!>", file); break;
4844 case GE:
4845 fputs ("!>=", file); break;
4846 case LT:
4847 fputs ("!<", file); break;
4848 case LE:
4849 fputs ("!<=", file); break;
4850 case LTGT:
4851 fputs ("!<>", file); break;
4852 case UNLE:
4853 fputs ("!?<=", file); break;
4854 case UNLT:
4855 fputs ("!?<", file); break;
4856 case UNGE:
4857 fputs ("!?>=", file); break;
4858 case UNGT:
4859 fputs ("!?>", file); break;
4860 case UNEQ:
4861 fputs ("!?=", file); break;
4862 case UNORDERED:
4863 fputs ("!?", file); break;
4864 case ORDERED:
4865 fputs ("?", file); break;
4866 default:
4867 gcc_unreachable ();
4869 return;
4870 case 'S': /* Condition, operands are (S)wapped. */
4871 switch (GET_CODE (x))
4873 case EQ:
4874 fputs ("=", file); break;
4875 case NE:
4876 fputs ("<>", file); break;
4877 case GT:
4878 fputs ("<", file); break;
4879 case GE:
4880 fputs ("<=", file); break;
4881 case GEU:
4882 fputs ("<<=", file); break;
4883 case GTU:
4884 fputs ("<<", file); break;
4885 case LT:
4886 fputs (">", file); break;
4887 case LE:
4888 fputs (">=", file); break;
4889 case LEU:
4890 fputs (">>=", file); break;
4891 case LTU:
4892 fputs (">>", file); break;
4893 default:
4894 gcc_unreachable ();
4896 return;
4897 case 'B': /* Condition, (B)oth swapped and negate. */
4898 switch (GET_CODE (x))
4900 case EQ:
4901 fputs ("<>", file); break;
4902 case NE:
4903 fputs ("=", file); break;
4904 case GT:
4905 fputs (">=", file); break;
4906 case GE:
4907 fputs (">", file); break;
4908 case GEU:
4909 fputs (">>", file); break;
4910 case GTU:
4911 fputs (">>=", file); break;
4912 case LT:
4913 fputs ("<=", file); break;
4914 case LE:
4915 fputs ("<", file); break;
4916 case LEU:
4917 fputs ("<<", file); break;
4918 case LTU:
4919 fputs ("<<=", file); break;
4920 default:
4921 gcc_unreachable ();
4923 return;
4924 case 'k':
4925 gcc_assert (GET_CODE (x) == CONST_INT);
4926 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4927 return;
4928 case 'Q':
4929 gcc_assert (GET_CODE (x) == CONST_INT);
4930 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4931 return;
4932 case 'L':
4933 gcc_assert (GET_CODE (x) == CONST_INT);
4934 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4935 return;
4936 case 'O':
4937 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4938 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4939 return;
4940 case 'p':
4941 gcc_assert (GET_CODE (x) == CONST_INT);
4942 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4943 return;
4944 case 'P':
4945 gcc_assert (GET_CODE (x) == CONST_INT);
4946 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4947 return;
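/* Worked example (illustrative): for INTVAL (x) == 8, %Q prints
   64 - 8 = 56, %L prints 32 - 8 = 24, %p prints 63 - 8 = 55 and %P
   prints 31 - 8 = 23; these turn shift counts into the field
   positions that PA deposit/extract instructions expect.  */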
4948 case 'I':
4949 if (GET_CODE (x) == CONST_INT)
4950 fputs ("i", file);
4951 return;
4952 case 'M':
4953 case 'F':
4954 switch (GET_CODE (XEXP (x, 0)))
4956 case PRE_DEC:
4957 case PRE_INC:
4958 if (ASSEMBLER_DIALECT == 0)
4959 fputs ("s,mb", file);
4960 else
4961 fputs (",mb", file);
4962 break;
4963 case POST_DEC:
4964 case POST_INC:
4965 if (ASSEMBLER_DIALECT == 0)
4966 fputs ("s,ma", file);
4967 else
4968 fputs (",ma", file);
4969 break;
4970 case PLUS:
4971 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4972 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4974 if (ASSEMBLER_DIALECT == 0)
4975 fputs ("x", file);
4977 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4978 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4980 if (ASSEMBLER_DIALECT == 0)
4981 fputs ("x,s", file);
4982 else
4983 fputs (",s", file);
4985 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4986 fputs ("s", file);
4987 break;
4988 default:
4989 if (code == 'F' && ASSEMBLER_DIALECT == 0)
4990 fputs ("s", file);
4991 break;
4993 return;
4994 case 'G':
4995 output_global_address (file, x, 0);
4996 return;
4997 case 'H':
4998 output_global_address (file, x, 1);
4999 return;
5000 case 0: /* Don't do anything special */
5001 break;
5002 case 'Z':
5004 unsigned op[3];
5005 compute_zdepwi_operands (INTVAL (x), op);
5006 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5007 return;
5009 case 'z':
5011 unsigned op[3];
5012 compute_zdepdi_operands (INTVAL (x), op);
5013 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5014 return;
5016 case 'c':
5017 /* We can get here from a .vtable_inherit due to our
5018 CONSTANT_ADDRESS_P rejecting perfectly good constant
5019 addresses. */
5020 break;
5021 default:
5022 gcc_unreachable ();
5024 if (GET_CODE (x) == REG)
5026 fputs (reg_names [REGNO (x)], file);
5027 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5029 fputs ("R", file);
5030 return;
5032 if (FP_REG_P (x)
5033 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5034 && (REGNO (x) & 1) == 0)
5035 fputs ("L", file);
5037 else if (GET_CODE (x) == MEM)
5039 int size = GET_MODE_SIZE (GET_MODE (x));
5040 rtx base = NULL_RTX;
5041 switch (GET_CODE (XEXP (x, 0)))
5043 case PRE_DEC:
5044 case POST_DEC:
5045 base = XEXP (XEXP (x, 0), 0);
5046 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5047 break;
5048 case PRE_INC:
5049 case POST_INC:
5050 base = XEXP (XEXP (x, 0), 0);
5051 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5052 break;
5053 case PLUS:
5054 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5055 fprintf (file, "%s(%s)",
5056 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5057 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5058 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5059 fprintf (file, "%s(%s)",
5060 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5061 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5062 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5063 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5065 /* Because the REG_POINTER flag can get lost during reload,
5066 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5067 index and base registers in the combined move patterns. */
5068 rtx base = XEXP (XEXP (x, 0), 1);
5069 rtx index = XEXP (XEXP (x, 0), 0);
5071 fprintf (file, "%s(%s)",
5072 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5074 else
5075 output_address (XEXP (x, 0));
5076 break;
5077 default:
5078 output_address (XEXP (x, 0));
5079 break;
5082 else
5083 output_addr_const (file, x);
5086 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5088 void
5089 output_global_address (FILE *file, rtx x, int round_constant)
5092 /* Imagine (high (const (plus ...))). */
5093 if (GET_CODE (x) == HIGH)
5094 x = XEXP (x, 0);
5096 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5097 output_addr_const (file, x);
5098 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5100 output_addr_const (file, x);
5101 fputs ("-$global$", file);
5103 else if (GET_CODE (x) == CONST)
5105 const char *sep = "";
5106 int offset = 0; /* assembler wants -$global$ at end */
5107 rtx base = NULL_RTX;
5109 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5111 case SYMBOL_REF:
5112 base = XEXP (XEXP (x, 0), 0);
5113 output_addr_const (file, base);
5114 break;
5115 case CONST_INT:
5116 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5117 break;
5118 default:
5119 gcc_unreachable ();
5122 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5124 case SYMBOL_REF:
5125 base = XEXP (XEXP (x, 0), 1);
5126 output_addr_const (file, base);
5127 break;
5128 case CONST_INT:
5129 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5130 break;
5131 default:
5132 gcc_unreachable ();
5135 /* How bogus. The compiler is apparently responsible for
5136 rounding the constant if it uses an LR field selector.
5138 The linker and/or assembler seem a better place since
5139 they have to do this kind of thing already.
5141 If we fail to do this, HP's optimizing linker may eliminate
5142 an addil, but not update the ldw/stw/ldo instruction that
5143 uses the result of the addil. */
5144 if (round_constant)
5145 offset = ((offset + 0x1000) & ~0x1fff);
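/* Worked example (illustrative): ((offset + 0x1000) & ~0x1fff) rounds
   to the nearest multiple of 0x2000, e.g. 0x0fff -> 0,
   0x1000 -> 0x2000, 0x1234 -> 0x2000.  */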
5147 switch (GET_CODE (XEXP (x, 0)))
5149 case PLUS:
5150 if (offset < 0)
5152 offset = -offset;
5153 sep = "-";
5155 else
5156 sep = "+";
5157 break;
5159 case MINUS:
5160 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5161 sep = "-";
5162 break;
5164 default:
5165 gcc_unreachable ();
5168 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5169 fputs ("-$global$", file);
5170 if (offset)
5171 fprintf (file, "%s%d", sep, offset);
5173 else
5174 output_addr_const (file, x);
5177 /* Output boilerplate text to appear at the beginning of the file.
5178 There are several possible versions. */
5179 #define aputs(x) fputs(x, asm_out_file)
5180 static inline void
5181 pa_file_start_level (void)
5183 if (TARGET_64BIT)
5184 aputs ("\t.LEVEL 2.0w\n");
5185 else if (TARGET_PA_20)
5186 aputs ("\t.LEVEL 2.0\n");
5187 else if (TARGET_PA_11)
5188 aputs ("\t.LEVEL 1.1\n");
5189 else
5190 aputs ("\t.LEVEL 1.0\n");
5193 static inline void
5194 pa_file_start_space (int sortspace)
5196 aputs ("\t.SPACE $PRIVATE$");
5197 if (sortspace)
5198 aputs (",SORT=16");
5199 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5200 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5201 "\n\t.SPACE $TEXT$");
5202 if (sortspace)
5203 aputs (",SORT=8");
5204 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5205 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5208 static inline void
5209 pa_file_start_file (int want_version)
5211 if (write_symbols != NO_DEBUG)
5213 output_file_directive (asm_out_file, main_input_filename);
5214 if (want_version)
5215 aputs ("\t.version\t\"01.01\"\n");
5219 static inline void
5220 pa_file_start_mcount (const char *aswhat)
5222 if (profile_flag)
5223 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5226 static void
5227 pa_elf_file_start (void)
5229 pa_file_start_level ();
5230 pa_file_start_mcount ("ENTRY");
5231 pa_file_start_file (0);
5234 static void
5235 pa_som_file_start (void)
5237 pa_file_start_level ();
5238 pa_file_start_space (0);
5239 aputs ("\t.IMPORT $global$,DATA\n"
5240 "\t.IMPORT $$dyncall,MILLICODE\n");
5241 pa_file_start_mcount ("CODE");
5242 pa_file_start_file (0);
5245 static void
5246 pa_linux_file_start (void)
5248 pa_file_start_file (1);
5249 pa_file_start_level ();
5250 pa_file_start_mcount ("CODE");
5253 static void
5254 pa_hpux64_gas_file_start (void)
5256 pa_file_start_level ();
5257 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5258 if (profile_flag)
5259 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5260 #endif
5261 pa_file_start_file (1);
5264 static void
5265 pa_hpux64_hpas_file_start (void)
5267 pa_file_start_level ();
5268 pa_file_start_space (1);
5269 pa_file_start_mcount ("CODE");
5270 pa_file_start_file (0);
5272 #undef aputs
5274 /* Search the deferred plabel list for SYMBOL and return its internal
5275 label. If an entry for SYMBOL is not found, a new entry is created. */
5278 static rtx get_deferred_plabel (rtx symbol)
5280 const char *fname = XSTR (symbol, 0);
5281 size_t i;
5283 /* See if we have already put this function on the list of deferred
5284 plabels. This list is generally small, so a linear search is not
5285 too ugly. If it proves too slow, replace it with something faster. */
5286 for (i = 0; i < n_deferred_plabels; i++)
5287 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5288 break;
5290 /* If the deferred plabel list is empty, or this entry was not found
5291 on the list, create a new entry on the list. */
5292 if (deferred_plabels == NULL || i == n_deferred_plabels)
5294 tree id;
5296 if (deferred_plabels == 0)
5297 deferred_plabels = (struct deferred_plabel *)
5298 ggc_alloc (sizeof (struct deferred_plabel));
5299 else
5300 deferred_plabels = (struct deferred_plabel *)
5301 ggc_realloc (deferred_plabels,
5302 ((n_deferred_plabels + 1)
5303 * sizeof (struct deferred_plabel)));
5305 i = n_deferred_plabels++;
5306 deferred_plabels[i].internal_label = gen_label_rtx ();
5307 deferred_plabels[i].symbol = symbol;
5309 /* Gross. We have just implicitly taken the address of this
5310 function. Mark it in the same manner as assemble_name. */
5311 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5312 if (id)
5313 mark_referenced (id);
5316 return deferred_plabels[i].internal_label;
5319 static void
5320 output_deferred_plabels (void)
5322 size_t i;
5324 /* If we have some deferred plabels, then we need to switch into the
5325 data or readonly data section, and align it to a 4 byte boundary
5326 before outputting the deferred plabels. */
5327 if (n_deferred_plabels)
5329 switch_to_section (flag_pic ? data_section : readonly_data_section);
5330 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5333 /* Now output the deferred plabels. */
5334 for (i = 0; i < n_deferred_plabels; i++)
5336 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5337 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5338 assemble_integer (deferred_plabels[i].symbol,
5339 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5343 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5344 /* Initialize optabs to point to HPUX long double emulation routines. */
5345 static void
5346 pa_hpux_init_libfuncs (void)
5348 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5349 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5350 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5351 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5352 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5353 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5354 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5355 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5356 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5358 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5359 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5360 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5361 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5362 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5363 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5364 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5366 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5367 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5368 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5369 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5371 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5372 ? "__U_Qfcnvfxt_quad_to_sgl"
5373 : "_U_Qfcnvfxt_quad_to_sgl");
5374 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5375 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5376 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5378 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5379 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5380 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5381 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5383 #endif
5385 /* HP's millicode routines mean something special to the assembler.
5386 Keep track of which ones we have used. */
5388 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5389 static void import_milli (enum millicodes);
5390 static char imported[(int) end1000];
5391 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5392 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5393 #define MILLI_START 10
5395 static void
5396 import_milli (enum millicodes code)
5398 char str[sizeof (import_string)];
5400 if (!imported[(int) code])
5402 imported[(int) code] = 1;
5403 strcpy (str, import_string);
5404 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5405 output_asm_insn (str, 0);
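/* Standalone sketch (hypothetical, not part of pa.c) of the template
   patching above: the four '.' placeholder characters starting at
   MILLI_START (index 10) are overwritten with the millicode name.
   BUF must hold at least sizeof import_string bytes.  */

static void
demo_patch_import (char *buf, const char *name)
{
  static const char tmpl[] = ".IMPORT $$....,MILLICODE";
  int i;

  for (i = 0; tmpl[i] != '\0'; i++)
    buf[i] = tmpl[i];
  buf[i] = '\0';
  for (i = 0; i < 4 && name[i] != '\0'; i++)
    buf[10 + i] = name[i];
}

/* demo_patch_import (buf, "mulI") leaves ".IMPORT $$mulI,MILLICODE"
   in BUF, matching the directive emitted by import_milli (mulI).  */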
5409 /* The register constraints have put the operands and return value in
5410 the proper registers. */
5412 const char *
5413 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5415 import_milli (mulI);
5416 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5419 /* Emit the rtl for doing a division by a constant. */
5421 /* Do magic division millicodes exist for this value? */
5422 const int magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5424 /* We'll use an array to keep track of the magic millicodes and
5425 whether or not we've used them already. [n][0] is signed, [n][1] is
5426 unsigned. */
5428 static int div_milli[16][2];
5431 int emit_hpdiv_const (rtx *operands, int unsignedp)
5433 if (GET_CODE (operands[2]) == CONST_INT
5434 && INTVAL (operands[2]) > 0
5435 && INTVAL (operands[2]) < 16
5436 && magic_milli[INTVAL (operands[2])])
5438 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5440 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5441 emit
5442 (gen_rtx_PARALLEL
5443 (VOIDmode,
5444 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5445 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5446 SImode,
5447 gen_rtx_REG (SImode, 26),
5448 operands[2])),
5449 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5450 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5451 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5452 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5453 gen_rtx_CLOBBER (VOIDmode, ret))));
5454 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5455 return 1;
5457 return 0;
5460 const char *
5461 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5463 int divisor;
5465 /* If the divisor is a constant, try to use one of the special
5466 opcodes. */
5467 if (GET_CODE (operands[0]) == CONST_INT)
5469 static char buf[100];
5470 divisor = INTVAL (operands[0]);
5471 if (!div_milli[divisor][unsignedp])
5473 div_milli[divisor][unsignedp] = 1;
5474 if (unsignedp)
5475 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5476 else
5477 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5479 if (unsignedp)
5481 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5482 INTVAL (operands[0]));
5483 return output_millicode_call (insn,
5484 gen_rtx_SYMBOL_REF (SImode, buf));
5486 else
5488 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5489 INTVAL (operands[0]));
5490 return output_millicode_call (insn,
5491 gen_rtx_SYMBOL_REF (SImode, buf));
5494 /* Divisor isn't a special constant. */
5495 else
5497 if (unsignedp)
5499 import_milli (divU);
5500 return output_millicode_call (insn,
5501 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5503 else
5505 import_milli (divI);
5506 return output_millicode_call (insn,
5507 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
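/* Illustrative: a signed divide by 7 (a magic divisor per magic_milli)
   emits ".IMPORT $$divI_7,MILLICODE" once and then calls $$divI_7,
   while a divide by a non-magic constant such as 11 falls back to the
   generic $$divI routine with the divisor in a register.  */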
5512 /* Output a $$rem millicode to do mod. */
5514 const char *
5515 output_mod_insn (int unsignedp, rtx insn)
5517 if (unsignedp)
5519 import_milli (remU);
5520 return output_millicode_call (insn,
5521 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5523 else
5525 import_milli (remI);
5526 return output_millicode_call (insn,
5527 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5531 void
5532 output_arg_descriptor (rtx call_insn)
5534 const char *arg_regs[4];
5535 enum machine_mode arg_mode;
5536 rtx link;
5537 int i, output_flag = 0;
5538 int regno;
5540 /* We neither need nor want argument location descriptors for the
5541 64bit runtime environment or the ELF32 environment. */
5542 if (TARGET_64BIT || TARGET_ELF32)
5543 return;
5545 for (i = 0; i < 4; i++)
5546 arg_regs[i] = 0;
5548 /* Specify explicitly that no argument relocations should take place
5549 if using the portable runtime calling conventions. */
5550 if (TARGET_PORTABLE_RUNTIME)
5552 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5553 asm_out_file);
5554 return;
5557 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5558 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5559 link; link = XEXP (link, 1))
5561 rtx use = XEXP (link, 0);
5563 if (! (GET_CODE (use) == USE
5564 && GET_CODE (XEXP (use, 0)) == REG
5565 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5566 continue;
5568 arg_mode = GET_MODE (XEXP (use, 0));
5569 regno = REGNO (XEXP (use, 0));
5570 if (regno >= 23 && regno <= 26)
5572 arg_regs[26 - regno] = "GR";
5573 if (arg_mode == DImode)
5574 arg_regs[25 - regno] = "GR";
5576 else if (regno >= 32 && regno <= 39)
5578 if (arg_mode == SFmode)
5579 arg_regs[(regno - 32) / 2] = "FR";
5580 else
5582 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5583 arg_regs[(regno - 34) / 2] = "FR";
5584 arg_regs[(regno - 34) / 2 + 1] = "FU";
5585 #else
5586 arg_regs[(regno - 34) / 2] = "FU";
5587 arg_regs[(regno - 34) / 2 + 1] = "FR";
5588 #endif
5592 fputs ("\t.CALL ", asm_out_file);
5593 for (i = 0; i < 4; i++)
5595 if (arg_regs[i])
5597 if (output_flag++)
5598 fputc (',', asm_out_file);
5599 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5602 fputc ('\n', asm_out_file);
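/* Illustrative: for a call to int f (int a, int b) under the 32-bit
   SOM runtime, a arrives in %r26 and b in %r25, so this emits
   "\t.CALL ARGW0=GR,ARGW1=GR".  The index math above maps
   %r26 -> ARGW0 through %r23 -> ARGW3, and %fr4..%fr7 to the
   corresponding FP argument words.  */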
5605 static enum reg_class
5606 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5607 enum machine_mode mode, secondary_reload_info *sri)
5609 int is_symbolic, regno;
5611 /* Handle the easy stuff first. */
5612 if (class == R1_REGS)
5613 return NO_REGS;
5615 if (REG_P (x))
5617 regno = REGNO (x);
5618 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5619 return NO_REGS;
5621 else
5622 regno = -1;
5624 /* If we have something like (mem (mem (...))), we can safely assume the
5625 inner MEM will end up in a general register after reloading, so there's
5626 no need for a secondary reload. */
5627 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5628 return NO_REGS;
5630 /* Trying to load a constant into a FP register during PIC code
5631 generation requires %r1 as a scratch register. */
5632 if (flag_pic
5633 && GET_MODE_CLASS (mode) == MODE_INT
5634 && FP_REG_CLASS_P (class)
5635 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5637 gcc_assert (mode == SImode || mode == DImode);
5638 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5639 : CODE_FOR_reload_indi_r1);
5640 return NO_REGS;
5643 /* Profiling showed the PA port spends about 1.3% of its compilation
5644 time in true_regnum from calls inside pa_secondary_reload_class. */
5645 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5646 regno = true_regnum (x);
5648 /* Handle out of range displacement for integer mode loads/stores of
5649 FP registers. */
5650 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5651 && GET_MODE_CLASS (mode) == MODE_INT
5652 && FP_REG_CLASS_P (class))
5653 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5655 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5656 return NO_REGS;
5659 /* A SAR<->FP register copy requires a secondary register (GPR) as
5660 well as secondary memory. */
5661 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5662 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5663 || (class == SHIFT_REGS
5664 && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5666 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5667 return NO_REGS;
5670 /* Secondary reloads of symbolic operands require %r1 as a scratch
5671 register when we're generating PIC code and the operand isn't
5672 readonly. */
5673 if (GET_CODE (x) == HIGH)
5674 x = XEXP (x, 0);
5676 /* Profiling has shown that GCC spends about 2.6% of its compilation
5677 time in symbolic_operand from calls inside pa_secondary_reload_class.
5678 So, we use an inline copy to avoid useless work. */
5679 switch (GET_CODE (x))
5681 rtx op;
5683 case SYMBOL_REF:
5684 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5685 break;
5686 case LABEL_REF:
5687 is_symbolic = 1;
5688 break;
5689 case CONST:
5690 op = XEXP (x, 0);
5691 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5692 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5693 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5694 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5695 break;
5696 default:
5697 is_symbolic = 0;
5698 break;
5701 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5703 gcc_assert (mode == SImode || mode == DImode);
5704 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5705 : CODE_FOR_reload_indi_r1);
5708 return NO_REGS;
5711 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5712 by invisible reference. As a GCC extension, we also pass anything
5713 with a zero or variable size by reference.
5715 The 64-bit runtime does not describe passing any types by invisible
5716 reference. The internals of GCC can't currently handle passing
5717 empty structures, and zero or variable length arrays when they are
5718 not passed entirely on the stack or by reference. Thus, as a GCC
5719 extension, we pass these types by reference. The HP compiler doesn't
5720 support these types, so hopefully there shouldn't be any compatibility
5721 issues. This may have to be revisited when HP releases a C99 compiler
5722 or updates the ABI. */
5724 static bool
5725 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5726 enum machine_mode mode, tree type,
5727 bool named ATTRIBUTE_UNUSED)
5729 HOST_WIDE_INT size;
5731 if (type)
5732 size = int_size_in_bytes (type);
5733 else
5734 size = GET_MODE_SIZE (mode);
5736 if (TARGET_64BIT)
5737 return size <= 0;
5738 else
5739 return size <= 0 || size > 8;
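/* Illustrative consequences of the rule above: in the 32-bit runtime a
   12-byte struct (size > 8) and a zero-sized struct are both passed by
   invisible reference, while an 8-byte double is passed by value; in
   the 64-bit runtime only zero- and variable-sized objects go by
   reference.  */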
5742 enum direction
5743 function_arg_padding (enum machine_mode mode, tree type)
5745 if (mode == BLKmode
5746 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5748 /* Return none if justification is not required. */
5749 if (type
5750 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5751 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5752 return none;
5754 /* The directions set here are ignored when a BLKmode argument larger
5755 than a word is placed in a register. Different code is used for
5756 the stack and registers. This makes it difficult to have a
5757 consistent data representation for both the stack and registers.
5758 For both runtimes, the justification and padding for arguments on
5759 the stack and in registers should be identical. */
5760 if (TARGET_64BIT)
5761 /* The 64-bit runtime specifies left justification for aggregates. */
5762 return upward;
5763 else
5764 /* The 32-bit runtime architecture specifies right justification.
5765 When the argument is passed on the stack, the argument is padded
5766 with garbage on the left. The HP compiler pads with zeros. */
5767 return downward;
5770 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5771 return downward;
5772 else
5773 return none;
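/* For example, assuming PARM_BOUNDARY is BITS_PER_WORD, a QImode
   argument is padded downward (right justified), an SImode argument
   needs no padding (none), and a BLKmode aggregate whose size is not
   a multiple of PARM_BOUNDARY is justified upward by the 64-bit
   runtime and downward by the 32-bit runtime.  */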
5777 /* Do what is necessary for `va_start'. We look at the current function
5778 to determine if stdargs or varargs is used and fill in an initial
5779 va_list. A pointer to this constructor is returned. */
5781 static rtx
5782 hppa_builtin_saveregs (void)
5784 rtx offset, dest;
5785 tree fntype = TREE_TYPE (current_function_decl);
5786 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5787 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5788 != void_type_node)))
5789 ? UNITS_PER_WORD : 0);
5791 if (argadj)
5792 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5793 else
5794 offset = current_function_arg_offset_rtx;
5796 if (TARGET_64BIT)
5798 int i, off;
5800 /* Adjust for varargs/stdarg differences. */
5801 if (argadj)
5802 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5803 else
5804 offset = current_function_arg_offset_rtx;
5806 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5807 from the incoming arg pointer and growing to larger addresses. */
5808 for (i = 26, off = -64; i >= 19; i--, off += 8)
5809 emit_move_insn (gen_rtx_MEM (word_mode,
5810 plus_constant (arg_pointer_rtx, off)),
5811 gen_rtx_REG (word_mode, i));
5813 /* The incoming args pointer points just beyond the flushback area;
5814 normally this is not a serious concern. However, when we are doing
5815 varargs/stdargs we want to make the arg pointer point to the start
5816 of the incoming argument area. */
5817 emit_move_insn (virtual_incoming_args_rtx,
5818 plus_constant (arg_pointer_rtx, -64));
5820 /* Now return a pointer to the first anonymous argument. */
5821 return copy_to_reg (expand_binop (Pmode, add_optab,
5822 virtual_incoming_args_rtx,
5823 offset, 0, 0, OPTAB_LIB_WIDEN));
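/* The loop above lays out the register save area relative to the
   incoming arg pointer (AP) as:

        AP-64  %r26 (arg0)     AP-32  %r22 (arg4)
        AP-56  %r25 (arg1)     AP-24  %r21 (arg5)
        AP-48  %r24 (arg2)     AP-16  %r20 (arg6)
        AP-40  %r23 (arg3)     AP-8   %r19 (arg7)  */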
5826 /* Store general registers on the stack. */
5827 dest = gen_rtx_MEM (BLKmode,
5828 plus_constant (current_function_internal_arg_pointer,
5829 -16));
5830 set_mem_alias_set (dest, get_varargs_alias_set ());
5831 set_mem_align (dest, BITS_PER_WORD);
5832 move_block_from_reg (23, dest, 4);
5834 /* move_block_from_reg will emit code to store the argument registers
5835 individually as scalar stores.
5837 However, other insns may later load from the same addresses for
5838 a structure load (passing a struct to a varargs routine).
5840 The alias code assumes that such aliasing can never happen, so we
5841 have to keep memory referencing insns from moving up beyond the
5842 last argument register store. So we emit a blockage insn here. */
5843 emit_insn (gen_blockage ());
5845 return copy_to_reg (expand_binop (Pmode, add_optab,
5846 current_function_internal_arg_pointer,
5847 offset, 0, 0, OPTAB_LIB_WIDEN));
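/* In the 32-bit path above, move_block_from_reg stores the four
   argument registers in consecutive words below the internal arg
   pointer: %r23 at AP-16, %r24 at AP-12, %r25 at AP-8 and %r26 at
   AP-4.  The returned pointer is then the internal arg pointer plus
   OFFSET, the address of the first anonymous argument.  */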
5850 void
5851 hppa_va_start (tree valist, rtx nextarg)
5853 nextarg = expand_builtin_saveregs ();
5854 std_expand_builtin_va_start (valist, nextarg);
5857 static tree
5858 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5860 if (TARGET_64BIT)
5862 /* Args grow upward. We can use the generic routines. */
5863 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5865 else /* !TARGET_64BIT */
5867 tree ptr = build_pointer_type (type);
5868 tree valist_type;
5869 tree t, u;
5870 unsigned int size, ofs;
5871 bool indirect;
5873 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5874 if (indirect)
5876 type = ptr;
5877 ptr = build_pointer_type (type);
5879 size = int_size_in_bytes (type);
5880 valist_type = TREE_TYPE (valist);
5882 /* Args grow down. Not handled by generic routines. */
5884 u = fold_convert (valist_type, size_in_bytes (type));
5885 t = build2 (MINUS_EXPR, valist_type, valist, u);
5887 /* Copied from va-pa.h, but we probably don't need to align to
5888 word size, since we generate and preserve that invariant. */
5889 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5890 t = build2 (BIT_AND_EXPR, valist_type, t, u);
5892 t = build2 (MODIFY_EXPR, valist_type, valist, t);
5894 ofs = (8 - size) % 4;
5895 if (ofs != 0)
5897 u = fold_convert (valist_type, size_int (ofs));
5898 t = build2 (PLUS_EXPR, valist_type, t, u);
5901 t = fold_convert (ptr, t);
5902 t = build_va_arg_indirect_ref (t);
5904 if (indirect)
5905 t = build_va_arg_indirect_ref (t);
5907 return t;
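/* In effect, the 32-bit case above computes

     valist = (valist - sizeof (TYPE)) & (size > 4 ? -8 : -4);
     addr = valist + (8 - size) % 4;

   and dereferences ADDR (twice when the argument is passed by
   reference, since only a pointer then lives in the arg list).  */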
5911 /* True if MODE is valid for the target. By "valid", we mean able to
5912 be manipulated in non-trivial ways. In particular, this means all
5913 the arithmetic is supported.
5915 Currently, TImode is not valid as the HP 64-bit runtime documentation
5916 doesn't specify the alignment and calling conventions for this type.
5917 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5918 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
5920 static bool
5921 pa_scalar_mode_supported_p (enum machine_mode mode)
5923 int precision = GET_MODE_PRECISION (mode);
5925 switch (GET_MODE_CLASS (mode))
5927 case MODE_PARTIAL_INT:
5928 case MODE_INT:
5929 if (precision == CHAR_TYPE_SIZE)
5930 return true;
5931 if (precision == SHORT_TYPE_SIZE)
5932 return true;
5933 if (precision == INT_TYPE_SIZE)
5934 return true;
5935 if (precision == LONG_TYPE_SIZE)
5936 return true;
5937 if (precision == LONG_LONG_TYPE_SIZE)
5938 return true;
5939 return false;
5941 case MODE_FLOAT:
5942 if (precision == FLOAT_TYPE_SIZE)
5943 return true;
5944 if (precision == DOUBLE_TYPE_SIZE)
5945 return true;
5946 if (precision == LONG_DOUBLE_TYPE_SIZE)
5947 return true;
5948 return false;
5950 case MODE_DECIMAL_FLOAT:
5951 return false;
5953 default:
5954 gcc_unreachable ();
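/* For example, TImode has a precision of 2 * BITS_PER_WORD on the
   64-bit target; assuming the usual HP-UX type sizes where
   LONG_LONG_TYPE_SIZE is 64, it matches none of the sizes checked
   above and is therefore rejected.  */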
5958 /* This routine handles all the normal conditional branch sequences we
5959 might need to generate. It handles compare immediate vs compare
5960 register, nullification of delay slots, varying length branches,
5961 negated branches, and all combinations of the above. It returns the
5962 output template appropriate for emitting the branch given all the
5963 parameters. */
5965 const char *
5966 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
5968 static char buf[100];
5969 int useskip = 0;
5970 rtx xoperands[5];
5972 /* A conditional branch to the following instruction (e.g. the delay slot)
5973 is asking for a disaster. This can happen when not optimizing and
5974 when jump optimization fails.
5976 While it is usually safe to emit nothing, this can fail if the
5977 preceding instruction is a nullified branch with an empty delay
5978 slot and the same branch target as this branch. We could check
5979 for this but jump optimization should eliminate nop jumps. It
5980 is always safe to emit a nop. */
5981 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5982 return "nop";
5984 /* The doubleword form of the cmpib instruction doesn't have the LEU
5985 and GTU conditions while the cmpb instruction does. Since we accept
5986 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5987 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5988 operands[2] = gen_rtx_REG (DImode, 0);
5989 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
5990 operands[1] = gen_rtx_REG (DImode, 0);
5992 /* If this is a long branch with its delay slot unfilled, set `nullify'
5993 as it can nullify the delay slot and save a nop. */
5994 if (length == 8 && dbr_sequence_length () == 0)
5995 nullify = 1;
5997 /* If this is a short forward conditional branch which did not get
5998 its delay slot filled, the delay slot can still be nullified. */
5999 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6000 nullify = forward_branch_p (insn);
6002 /* A forward branch over a single nullified insn can be done with a
6003 comclr instruction. This avoids a single cycle penalty due to
6004 a mis-predicted branch if we fall through (branch not taken). */
6005 if (length == 4
6006 && next_real_insn (insn) != 0
6007 && get_attr_length (next_real_insn (insn)) == 4
6008 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6009 && nullify)
6010 useskip = 1;
6012 switch (length)
6014 /* All short conditional branches except backwards with an unfilled
6015 delay slot. */
6016 case 4:
6017 if (useskip)
6018 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6019 else
6020 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6021 if (GET_MODE (operands[1]) == DImode)
6022 strcat (buf, "*");
6023 if (negated)
6024 strcat (buf, "%B3");
6025 else
6026 strcat (buf, "%S3");
6027 if (useskip)
6028 strcat (buf, " %2,%r1,%%r0");
6029 else if (nullify)
6030 strcat (buf, ",n %2,%r1,%0");
6031 else
6032 strcat (buf, " %2,%r1,%0");
6033 break;
6035 /* All long conditionals. Note a short backward branch with an
6036 unfilled delay slot is treated just like a long backward branch
6037 with an unfilled delay slot. */
6038 case 8:
6039 /* Handle weird backwards branch with a filled delay slot
6040 which is nullified. */
6041 if (dbr_sequence_length () != 0
6042 && ! forward_branch_p (insn)
6043 && nullify)
6045 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6046 if (GET_MODE (operands[1]) == DImode)
6047 strcat (buf, "*");
6048 if (negated)
6049 strcat (buf, "%S3");
6050 else
6051 strcat (buf, "%B3");
6052 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6054 /* Handle short backwards branch with an unfilled delay slot.
6055 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6056 taken and untaken branches. */
6057 else if (dbr_sequence_length () == 0
6058 && ! forward_branch_p (insn)
6059 && INSN_ADDRESSES_SET_P ()
6060 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6061 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6063 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6064 if (GET_MODE (operands[1]) == DImode)
6065 strcat (buf, "*");
6066 if (negated)
6067 strcat (buf, "%B3 %2,%r1,%0%#");
6068 else
6069 strcat (buf, "%S3 %2,%r1,%0%#");
6071 else
6073 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6074 if (GET_MODE (operands[1]) == DImode)
6075 strcat (buf, "*");
6076 if (negated)
6077 strcat (buf, "%S3");
6078 else
6079 strcat (buf, "%B3");
6080 if (nullify)
6081 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6082 else
6083 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6085 break;
6087 case 20:
6088 case 28:
6089 xoperands[0] = operands[0];
6090 xoperands[1] = operands[1];
6091 xoperands[2] = operands[2];
6092 xoperands[3] = operands[3];
6094 /* The reversed conditional branch must branch over one additional
6095 instruction if the delay slot is filled. If the delay slot
6096 is empty, the instruction after the reversed conditional branch
6097 must be nullified. */
6098 nullify = dbr_sequence_length () == 0;
6099 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
6101 /* Create a reversed conditional branch which branches around
6102 the following insns. */
6103 if (GET_MODE (operands[1]) != DImode)
6105 if (nullify)
6107 if (negated)
6108 strcpy (buf,
6109 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6110 else
6111 strcpy (buf,
6112 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6114 else
6116 if (negated)
6117 strcpy (buf,
6118 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6119 else
6120 strcpy (buf,
6121 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6124 else
6126 if (nullify)
6128 if (negated)
6129 strcpy (buf,
6130 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6131 else
6132 strcpy (buf,
6133 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6135 else
6137 if (negated)
6138 strcpy (buf,
6139 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6140 else
6141 strcpy (buf,
6142 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6146 output_asm_insn (buf, xoperands);
6147 return output_lbranch (operands[0], insn);
6149 default:
6150 gcc_unreachable ();
6152 return buf;
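/* As a purely illustrative example (the operands shown are
   hypothetical), a short (length 4) word-mode forward branch with a
   filled delay slot and NEGATED clear yields the template
   "{com%I2b,|cmp%I2b,}%S3 %2,%r1,%0", which the final pass might
   render as "cmpb,= %r0,%r25,L$0012" on a PA 2.0 target.  */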
6155 /* This routine handles long unconditional branches that exceed the
6156 maximum range of a simple branch instruction. */
6158 const char *
6159 output_lbranch (rtx dest, rtx insn)
6161 rtx xoperands[2];
6163 xoperands[0] = dest;
6165 /* First, free up the delay slot. */
6166 if (dbr_sequence_length () != 0)
6168 /* We can't handle a jump in the delay slot. */
6169 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6171 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6172 optimize, 0, NULL);
6174 /* Now delete the delay insn. */
6175 PUT_CODE (NEXT_INSN (insn), NOTE);
6176 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6177 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6180 /* Output an insn to save %r1. The runtime documentation doesn't
6181 specify whether the "Clean Up" slot in the caller's frame can
6182 be clobbered by the callee. It isn't copied by HP's builtin
6183 alloca, so this suggests that it can be clobbered if necessary.
6184 The "Static Link" location is copied by HP builtin alloca, so
6185 we avoid using it. Using the cleanup slot might be a problem
6186 if we have to interoperate with languages that pass cleanup
6187 information. However, it should be possible to handle these
6188 situations with GCC's asm feature.
6190 The "Current RP" slot is reserved for the called procedure, so
6191 we try to use it when we don't have a frame of our own. It's
6192 rather unlikely that we won't have a frame when we need to emit
6193 a very long branch.
6195 Really the way to go long term is a register scavenger; go to
6196 the target of the jump and find a register which we can use
6197 as a scratch to hold the value in %r1. Then, we wouldn't have
6198 to free up the delay slot or clobber a slot that may be needed
6199 for other purposes. */
6200 if (TARGET_64BIT)
6202 if (actual_fsize == 0 && !regs_ever_live[2])
6203 /* Use the return pointer slot in the frame marker. */
6204 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6205 else
6206 /* Use the slot at -40 in the frame marker since HP builtin
6207 alloca doesn't copy it. */
6208 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6210 else
6212 if (actual_fsize == 0 && !regs_ever_live[2])
6213 /* Use the return pointer slot in the frame marker. */
6214 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6215 else
6216 /* Use the "Clean Up" slot in the frame marker. In GCC,
6217 the only other use of this location is for copying a
6218 floating point double argument from a floating-point
6219 register to two general registers. The copy is done
6220 as an "atomic" operation when outputting a call, so it
6221 won't interfere with our using the location here. */
6222 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6225 if (TARGET_PORTABLE_RUNTIME)
6227 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6228 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6229 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6231 else if (flag_pic)
6233 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6234 if (TARGET_SOM || !TARGET_GAS)
6236 xoperands[1] = gen_label_rtx ();
6237 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6238 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6239 CODE_LABEL_NUMBER (xoperands[1]));
6240 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6242 else
6244 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6245 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6247 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6249 else
6250 /* Now output a very long branch to the original target. */
6251 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6253 /* Now restore the value of %r1 in the delay slot. */
6254 if (TARGET_64BIT)
6256 if (actual_fsize == 0 && !regs_ever_live[2])
6257 return "ldd -16(%%r30),%%r1";
6258 else
6259 return "ldd -40(%%r30),%%r1";
6261 else
6263 if (actual_fsize == 0 && !regs_ever_live[2])
6264 return "ldw -20(%%r30),%%r1";
6265 else
6266 return "ldw -12(%%r30),%%r1";
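/* For instance, in the absolute (non-PIC, non-portable-runtime)
   32-bit case with a frame, the code above emits:

        stw %r1,-12(%r30)        ; save %r1 in the "Clean Up" slot
        ldil L'target,%r1
        be R'target(%sr4,%r1)
        ldw -12(%r30),%r1        ; restored in the delay slot

   where "target" stands for the destination label.  */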
6270 /* This routine handles all the branch-on-bit conditional branch sequences we
6271 might need to generate. It handles nullification of delay slots,
6272 varying length branches, negated branches and all combinations of the
6273 above. It returns the appropriate output template to emit the branch. */
6275 const char *
6276 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6277 int negated, rtx insn, int which)
6279 static char buf[100];
6280 int useskip = 0;
6282 /* A conditional branch to the following instruction (e.g. the delay slot) is
6283 asking for a disaster. I do not think this can happen as this pattern
6284 is only used when optimizing; jump optimization should eliminate the
6285 jump. But be prepared just in case. */
6287 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6288 return "nop";
6290 /* If this is a long branch with its delay slot unfilled, set `nullify'
6291 as it can nullify the delay slot and save a nop. */
6292 if (length == 8 && dbr_sequence_length () == 0)
6293 nullify = 1;
6295 /* If this is a short forward conditional branch which did not get
6296 its delay slot filled, the delay slot can still be nullified. */
6297 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6298 nullify = forward_branch_p (insn);
6300 /* A forward branch over a single nullified insn can be done with an
6301 extrs instruction. This avoids a single cycle penalty due to
6302 a mis-predicted branch if we fall through (branch not taken). */
6304 if (length == 4
6305 && next_real_insn (insn) != 0
6306 && get_attr_length (next_real_insn (insn)) == 4
6307 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6308 && nullify)
6309 useskip = 1;
6311 switch (length)
6314 /* All short conditional branches except backwards with an unfilled
6315 delay slot. */
6316 case 4:
6317 if (useskip)
6318 strcpy (buf, "{extrs,|extrw,s,}");
6319 else
6320 strcpy (buf, "bb,");
6321 if (useskip && GET_MODE (operands[0]) == DImode)
6322 strcpy (buf, "extrd,s,*");
6323 else if (GET_MODE (operands[0]) == DImode)
6324 strcpy (buf, "bb,*");
6325 if ((which == 0 && negated)
6326 || (which == 1 && ! negated))
6327 strcat (buf, ">=");
6328 else
6329 strcat (buf, "<");
6330 if (useskip)
6331 strcat (buf, " %0,%1,1,%%r0");
6332 else if (nullify && negated)
6333 strcat (buf, ",n %0,%1,%3");
6334 else if (nullify && ! negated)
6335 strcat (buf, ",n %0,%1,%2");
6336 else if (! nullify && negated)
6337 strcat (buf, "%0,%1,%3");
6338 else if (! nullify && ! negated)
6339 strcat (buf, " %0,%1,%2");
6340 break;
6342 /* All long conditionals. Note a short backward branch with an
6343 unfilled delay slot is treated just like a long backward branch
6344 with an unfilled delay slot. */
6345 case 8:
6346 /* Handle weird backwards branch with a filled delay slot
6347 which is nullified. */
6348 if (dbr_sequence_length () != 0
6349 && ! forward_branch_p (insn)
6350 && nullify)
6352 strcpy (buf, "bb,");
6353 if (GET_MODE (operands[0]) == DImode)
6354 strcat (buf, "*");
6355 if ((which == 0 && negated)
6356 || (which == 1 && ! negated))
6357 strcat (buf, "<");
6358 else
6359 strcat (buf, ">=");
6360 if (negated)
6361 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6362 else
6363 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6365 /* Handle short backwards branch with an unfilled delay slot.
6366 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6367 taken and untaken branches. */
6368 else if (dbr_sequence_length () == 0
6369 && ! forward_branch_p (insn)
6370 && INSN_ADDRESSES_SET_P ()
6371 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6372 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6374 strcpy (buf, "bb,");
6375 if (GET_MODE (operands[0]) == DImode)
6376 strcat (buf, "*");
6377 if ((which == 0 && negated)
6378 || (which == 1 && ! negated))
6379 strcat (buf, ">=");
6380 else
6381 strcat (buf, "<");
6382 if (negated)
6383 strcat (buf, " %0,%1,%3%#");
6384 else
6385 strcat (buf, " %0,%1,%2%#");
6387 else
6389 strcpy (buf, "{extrs,|extrw,s,}");
6390 if (GET_MODE (operands[0]) == DImode)
6391 strcpy (buf, "extrd,s,*");
6392 if ((which == 0 && negated)
6393 || (which == 1 && ! negated))
6394 strcat (buf, "<");
6395 else
6396 strcat (buf, ">=");
6397 if (nullify && negated)
6398 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6399 else if (nullify && ! negated)
6400 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6401 else if (negated)
6402 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6403 else
6404 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6406 break;
6408 default:
6409 gcc_unreachable ();
6411 return buf;
6414 /* This routine handles all the branch-on-variable-bit conditional branch
6415 sequences we might need to generate. It handles nullification of delay
6416 slots, varying length branches, negated branches and all combinations
6417 of the above. It returns the appropriate output template to emit the
6418 branch. */
6420 const char *
6421 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6422 int negated, rtx insn, int which)
6424 static char buf[100];
6425 int useskip = 0;
6427 /* A conditional branch to the following instruction (e.g. the delay slot) is
6428 asking for a disaster. I do not think this can happen as this pattern
6429 is only used when optimizing; jump optimization should eliminate the
6430 jump. But be prepared just in case. */
6432 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6433 return "nop";
6435 /* If this is a long branch with its delay slot unfilled, set `nullify'
6436 as it can nullify the delay slot and save a nop. */
6437 if (length == 8 && dbr_sequence_length () == 0)
6438 nullify = 1;
6440 /* If this is a short forward conditional branch which did not get
6441 its delay slot filled, the delay slot can still be nullified. */
6442 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6443 nullify = forward_branch_p (insn);
6445 /* A forward branch over a single nullified insn can be done with an
6446 extrs instruction. This avoids a single cycle penalty due to
6447 a mis-predicted branch if we fall through (branch not taken). */
6449 if (length == 4
6450 && next_real_insn (insn) != 0
6451 && get_attr_length (next_real_insn (insn)) == 4
6452 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6453 && nullify)
6454 useskip = 1;
6456 switch (length)
6459 /* All short conditional branches except backwards with an unfilled
6460 delay slot. */
6461 case 4:
6462 if (useskip)
6463 strcpy (buf, "{vextrs,|extrw,s,}");
6464 else
6465 strcpy (buf, "{bvb,|bb,}");
6466 if (useskip && GET_MODE (operands[0]) == DImode)
6467 strcpy (buf, "extrd,s,*");
6468 else if (GET_MODE (operands[0]) == DImode)
6469 strcpy (buf, "bb,*");
6470 if ((which == 0 && negated)
6471 || (which == 1 && ! negated))
6472 strcat (buf, ">=");
6473 else
6474 strcat (buf, "<");
6475 if (useskip)
6476 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6477 else if (nullify && negated)
6478 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6479 else if (nullify && ! negated)
6480 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6481 else if (! nullify && negated)
6482 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6483 else if (! nullify && ! negated)
6484 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6485 break;
6487 /* All long conditionals. Note a short backward branch with an
6488 unfilled delay slot is treated just like a long backward branch
6489 with an unfilled delay slot. */
6490 case 8:
6491 /* Handle weird backwards branch with a filled delay slot
6492 which is nullified. */
6493 if (dbr_sequence_length () != 0
6494 && ! forward_branch_p (insn)
6495 && nullify)
6497 strcpy (buf, "{bvb,|bb,}");
6498 if (GET_MODE (operands[0]) == DImode)
6499 strcat (buf, "*");
6500 if ((which == 0 && negated)
6501 || (which == 1 && ! negated))
6502 strcat (buf, "<");
6503 else
6504 strcat (buf, ">=");
6505 if (negated)
6506 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6507 else
6508 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6510 /* Handle short backwards branch with an unfilled delay slot.
6511 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6512 taken and untaken branches. */
6513 else if (dbr_sequence_length () == 0
6514 && ! forward_branch_p (insn)
6515 && INSN_ADDRESSES_SET_P ()
6516 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6517 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6519 strcpy (buf, "{bvb,|bb,}");
6520 if (GET_MODE (operands[0]) == DImode)
6521 strcat (buf, "*");
6522 if ((which == 0 && negated)
6523 || (which == 1 && ! negated))
6524 strcat (buf, ">=");
6525 else
6526 strcat (buf, "<");
6527 if (negated)
6528 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6529 else
6530 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6532 else
6534 strcpy (buf, "{vextrs,|extrw,s,}");
6535 if (GET_MODE (operands[0]) == DImode)
6536 strcpy (buf, "extrd,s,*");
6537 if ((which == 0 && negated)
6538 || (which == 1 && ! negated))
6539 strcat (buf, "<");
6540 else
6541 strcat (buf, ">=");
6542 if (nullify && negated)
6543 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6544 else if (nullify && ! negated)
6545 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6546 else if (negated)
6547 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6548 else
6549 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6551 break;
6553 default:
6554 gcc_unreachable ();
6556 return buf;
6559 /* Return the output template for emitting a dbra type insn.
6561 Note it may perform some output operations on its own before
6562 returning the final output string. */
6563 const char *
6564 output_dbra (rtx *operands, rtx insn, int which_alternative)
6567 /* A conditional branch to the following instruction (e.g. the delay slot) is
6568 asking for a disaster. Be prepared! */
6570 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6572 if (which_alternative == 0)
6573 return "ldo %1(%0),%0";
6574 else if (which_alternative == 1)
6576 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6577 output_asm_insn ("ldw -16(%%r30),%4", operands);
6578 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6579 return "{fldws|fldw} -16(%%r30),%0";
6581 else
6583 output_asm_insn ("ldw %0,%4", operands);
6584 return "ldo %1(%4),%4\n\tstw %4,%0";
6588 if (which_alternative == 0)
6590 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6591 int length = get_attr_length (insn);
6593 /* If this is a long branch with its delay slot unfilled, set `nullify'
6594 as it can nullify the delay slot and save a nop. */
6595 if (length == 8 && dbr_sequence_length () == 0)
6596 nullify = 1;
6598 /* If this is a short forward conditional branch which did not get
6599 its delay slot filled, the delay slot can still be nullified. */
6600 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6601 nullify = forward_branch_p (insn);
6603 switch (length)
6605 case 4:
6606 if (nullify)
6607 return "addib,%C2,n %1,%0,%3";
6608 else
6609 return "addib,%C2 %1,%0,%3";
6611 case 8:
6612 /* Handle weird backwards branch with a filled delay slot
6613 which is nullified. */
6614 if (dbr_sequence_length () != 0
6615 && ! forward_branch_p (insn)
6616 && nullify)
6617 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6618 /* Handle short backwards branch with an unfilled delay slot.
6619 Using an addb;nop rather than addi;bl saves 1 cycle for both
6620 taken and untaken branches. */
6621 else if (dbr_sequence_length () == 0
6622 && ! forward_branch_p (insn)
6623 && INSN_ADDRESSES_SET_P ()
6624 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6625 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6626 return "addib,%C2 %1,%0,%3%#";
6628 /* Handle normal cases. */
6629 if (nullify)
6630 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6631 else
6632 return "addi,%N2 %1,%0,%0\n\tb %3";
6634 default:
6635 gcc_unreachable ();
6639 /* Deal with gross reload from FP register case. */
6640 else if (which_alternative == 1)
6642 /* Move loop counter from FP register to MEM then into a GR,
6643 increment the GR, store the GR into MEM, and finally reload
6644 the FP register from MEM from within the branch's delay slot. */
6645 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6646 operands);
6647 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6648 if (get_attr_length (insn) == 24)
6649 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6650 else
6651 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6653 /* Deal with gross reload from memory case. */
6654 else
6656 /* Reload loop counter from memory, the store back to memory
6657 happens in the branch's delay slot. */
6658 output_asm_insn ("ldw %0,%4", operands);
6659 if (get_attr_length (insn) == 12)
6660 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6661 else
6662 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6666 /* Return the output template for emitting a movb type insn.
6668 Note it may perform some output operations on its own before
6669 returning the final output string. */
6670 const char *
6671 output_movb (rtx *operands, rtx insn, int which_alternative,
6672 int reverse_comparison)
6675 /* A conditional branch to the following instruction (e.g. the delay slot) is
6676 asking for a disaster. Be prepared! */
6678 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6680 if (which_alternative == 0)
6681 return "copy %1,%0";
6682 else if (which_alternative == 1)
6684 output_asm_insn ("stw %1,-16(%%r30)", operands);
6685 return "{fldws|fldw} -16(%%r30),%0";
6687 else if (which_alternative == 2)
6688 return "stw %1,%0";
6689 else
6690 return "mtsar %r1";
6693 /* Support the second variant. */
6694 if (reverse_comparison)
6695 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6697 if (which_alternative == 0)
6699 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6700 int length = get_attr_length (insn);
6702 /* If this is a long branch with its delay slot unfilled, set `nullify'
6703 as it can nullify the delay slot and save a nop. */
6704 if (length == 8 && dbr_sequence_length () == 0)
6705 nullify = 1;
6707 /* If this is a short forward conditional branch which did not get
6708 its delay slot filled, the delay slot can still be nullified. */
6709 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6710 nullify = forward_branch_p (insn);
6712 switch (length)
6714 case 4:
6715 if (nullify)
6716 return "movb,%C2,n %1,%0,%3";
6717 else
6718 return "movb,%C2 %1,%0,%3";
6720 case 8:
6721 /* Handle weird backwards branch with a filled delay slot
6722 which is nullified. */
6723 if (dbr_sequence_length () != 0
6724 && ! forward_branch_p (insn)
6725 && nullify)
6726 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6728 /* Handle short backwards branch with an unfilled delay slot.
6729 Using a movb;nop rather than or;bl saves 1 cycle for both
6730 taken and untaken branches. */
6731 else if (dbr_sequence_length () == 0
6732 && ! forward_branch_p (insn)
6733 && INSN_ADDRESSES_SET_P ()
6734 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6735 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6736 return "movb,%C2 %1,%0,%3%#";
6737 /* Handle normal cases. */
6738 if (nullify)
6739 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6740 else
6741 return "or,%N2 %1,%%r0,%0\n\tb %3";
6743 default:
6744 gcc_unreachable ();
6747 /* Deal with gross reload from FP register case. */
6748 else if (which_alternative == 1)
6750 /* Move loop counter from FP register to MEM then into a GR,
6751 increment the GR, store the GR into MEM, and finally reload
6752 the FP register from MEM from within the branch's delay slot. */
6753 output_asm_insn ("stw %1,-16(%%r30)", operands);
6754 if (get_attr_length (insn) == 12)
6755 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6756 else
6757 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6759 /* Deal with gross reload from memory case. */
6760 else if (which_alternative == 2)
6762 /* Reload loop counter from memory, the store back to memory
6763 happens in the branch's delay slot. */
6764 if (get_attr_length (insn) == 8)
6765 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6766 else
6767 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6769 /* Handle SAR as a destination. */
6770 else
6772 if (get_attr_length (insn) == 8)
6773 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6774 else
6775 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6779 /* Copy any FP arguments in INSN into integer registers. */
6780 static void
6781 copy_fp_args (rtx insn)
6783 rtx link;
6784 rtx xoperands[2];
6786 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6788 int arg_mode, regno;
6789 rtx use = XEXP (link, 0);
6791 if (! (GET_CODE (use) == USE
6792 && GET_CODE (XEXP (use, 0)) == REG
6793 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6794 continue;
6796 arg_mode = GET_MODE (XEXP (use, 0));
6797 regno = REGNO (XEXP (use, 0));
6799 /* Is it a floating point register? */
6800 if (regno >= 32 && regno <= 39)
6802 /* Copy the FP register into an integer register via memory. */
6803 if (arg_mode == SFmode)
6805 xoperands[0] = XEXP (use, 0);
6806 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6807 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6808 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6810 else
6812 xoperands[0] = XEXP (use, 0);
6813 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6814 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6815 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6816 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6822 /* Compute length of the FP argument copy sequence for INSN. */
6823 static int
6824 length_fp_args (rtx insn)
6826 int length = 0;
6827 rtx link;
6829 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6831 int arg_mode, regno;
6832 rtx use = XEXP (link, 0);
6834 if (! (GET_CODE (use) == USE
6835 && GET_CODE (XEXP (use, 0)) == REG
6836 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6837 continue;
6839 arg_mode = GET_MODE (XEXP (use, 0));
6840 regno = REGNO (XEXP (use, 0));
6842 /* Is it a floating point register? */
6843 if (regno >= 32 && regno <= 39)
6845 if (arg_mode == SFmode)
6846 length += 8;
6847 else
6848 length += 12;
6852 return length;
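/* Thus a call passing one SFmode and one DFmode value in FP registers
   needs a copy sequence of 8 + 12 = 20 bytes: fstw plus ldw for the
   single, fstd plus two ldw's for the double (see copy_fp_args).  */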
6855 /* Return the attribute length for the millicode call instruction INSN.
6856 The length must match the code generated by output_millicode_call.
6857 We include the delay slot in the returned length as it is better to
6858 overestimate the length than to underestimate it. */
6860 int
6861 attr_length_millicode_call (rtx insn)
6863 unsigned long distance = -1;
6864 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
6866 if (INSN_ADDRESSES_SET_P ())
6868 distance = (total + insn_current_reference_address (insn));
6869 if (distance < total)
6870 distance = -1;
6873 if (TARGET_64BIT)
6875 if (!TARGET_LONG_CALLS && distance < 7600000)
6876 return 8;
6878 return 20;
6880 else if (TARGET_PORTABLE_RUNTIME)
6881 return 24;
6882 else
6884 if (!TARGET_LONG_CALLS && distance < 240000)
6885 return 8;
6887 if (TARGET_LONG_ABS_CALL && !flag_pic)
6888 return 12;
6890 return 24;
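/* Summarizing the lengths returned above:

        64-bit, within reach                  8 bytes
        64-bit, long                         20 bytes
        portable runtime                     24 bytes
        32-bit, within reach                  8 bytes
        32-bit, long absolute, non-PIC       12 bytes
        32-bit, other long forms             24 bytes  */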
6894 /* INSN is a function call. It may have an unconditional jump
6895 in its delay slot.
6897 CALL_DEST is the routine we are calling. */
6899 const char *
6900 output_millicode_call (rtx insn, rtx call_dest)
6902 int attr_length = get_attr_length (insn);
6903 int seq_length = dbr_sequence_length ();
6904 int distance;
6905 rtx seq_insn;
6906 rtx xoperands[3];
6908 xoperands[0] = call_dest;
6909 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
6911 /* Handle the common case where we are sure that the branch will
6912 reach the beginning of the $CODE$ subspace. The within-reach
6913 form of the $$sh_func_adrs call has a length of 28. Because
6914 it has an attribute type of multi, it never has a nonzero
6915 sequence length. The length of $$sh_func_adrs is the same as
6916 that of certain out-of-reach PIC calls to other routines. */
6917 if (!TARGET_LONG_CALLS
6918 && ((seq_length == 0
6919 && (attr_length == 12
6920 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
6921 || (seq_length != 0 && attr_length == 8)))
6923 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
6925 else
6927 if (TARGET_64BIT)
6929 /* It might seem that one insn could be saved by accessing
6930 the millicode function using the linkage table. However,
6931 this doesn't work in shared libraries and other dynamically
6932 loaded objects. Using a pc-relative sequence also avoids
6933 problems related to the implicit use of the gp register. */
6934 output_asm_insn ("b,l .+8,%%r1", xoperands);
6936 if (TARGET_GAS)
6938 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
6939 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6941 else
6943 xoperands[1] = gen_label_rtx ();
6944 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6945 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6946 CODE_LABEL_NUMBER (xoperands[1]));
6947 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6950 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
6952 else if (TARGET_PORTABLE_RUNTIME)
6954 /* Pure portable runtime doesn't allow be/ble; we also don't
6955 have PIC support in the assembler/linker, so this sequence
6956 is needed. */
6958 /* Get the address of our target into %r1. */
6959 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6960 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6962 /* Get our return address into %r31. */
6963 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
6964 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
6966 /* Jump to our target address in %r1. */
6967 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6969 else if (!flag_pic)
6971 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6972 if (TARGET_PA_20)
6973 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
6974 else
6975 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
6977 else
6979 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6980 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
6982 if (TARGET_SOM || !TARGET_GAS)
6984 /* The HP assembler can generate relocations for the
6985 difference of two symbols. GAS can do this for a
6986 millicode symbol but not an arbitrary external
6987 symbol when generating SOM output. */
6988 xoperands[1] = gen_label_rtx ();
6989 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6990 CODE_LABEL_NUMBER (xoperands[1]));
6991 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6992 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6994 else
6996 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
6997 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
6998 xoperands);
7001 /* Jump to our target address in %r1. */
7002 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7006 if (seq_length == 0)
7007 output_asm_insn ("nop", xoperands);
7009 /* We are done if there isn't a jump in the delay slot. */
7010 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7011 return "";
7013 /* This call has an unconditional jump in its delay slot. */
7014 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7016 /* See if the return address can be adjusted. Use the containing
7017 sequence insn's address. */
7018 if (INSN_ADDRESSES_SET_P ())
7020 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7021 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7022 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7024 if (VAL_14_BITS_P (distance))
7026 xoperands[1] = gen_label_rtx ();
7027 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7028 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7029 CODE_LABEL_NUMBER (xoperands[1]));
7031 else
7032 /* ??? This branch may not reach its target. */
7033 output_asm_insn ("nop\n\tb,n %0", xoperands);
7035 else
7036 /* ??? This branch may not reach its target. */
7037 output_asm_insn ("nop\n\tb,n %0", xoperands);
7039 /* Delete the jump. */
7040 PUT_CODE (NEXT_INSN (insn), NOTE);
7041 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7042 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7044 return "";
7047 /* Return the attribute length of the call instruction INSN. The SIBCALL
7048 flag indicates whether INSN is a regular call or a sibling call. The
7049 length returned must be longer than the code actually generated by
7050 output_call. Since branch shortening is done before delay branch
7051 sequencing, there is no way to determine whether or not the delay
7052 slot will be filled during branch shortening. Even when the delay
7053 slot is filled, we may have to add a nop if the delay slot contains
7054 a branch that can't reach its target. Thus, we always have to include
7055 the delay slot in the length estimate. This used to be done in
7056 pa_adjust_insn_length but we do it here now as some sequences always
7057 fill the delay slot and we can save four bytes in the estimate for
7058 these sequences. */
7060 int
7061 attr_length_call (rtx insn, int sibcall)
7063 int local_call;
7064 rtx call_dest;
7065 tree call_decl;
7066 int length = 0;
7067 rtx pat = PATTERN (insn);
7068 unsigned long distance = -1;
7070 if (INSN_ADDRESSES_SET_P ())
7072 unsigned long total;
7074 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7075 distance = (total + insn_current_reference_address (insn));
7076 if (distance < total)
7077 distance = -1;
7080 /* Determine if this is a local call. */
7081 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7082 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7083 else
7084 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7086 call_decl = SYMBOL_REF_DECL (call_dest);
7087 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7089 /* pc-relative branch. */
7090 if (!TARGET_LONG_CALLS
7091 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7092 || distance < 240000))
7093 length += 8;
7095 /* 64-bit plabel sequence. */
7096 else if (TARGET_64BIT && !local_call)
7097 length += sibcall ? 28 : 24;
7099 /* non-pic long absolute branch sequence. */
7100 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7101 length += 12;
7103 /* long pc-relative branch sequence. */
7104 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7105 || (TARGET_64BIT && !TARGET_GAS)
7106 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7108 length += 20;
7110 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7111 length += 8;
7114 /* 32-bit plabel sequence. */
7115 else
7117 length += 32;
7119 if (TARGET_SOM)
7120 length += length_fp_args (insn);
7122 if (flag_pic)
7123 length += 4;
7125 if (!TARGET_PA_20)
7127 if (!sibcall)
7128 length += 8;
7130 if (!TARGET_NO_SPACE_REGS)
7131 length += 8;
7135 return length;
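/* As a worked example, the worst case above is the 32-bit plabel
   sequence for a normal (non-sibling) PIC call on a SOM PA 1.x
   target using space registers: 32 bytes for the base sequence,
   plus length_fp_args (insn), plus 4 for PIC, plus 8 for the
   return-address adjustment, plus 8 for the space register code.  */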
7138 /* INSN is a function call. It may have an unconditional jump
7139 in its delay slot.
7141 CALL_DEST is the routine we are calling. */
7143 const char *
7144 output_call (rtx insn, rtx call_dest, int sibcall)
7146 int delay_insn_deleted = 0;
7147 int delay_slot_filled = 0;
7148 int seq_length = dbr_sequence_length ();
7149 tree call_decl = SYMBOL_REF_DECL (call_dest);
7150 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7151 rtx xoperands[2];
7153 xoperands[0] = call_dest;
7155 /* Handle the common case where we're sure that the branch will reach
7156 the beginning of the "$CODE$" subspace. This is the beginning of
7157 the current function if we are in a named section. */
7158 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7160 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7161 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7163 else
7165 if (TARGET_64BIT && !local_call)
7167 /* ??? As far as I can tell, the HP linker doesn't support the
7168 long pc-relative sequence described in the 64-bit runtime
7169 architecture. So, we use a slightly longer indirect call. */
7170 xoperands[0] = get_deferred_plabel (call_dest);
7171 xoperands[1] = gen_label_rtx ();
7173 /* If this isn't a sibcall, we put the load of %r27 into the
7174 delay slot. We can't do this in a sibcall as we don't
7175 have a second call-clobbered scratch register available. */
7176 if (seq_length != 0
7177 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7178 && !sibcall)
7180 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7181 optimize, 0, NULL);
7183 /* Now delete the delay insn. */
7184 PUT_CODE (NEXT_INSN (insn), NOTE);
7185 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7186 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7187 delay_insn_deleted = 1;
7190 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7191 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7192 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7194 if (sibcall)
7196 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7197 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7198 output_asm_insn ("bve (%%r1)", xoperands);
7200 else
7202 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7203 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7204 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7205 delay_slot_filled = 1;
7208 else
7210 int indirect_call = 0;
7212 /* Emit a long call. There are several different sequences
7213 of increasing length and complexity. In most cases,
7214 they don't allow an instruction in the delay slot. */
7215 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7216 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7217 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7218 && !TARGET_64BIT)
7219 indirect_call = 1;
7221 if (seq_length != 0
7222 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7223 && !sibcall
7224 && (!TARGET_PA_20 || indirect_call))
7226 /* A non-jump insn in the delay slot. By definition we can
7227 emit this insn before the call (and in fact before argument
7228 relocation). */
7229 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7230 NULL);
7232 /* Now delete the delay insn. */
7233 PUT_CODE (NEXT_INSN (insn), NOTE);
7234 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7235 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7236 delay_insn_deleted = 1;
7239 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7241 /* This is the best sequence for making long calls in
7242 non-pic code. Unfortunately, GNU ld doesn't provide
7243 the stub needed for external calls, and GAS's support
7244 for this with the SOM linker is buggy. It is safe
7245 to use this for local calls. */
7246 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7247 if (sibcall)
7248 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7249 else
7251 if (TARGET_PA_20)
7252 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7253 xoperands);
7254 else
7255 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7257 output_asm_insn ("copy %%r31,%%r2", xoperands);
7258 delay_slot_filled = 1;
7261 else
7263 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7264 || (TARGET_64BIT && !TARGET_GAS))
7266 /* The HP assembler and linker can handle relocations
7267 for the difference of two symbols. GAS and the HP
7268 linker can't do this when one of the symbols is
7269 external. */
7270 xoperands[1] = gen_label_rtx ();
7271 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7272 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7273 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7274 CODE_LABEL_NUMBER (xoperands[1]));
7275 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7277 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7279 /* GAS currently can't generate the relocations that
7280 are needed for the SOM linker under HP-UX using this
7281 sequence. The GNU linker doesn't generate the stubs
7282 that are needed for external calls on TARGET_ELF32
7283 with this sequence. For now, we have to use a
7284 longer plabel sequence when using GAS. */
7285 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7286 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7287 xoperands);
7288 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7289 xoperands);
7291 else
7293 /* Emit a long plabel-based call sequence. This is
7294 essentially an inline implementation of $$dyncall.
7295 We don't actually try to call $$dyncall as this is
7296 as difficult as calling the function itself. */
7297 xoperands[0] = get_deferred_plabel (call_dest);
7298 xoperands[1] = gen_label_rtx ();
7300 /* Since the call is indirect, FP arguments in registers
7301 need to be copied to the general registers. Then, the
7302 argument relocation stub will copy them back. */
7303 if (TARGET_SOM)
7304 copy_fp_args (insn);
7306 if (flag_pic)
7308 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7309 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7310 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7312 else
7314 output_asm_insn ("addil LR'%0-$global$,%%r27",
7315 xoperands);
7316 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7317 xoperands);
7320 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7321 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7322 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7323 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7325 if (!sibcall && !TARGET_PA_20)
7327 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7328 if (TARGET_NO_SPACE_REGS)
7329 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7330 else
7331 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7335 if (TARGET_PA_20)
7337 if (sibcall)
7338 output_asm_insn ("bve (%%r1)", xoperands);
7339 else
7341 if (indirect_call)
7343 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7344 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7345 delay_slot_filled = 1;
7347 else
7348 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7351 else
7353 if (!TARGET_NO_SPACE_REGS)
7354 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7355 xoperands);
7357 if (sibcall)
7359 if (TARGET_NO_SPACE_REGS)
7360 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7361 else
7362 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7364 else
7366 if (TARGET_NO_SPACE_REGS)
7367 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7368 else
7369 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7371 if (indirect_call)
7372 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7373 else
7374 output_asm_insn ("copy %%r31,%%r2", xoperands);
7375 delay_slot_filled = 1;
7382 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7383 output_asm_insn ("nop", xoperands);
7385 /* We are done if there isn't a jump in the delay slot. */
7386 if (seq_length == 0
7387 || delay_insn_deleted
7388 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7389 return "";
7391 /* A sibcall should never have a branch in the delay slot. */
7392 gcc_assert (!sibcall);
7394 /* This call has an unconditional jump in its delay slot. */
7395 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7397 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7399 /* See if the return address can be adjusted. Use the containing
7400 sequence insn's address. */
7401 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7402 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7403 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7405 if (VAL_14_BITS_P (distance))
7407 xoperands[1] = gen_label_rtx ();
7408 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7409 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7410 CODE_LABEL_NUMBER (xoperands[1]));
7412 else
7413 output_asm_insn ("nop\n\tb,n %0", xoperands);
7415 else
7416 output_asm_insn ("b,n %0", xoperands);
7418 /* Delete the jump. */
7419 PUT_CODE (NEXT_INSN (insn), NOTE);
7420 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7421 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7423 return "";
7426 /* Return the attribute length of the indirect call instruction INSN.
7427 The length must match the code generated by output_indirect_call.
7428 The returned length includes the delay slot. Currently, the delay
7429 slot of an indirect call sequence is not exposed and it is used by
7430 the sequence itself. */
7432 int
7433 attr_length_indirect_call (rtx insn)
7435 unsigned long distance = -1;
7436 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7438 if (INSN_ADDRESSES_SET_P ())
7440 distance = (total + insn_current_reference_address (insn));
7441 if (distance < total)
7442 distance = -1;
7445 if (TARGET_64BIT)
7446 return 12;
7448 if (TARGET_FAST_INDIRECT_CALLS
7449 || (!TARGET_PORTABLE_RUNTIME
7450 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7451 return 8;
7453 if (flag_pic)
7454 return 24;
7456 if (TARGET_PORTABLE_RUNTIME)
7457 return 20;
7459 /* Out of reach, can use ble. */
7460 return 12;
7463 const char *
7464 output_indirect_call (rtx insn, rtx call_dest)
7466 rtx xoperands[1];
7468 if (TARGET_64BIT)
7470 xoperands[0] = call_dest;
7471 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7472 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7473 return "";
7476 /* First the special case for kernels, level 0 systems, etc. */
7477 if (TARGET_FAST_INDIRECT_CALLS)
7478 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7480 /* Now the normal case -- we can reach $$dyncall directly or
7481 we're sure that we can get there via a long-branch stub.
7483 No need to check target flags as the length uniquely identifies
7484 the remaining cases. */
7485 if (attr_length_indirect_call (insn) == 8)
7487 /* The HP linker substitutes a BLE for millicode calls using
7488 the short PIC PCREL form. Thus, we must use %r31 as the
7489 link register when generating PA 1.x code. */
7490 if (TARGET_PA_20)
7491 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7492 else
7493 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7496 /* Long millicode call, but we are not generating PIC or portable runtime
7497 code. */
7498 if (attr_length_indirect_call (insn) == 12)
7499 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7501 /* Long millicode call for portable runtime. */
7502 if (attr_length_indirect_call (insn) == 20)
7503 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7505 /* We need a long PIC call to $$dyncall. */
7506 xoperands[0] = NULL_RTX;
7507 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7508 if (TARGET_SOM || !TARGET_GAS)
7510 xoperands[0] = gen_label_rtx ();
7511 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7512 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7513 CODE_LABEL_NUMBER (xoperands[0]));
7514 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7516 else
7518 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7519 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7520 xoperands);
7522 output_asm_insn ("blr %%r0,%%r2", xoperands);
7523 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7524 return "";
7527 /* Return the total length of the save and restore instructions needed for
7528 the data linkage table pointer (i.e., the PIC register) across the call
7529 instruction INSN. No-return calls do not require a save and restore.
7530 In addition, we may be able to avoid the save and restore for calls
7531 within the same translation unit. */
7533 int
7534 attr_length_save_restore_dltp (rtx insn)
7536 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7537 return 0;
7539 return 8;
7542 /* In HP-UX 8.0's shared library scheme, special relocations are needed
7543 for function labels if they might be passed to a function
7544 in a shared library (because shared libraries don't live in code
7545 space), and special magic is needed to construct their address. */
7547 void
7548 hppa_encode_label (rtx sym)
7550 const char *str = XSTR (sym, 0);
7551 int len = strlen (str) + 1;
7552 char *newstr, *p;
7554 p = newstr = alloca (len + 1);
7555 *p++ = '@';
7556 strcpy (p, str);
7558 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
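/* For example, the symbol name "foo" becomes "@foo".  The '@' marks
   the name as a function label so that FUNCTION_NAME_P can recognize
   it and pa_strip_name_encoding below can remove it again.  */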
7561 static void
7562 pa_encode_section_info (tree decl, rtx rtl, int first)
7564 default_encode_section_info (decl, rtl, first);
7566 if (first && TEXT_SPACE_P (decl))
7568 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7569 if (TREE_CODE (decl) == FUNCTION_DECL)
7570 hppa_encode_label (XEXP (rtl, 0));
7574 /* This is sort of inverse to pa_encode_section_info. */
7576 static const char *
7577 pa_strip_name_encoding (const char *str)
7579 str += (*str == '@');
7580 str += (*str == '*');
7581 return str;
7584 int
7585 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7587 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7590 /* Returns 1 if OP is a function label involved in a simple addition
7591 with a constant. Used to keep certain patterns from matching
7592 during instruction combination. */
7593 int
7594 is_function_label_plus_const (rtx op)
7596 /* Strip off any CONST. */
7597 if (GET_CODE (op) == CONST)
7598 op = XEXP (op, 0);
7600 return (GET_CODE (op) == PLUS
7601 && function_label_operand (XEXP (op, 0), Pmode)
7602 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7605 /* Output assembly code for a thunk to FUNCTION. */
7607 static void
7608 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7609 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7610 tree function)
7612 static unsigned int current_thunk_number;
7613 int val_14 = VAL_14_BITS_P (delta);
7614 int nbytes = 0;
7615 char label[16];
7616 rtx xoperands[4];
7618 xoperands[0] = XEXP (DECL_RTL (function), 0);
7619 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7620 xoperands[2] = GEN_INT (delta);
7622 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7623 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7625 /* Output the thunk. We know that the function is in the same
7626 translation unit (i.e., the same space) as the thunk, and that
7627 thunks are output after their method. Thus, we don't need an
7628 external branch to reach the function. With SOM and GAS,
7629 functions and thunks are effectively in different sections.
7630 Thus, we can always use an IA-relative branch and the linker
7631 will add a long branch stub if necessary.
7633 However, we have to be careful when generating PIC code on the
7634 SOM port to ensure that the sequence does not transfer to an
7635 import stub for the target function as this could clobber the
7636 return value saved at SP-24. This would also apply to the
7637 32-bit linux port if the multi-space model is implemented. */
7638 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7639 && !(flag_pic && TREE_PUBLIC (function))
7640 && (TARGET_GAS || last_address < 262132))
7641 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7642 && ((targetm.have_named_sections
7643 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7644 /* The GNU 64-bit linker has rather poor stub management.
7645 So, we use a long branch from thunks that aren't in
7646 the same section as the target function. */
7647 && ((!TARGET_64BIT
7648 && (DECL_SECTION_NAME (thunk_fndecl)
7649 != DECL_SECTION_NAME (function)))
7650 || ((DECL_SECTION_NAME (thunk_fndecl)
7651 == DECL_SECTION_NAME (function))
7652 && last_address < 262132)))
7653 || (!targetm.have_named_sections && last_address < 262132))))
7655 if (!val_14)
7656 output_asm_insn ("addil L'%2,%%r26", xoperands);
7658 output_asm_insn ("b %0", xoperands);
7660 if (val_14)
7662 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7663 nbytes += 8;
7665 else
7667 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7668 nbytes += 12;
7671 else if (TARGET_64BIT)
7673 /* We only have one call-clobbered scratch register, so we can't
7674 make use of the delay slot if delta doesn't fit in 14 bits. */
7675 if (!val_14)
7677 output_asm_insn ("addil L'%2,%%r26", xoperands);
7678 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7681 output_asm_insn ("b,l .+8,%%r1", xoperands);
7683 if (TARGET_GAS)
7685 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7686 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7688 else
7690 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7691 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7694 if (val_14)
7696 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7697 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7698 nbytes += 20;
7700 else
7702 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7703 nbytes += 24;
7706 else if (TARGET_PORTABLE_RUNTIME)
7708 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7709 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7711 if (!val_14)
7712 output_asm_insn ("addil L'%2,%%r26", xoperands);
7714 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7716 if (val_14)
7718 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7719 nbytes += 16;
7721 else
7723 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7724 nbytes += 20;
7727 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7729 /* The function is accessible from outside this module. The only
7730 way to avoid an import stub between the thunk and function is to
7731 call the function directly with an indirect sequence similar to
7732 that used by $$dyncall. This is possible because $$dyncall acts
7733 as the import stub in an indirect call. */
7734 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7735 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7736 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7737 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7738 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7739 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7740 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7741 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7742 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7744 if (!val_14)
7746 output_asm_insn ("addil L'%2,%%r26", xoperands);
7747 nbytes += 4;
7750 if (TARGET_PA_20)
7752 output_asm_insn ("bve (%%r22)", xoperands);
7753 nbytes += 36;
7755 else if (TARGET_NO_SPACE_REGS)
7757 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7758 nbytes += 36;
7760 else
7762 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7763 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7764 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7765 nbytes += 44;
7768 if (val_14)
7769 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7770 else
7771 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7773 else if (flag_pic)
7775 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7777 if (TARGET_SOM || !TARGET_GAS)
7779 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7780 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7782 else
7784 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7785 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7788 if (!val_14)
7789 output_asm_insn ("addil L'%2,%%r26", xoperands);
7791 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7793 if (val_14)
7795 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7796 nbytes += 20;
7798 else
7800 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7801 nbytes += 24;
7804 else
7806 if (!val_14)
7807 output_asm_insn ("addil L'%2,%%r26", xoperands);
7809 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7810 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
7812 if (val_14)
7814 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7815 nbytes += 12;
7817 else
7819 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7820 nbytes += 16;
7824 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7826 if (TARGET_SOM && TARGET_GAS)
7828 /* We're done with this subspace except possibly for some additional
7829 debug information. Forget that we are in this subspace to ensure
7830 that the next function is output in its own subspace. */
7831 in_section = NULL;
7832 cfun->machine->in_nsubspa = 2;
7835 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7837 switch_to_section (data_section);
7838 output_asm_insn (".align 4", xoperands);
7839 ASM_OUTPUT_LABEL (file, label);
7840 output_asm_insn (".word P'%0", xoperands);
7843 current_thunk_number++;
7844 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
7845 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
7846 last_address += nbytes;
7847 update_total_code_bytes (nbytes);
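/* As a concrete example, in the common short-branch case with a
   14-bit DELTA the thunk body emitted above is simply

       b function
       ldo delta(%r26),%r26

   adjusting the this pointer in %r26 in the delay slot of the branch
   to the target method.  ("function" and "delta" are placeholders
   for the actual operands.)  */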
7850 /* Only direct calls to static functions are allowed to be sibling (tail)
7851 call optimized.
7853 This restriction is necessary because some linker generated stubs will
7854 store return pointers into rp' in some cases which might clobber a
7855 live value already in rp'.
7857 In a sibcall the current function and the target function share stack
7858 space. Thus if the path to the current function and the path to the
7859 target function save a value in rp', they save the value into the
7860 same stack slot, which has undesirable consequences.
7862 Because of the deferred binding nature of shared libraries any function
7863 with external scope could be in a different load module and thus require
7864 rp' to be saved when calling that function. So sibcall optimizations
7865 can only be safe for static functions.
7867 Note that GCC never needs return value relocations, so we don't have to
7868 worry about static calls with return value relocations (which require
7869 saving rp').
7871 It is safe to perform a sibcall optimization when the target function
7872 will never return. */
7873 static bool
7874 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7876 if (TARGET_PORTABLE_RUNTIME)
7877 return false;
7879 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
7880 single subspace mode and the call is not indirect. As far as I know,
7881 there is no operating system support for the multiple subspace mode.
7882 It might be possible to support indirect calls if we didn't use
7883 $$dyncall (see the indirect sequence generated in output_call). */
7884 if (TARGET_ELF32)
7885 return (decl != NULL_TREE);
7887 /* Sibcalls are not ok because the arg pointer register is not a fixed
7888 register. This prevents the sibcall optimization from occurring. In
7889 addition, there are problems with stub placement using GNU ld. This
7890 is because a normal sibcall branch uses a 17-bit relocation while
7891 a regular call branch uses a 22-bit relocation. As a result, more
7892 care needs to be taken in the placement of long-branch stubs. */
7893 if (TARGET_64BIT)
7894 return false;
7896 /* Sibcalls are only ok within a translation unit. */
7897 return (decl && !TREE_PUBLIC (decl));
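/* For example, with the 32-bit ABI,

       static int callee (int);
       int caller (int x) { return callee (x + 1); }

   the call to callee may be sibcall optimized into a plain branch
   that reuses the caller's frame, whereas a call to an extern
   function is rejected above because it might resolve to another
   load module.  (Illustrative source fragment.)  */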
7900 /* ??? Addition is not commutative on the PA due to the weird implicit
7901 space register selection rules for memory addresses. Therefore, we
7902 don't consider a + b == b + a, as this might be inside a MEM. */
7903 static bool
7904 pa_commutative_p (rtx x, int outer_code)
7906 return (COMMUTATIVE_P (x)
7907 && (TARGET_NO_SPACE_REGS
7908 || (outer_code != UNKNOWN && outer_code != MEM)
7909 || GET_CODE (x) != PLUS));
7912 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7913 use in fmpyadd instructions. */
7914 int
7915 fmpyaddoperands (rtx *operands)
7917 enum machine_mode mode = GET_MODE (operands[0]);
7919 /* Must be a floating point mode. */
7920 if (mode != SFmode && mode != DFmode)
7921 return 0;
7923 /* All modes must be the same. */
7924 if (! (mode == GET_MODE (operands[1])
7925 && mode == GET_MODE (operands[2])
7926 && mode == GET_MODE (operands[3])
7927 && mode == GET_MODE (operands[4])
7928 && mode == GET_MODE (operands[5])))
7929 return 0;
7931 /* All operands must be registers. */
7932 if (! (GET_CODE (operands[1]) == REG
7933 && GET_CODE (operands[2]) == REG
7934 && GET_CODE (operands[3]) == REG
7935 && GET_CODE (operands[4]) == REG
7936 && GET_CODE (operands[5]) == REG))
7937 return 0;
7939 /* Only 2 real operands to the addition. One of the input operands must
7940 be the same as the output operand. */
7941 if (! rtx_equal_p (operands[3], operands[4])
7942 && ! rtx_equal_p (operands[3], operands[5]))
7943 return 0;
7945 /* Inout operand of add cannot conflict with any operands from multiply. */
7946 if (rtx_equal_p (operands[3], operands[0])
7947 || rtx_equal_p (operands[3], operands[1])
7948 || rtx_equal_p (operands[3], operands[2]))
7949 return 0;
7951 /* The output of the multiply cannot feed into the addition operands. */
7952 if (rtx_equal_p (operands[4], operands[0])
7953 || rtx_equal_p (operands[5], operands[0]))
7954 return 0;
7956 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
7957 if (mode == SFmode
7958 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7959 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7960 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7961 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7962 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
7963 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
7964 return 0;
7966 /* Passed. Operands are suitable for fmpyadd. */
7967 return 1;
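/* For example, an insn pair of the shape

       fr22 = fr4 * fr5	; operands[0..2], the multiply
       fr6  = fr6 + fr7	; operands[3..5], the add

   passes the checks above: the add reuses operands[3] as its output
   and no addition operand overlaps the multiply's output fr22.
   (Illustrative register choices; for SFmode the registers must
   additionally be in FPUPPER_REGS.)  */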
7970 #if !defined(USE_COLLECT2)
7971 static void
7972 pa_asm_out_constructor (rtx symbol, int priority)
7974 if (!function_label_operand (symbol, VOIDmode))
7975 hppa_encode_label (symbol);
7977 #ifdef CTORS_SECTION_ASM_OP
7978 default_ctor_section_asm_out_constructor (symbol, priority);
7979 #else
7980 # ifdef TARGET_ASM_NAMED_SECTION
7981 default_named_section_asm_out_constructor (symbol, priority);
7982 # else
7983 default_stabs_asm_out_constructor (symbol, priority);
7984 # endif
7985 #endif
7988 static void
7989 pa_asm_out_destructor (rtx symbol, int priority)
7991 if (!function_label_operand (symbol, VOIDmode))
7992 hppa_encode_label (symbol);
7994 #ifdef DTORS_SECTION_ASM_OP
7995 default_dtor_section_asm_out_destructor (symbol, priority);
7996 #else
7997 # ifdef TARGET_ASM_NAMED_SECTION
7998 default_named_section_asm_out_destructor (symbol, priority);
7999 # else
8000 default_stabs_asm_out_destructor (symbol, priority);
8001 # endif
8002 #endif
8004 #endif
8006 /* This function places uninitialized global data in the bss section.
8007 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8008 function on the SOM port to prevent uninitialized global data from
8009 being placed in the data section. */
8011 void
8012 pa_asm_output_aligned_bss (FILE *stream,
8013 const char *name,
8014 unsigned HOST_WIDE_INT size,
8015 unsigned int align)
8017 switch_to_section (bss_section);
8018 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8020 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8021 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8022 #endif
8024 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8025 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8026 #endif
8028 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8029 ASM_OUTPUT_LABEL (stream, name);
8030 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8033 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8034 that doesn't allow the alignment of global common storage to be directly
8035 specified. The SOM linker aligns common storage based on the rounded
8036 value of the NUM_BYTES parameter in the .comm directive. It's not
8037 possible to use the .align directive as it doesn't affect the alignment
8038 of the label associated with a .comm directive. */
8040 void
8041 pa_asm_output_aligned_common (FILE *stream,
8042 const char *name,
8043 unsigned HOST_WIDE_INT size,
8044 unsigned int align)
8046 unsigned int max_common_align;
8048 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8049 if (align > max_common_align)
8051 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8052 "for global common data. Using %u",
8053 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8054 align = max_common_align;
8057 switch_to_section (bss_section);
8059 assemble_name (stream, name);
8060 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8061 MAX (size, align / BITS_PER_UNIT));
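/* For example, a 4-byte common symbol requiring 8-byte alignment is
   emitted as

   foo	.comm 8

   with SIZE rounded up to the alignment so that the SOM linker
   aligns the storage as required.  (Illustrative.)  */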
8064 /* We can't use .comm for local common storage as the SOM linker effectively
8065 treats the symbol as universal and uses the same storage for local symbols
8066 with the same name in different object files. The .block directive
8067 reserves an uninitialized block of storage. However, it's not common
8068 storage. Fortunately, GCC never requests common storage with the same
8069 name in any given translation unit. */
8071 void
8072 pa_asm_output_aligned_local (FILE *stream,
8073 const char *name,
8074 unsigned HOST_WIDE_INT size,
8075 unsigned int align)
8077 switch_to_section (bss_section);
8078 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8080 #ifdef LOCAL_ASM_OP
8081 fprintf (stream, "%s", LOCAL_ASM_OP);
8082 assemble_name (stream, name);
8083 fprintf (stream, "\n");
8084 #endif
8086 ASM_OUTPUT_LABEL (stream, name);
8087 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8090 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8091 use in fmpysub instructions. */
8092 int
8093 fmpysuboperands (rtx *operands)
8095 enum machine_mode mode = GET_MODE (operands[0]);
8097 /* Must be a floating point mode. */
8098 if (mode != SFmode && mode != DFmode)
8099 return 0;
8101 /* All modes must be the same. */
8102 if (! (mode == GET_MODE (operands[1])
8103 && mode == GET_MODE (operands[2])
8104 && mode == GET_MODE (operands[3])
8105 && mode == GET_MODE (operands[4])
8106 && mode == GET_MODE (operands[5])))
8107 return 0;
8109 /* All operands must be registers. */
8110 if (! (GET_CODE (operands[1]) == REG
8111 && GET_CODE (operands[2]) == REG
8112 && GET_CODE (operands[3]) == REG
8113 && GET_CODE (operands[4]) == REG
8114 && GET_CODE (operands[5]) == REG))
8115 return 0;
8117 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8118 operation, so operands[4] must be the same as operands[3]. */
8119 if (! rtx_equal_p (operands[3], operands[4]))
8120 return 0;
8122 /* The output of the multiply cannot feed into the subtraction. */
8123 if (rtx_equal_p (operands[5], operands[0]))
8124 return 0;
8126 /* Inout operand of sub cannot conflict with any operands from multiply. */
8127 if (rtx_equal_p (operands[3], operands[0])
8128 || rtx_equal_p (operands[3], operands[1])
8129 || rtx_equal_p (operands[3], operands[2]))
8130 return 0;
8132 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8133 if (mode == SFmode
8134 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8135 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8136 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8137 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8138 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8139 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8140 return 0;
8142 /* Passed. Operands are suitable for fmpysub. */
8143 return 1;
8146 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8147 constants for shadd instructions. */
8148 int
8149 shadd_constant_p (int val)
8151 if (val == 2 || val == 4 || val == 8)
8152 return 1;
8153 else
8154 return 0;
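/* These constants correspond to the sh1add, sh2add and sh3add
   instructions, which shift their first operand left by 1, 2 or 3
   bits (a multiply by 2, 4 or 8) before adding, e.g.

       sh2add %r4,%r5,%r6	; %r6 = 4 * %r4 + %r5

   (Illustrative.)  */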
8157 /* Return 1 if OP is valid as a base or index register in a
8158 REG+REG address. */
8160 int
8161 borx_reg_operand (rtx op, enum machine_mode mode)
8163 if (GET_CODE (op) != REG)
8164 return 0;
8166 /* We must reject virtual registers as the only expressions that
8167 can be instantiated are REG and REG+CONST. */
8168 if (op == virtual_incoming_args_rtx
8169 || op == virtual_stack_vars_rtx
8170 || op == virtual_stack_dynamic_rtx
8171 || op == virtual_outgoing_args_rtx
8172 || op == virtual_cfa_rtx)
8173 return 0;
8175 /* While it's always safe to index off the frame pointer, it's not
8176 profitable to do so when the frame pointer is being eliminated. */
8177 if (!reload_completed
8178 && flag_omit_frame_pointer
8179 && !current_function_calls_alloca
8180 && op == frame_pointer_rtx)
8181 return 0;
8183 return register_operand (op, mode);
8186 /* Return 1 if this operand is anything other than a hard register. */
8188 int
8189 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8191 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8194 /* Return 1 if INSN branches forward. Should be using insn_addresses
8195 to avoid walking through all the insns... */
8196 static int
8197 forward_branch_p (rtx insn)
8199 rtx label = JUMP_LABEL (insn);
8201 while (insn)
8203 if (insn == label)
8204 break;
8205 else
8206 insn = NEXT_INSN (insn);
8209 return (insn == label);
8212 /* Return 1 if OP is an equality comparison, else return 0. */
8213 int
8214 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8216 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8219 /* Return 1 if INSN is in the delay slot of a call instruction. */
8220 int
8221 jump_in_call_delay (rtx insn)
8224 if (GET_CODE (insn) != JUMP_INSN)
8225 return 0;
8227 if (PREV_INSN (insn)
8228 && PREV_INSN (PREV_INSN (insn))
8229 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8231 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8233 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8234 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8237 else
8238 return 0;
8241 /* Output an unconditional move and branch insn. */
8243 const char *
8244 output_parallel_movb (rtx *operands, int length)
8246 /* These are the cases in which we win. */
8247 if (length == 4)
8248 return "mov%I1b,tr %1,%0,%2";
8250 /* None of these cases wins, but they don't lose either. */
8251 if (dbr_sequence_length () == 0)
8253 /* Nothing in the delay slot, fake it by putting the combined
8254 insn (the copy or add) in the delay slot of a bl. */
8255 if (GET_CODE (operands[1]) == CONST_INT)
8256 return "b %2\n\tldi %1,%0";
8257 else
8258 return "b %2\n\tcopy %1,%0";
8260 else
8262 /* Something in the delay slot, but we've got a long branch. */
8263 if (GET_CODE (operands[1]) == CONST_INT)
8264 return "ldi %1,%0\n\tb %2";
8265 else
8266 return "copy %1,%0\n\tb %2";
8270 /* Output an unconditional add and branch insn. */
8272 const char *
8273 output_parallel_addb (rtx *operands, int length)
8275 /* To make life easy we want operand0 to be the shared input/output
8276 operand and operand1 to be the readonly operand. */
8277 if (operands[0] == operands[1])
8278 operands[1] = operands[2];
8280 /* These are the cases in which we win. */
8281 if (length == 4)
8282 return "add%I1b,tr %1,%0,%3";
8284 /* None of these cases win, but they don't lose either. */
8285 if (dbr_sequence_length () == 0)
8287 /* Nothing in the delay slot, fake it by putting the combined
8288 insn (the copy or add) in the delay slot of a bl. */
8289 return "b %3\n\tadd%I1 %1,%0,%0";
8291 else
8293 /* Something in the delay slot, but we've got a long branch. */
8294 return "add%I1 %1,%0,%0\n\tb %3";
8298 /* Return nonzero if INSN (a jump insn) immediately follows a call
8299 to a named function. This is used to avoid filling the delay slot
8300 of the jump since it can usually be eliminated by modifying RP in
8301 the delay slot of the call. */
8303 int
8304 following_call (rtx insn)
8306 if (! TARGET_JUMP_IN_DELAY)
8307 return 0;
8309 /* Find the previous real insn, skipping NOTEs. */
8310 insn = PREV_INSN (insn);
8311 while (insn && GET_CODE (insn) == NOTE)
8312 insn = PREV_INSN (insn);
8314 /* Check for CALL_INSNs and millicode calls. */
8315 if (insn
8316 && ((GET_CODE (insn) == CALL_INSN
8317 && get_attr_type (insn) != TYPE_DYNCALL)
8318 || (GET_CODE (insn) == INSN
8319 && GET_CODE (PATTERN (insn)) != SEQUENCE
8320 && GET_CODE (PATTERN (insn)) != USE
8321 && GET_CODE (PATTERN (insn)) != CLOBBER
8322 && get_attr_type (insn) == TYPE_MILLI)))
8323 return 1;
8325 return 0;
8328 /* We use this hook to perform a PA specific optimization which is difficult
8329 to do in earlier passes.
8331 We want the delay slots of branches within jump tables to be filled.
8332 None of the compiler passes at the moment even has the notion that a
8333 PA jump table doesn't contain addresses, but instead contains actual
8334 instructions!
8336 Because we actually jump into the table, the addresses of each entry
8337 must stay constant in relation to the beginning of the table (which
8338 itself must stay constant relative to the instruction to jump into
8339 it). I don't believe we can guarantee earlier passes of the compiler
8340 will adhere to those rules.
8342 So, late in the compilation process we find all the jump tables, and
8343 expand them into real code -- e.g. each entry in the jump table vector
8344 will get an appropriate label followed by a jump to the final target.
8346 Reorg and the final jump pass can then optimize these branches and
8347 fill their delay slots. We end up with smaller, more efficient code.
8349 The jump instructions within the table are special; we must be able
8350 to identify them during assembly output (if the jumps don't get filled
8351 we need to emit a nop rather than nullifying the delay slot). We
8352 identify jumps in switch tables by using insns with the attribute
8353 type TYPE_BTABLE_BRANCH.
8355 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8356 insns. This serves two purposes, first it prevents jump.c from
8357 noticing that the last N entries in the table jump to the instruction
8358 immediately after the table and deleting the jumps. Second, those
8359 insns mark where we should emit .begin_brtab and .end_brtab directives
8360 when using GAS (allows for better link time optimizations). */
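/* Schematically, a jump table that starts out as an ADDR_VEC of code
   labels

       (addr_vec [L1 L2 L3])

   is rewritten by the loop below into real instructions bracketed by
   the marker insns:

	begin_brtab
   L$0:	b L1
	nop		; delay slot, possibly filled later by reorg
   L$1:	b L2
	nop
   ...
	end_brtab

   keeping every entry the same size.  (Schematic only.)  */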
8362 static void
8363 pa_reorg (void)
8365 rtx insn;
8367 remove_useless_addtr_insns (1);
8369 if (pa_cpu < PROCESSOR_8000)
8370 pa_combine_instructions ();
8373 /* This is fairly cheap, so always run it if optimizing. */
8374 if (optimize > 0 && !TARGET_BIG_SWITCH)
8376 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8377 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8379 rtx pattern, tmp, location, label;
8380 unsigned int length, i;
8382 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8383 if (GET_CODE (insn) != JUMP_INSN
8384 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8385 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8386 continue;
8388 /* Emit marker for the beginning of the branch table. */
8389 emit_insn_before (gen_begin_brtab (), insn);
8391 pattern = PATTERN (insn);
8392 location = PREV_INSN (insn);
8393 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8395 for (i = 0; i < length; i++)
8397 /* Emit a label before each jump to keep jump.c from
8398 removing this code. */
8399 tmp = gen_label_rtx ();
8400 LABEL_NUSES (tmp) = 1;
8401 emit_label_after (tmp, location);
8402 location = NEXT_INSN (location);
8404 if (GET_CODE (pattern) == ADDR_VEC)
8405 label = XEXP (XVECEXP (pattern, 0, i), 0);
8406 else
8407 label = XEXP (XVECEXP (pattern, 1, i), 0);
8409 tmp = gen_short_jump (label);
8411 /* Emit the jump itself. */
8412 tmp = emit_jump_insn_after (tmp, location);
8413 JUMP_LABEL (tmp) = label;
8414 LABEL_NUSES (label)++;
8415 location = NEXT_INSN (location);
8417 /* Emit a BARRIER after the jump. */
8418 emit_barrier_after (location);
8419 location = NEXT_INSN (location);
8422 /* Emit marker for the end of the branch table. */
8423 emit_insn_before (gen_end_brtab (), location);
8424 location = NEXT_INSN (location);
8425 emit_barrier_after (location);
8427 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8428 delete_insn (insn);
8431 else
8433 /* Still need brtab marker insns. FIXME: the presence of these
8434 markers disables output of the branch table to readonly memory,
8435 and any alignment directives that might be needed. Possibly,
8436 the begin_brtab insn should be output before the label for the
8437 table. This doesn't matter at the moment since the tables are
8438 always output in the text section. */
8439 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8441 /* Find an ADDR_VEC insn. */
8442 if (GET_CODE (insn) != JUMP_INSN
8443 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8444 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8445 continue;
8447 /* Now generate markers for the beginning and end of the
8448 branch table. */
8449 emit_insn_before (gen_begin_brtab (), insn);
8450 emit_insn_after (gen_end_brtab (), insn);
8455 /* The PA has a number of odd instructions which can perform multiple
8456 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8457 it may be profitable to combine two instructions into one instruction
8458 with two outputs. It's not profitable on PA2.0 machines because the
8459 two outputs would take two slots in the reorder buffers.
8461 This routine finds instructions which can be combined and combines
8462 them. We only support some of the potential combinations, and we
8463 only try common ways to find suitable instructions.
8465 * addb can add two registers or a register and a small integer
8466 and jump to a nearby (+-8k) location. Normally the jump to the
8467 nearby location is conditional on the result of the add, but by
8468 using the "true" condition we can make the jump unconditional.
8469 Thus addb can perform two independent operations in one insn.
8471 * movb is similar to addb in that it can perform a reg->reg
8472 or small immediate->reg copy and jump to a nearby (+-8k) location.
8474 * fmpyadd and fmpysub can perform a FP multiply and either an
8475 FP add or FP sub if the operands of the multiply and add/sub are
8476 independent (there are other minor restrictions). Note both
8477 the fmpy and fadd/fsub can in theory move to better spots according
8478 to data dependencies, but for now we require the fmpy stay at a
8479 fixed location.
8481 * Many of the memory operations can perform pre & post updates
8482 of index registers. GCC's pre/post increment/decrement addressing
8483 is far too simple to take advantage of all the possibilities. This
8484 pass may not be suitable since those insns may not be independent.
8486 * comclr can compare two ints or an int and a register, nullify
8487 the following instruction and zero some other register. This
8488 is more difficult to use as it's harder to find an insn which
8489 will generate a comclr than finding something like an unconditional
8490 branch. (conditional moves & long branches create comclr insns).
8492 * Most arithmetic operations can conditionally skip the next
8493 instruction. They can be viewed as "perform this operation
8494 and conditionally jump to this nearby location" (where nearby
8495 is an insn away). These are difficult to use due to the
8496 branch length restrictions. */
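/* For example, an add followed by an unconditional branch

       add %r4,%r5,%r5
       b L$dest

   can be combined into the single instruction

       addb,tr %r4,%r5,L$dest

   where the ",tr" (true) condition makes the branch unconditional.
   (Illustrative operands.)  */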
8498 static void
8499 pa_combine_instructions (void)
8501 rtx anchor, new;
8503 /* This can get expensive since the basic algorithm is on the
8504 order of O(n^2) (or worse). Only do it for -O2 or higher
8505 levels of optimization. */
8506 if (optimize < 2)
8507 return;
8509 /* Walk down the list of insns looking for "anchor" insns which
8510 may be combined with "floating" insns. As the name implies,
8511 "anchor" instructions don't move, while "floating" insns may
8512 move around. */
8513 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8514 new = make_insn_raw (new);
8516 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8518 enum attr_pa_combine_type anchor_attr;
8519 enum attr_pa_combine_type floater_attr;
8521 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8522 Also ignore any special USE insns. */
8523 if ((GET_CODE (anchor) != INSN
8524 && GET_CODE (anchor) != JUMP_INSN
8525 && GET_CODE (anchor) != CALL_INSN)
8526 || GET_CODE (PATTERN (anchor)) == USE
8527 || GET_CODE (PATTERN (anchor)) == CLOBBER
8528 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8529 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8530 continue;
8532 anchor_attr = get_attr_pa_combine_type (anchor);
8533 /* See if anchor is an insn suitable for combination. */
8534 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8535 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8536 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8537 && ! forward_branch_p (anchor)))
8539 rtx floater;
8541 for (floater = PREV_INSN (anchor);
8542 floater;
8543 floater = PREV_INSN (floater))
8545 if (GET_CODE (floater) == NOTE
8546 || (GET_CODE (floater) == INSN
8547 && (GET_CODE (PATTERN (floater)) == USE
8548 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8549 continue;
8551 /* Anything except a regular INSN will stop our search. */
8552 if (GET_CODE (floater) != INSN
8553 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8554 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8556 floater = NULL_RTX;
8557 break;
8560 /* See if FLOATER is suitable for combination with the
8561 anchor. */
8562 floater_attr = get_attr_pa_combine_type (floater);
8563 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8564 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8565 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8566 && floater_attr == PA_COMBINE_TYPE_FMPY))
8568 /* If ANCHOR and FLOATER can be combined, then we're
8569 done with this pass. */
8570 if (pa_can_combine_p (new, anchor, floater, 0,
8571 SET_DEST (PATTERN (floater)),
8572 XEXP (SET_SRC (PATTERN (floater)), 0),
8573 XEXP (SET_SRC (PATTERN (floater)), 1)))
8574 break;
8577 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8578 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8580 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8582 if (pa_can_combine_p (new, anchor, floater, 0,
8583 SET_DEST (PATTERN (floater)),
8584 XEXP (SET_SRC (PATTERN (floater)), 0),
8585 XEXP (SET_SRC (PATTERN (floater)), 1)))
8586 break;
8588 else
8590 if (pa_can_combine_p (new, anchor, floater, 0,
8591 SET_DEST (PATTERN (floater)),
8592 SET_SRC (PATTERN (floater)),
8593 SET_SRC (PATTERN (floater))))
8594 break;
8599 /* If we didn't find anything on the backwards scan try forwards. */
8600 if (!floater
8601 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8602 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8604 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8606 if (GET_CODE (floater) == NOTE
8607 || (GET_CODE (floater) == INSN
8608 && (GET_CODE (PATTERN (floater)) == USE
8609 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8611 continue;
8613 /* Anything except a regular INSN will stop our search. */
8614 if (GET_CODE (floater) != INSN
8615 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8616 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8618 floater = NULL_RTX;
8619 break;
8622 /* See if FLOATER is suitable for combination with the
8623 anchor. */
8624 floater_attr = get_attr_pa_combine_type (floater);
8625 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8626 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8627 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8628 && floater_attr == PA_COMBINE_TYPE_FMPY))
8630 /* If ANCHOR and FLOATER can be combined, then we're
8631 done with this pass. */
8632 if (pa_can_combine_p (new, anchor, floater, 1,
8633 SET_DEST (PATTERN (floater)),
8634 XEXP (SET_SRC (PATTERN (floater)),
8635 0),
8636 XEXP (SET_SRC (PATTERN (floater)),
8637 1)))
8638 break;
8643 /* FLOATER will be nonzero if we found a suitable floating
8644 insn for combination with ANCHOR. */
8645 if (floater
8646 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8647 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8649 /* Emit the new instruction and delete the old anchor. */
8650 emit_insn_before (gen_rtx_PARALLEL
8651 (VOIDmode,
8652 gen_rtvec (2, PATTERN (anchor),
8653 PATTERN (floater))),
8654 anchor);
8656 PUT_CODE (anchor, NOTE);
8657 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8658 NOTE_SOURCE_FILE (anchor) = 0;
8660 /* Emit a special USE insn for FLOATER, then delete
8661 the floating insn. */
8662 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8663 delete_insn (floater);
8665 continue;
8667 else if (floater
8668 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8670 rtx temp;
8671 /* Emit the new_jump instruction and delete the old anchor. */
8672 temp
8673 = emit_jump_insn_before (gen_rtx_PARALLEL
8674 (VOIDmode,
8675 gen_rtvec (2, PATTERN (anchor),
8676 PATTERN (floater))),
8677 anchor);
8679 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8680 PUT_CODE (anchor, NOTE);
8681 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8682 NOTE_SOURCE_FILE (anchor) = 0;
8684 /* Emit a special USE insn for FLOATER, then delete
8685 the floating insn. */
8686 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8687 delete_insn (floater);
8688 continue;
8694 static int
8695 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8696 rtx src1, rtx src2)
8698 int insn_code_number;
8699 rtx start, end;
8701 /* Create a PARALLEL with the patterns of ANCHOR and
8702 FLOATER, try to recognize it, then test constraints
8703 for the resulting pattern.
8705 If the pattern doesn't match or the constraints
8706 aren't met keep searching for a suitable floater
8707 insn. */
8708 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8709 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8710 INSN_CODE (new) = -1;
8711 insn_code_number = recog_memoized (new);
8712 if (insn_code_number < 0
8713 || (extract_insn (new), ! constrain_operands (1)))
8714 return 0;
8716 if (reversed)
8718 start = anchor;
8719 end = floater;
8721 else
8723 start = floater;
8724 end = anchor;
8727 /* There's up to three operands to consider. One
8728 output and two inputs.
8730 The output must not be used between FLOATER & ANCHOR
8731 exclusive. The inputs must not be set between
8732 FLOATER and ANCHOR exclusive. */
8734 if (reg_used_between_p (dest, start, end))
8735 return 0;
8737 if (reg_set_between_p (src1, start, end))
8738 return 0;
8740 if (reg_set_between_p (src2, start, end))
8741 return 0;
8743 /* If we get here, then everything is good. */
8744 return 1;
8747 /* Return nonzero if references for INSN are delayed.
8749 Millicode insns are actually function calls with some special
8750 constraints on arguments and register usage.
8752 Millicode calls always expect their arguments in the integer argument
8753 registers, and always return their result in %r29 (ret1). They
8754 are expected to clobber their arguments, %r1, %r29, and the return
8755 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8757 This function tells reorg that the references to arguments and
8758 millicode calls do not appear to happen until after the millicode call.
8759 This allows reorg to put insns which set the argument registers into the
8760 delay slot of the millicode call -- thus they act more like traditional
8761 CALL_INSNs.
8763 Note we cannot consider side effects of the insn to be delayed because
8764 the branch and link insn will clobber the return pointer. If we happened
8765 to use the return pointer in the delay slot of the call, then we lose.
8767 get_attr_type will try to recognize the given insn, so make sure to
8768 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8769 in particular. */
8770 int
8771 insn_refs_are_delayed (rtx insn)
8773 return ((GET_CODE (insn) == INSN
8774 && GET_CODE (PATTERN (insn)) != SEQUENCE
8775 && GET_CODE (PATTERN (insn)) != USE
8776 && GET_CODE (PATTERN (insn)) != CLOBBER
8777 && get_attr_type (insn) == TYPE_MILLI));
8780 /* On the HP-PA the value is found in register(s) 28(-29), unless
8781 the mode is SF or DF. Then the value is returned in fr4 (32).
8783 This must perform the same promotions as PROMOTE_MODE, else
8784 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8786 Small structures must be returned in a PARALLEL on PA64 in order
8787 to match the HP Compiler ABI. */
8789 rtx
8790 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8792 enum machine_mode valmode;
8794 if (AGGREGATE_TYPE_P (valtype))
8796 if (TARGET_64BIT)
8798 /* Aggregates with a size less than or equal to 128 bits are
8799 returned in GR 28(-29). They are left justified. The pad
8800 bits are undefined. Larger aggregates are returned in
8801 memory. */
8802 rtx loc[2];
8803 int i, offset = 0;
8804 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8806 for (i = 0; i < ub; i++)
8808 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8809 gen_rtx_REG (DImode, 28 + i),
8810 GEN_INT (offset));
8811 offset += 8;
8814 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8816 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
8818 /* Aggregates 5 to 8 bytes in size are returned in general
8819 registers r28-r29 in the same manner as other non
8820 floating-point objects. The data is right-justified and
8821 zero-extended to 64 bits. This is opposite to the normal
8822 justification used on big endian targets and requires
8823 special treatment. */
8824 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8825 gen_rtx_REG (DImode, 28), const0_rtx);
8826 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
8830 if ((INTEGRAL_TYPE_P (valtype)
8831 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8832 || POINTER_TYPE_P (valtype))
8833 valmode = word_mode;
8834 else
8835 valmode = TYPE_MODE (valtype);
8837 if (TREE_CODE (valtype) == REAL_TYPE
8838 && !AGGREGATE_TYPE_P (valtype)
8839 && TYPE_MODE (valtype) != TFmode
8840 && !TARGET_SOFT_FLOAT)
8841 return gen_rtx_REG (valmode, 32);
8843 return gen_rtx_REG (valmode, 28);
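/* Examples of the above: a double is returned in %fr4 (register 32),
   an int is promoted to word_mode and returned in %r28, and on PA64
   a 16-byte struct comes back as a PARALLEL of DImode registers 28
   and 29.  */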
8846 /* Return the location of a parameter that is passed in a register or NULL
8847 if the parameter has any component that is passed in memory.
8849 This is new code and will be pushed into the net sources after
8850 further testing.
8852 ??? We might want to restructure this so that it looks more like other
8853 ports. */
8854 rtx
8855 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8856 int named ATTRIBUTE_UNUSED)
8858 int max_arg_words = (TARGET_64BIT ? 8 : 4);
8859 int alignment = 0;
8860 int arg_size;
8861 int fpr_reg_base;
8862 int gpr_reg_base;
8863 rtx retval;
8865 if (mode == VOIDmode)
8866 return NULL_RTX;
8868 arg_size = FUNCTION_ARG_SIZE (mode, type);
8870 /* If this arg would be passed partially or totally on the stack, then
8871 this routine should return zero. pa_arg_partial_bytes will
8872 handle arguments which are split between regs and stack slots if
8873 the ABI mandates split arguments. */
8874 if (! TARGET_64BIT)
8876 /* The 32-bit ABI does not split arguments. */
8877 if (cum->words + arg_size > max_arg_words)
8878 return NULL_RTX;
8880 else
8882 if (arg_size > 1)
8883 alignment = cum->words & 1;
8884 if (cum->words + alignment >= max_arg_words)
8885 return NULL_RTX;
8888 /* The 32bit ABIs and the 64bit ABIs are rather different,
8889 particularly in their handling of FP registers. We might
8890 be able to cleverly share code between them, but I'm not
8891 going to bother in the hope that splitting them up results
8892 in code that is more easily understood. */
8894 if (TARGET_64BIT)
8896 /* Advance the base registers to their current locations.
8898 Remember, gprs grow towards smaller register numbers while
8899 fprs grow to higher register numbers. Also remember that
8900 although FP regs are 32-bit addressable, we pretend that
8901 the registers are 64-bits wide. */
8902 gpr_reg_base = 26 - cum->words;
8903 fpr_reg_base = 32 + cum->words;
8905 /* Arguments wider than one word and small aggregates need special
8906 treatment. */
8907 if (arg_size > 1
8908 || mode == BLKmode
8909 || (type && AGGREGATE_TYPE_P (type)))
8911 /* Double-extended precision (80-bit), quad-precision (128-bit)
8912 and aggregates including complex numbers are aligned on
8913 128-bit boundaries. The first eight 64-bit argument slots
8914 are associated one-to-one, with general registers r26
8915 through r19, and also with floating-point registers fr4
8916 through fr11. Arguments larger than one word are always
8917 passed in general registers.
8919 Using a PARALLEL with a word mode register results in left
8920 justified data on a big-endian target. */
8922 rtx loc[8];
8923 int i, offset = 0, ub = arg_size;
8925 /* Align the base register. */
8926 gpr_reg_base -= alignment;
8928 ub = MIN (ub, max_arg_words - cum->words - alignment);
8929 for (i = 0; i < ub; i++)
8931 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8932 gen_rtx_REG (DImode, gpr_reg_base),
8933 GEN_INT (offset));
8934 gpr_reg_base -= 1;
8935 offset += 8;
8938 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
8941 else
8943 /* If the argument is larger than a word, then we know precisely
8944 which registers we must use. */
8945 if (arg_size > 1)
8947 if (cum->words)
8949 gpr_reg_base = 23;
8950 fpr_reg_base = 38;
8952 else
8954 gpr_reg_base = 25;
8955 fpr_reg_base = 34;
8958 /* Structures 5 to 8 bytes in size are passed in the general
8959 registers in the same manner as other non floating-point
8960 objects. The data is right-justified and zero-extended
8961 to 64 bits. This is opposite to the normal justification
8962 used on big endian targets and requires special treatment.
8963 We now define BLOCK_REG_PADDING to pad these objects. */
8964 if (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
8966 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8967 gen_rtx_REG (DImode, gpr_reg_base),
8968 const0_rtx);
8969 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
8972 else
8974 /* We have a single word (32 bits). A simple computation
8975 will get us the register #s we need. */
8976 gpr_reg_base = 26 - cum->words;
8977 fpr_reg_base = 32 + 2 * cum->words;
8981 /* Determine if the argument needs to be passed in both general and
8982 floating point registers. */
8983 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
8984 /* If we are doing soft-float with portable runtime, then there
8985 is no need to worry about FP regs. */
8986 && !TARGET_SOFT_FLOAT
8987 /* The parameter must be some kind of float, else we can just
8988 pass it in integer registers. */
8989 && FLOAT_MODE_P (mode)
8990 /* The target function must not have a prototype. */
8991 && cum->nargs_prototype <= 0
8992 /* libcalls do not need to pass items in both FP and general
8993 registers. */
8994 && type != NULL_TREE
8995 /* All this hair applies to "outgoing" args only. This includes
8996 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
8997 && !cum->incoming)
8998 /* Also pass outgoing floating arguments in both registers in indirect
8999 calls with the 32 bit ABI and the HP assembler since there is no
9000 way to specify argument locations in static functions. */
9001 || (!TARGET_64BIT
9002 && !TARGET_GAS
9003 && !cum->incoming
9004 && cum->indirect
9005 && FLOAT_MODE_P (mode)))
9007 retval
9008 = gen_rtx_PARALLEL
9009 (mode,
9010 gen_rtvec (2,
9011 gen_rtx_EXPR_LIST (VOIDmode,
9012 gen_rtx_REG (mode, fpr_reg_base),
9013 const0_rtx),
9014 gen_rtx_EXPR_LIST (VOIDmode,
9015 gen_rtx_REG (mode, gpr_reg_base),
9016 const0_rtx)));
9018 else
9020 /* See if we should pass this parameter in a general register. */
9021 if (TARGET_SOFT_FLOAT
9022 /* Indirect calls in the normal 32bit ABI require all arguments
9023 to be passed in general registers. */
9024 || (!TARGET_PORTABLE_RUNTIME
9025 && !TARGET_64BIT
9026 && !TARGET_ELF32
9027 && cum->indirect)
9028 /* If the parameter is not a floating point parameter, then
9029 it belongs in GPRs. */
9030 || !FLOAT_MODE_P (mode)
9031 /* A structure with a single SFmode field belongs in a GPR. */
9032 || (type && AGGREGATE_TYPE_P (type)))
9033 retval = gen_rtx_REG (mode, gpr_reg_base);
9034 else
9035 retval = gen_rtx_REG (mode, fpr_reg_base);
9037 return retval;
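/* In the dual GR/FP case above, the returned rtx has the schematic
   shape

       (parallel [(expr_list (reg:DF fpr_reg_base) (const_int 0))
		  (expr_list (reg:DF gpr_reg_base) (const_int 0))])

   so the caller loads the value into both register files and the
   callee can fetch it from either.  (Schematic RTL; DFmode chosen
   for illustration.)  */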
9041 /* If this arg would be passed totally in registers or totally on the stack,
9042 then this routine should return zero. */
9044 static int
9045 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9046 tree type, bool named ATTRIBUTE_UNUSED)
9048 unsigned int max_arg_words = 8;
9049 unsigned int offset = 0;
9051 if (!TARGET_64BIT)
9052 return 0;
9054 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9055 offset = 1;
9057 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9058 /* Arg fits fully into registers. */
9059 return 0;
9060 else if (cum->words + offset >= max_arg_words)
9061 /* Arg fully on the stack. */
9062 return 0;
9063 else
9064 /* Arg is split. */
9065 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
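/* Worked example of the split case: with cum->words == 6 and a
   four-word argument, 6 + 0 + 4 > 8 but 6 + 0 < 8, so the argument
   is split and (8 - 6 - 0) * 8 = 16 bytes are passed in registers
   with the remainder on the stack.  */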
9069 /* A get_unnamed_section callback for switching to the text section.
9071 This function is only used with SOM. Because we don't support
9072 named subspaces, we can only create a new subspace or switch back
9073 to the default text subspace. */
9075 static void
9076 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9078 gcc_assert (TARGET_SOM);
9079 if (TARGET_GAS)
9081 if (cfun && !cfun->machine->in_nsubspa)
9083 /* We only want to emit a .nsubspa directive once at the
9084 start of the function. */
9085 cfun->machine->in_nsubspa = 1;
9087 /* Create a new subspace for the text. This provides
9088 better stub placement and one-only functions. */
9089 if (cfun->decl
9090 && DECL_ONE_ONLY (cfun->decl)
9091 && !DECL_WEAK (cfun->decl))
9093 output_section_asm_op ("\t.SPACE $TEXT$\n"
9094 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9095 "ACCESS=44,SORT=24,COMDAT");
9096 return;
9099 else
9101 /* Either there isn't a current function or the body of the current
9102 function has been completed. So, we are changing to the
9103 text section to output debugging information. Thus, we
9104 need to forget that we are in the text section so that
9105 varasm.c will call us when text_section is selected again. */
9106 gcc_assert (!cfun || cfun->machine->in_nsubspa == 2);
9107 in_section = NULL;
9109 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9110 return;
9112 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9115 /* A get_unnamed_section callback for switching to comdat data
9116 sections. This function is only used with SOM. */
9118 static void
9119 som_output_comdat_data_section_asm_op (const void *data)
9121 in_section = NULL;
9122 output_section_asm_op (data);
9125 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9127 static void
9128 pa_som_asm_init_sections (void)
9130 text_section
9131 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9133 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9134 is not being generated. */
9135 som_readonly_data_section
9136 = get_unnamed_section (0, output_section_asm_op,
9137 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9139 /* When secondary definitions are not supported, SOM makes readonly
9140 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9141 the comdat flag. */
9142 som_one_only_readonly_data_section
9143 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9144 "\t.SPACE $TEXT$\n"
9145 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9146 "ACCESS=0x2c,SORT=16,COMDAT");
9149 /* When secondary definitions are not supported, SOM makes data one-only
9150 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9151 som_one_only_data_section
9152 = get_unnamed_section (SECTION_WRITE,
9153 som_output_comdat_data_section_asm_op,
9154 "\t.SPACE $PRIVATE$\n"
9155 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9156 "ACCESS=31,SORT=24,COMDAT");
9158 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9159 which reference data within the $TEXT$ space (for example constant
9160 strings in the $LIT$ subspace).
9162 The assemblers (GAS and HP as) both have problems with handling
9163 the difference of two symbols which is the other correct way to
9164 reference constant data during PIC code generation.
9166 So, there's no way to reference constant data which is in the
9167 $TEXT$ space during PIC generation. Instead place all constant
9168 data into the $PRIVATE$ subspace (this reduces sharing, but it
9169 works correctly). */
9170 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9172 /* We must not have a reference to an external symbol defined in a
9173 shared library in a readonly section, else the SOM linker will
9174 complain.
9176 So, we force exception information into the data section. */
9177 exception_section = data_section;
9180 /* On hpux10, the linker will give an error if we have a reference
9181 in the read-only data section to a symbol defined in a shared
9182 library. Therefore, expressions that might require a reloc can
9183 not be placed in the read-only data section. */
9185 static section *
9186 pa_select_section (tree exp, int reloc,
9187 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9189 if (TREE_CODE (exp) == VAR_DECL
9190 && TREE_READONLY (exp)
9191 && !TREE_THIS_VOLATILE (exp)
9192 && DECL_INITIAL (exp)
9193 && (DECL_INITIAL (exp) == error_mark_node
9194 || TREE_CONSTANT (DECL_INITIAL (exp)))
9195 && !reloc)
9197 if (TARGET_SOM
9198 && DECL_ONE_ONLY (exp)
9199 && !DECL_WEAK (exp))
9200 return som_one_only_readonly_data_section;
9201 else
9202 return readonly_data_section;
9204 else if (CONSTANT_CLASS_P (exp) && !reloc)
9205 return readonly_data_section;
9206 else if (TARGET_SOM
9207 && TREE_CODE (exp) == VAR_DECL
9208 && DECL_ONE_ONLY (exp)
9209 && !DECL_WEAK (exp))
9210 return som_one_only_data_section;
9211 else
9212 return data_section;
9215 static void
9216 pa_globalize_label (FILE *stream, const char *name)
9218 /* We only handle DATA objects here, functions are globalized in
9219 ASM_DECLARE_FUNCTION_NAME. */
9220 if (! FUNCTION_NAME_P (name))
9222 fputs ("\t.EXPORT ", stream);
9223 assemble_name (stream, name);
9224 fputs (",DATA\n", stream);
9228 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9230 static rtx
9231 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9232 int incoming ATTRIBUTE_UNUSED)
9234 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9237 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9239 bool
9240 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
9242 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9243 PA64 ABI says that objects larger than 128 bits are returned in memory.
9244 Note, int_size_in_bytes can return -1 if the size of the object is
9245 variable or larger than the maximum value that can be expressed as
9246 a HOST_WIDE_INT. It can also return zero for an empty type. The
9247 simplest way to handle variable and empty types is to pass them in
9248 memory. This avoids problems in defining the boundaries of argument
9249 slots, allocating registers, etc. */
9250 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9251 || int_size_in_bytes (type) <= 0);
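/* For example, a 12-byte struct is returned in memory with the SOM
   ABI (12 > 8) but in registers with the PA64 ABI (12 <= 16), while
   a variable-sized object (int_size_in_bytes returns -1) and an
   empty struct (size 0) are both forced into memory by the second
   test.  */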
9254 /* Structure to hold declaration and name of external symbols that are
9255 emitted by GCC. We generate a vector of these symbols and output them
9256 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9257 This avoids putting out names that are never really used. */
9259 typedef struct extern_symbol GTY(())
9261 tree decl;
9262 const char *name;
9263 } extern_symbol;
9265 /* Define gc'd vector type for extern_symbol. */
9266 DEF_VEC_O(extern_symbol);
9267 DEF_VEC_ALLOC_O(extern_symbol,gc);
9269 /* Vector of extern_symbol pointers. */
9270 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9272 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9273 /* Mark DECL (name NAME) as an external reference (assembler output
9274 file FILE). This saves the names to output at the end of the file
9275 if actually referenced. */
9277 void
9278 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9280 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9282 gcc_assert (file == asm_out_file);
9283 p->decl = decl;
9284 p->name = name;
9287 /* Output text required at the end of an assembler file.
9288 This includes deferred plabels and .import directives for
9289 all external symbols that were actually referenced. */
9291 static void
9292 pa_hpux_file_end (void)
9294 unsigned int i;
9295 extern_symbol *p;
9297 if (!NO_DEFERRED_PROFILE_COUNTERS)
9298 output_deferred_profile_counters ();
9300 output_deferred_plabels ();
9302 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9304 tree decl = p->decl;
9306 if (!TREE_ASM_WRITTEN (decl)
9307 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9308 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9311 VEC_free (extern_symbol, gc, extern_symbols);
9313 #endif
9315 #include "gt-pa.h"