/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
					   enum machine_mode,
					   secondary_reload_info *);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
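
/* Illustrative usage (added; not part of the original source): an option
   such as -mfixed-range=fr20-fr25,28-28 is parsed one comma-separated
   range at a time, marking each register in the range both fixed and
   call-used.  If the resulting set happens to cover every register from
   FP_REG_FIRST to FP_REG_LAST, the loop above also turns on
   MASK_DISABLE_FPREGS.  */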

/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}

void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
	  || CONST_OK_FOR_LETTER_P (intval, 'N')
	  || CONST_OK_FOR_LETTER_P (intval, 'K'));
}

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
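
/* Worked example (added for illustration; not part of the original
   source): for x = 0x1e0 = 0b1'1110'0000, lsb_mask = 0x20, so
   t = (0x1e + 0x20) & ~0x1f = 0x3e & ~0x1f = 0x20, a power of two, and
   the function returns true: zdepi can deposit the 5-bit field 0b01111
   at bit position 5 to produce 0x1e0.  For x = 0x218 = 0b10'0001'1000,
   t = (0x21 + 0x8) & ~0x7 = 0x28, not a power of two, so the function
   returns false; the field 0b1000011 is not a sign-extended 5-bit
   value.  */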

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
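
/* Worked examples (added for illustration; not part of the original
   source): and_mask_p (~0x70) inverts the mask to 0x70, and
   0x70 + 0x10 = 0x80 is a power of two, so the AND clears one
   contiguous field (the 1..10..01..1 pattern above).  ior_mask_p (0xff0):
   0xff0 + 0x10 = 0x1000 is a power of two, so a single depi can set the
   whole field.  A mask like 0x5 fails both tests because its set bits
   are not contiguous.  */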

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
		      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
				    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
	= gen_const_mem (Pmode,
			 gen_rtx_LO_SUM (Pmode, tmp_reg,
					 gen_rtx_UNSPEC (Pmode,
							 gen_rtvec (1, orig),
							 UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
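
/* For illustration (added; not part of the original source): for a
   global symbol SYM, the SYMBOL_REF case above produces RTL of the
   shape

       (set (reg TMP) (plus (reg PIC) (high (symbol_ref SYM))))
       (set (reg DEST)
	    (mem (lo_sum (reg TMP)
			 (unspec [(symbol_ref SYM)] UNSPEC_DLTIND14R))))

   where PIC is pic_offset_table_rtx, i.e. a load from the linkage table
   slot for SYM relative to the PIC register, with a REG_EQUAL note
   recording the original symbol.  */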

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
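
/* Summary of the cases above (added for illustration; not part of the
   original source): global-dynamic loads the symbol's descriptor with
   tgd_load and calls __tls_get_addr; local-dynamic makes a single
   __tls_get_addr call for the module base and then applies per-symbol
   offsets via tld_offset_load; initial-exec reads the thread pointer
   (tp_load) and adds an offset fetched with tie_load; local-exec adds a
   link-time-constant offset directly to the thread pointer
   (tle_load).  */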

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }
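
  /* Worked example (added for illustration; not part of the original
     source): for memory (x + 0x12345) in an integer mode, mask = 0x3fff
     and 0x12345 & 0x3fff = 0x2345 >= 0x2000, so we round up:
     Y = 0x14000, Z = x + 0x14000, and the reference becomes
     memory (Z + (0x12345 - 0x14000)) = memory (Z - 7355), whose
     displacement fits in the signed 14 bits that ldo allows.  Nearby
     references such as x + 0x12400 round to the same Z, so CSE can
     share the base computation.  */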

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }
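
  /* For illustration (added; not part of the original source): an
     address such as (plus (mult (reg A) (const_int 8)) (reg B)) is
     forced into a fresh pseudo here so that later passes can match it
     against the shNadd patterns; shadd_constant_p accepts the scale
     factors 2, 4 and 8, corresponding to sh1add, sh2add and sh3add.  */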

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big but can be divided evenly by shadd_const,
	     divide it and add the quotient to (reg).  This allows more
	     scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
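
/* Examples (added for illustration; not part of the original source):
   (reg), (plus (reg) (const_int 40)) and (lo_sum (reg) (symbol_ref))
   all cost 1; (high (symbol_ref)) costs 2; anything else, e.g. a bare
   (symbol_ref), falls through to the default cost of 4.  */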

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
									0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.  Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
									0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
							     0),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress)
		  && flag_pic)
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);

	    }
	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
1850 else if (GET_CODE (operand1) != CONST_INT
1851 || !cint_ok_for_move (INTVAL (operand1)))
1853 rtx insn, temp;
1854 rtx op1 = operand1;
1855 HOST_WIDE_INT value = 0;
1856 HOST_WIDE_INT insv = 0;
1857 int insert = 0;
1859 if (GET_CODE (operand1) == CONST_INT)
1860 value = INTVAL (operand1);
1862 if (TARGET_64BIT
1863 && GET_CODE (operand1) == CONST_INT
1864 && HOST_BITS_PER_WIDE_INT > 32
1865 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1867 HOST_WIDE_INT nval;
1869 /* Extract the low order 32 bits of the value and sign extend.
1870 If the new value is the same as the original value, we can
1871 use the original value as-is. If the new value is
1872 different, we use it and insert the most-significant 32-bits
1873 of the original value into the final result. */
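/* For example, value = 0x123456789 sign extends from its low 32 bits
   to nval = 0x23456789.  Since nval differs from value, insv becomes
   value >> 32 = 0x1 and INSERT is set so that the code further below
   deposits those upper bits into the final result.  */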
1874 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1875 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
1876 if (value != nval)
1878 #if HOST_BITS_PER_WIDE_INT > 32
1879 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1880 #endif
1881 insert = 1;
1882 value = nval;
1883 operand1 = GEN_INT (nval);
1887 if (reload_in_progress || reload_completed)
1888 temp = scratch_reg ? scratch_reg : operand0;
1889 else
1890 temp = gen_reg_rtx (mode);
1892 /* We don't directly split DImode constants on 32-bit targets
1893 because PLUS uses an 11-bit immediate and the insn sequence
1894 generated is not as efficient as the one using HIGH/LO_SUM. */
1895 if (GET_CODE (operand1) == CONST_INT
1896 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1897 && !insert)
1899 /* Directly break constant into high and low parts. This
1900 provides better optimization opportunities because various
1901 passes recognize constants split with PLUS but not LO_SUM.
1902 We use a 14-bit signed low part except when the addition
1903 of 0x4000 to the high part might change the sign of the
1904 high part. */
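/* For example, value = 0x3000 gives low = 0x3000 and high = 0.
   Since low >= 0x2000 it will not fit in a signed 14-bit immediate,
   so high is bumped to 0x4000 and low becomes 0x3000 - 0x4000 =
   -0x1000, which does fit.  */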
1905 HOST_WIDE_INT low = value & 0x3fff;
1906 HOST_WIDE_INT high = value & ~ 0x3fff;
1908 if (low >= 0x2000)
1910 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
1911 high += 0x2000;
1912 else
1913 high += 0x4000;
1916 low = value - high;
1918 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
1919 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1921 else
1923 emit_insn (gen_rtx_SET (VOIDmode, temp,
1924 gen_rtx_HIGH (mode, operand1)));
1925 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
1928 insn = emit_move_insn (operands[0], operands[1]);
1930 /* Now insert the most significant 32 bits of the value
1931 into the register. When we don't have a second register
1932 available, it could take up to nine instructions to load
1933 a 64-bit integer constant. Prior to reload, we force
1934 constants that would take more than three instructions
1935 to load to the constant pool. During and after reload,
1936 we have to handle all possible values. */
1937 if (insert)
1939 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
1940 register and the value to be inserted is outside the
1941 range that can be loaded with three depdi instructions. */
1942 if (temp != operand0 && (insv >= 16384 || insv < -16384))
1944 operand1 = GEN_INT (insv);
1946 emit_insn (gen_rtx_SET (VOIDmode, temp,
1947 gen_rtx_HIGH (mode, operand1)));
1948 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
1949 emit_insn (gen_insv (operand0, GEN_INT (32),
1950 const0_rtx, temp));
1952 else
1954 int len = 5, pos = 27;
1956 /* Insert the bits using the depdi instruction. */
1957 while (pos >= 0)
1959 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
1960 HOST_WIDE_INT sign = v5 < 0;
1962 /* Left extend the insertion. */
1963 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
1964 while (pos > 0 && (insv & 1) == sign)
1966 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
1967 len += 1;
1968 pos -= 1;
1971 emit_insn (gen_insv (operand0, GEN_INT (len),
1972 GEN_INT (pos), GEN_INT (v5)));
1974 len = pos > 0 && pos < 5 ? pos : 5;
1975 pos -= len;
1980 REG_NOTES (insn)
1981 = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));
1983 return 1;
1986 /* Now have insn-emit do whatever it normally does. */
1987 return 0;
1990 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
1991 it will need a link/runtime reloc). */
1993 int
1994 reloc_needed (tree exp)
1996 int reloc = 0;
1998 switch (TREE_CODE (exp))
2000 case ADDR_EXPR:
2001 return 1;
2003 case PLUS_EXPR:
2004 case MINUS_EXPR:
2005 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2006 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2007 break;
2009 case NOP_EXPR:
2010 case CONVERT_EXPR:
2011 case NON_LVALUE_EXPR:
2012 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2013 break;
2015 case CONSTRUCTOR:
2017 tree value;
2018 unsigned HOST_WIDE_INT ix;
2020 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2021 if (value)
2022 reloc |= reloc_needed (value);
2024 break;
2026 case ERROR_MARK:
2027 break;
2029 default:
2030 break;
2032 return reloc;
2035 /* Does operand (which is a symbolic_operand) live in text space?
2036 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2037 will be true. */
2039 int
2040 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2042 if (GET_CODE (operand) == CONST)
2043 operand = XEXP (XEXP (operand, 0), 0);
2044 if (flag_pic)
2046 if (GET_CODE (operand) == SYMBOL_REF)
2047 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2049 else
2051 if (GET_CODE (operand) == SYMBOL_REF)
2052 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2054 return 1;
2058 /* Return the best assembler insn template
2059 for moving operands[1] into operands[0] as a fullword. */
2060 const char *
2061 singlemove_string (rtx *operands)
2063 HOST_WIDE_INT intval;
2065 if (GET_CODE (operands[0]) == MEM)
2066 return "stw %r1,%0";
2067 if (GET_CODE (operands[1]) == MEM)
2068 return "ldw %1,%0";
2069 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2071 long i;
2072 REAL_VALUE_TYPE d;
2074 gcc_assert (GET_MODE (operands[1]) == SFmode);
2076 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2077 bit pattern. */
2078 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2079 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2081 operands[1] = GEN_INT (i);
2082 /* Fall through to CONST_INT case. */
2084 if (GET_CODE (operands[1]) == CONST_INT)
2086 intval = INTVAL (operands[1]);
2088 if (VAL_14_BITS_P (intval))
2089 return "ldi %1,%0";
2090 else if ((intval & 0x7ff) == 0)
2091 return "ldil L'%1,%0";
2092 else if (zdepi_cint_p (intval))
2093 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2094 else
2095 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2097 return "copy %1,%0";
2101 /* Compute position (in OP[1]) and width (in OP[2])
2102 useful for copying IMM to a register using the zdepi
2103 instructions. Store the immediate value to insert in OP[0]. */
2104 static void
2105 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2107 int lsb, len;
2109 /* Find the least significant set bit in IMM. */
2110 for (lsb = 0; lsb < 32; lsb++)
2112 if ((imm & 1) != 0)
2113 break;
2114 imm >>= 1;
2117 /* Choose variants based on *sign* of the 5-bit field. */
2118 if ((imm & 0x10) == 0)
2119 len = (lsb <= 28) ? 4 : 32 - lsb;
2120 else
2122 /* Find the width of the bitstring in IMM. */
2123 for (len = 5; len < 32; len++)
2125 if ((imm & (1 << len)) == 0)
2126 break;
2129 /* Sign extend IMM as a 5-bit value. */
2130 imm = (imm & 0xf) - 0x10;
2133 op[0] = imm;
2134 op[1] = 31 - lsb;
2135 op[2] = len;
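/* For example, imm = 0xf0 yields lsb = 4 and a positive 4-bit field,
   so op[0] = 0xf, op[1] = 31 - 4 = 27 and op[2] = 4.  The resulting
   "zdepi 15,27,4" recreates 0xf0 (PA numbers bits from the most
   significant end, so position 27 is bit 4 from the LSB end).  */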
2138 /* Compute position (in OP[1]) and width (in OP[2])
2139 useful for copying IMM to a register using the depdi,z
2140 instructions. Store the immediate value to insert in OP[0]. */
2141 void
2142 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2144 HOST_WIDE_INT lsb, len;
2146 /* Find the least significant set bit in IMM. */
2147 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2149 if ((imm & 1) != 0)
2150 break;
2151 imm >>= 1;
2154 /* Choose variants based on *sign* of the 5-bit field. */
2155 if ((imm & 0x10) == 0)
2156 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2157 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2158 else
2160 /* Find the width of the bitstring in IMM. */
2161 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2163 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2164 break;
2167 /* Sign extend IMM as a 5-bit value. */
2168 imm = (imm & 0xf) - 0x10;
2171 op[0] = imm;
2172 op[1] = 63 - lsb;
2173 op[2] = len;
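/* The 64-bit case is analogous: imm = 0xf0 again gives a field of
   length 4 at lsb = 4, but the position is expressed as 63 - lsb = 59
   for the depdi,z instruction.  */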
2176 /* Output assembler code to perform a doubleword move insn
2177 with operands OPERANDS. */
2179 const char *
2180 output_move_double (rtx *operands)
2182 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2183 rtx latehalf[2];
2184 rtx addreg0 = 0, addreg1 = 0;
2186 /* First classify both operands. */
2188 if (REG_P (operands[0]))
2189 optype0 = REGOP;
2190 else if (offsettable_memref_p (operands[0]))
2191 optype0 = OFFSOP;
2192 else if (GET_CODE (operands[0]) == MEM)
2193 optype0 = MEMOP;
2194 else
2195 optype0 = RNDOP;
2197 if (REG_P (operands[1]))
2198 optype1 = REGOP;
2199 else if (CONSTANT_P (operands[1]))
2200 optype1 = CNSTOP;
2201 else if (offsettable_memref_p (operands[1]))
2202 optype1 = OFFSOP;
2203 else if (GET_CODE (operands[1]) == MEM)
2204 optype1 = MEMOP;
2205 else
2206 optype1 = RNDOP;
2208 /* Check for the cases that the operand constraints are not
2209 supposed to allow. */
2210 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2212 /* Handle auto decrementing and incrementing loads and stores
2213 specifically, since the structure of the function doesn't work
2214 for them without major modification. Do it better when we teach
2215 this port about the general inc/dec addressing of PA.
2216 (This was written by tege. Chide him if it doesn't work.) */
2218 if (optype0 == MEMOP)
2220 /* We have to output the address syntax ourselves, since print_operand
2221 doesn't deal with the addresses we want to use. Fix this later. */
2223 rtx addr = XEXP (operands[0], 0);
2224 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2226 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2228 operands[0] = XEXP (addr, 0);
2229 gcc_assert (GET_CODE (operands[1]) == REG
2230 && GET_CODE (operands[0]) == REG);
2232 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2234 /* No overlap between high target register and address
2235 register. (We do this in a non-obvious way to
2236 save a register file writeback) */
2237 if (GET_CODE (addr) == POST_INC)
2238 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2239 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2241 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2243 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2245 operands[0] = XEXP (addr, 0);
2246 gcc_assert (GET_CODE (operands[1]) == REG
2247 && GET_CODE (operands[0]) == REG);
2249 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2250 /* No overlap between high target register and address
2251 register. (We do this in a non-obvious way to save a
2252 register file writeback) */
2253 if (GET_CODE (addr) == PRE_INC)
2254 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2255 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2258 if (optype1 == MEMOP)
2260 /* We have to output the address syntax ourselves, since print_operand
2261 doesn't deal with the addresses we want to use. Fix this later. */
2263 rtx addr = XEXP (operands[1], 0);
2264 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2266 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2268 operands[1] = XEXP (addr, 0);
2269 gcc_assert (GET_CODE (operands[0]) == REG
2270 && GET_CODE (operands[1]) == REG);
2272 if (!reg_overlap_mentioned_p (high_reg, addr))
2274 /* No overlap between high target register and address
2275 register. (We do this in a non-obvious way to
2276 save a register file writeback) */
2277 if (GET_CODE (addr) == POST_INC)
2278 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2279 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2281 else
2283 /* This is an undefined situation. We should load into the
2284 address register *and* update that register. Probably
2285 we don't need to handle this at all. */
2286 if (GET_CODE (addr) == POST_INC)
2287 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2288 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2291 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2293 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2295 operands[1] = XEXP (addr, 0);
2296 gcc_assert (GET_CODE (operands[0]) == REG
2297 && GET_CODE (operands[1]) == REG);
2299 if (!reg_overlap_mentioned_p (high_reg, addr))
2301 /* No overlap between high target register and address
2302 register. (We do this in a non-obvious way to
2303 save a register file writeback) */
2304 if (GET_CODE (addr) == PRE_INC)
2305 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2306 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2308 else
2310 /* This is an undefined situation. We should load into the
2311 address register *and* update that register. Probably
2312 we don't need to handle this at all. */
2313 if (GET_CODE (addr) == PRE_INC)
2314 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2315 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2318 else if (GET_CODE (addr) == PLUS
2319 && GET_CODE (XEXP (addr, 0)) == MULT)
2321 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2323 if (!reg_overlap_mentioned_p (high_reg, addr))
2325 rtx xoperands[4];
2327 xoperands[0] = high_reg;
2328 xoperands[1] = XEXP (addr, 1);
2329 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2330 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2331 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2332 xoperands);
2333 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2335 else
2337 rtx xoperands[4];
2339 xoperands[0] = high_reg;
2340 xoperands[1] = XEXP (addr, 1);
2341 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2342 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2343 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2344 xoperands);
2345 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2350 /* If an operand is an unoffsettable memory ref, find a register
2351 we can increment temporarily to make it refer to the second word. */
2353 if (optype0 == MEMOP)
2354 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2356 if (optype1 == MEMOP)
2357 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2359 /* Ok, we can do one word at a time.
2360 Normally we do the low-numbered word first.
2362 In either case, set up in LATEHALF the operands to use
2363 for the high-numbered word and in some cases alter the
2364 operands in OPERANDS to be suitable for the low-numbered word. */
2366 if (optype0 == REGOP)
2367 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2368 else if (optype0 == OFFSOP)
2369 latehalf[0] = adjust_address (operands[0], SImode, 4);
2370 else
2371 latehalf[0] = operands[0];
2373 if (optype1 == REGOP)
2374 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2375 else if (optype1 == OFFSOP)
2376 latehalf[1] = adjust_address (operands[1], SImode, 4);
2377 else if (optype1 == CNSTOP)
2378 split_double (operands[1], &operands[1], &latehalf[1]);
2379 else
2380 latehalf[1] = operands[1];
2382 /* If the first move would clobber the source of the second one,
2383 do them in the other order.
2385 This can happen in two cases:
2387 mem -> register where the first half of the destination register
2388 is the same register used in the memory's address. Reload
2389 can create such insns.
2391 mem in this case will be either register indirect or register
2392 indirect plus a valid offset.
2394 register -> register move where REGNO(dst) == REGNO(src + 1)
2395 someone (Tim/Tege?) claimed this can happen for parameter loads.
2397 Handle mem -> register case first. */
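/* For example, the DImode load (set (reg:DI 28) (mem:DI (reg:SI 28)))
   must emit "ldw 4(%r28),%r29" before "ldw 0(%r28),%r28"; doing the
   low-numbered word first would clobber the address register.  */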
2398 if (optype0 == REGOP
2399 && (optype1 == MEMOP || optype1 == OFFSOP)
2400 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2401 operands[1], 0))
2403 /* Do the late half first. */
2404 if (addreg1)
2405 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2406 output_asm_insn (singlemove_string (latehalf), latehalf);
2408 /* Then clobber. */
2409 if (addreg1)
2410 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2411 return singlemove_string (operands);
2414 /* Now handle register -> register case. */
2415 if (optype0 == REGOP && optype1 == REGOP
2416 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2418 output_asm_insn (singlemove_string (latehalf), latehalf);
2419 return singlemove_string (operands);
2422 /* Normal case: do the two words, low-numbered first. */
2424 output_asm_insn (singlemove_string (operands), operands);
2426 /* Make any unoffsettable addresses point at high-numbered word. */
2427 if (addreg0)
2428 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2429 if (addreg1)
2430 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2432 /* Do that word. */
2433 output_asm_insn (singlemove_string (latehalf), latehalf);
2435 /* Undo the adds we just did. */
2436 if (addreg0)
2437 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2438 if (addreg1)
2439 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2441 return "";
2444 const char *
2445 output_fp_move_double (rtx *operands)
2447 if (FP_REG_P (operands[0]))
2449 if (FP_REG_P (operands[1])
2450 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2451 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2452 else
2453 output_asm_insn ("fldd%F1 %1,%0", operands);
2455 else if (FP_REG_P (operands[1]))
2457 output_asm_insn ("fstd%F0 %1,%0", operands);
2459 else
2461 rtx xoperands[2];
2463 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2465 /* This is a pain. You have to be prepared to deal with an
2466 arbitrary address here including pre/post increment/decrement,
2468 so avoid this in the MD. */
2469 gcc_assert (GET_CODE (operands[0]) == REG);
2471 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2472 xoperands[0] = operands[0];
2473 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2475 return "";
2478 /* Return a REG that occurs in ADDR with coefficient 1.
2479 ADDR can be effectively incremented by incrementing REG. */
2481 static rtx
2482 find_addr_reg (rtx addr)
2484 while (GET_CODE (addr) == PLUS)
2486 if (GET_CODE (XEXP (addr, 0)) == REG)
2487 addr = XEXP (addr, 0);
2488 else if (GET_CODE (XEXP (addr, 1)) == REG)
2489 addr = XEXP (addr, 1);
2490 else if (CONSTANT_P (XEXP (addr, 0)))
2491 addr = XEXP (addr, 1);
2492 else if (CONSTANT_P (XEXP (addr, 1)))
2493 addr = XEXP (addr, 0);
2494 else
2495 gcc_unreachable ();
2497 gcc_assert (GET_CODE (addr) == REG);
2498 return addr;
2501 /* Emit code to perform a block move.
2503 OPERANDS[0] is the destination pointer as a REG, clobbered.
2504 OPERANDS[1] is the source pointer as a REG, clobbered.
2505 OPERANDS[2] is a register for temporary storage.
2506 OPERANDS[3] is a register for temporary storage.
2507 OPERANDS[4] is the size as a CONST_INT
2508 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2509 OPERANDS[6] is another temporary register. */
2511 const char *
2512 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2514 int align = INTVAL (operands[5]);
2515 unsigned long n_bytes = INTVAL (operands[4]);
2517 /* We can't move more than a word at a time because the PA
2518 has no integer move insns longer than a word. (Could use fp mem ops?) */
2519 if (align > (TARGET_64BIT ? 8 : 4))
2520 align = (TARGET_64BIT ? 8 : 4);
2522 /* Note that we know each loop below will execute at least twice
2523 (else we would have open-coded the copy). */
2524 switch (align)
2526 case 8:
2527 /* Pre-adjust the loop counter. */
2528 operands[4] = GEN_INT (n_bytes - 16);
2529 output_asm_insn ("ldi %4,%2", operands);
2531 /* Copying loop. */
2532 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2533 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2534 output_asm_insn ("std,ma %3,8(%0)", operands);
2535 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2536 output_asm_insn ("std,ma %6,8(%0)", operands);
2538 /* Handle the residual. There could be up to 15 bytes of
2539 residual to copy! */
2540 if (n_bytes % 16 != 0)
2542 operands[4] = GEN_INT (n_bytes % 8);
2543 if (n_bytes % 16 >= 8)
2544 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2545 if (n_bytes % 8 != 0)
2546 output_asm_insn ("ldd 0(%1),%6", operands);
2547 if (n_bytes % 16 >= 8)
2548 output_asm_insn ("std,ma %3,8(%0)", operands);
2549 if (n_bytes % 8 != 0)
2550 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2552 return "";
2554 case 4:
2555 /* Pre-adjust the loop counter. */
2556 operands[4] = GEN_INT (n_bytes - 8);
2557 output_asm_insn ("ldi %4,%2", operands);
2559 /* Copying loop. */
2560 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2561 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2562 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2563 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2564 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2566 /* Handle the residual. There could be up to 7 bytes of
2567 residual to copy! */
2568 if (n_bytes % 8 != 0)
2570 operands[4] = GEN_INT (n_bytes % 4);
2571 if (n_bytes % 8 >= 4)
2572 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2573 if (n_bytes % 4 != 0)
2574 output_asm_insn ("ldw 0(%1),%6", operands);
2575 if (n_bytes % 8 >= 4)
2576 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2577 if (n_bytes % 4 != 0)
2578 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2580 return "";
2582 case 2:
2583 /* Pre-adjust the loop counter. */
2584 operands[4] = GEN_INT (n_bytes - 4);
2585 output_asm_insn ("ldi %4,%2", operands);
2587 /* Copying loop. */
2588 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2589 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2590 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2591 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2592 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2594 /* Handle the residual. */
2595 if (n_bytes % 4 != 0)
2597 if (n_bytes % 4 >= 2)
2598 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2599 if (n_bytes % 2 != 0)
2600 output_asm_insn ("ldb 0(%1),%6", operands);
2601 if (n_bytes % 4 >= 2)
2602 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2603 if (n_bytes % 2 != 0)
2604 output_asm_insn ("stb %6,0(%0)", operands);
2606 return "";
2608 case 1:
2609 /* Pre-adjust the loop counter. */
2610 operands[4] = GEN_INT (n_bytes - 2);
2611 output_asm_insn ("ldi %4,%2", operands);
2613 /* Copying loop. */
2614 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2615 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2616 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2617 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2618 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2620 /* Handle the residual. */
2621 if (n_bytes % 2 != 0)
2623 output_asm_insn ("ldb 0(%1),%3", operands);
2624 output_asm_insn ("stb %3,0(%0)", operands);
2626 return "";
2628 default:
2629 gcc_unreachable ();
2633 /* Count the number of insns necessary to handle this block move.
2635 Basic structure is the same as output_block_move, except that we
2636 count insns rather than emit them. */
2638 static int
2639 compute_movmem_length (rtx insn)
2641 rtx pat = PATTERN (insn);
2642 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2643 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2644 unsigned int n_insns = 0;
2646 /* We can't move more than a word at a time because the PA
2647 has no integer move insns longer than a word. (Could use fp mem ops?) */
2648 if (align > (TARGET_64BIT ? 8 : 4))
2649 align = (TARGET_64BIT ? 8 : 4);
2651 /* The basic copying loop. */
2652 n_insns = 6;
2654 /* Residuals. */
2655 if (n_bytes % (2 * align) != 0)
2657 if ((n_bytes % (2 * align)) >= align)
2658 n_insns += 2;
2660 if ((n_bytes % align) != 0)
2661 n_insns += 2;
2664 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2665 return n_insns * 4;
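/* For example, with align = 4 and n_bytes = 14: the basic loop costs
   6 insns, the word residual (14 % 8 >= 4) adds 2 insns, and the byte
   residual (14 % 4 != 0) adds 2 more, for 10 insns = 40 bytes.  */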
2668 /* Emit code to perform a block clear.
2670 OPERANDS[0] is the destination pointer as a REG, clobbered.
2671 OPERANDS[1] is a register for temporary storage.
2672 OPERANDS[2] is the size as a CONST_INT
2673 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2675 const char *
2676 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2678 int align = INTVAL (operands[3]);
2679 unsigned long n_bytes = INTVAL (operands[2]);
2681 /* We can't clear more than a word at a time because the PA
2682 has no integer store insns longer than a word. */
2683 if (align > (TARGET_64BIT ? 8 : 4))
2684 align = (TARGET_64BIT ? 8 : 4);
2686 /* Note that we know each loop below will execute at least twice
2687 (else we would have open-coded the clear). */
2688 switch (align)
2690 case 8:
2691 /* Pre-adjust the loop counter. */
2692 operands[2] = GEN_INT (n_bytes - 16);
2693 output_asm_insn ("ldi %2,%1", operands);
2695 /* Loop. */
2696 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2697 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2698 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2700 /* Handle the residual. There could be up to 15 bytes of
2701 residual to clear! */
2702 if (n_bytes % 16 != 0)
2704 operands[2] = GEN_INT (n_bytes % 8);
2705 if (n_bytes % 16 >= 8)
2706 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2707 if (n_bytes % 8 != 0)
2708 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2710 return "";
2712 case 4:
2713 /* Pre-adjust the loop counter. */
2714 operands[2] = GEN_INT (n_bytes - 8);
2715 output_asm_insn ("ldi %2,%1", operands);
2717 /* Loop. */
2718 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2719 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2720 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2722 /* Handle the residual. There could be up to 7 bytes of
2723 residual to clear! */
2724 if (n_bytes % 8 != 0)
2726 operands[2] = GEN_INT (n_bytes % 4);
2727 if (n_bytes % 8 >= 4)
2728 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2729 if (n_bytes % 4 != 0)
2730 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2732 return "";
2734 case 2:
2735 /* Pre-adjust the loop counter. */
2736 operands[2] = GEN_INT (n_bytes - 4);
2737 output_asm_insn ("ldi %2,%1", operands);
2739 /* Loop. */
2740 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2741 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2742 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2744 /* Handle the residual. */
2745 if (n_bytes % 4 != 0)
2747 if (n_bytes % 4 >= 2)
2748 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2749 if (n_bytes % 2 != 0)
2750 output_asm_insn ("stb %%r0,0(%0)", operands);
2752 return "";
2754 case 1:
2755 /* Pre-adjust the loop counter. */
2756 operands[2] = GEN_INT (n_bytes - 2);
2757 output_asm_insn ("ldi %2,%1", operands);
2759 /* Loop. */
2760 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2761 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2762 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2764 /* Handle the residual. */
2765 if (n_bytes % 2 != 0)
2766 output_asm_insn ("stb %%r0,0(%0)", operands);
2768 return "";
2770 default:
2771 gcc_unreachable ();
2775 /* Count the number of insns necessary to handle this block clear.
2777 Basic structure is the same as output_block_clear, except that we
2778 count insns rather than emit them. */
2780 static int
2781 compute_clrmem_length (rtx insn)
2783 rtx pat = PATTERN (insn);
2784 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2785 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2786 unsigned int n_insns = 0;
2788 /* We can't clear more than a word at a time because the PA
2789 has no integer store insns longer than a word. */
2790 if (align > (TARGET_64BIT ? 8 : 4))
2791 align = (TARGET_64BIT ? 8 : 4);
2793 /* The basic loop. */
2794 n_insns = 4;
2796 /* Residuals. */
2797 if (n_bytes % (2 * align) != 0)
2799 if ((n_bytes % (2 * align)) >= align)
2800 n_insns++;
2802 if ((n_bytes % align) != 0)
2803 n_insns++;
2806 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2807 return n_insns * 4;
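/* For example, with align = 4 and n_bytes = 14 the clear loop above
   costs 4 insns; the word residual (14 % 8 >= 4) adds one insn and
   the byte residual (14 % 4 != 0) another, for 6 insns = 24 bytes.  */

/* Return a string to perform a bitwise-and of operands[1] with
   operands[2], storing the result in operands[0].  */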
2811 const char *
2812 output_and (rtx *operands)
2814 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2816 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2817 int ls0, ls1, ms0, p, len;
2819 for (ls0 = 0; ls0 < 32; ls0++)
2820 if ((mask & (1 << ls0)) == 0)
2821 break;
2823 for (ls1 = ls0; ls1 < 32; ls1++)
2824 if ((mask & (1 << ls1)) != 0)
2825 break;
2827 for (ms0 = ls1; ms0 < 32; ms0++)
2828 if ((mask & (1 << ms0)) == 0)
2829 break;
2831 gcc_assert (ms0 == 32);
2833 if (ls1 == 32)
2835 len = ls0;
2837 gcc_assert (len);
2839 operands[2] = GEN_INT (len);
2840 return "{extru|extrw,u} %1,31,%2,%0";
2842 else
2844 /* We could use this `depi' for the case above as well, but `depi'
2845 requires one more register file access than an `extru'. */
2847 p = 31 - ls0;
2848 len = ls1 - ls0;
2850 operands[2] = GEN_INT (p);
2851 operands[3] = GEN_INT (len);
2852 return "{depi|depwi} 0,%2,%3,%0";
2855 else
2856 return "and %1,%2,%0";
2859 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2860 storing the result in operands[0]. */
2861 const char *
2862 output_64bit_and (rtx *operands)
2864 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2866 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2867 int ls0, ls1, ms0, p, len;
2869 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2870 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2871 break;
2873 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2874 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2875 break;
2877 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2878 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2879 break;
2881 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2883 if (ls1 == HOST_BITS_PER_WIDE_INT)
2885 len = ls0;
2887 gcc_assert (len);
2889 operands[2] = GEN_INT (len);
2890 return "extrd,u %1,63,%2,%0";
2892 else
2894 /* We could use this `depdi' for the case above as well, but `depdi'
2895 requires one more register file access than an `extrd,u'. */
2897 p = 63 - ls0;
2898 len = ls1 - ls0;
2900 operands[2] = GEN_INT (p);
2901 operands[3] = GEN_INT (len);
2902 return "depdi 0,%2,%3,%0";
2905 else
2906 return "and %1,%2,%0";
2909 const char *
2910 output_ior (rtx *operands)
2912 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2913 int bs0, bs1, p, len;
2915 if (INTVAL (operands[2]) == 0)
2916 return "copy %1,%0";
2918 for (bs0 = 0; bs0 < 32; bs0++)
2919 if ((mask & (1 << bs0)) != 0)
2920 break;
2922 for (bs1 = bs0; bs1 < 32; bs1++)
2923 if ((mask & (1 << bs1)) == 0)
2924 break;
2926 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2928 p = 31 - bs0;
2929 len = bs1 - bs0;
2931 operands[2] = GEN_INT (p);
2932 operands[3] = GEN_INT (len);
2933 return "{depi|depwi} -1,%2,%3,%0";
2936 /* Return a string to perform a bitwise inclusive-or of operands[1] with operands[2]
2937 storing the result in operands[0]. */
2938 const char *
2939 output_64bit_ior (rtx *operands)
2941 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2942 int bs0, bs1, p, len;
2944 if (INTVAL (operands[2]) == 0)
2945 return "copy %1,%0";
2947 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
2948 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
2949 break;
2951 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
2952 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
2953 break;
2955 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
2956 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2958 p = 63 - bs0;
2959 len = bs1 - bs0;
2961 operands[2] = GEN_INT (p);
2962 operands[3] = GEN_INT (len);
2963 return "depdi -1,%2,%3,%0";
2966 /* Target hook for assembling integer objects. This code handles
2967 aligned SI and DI integers specially since function references
2968 must be preceded by P%. */
2970 static bool
2971 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
2973 if (size == UNITS_PER_WORD
2974 && aligned_p
2975 && function_label_operand (x, VOIDmode))
2977 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
2978 output_addr_const (asm_out_file, x);
2979 fputc ('\n', asm_out_file);
2980 return true;
2982 return default_assemble_integer (x, size, aligned_p);
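/* For a function symbol this emits, e.g., "\t.word\tP%foo" (or .dword
   under the 64-bit ABI).  The P% prefix requests a procedure label
   (plabel) rather than the raw code address, which is how function
   pointers are represented on HP-PA.  */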
2985 /* Output an ascii string. */
2986 void
2987 output_ascii (FILE *file, const char *p, int size)
2989 int i;
2990 int chars_output;
2991 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
2993 /* The HP assembler can only take strings of 256 characters at one
2994 time. This is a limitation on input line length, *not* the
2995 length of the string. Sigh. Even worse, it seems that the
2996 restriction is in number of input characters (see \xnn &
2997 \whatever). So we have to do this very carefully. */
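/* For example, the byte 0x07 is emitted as the four input characters
   \x07, and a quote or backslash costs two, which is why the loop
   below starts a fresh .STRING directive once 243 characters have
   accumulated: the worst-case 16-character group still fits within
   the 256-character input line limit.  */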
2999 fputs ("\t.STRING \"", file);
3001 chars_output = 0;
3002 for (i = 0; i < size; i += 4)
3004 int co = 0;
3005 int io = 0;
3006 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3008 register unsigned int c = (unsigned char) p[i + io];
3010 if (c == '\"' || c == '\\')
3011 partial_output[co++] = '\\';
3012 if (c >= ' ' && c < 0177)
3013 partial_output[co++] = c;
3014 else
3016 unsigned int hexd;
3017 partial_output[co++] = '\\';
3018 partial_output[co++] = 'x';
3019 hexd = c / 16 + '0';
3020 if (hexd > '9')
3021 hexd -= '9' - 'a' + 1;
3022 partial_output[co++] = hexd;
3023 hexd = c % 16 + '0';
3024 if (hexd > '9')
3025 hexd -= '9' - 'a' + 1;
3026 partial_output[co++] = hexd;
3029 if (chars_output + co > 243)
3031 fputs ("\"\n\t.STRING \"", file);
3032 chars_output = 0;
3034 fwrite (partial_output, 1, (size_t) co, file);
3035 chars_output += co;
3036 co = 0;
3038 fputs ("\"\n", file);
3041 /* Try to rewrite floating point comparisons & branches to avoid
3042 useless add,tr insns.
3044 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3045 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3046 first attempt to remove useless add,tr insns. It is zero
3047 for the second pass as reorg sometimes leaves bogus REG_DEAD
3048 notes lying around.
3050 When CHECK_NOTES is zero we can only eliminate add,tr insns
3051 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3052 instructions. */
3053 static void
3054 remove_useless_addtr_insns (int check_notes)
3056 rtx insn;
3057 static int pass = 0;
3059 /* This is fairly cheap, so always run it when optimizing. */
3060 if (optimize > 0)
3062 int fcmp_count = 0;
3063 int fbranch_count = 0;
3065 /* Walk all the insns in this function looking for fcmp & fbranch
3066 instructions. Keep track of how many of each we find. */
3067 for (insn = get_insns (); insn; insn = next_insn (insn))
3069 rtx tmp;
3071 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3072 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3073 continue;
3075 tmp = PATTERN (insn);
3077 /* It must be a set. */
3078 if (GET_CODE (tmp) != SET)
3079 continue;
3081 /* If the destination is CCFP, then we've found an fcmp insn. */
3082 tmp = SET_DEST (tmp);
3083 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3085 fcmp_count++;
3086 continue;
3089 tmp = PATTERN (insn);
3090 /* If this is an fbranch instruction, bump the fbranch counter. */
3091 if (GET_CODE (tmp) == SET
3092 && SET_DEST (tmp) == pc_rtx
3093 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3094 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3095 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3096 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3098 fbranch_count++;
3099 continue;
3104 /* Find all floating point compare + branch insns. If possible,
3105 reverse the comparison & the branch to avoid add,tr insns. */
3106 for (insn = get_insns (); insn; insn = next_insn (insn))
3108 rtx tmp, next;
3110 /* Ignore anything that isn't an INSN. */
3111 if (GET_CODE (insn) != INSN)
3112 continue;
3114 tmp = PATTERN (insn);
3116 /* It must be a set. */
3117 if (GET_CODE (tmp) != SET)
3118 continue;
3120 /* The destination must be CCFP, which is register zero. */
3121 tmp = SET_DEST (tmp);
3122 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3123 continue;
3125 /* INSN should be a set of CCFP.
3127 See if the result of this insn is used in a reversed FP
3128 conditional branch. If so, reverse our condition and
3129 the branch. Doing so avoids useless add,tr insns. */
3130 next = next_insn (insn);
3131 while (next)
3133 /* Jumps, calls and labels stop our search. */
3134 if (GET_CODE (next) == JUMP_INSN
3135 || GET_CODE (next) == CALL_INSN
3136 || GET_CODE (next) == CODE_LABEL)
3137 break;
3139 /* As does another fcmp insn. */
3140 if (GET_CODE (next) == INSN
3141 && GET_CODE (PATTERN (next)) == SET
3142 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3143 && REGNO (SET_DEST (PATTERN (next))) == 0)
3144 break;
3146 next = next_insn (next);
3149 /* Is NEXT_INSN a branch? */
3150 if (next
3151 && GET_CODE (next) == JUMP_INSN)
3153 rtx pattern = PATTERN (next);
3155 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3156 and CCFP dies, then reverse our conditional and the branch
3157 to avoid the add,tr. */
3158 if (GET_CODE (pattern) == SET
3159 && SET_DEST (pattern) == pc_rtx
3160 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3161 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3162 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3163 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3164 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3165 && (fcmp_count == fbranch_count
3166 || (check_notes
3167 && find_regno_note (next, REG_DEAD, 0))))
3169 /* Reverse the branch. */
3170 tmp = XEXP (SET_SRC (pattern), 1);
3171 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3172 XEXP (SET_SRC (pattern), 2) = tmp;
3173 INSN_CODE (next) = -1;
3175 /* Reverse our condition. */
3176 tmp = PATTERN (insn);
3177 PUT_CODE (XEXP (tmp, 1),
3178 (reverse_condition_maybe_unordered
3179 (GET_CODE (XEXP (tmp, 1)))));
3185 pass = !pass;
3189 /* You may have trouble believing this, but this is the 32 bit HP-PA
3190 stack layout. Wow.
3192 Offset Contents
3194 Variable arguments (optional; any number may be allocated)
3196 SP-(4*(N+9)) arg word N
3198 SP-56 arg word 5
3199 SP-52 arg word 4
3201 Fixed arguments (must be allocated; may remain unused)
3203 SP-48 arg word 3
3204 SP-44 arg word 2
3205 SP-40 arg word 1
3206 SP-36 arg word 0
3208 Frame Marker
3210 SP-32 External Data Pointer (DP)
3211 SP-28 External sr4
3212 SP-24 External/stub RP (RP')
3213 SP-20 Current RP
3214 SP-16 Static Link
3215 SP-12 Clean up
3216 SP-8 Calling Stub RP (RP'')
3217 SP-4 Previous SP
3219 Top of Frame
3221 SP-0 Stack Pointer (points to next available address)
3225 /* This function saves registers as follows. Registers marked with ' are
3226 this function's registers (as opposed to the previous function's).
3227 If a frame_pointer isn't needed, r4 is saved as a general register;
3228 the space for the frame pointer is still allocated, though, to keep
3229 things simple.
3232 Top of Frame
3234 SP (FP') Previous FP
3235 SP + 4 Alignment filler (sigh)
3236 SP + 8 Space for locals reserved here.
3240 SP + n All call saved register used.
3244 SP + o All call saved fp registers used.
3248 SP + p (SP') points to next available address.
3252 /* Global variables set by output_function_prologue(). */
3253 /* Size of frame. Need to know this to emit return insns from
3254 leaf procedures. */
3255 static HOST_WIDE_INT actual_fsize, local_fsize;
3256 static int save_fregs;
3258 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3259 Handle case where DISP > 8k by using the add_high_const patterns.
3261 Note that in the DISP > 8k case, we leave the high part of the address
3262 in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
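/* For example, store_reg (4, 0x4000, STACK_POINTER_REGNUM) cannot use
   a 14-bit displacement (signed 14 bits tops out at 8191), so on the
   32-bit port it emits the equivalent of "addil L'16384,%r30" followed
   by "stw %r4,R'16384(%r1)" (mnemonics shown for illustration).  */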
3264 static void
3265 store_reg (int reg, HOST_WIDE_INT disp, int base)
3267 rtx insn, dest, src, basereg;
3269 src = gen_rtx_REG (word_mode, reg);
3270 basereg = gen_rtx_REG (Pmode, base);
3271 if (VAL_14_BITS_P (disp))
3273 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3274 insn = emit_move_insn (dest, src);
3276 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3278 rtx delta = GEN_INT (disp);
3279 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3281 emit_move_insn (tmpreg, delta);
3282 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3283 if (DO_FRAME_NOTES)
3285 REG_NOTES (insn)
3286 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3287 gen_rtx_SET (VOIDmode, tmpreg,
3288 gen_rtx_PLUS (Pmode, basereg, delta)),
3289 REG_NOTES (insn));
3290 RTX_FRAME_RELATED_P (insn) = 1;
3292 dest = gen_rtx_MEM (word_mode, tmpreg);
3293 insn = emit_move_insn (dest, src);
3295 else
3297 rtx delta = GEN_INT (disp);
3298 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3299 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3301 emit_move_insn (tmpreg, high);
3302 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3303 insn = emit_move_insn (dest, src);
3304 if (DO_FRAME_NOTES)
3306 REG_NOTES (insn)
3307 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3308 gen_rtx_SET (VOIDmode,
3309 gen_rtx_MEM (word_mode,
3310 gen_rtx_PLUS (word_mode, basereg,
3311 delta)),
3312 src),
3313 REG_NOTES (insn));
3317 if (DO_FRAME_NOTES)
3318 RTX_FRAME_RELATED_P (insn) = 1;
3321 /* Emit RTL to store REG at the memory location specified by BASE and then
3322 add MOD to BASE. MOD must be <= 8k. */
3324 static void
3325 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3327 rtx insn, basereg, srcreg, delta;
3329 gcc_assert (VAL_14_BITS_P (mod));
3331 basereg = gen_rtx_REG (Pmode, base);
3332 srcreg = gen_rtx_REG (word_mode, reg);
3333 delta = GEN_INT (mod);
3335 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3336 if (DO_FRAME_NOTES)
3338 RTX_FRAME_RELATED_P (insn) = 1;
3340 /* RTX_FRAME_RELATED_P must be set on each frame related set
3341 in a parallel with more than one element. */
3342 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3343 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3347 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3348 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3349 whether to add a frame note or not.
3351 In the DISP > 8k case, we leave the high part of the address in %r1.
3352 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3354 static void
3355 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3357 rtx insn;
3359 if (VAL_14_BITS_P (disp))
3361 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3362 plus_constant (gen_rtx_REG (Pmode, base), disp));
3364 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3366 rtx basereg = gen_rtx_REG (Pmode, base);
3367 rtx delta = GEN_INT (disp);
3368 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3370 emit_move_insn (tmpreg, delta);
3371 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3372 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3373 if (DO_FRAME_NOTES)
3374 REG_NOTES (insn)
3375 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3376 gen_rtx_SET (VOIDmode, tmpreg,
3377 gen_rtx_PLUS (Pmode, basereg, delta)),
3378 REG_NOTES (insn));
3380 else
3382 rtx basereg = gen_rtx_REG (Pmode, base);
3383 rtx delta = GEN_INT (disp);
3384 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3386 emit_move_insn (tmpreg,
3387 gen_rtx_PLUS (Pmode, basereg,
3388 gen_rtx_HIGH (Pmode, delta)));
3389 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3390 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3393 if (DO_FRAME_NOTES && note)
3394 RTX_FRAME_RELATED_P (insn) = 1;
3397 HOST_WIDE_INT
3398 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3400 int freg_saved = 0;
3401 int i, j;
3403 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3404 be consistent with the rounding and size calculation done here.
3405 Change them at the same time. */
3407 /* We do our own stack alignment. First, round the size of the
3408 stack locals up to a word boundary. */
3409 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3411 /* Space for previous frame pointer + filler. If any frame is
3412 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3413 waste some space here for the sake of HP compatibility. The
3414 first slot is only used when the frame pointer is needed. */
3415 if (size || frame_pointer_needed)
3416 size += STARTING_FRAME_OFFSET;
3418 /* If the current function calls __builtin_eh_return, then we need
3419 to allocate stack space for registers that will hold data for
3420 the exception handler. */
3421 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3423 unsigned int i;
3425 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3426 continue;
3427 size += i * UNITS_PER_WORD;
3430 /* Account for space used by the callee general register saves. */
3431 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3432 if (regs_ever_live[i])
3433 size += UNITS_PER_WORD;
3435 /* Account for space used by the callee floating point register saves. */
3436 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3437 if (regs_ever_live[i]
3438 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3440 freg_saved = 1;
3442 /* We always save both halves of the FP register, so always
3443 increment the frame size by 8 bytes. */
3444 size += 8;
3447 /* If any of the floating registers are saved, account for the
3448 alignment needed for the floating point register save block. */
3449 if (freg_saved)
3451 size = (size + 7) & ~7;
3452 if (fregs_live)
3453 *fregs_live = 1;
3456 /* The various ABIs include space for the outgoing parameters in the
3457 size of the current function's stack frame. We don't need to align
3458 for the outgoing arguments as their alignment is set by the final
3459 rounding for the frame as a whole. */
3460 size += current_function_outgoing_args_size;
3462 /* Allocate space for the fixed frame marker. This space must be
3463 allocated for any function that makes calls or allocates
3464 stack space. */
3465 if (!current_function_is_leaf || size)
3466 size += TARGET_64BIT ? 48 : 32;
3468 /* Finally, round to the preferred stack boundary. */
3469 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3470 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3473 /* Generate the assembly code for function entry. FILE is a stdio
3474 stream to output the code to. SIZE is an int: how many units of
3475 temporary storage to allocate.
3477 Refer to the array `regs_ever_live' to determine which registers to
3478 save; `regs_ever_live[I]' is nonzero if register number I is ever
3479 used in the function. This function is responsible for knowing
3480 which registers should not be saved even if used. */
3482 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3483 of memory. If any fpu reg is used in the function, we allocate
3484 such a block here, at the bottom of the frame, just in case it's needed.
3486 If this function is a leaf procedure, then we may choose not
3487 to do a "save" insn. The decision about whether or not
3488 to do this is made in regclass.c. */
3490 static void
3491 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3493 /* The function's label and associated .PROC must never be
3494 separated and must be output *after* any profiling declarations
3495 to avoid changing spaces/subspaces within a procedure. */
3496 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3497 fputs ("\t.PROC\n", file);
3499 /* hppa_expand_prologue does the dirty work now. We just need
3500 to output the assembler directives which denote the start
3501 of a function. */
3502 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3503 if (regs_ever_live[2])
3504 fputs (",CALLS,SAVE_RP", file);
3505 else
3506 fputs (",NO_CALLS", file);
3508 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3509 at the beginning of the frame and that it is used as the frame
3510 pointer for the frame. We do this because our current frame
3511 layout doesn't conform to that specified in the HP runtime
3512 documentation and we need a way to indicate to programs such as
3513 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3514 isn't used by HP compilers but is supported by the assembler.
3515 However, SAVE_SP is supposed to indicate that the previous stack
3516 pointer has been saved in the frame marker. */
3517 if (frame_pointer_needed)
3518 fputs (",SAVE_SP", file);
3520 /* Pass on information about the number of callee register saves
3521 performed in the prologue.
3523 The compiler is supposed to pass the highest register number
3524 saved, the assembler then has to adjust that number before
3525 entering it into the unwind descriptor (to account for any
3526 caller saved registers with lower register numbers than the
3527 first callee saved register). */
3528 if (gr_saved)
3529 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3531 if (fr_saved)
3532 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3534 fputs ("\n\t.ENTRY\n", file);
3536 remove_useless_addtr_insns (0);
3539 void
3540 hppa_expand_prologue (void)
3542 int merge_sp_adjust_with_store = 0;
3543 HOST_WIDE_INT size = get_frame_size ();
3544 HOST_WIDE_INT offset;
3545 int i;
3546 rtx insn, tmpreg;
3548 gr_saved = 0;
3549 fr_saved = 0;
3550 save_fregs = 0;
3552 /* Compute total size for frame pointer, filler, locals and rounding to
3553 the next word boundary. Similar code appears in compute_frame_size
3554 and must be changed in tandem with this code. */
3555 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3556 if (local_fsize || frame_pointer_needed)
3557 local_fsize += STARTING_FRAME_OFFSET;
3559 actual_fsize = compute_frame_size (size, &save_fregs);
3561 /* Compute a few things we will use often. */
3562 tmpreg = gen_rtx_REG (word_mode, 1);
3564 /* Save RP first. The calling conventions manual states RP will
3565 always be stored into the caller's frame at sp - 20 or sp - 16
3566 depending on which ABI is in use. */
3567 if (regs_ever_live[2] || current_function_calls_eh_return)
3568 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3570 /* Allocate the local frame and set up the frame pointer if needed. */
3571 if (actual_fsize != 0)
3573 if (frame_pointer_needed)
3575 /* Copy the old frame pointer temporarily into %r1. Set up the
3576 new stack pointer, then store away the saved old frame pointer
3577 into the stack at sp and at the same time update the stack
3578 pointer by actual_fsize bytes. Two versions, first
3579 handles small (<8k) frames. The second handles large (>=8k)
3580 frames. */
3581 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3582 if (DO_FRAME_NOTES)
3583 RTX_FRAME_RELATED_P (insn) = 1;
3585 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3586 if (DO_FRAME_NOTES)
3587 RTX_FRAME_RELATED_P (insn) = 1;
3589 if (VAL_14_BITS_P (actual_fsize))
3590 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3591 else
3593 /* It is incorrect to store the saved frame pointer at *sp,
3594 then increment sp (writes beyond the current stack boundary).
3596 So instead use stwm to store at *sp and post-increment the
3597 stack pointer as an atomic operation. Then increment sp to
3598 finish allocating the new frame. */
3599 HOST_WIDE_INT adjust1 = 8192 - 64;
3600 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3602 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3603 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3604 adjust2, 1);
3607 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3608 we need to store the previous stack pointer (frame pointer)
3609 into the frame marker on targets that use the HP unwind
3610 library. This allows the HP unwind library to be used to
3611 unwind GCC frames. However, we are not fully compatible
3612 with the HP library because our frame layout differs from
3613 that specified in the HP runtime specification.
3615 We don't want a frame note on this instruction as the frame
3616 marker moves during dynamic stack allocation.
3618 This instruction also serves as a blockage to prevent
3619 register spills from being scheduled before the stack
3620 pointer is raised. This is necessary as we store
3621 registers using the frame pointer as a base register,
3622 and the frame pointer is set before sp is raised. */
3623 if (TARGET_HPUX_UNWIND_LIBRARY)
3625 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3626 GEN_INT (TARGET_64BIT ? -8 : -4));
3628 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3629 frame_pointer_rtx);
3631 else
3632 emit_insn (gen_blockage ());
3634 /* No frame pointer needed. */
3635 else
3637 /* In some cases we can perform the first callee register save
3638 and allocating the stack frame at the same time. If so, just
3639 make a note of it and defer allocating the frame until saving
3640 the callee registers. */
3641 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3642 merge_sp_adjust_with_store = 1;
3643 /* Cannot optimize. Adjust the stack frame by actual_fsize
3644 bytes. */
3645 else
3646 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3647 actual_fsize, 1);
3651 /* Normal register save.
3653 Do not save the frame pointer in the frame_pointer_needed case. It
3654 was done earlier. */
3655 if (frame_pointer_needed)
3657 offset = local_fsize;
3659 /* Saving the EH return data registers in the frame is the simplest
3660 way to get the frame unwind information emitted. We put them
3661 just before the general registers. */
3662 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3664 unsigned int i, regno;
3666 for (i = 0; ; ++i)
3668 regno = EH_RETURN_DATA_REGNO (i);
3669 if (regno == INVALID_REGNUM)
3670 break;
3672 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3673 offset += UNITS_PER_WORD;
3677 for (i = 18; i >= 4; i--)
3678 if (regs_ever_live[i] && ! call_used_regs[i])
3680 store_reg (i, offset, FRAME_POINTER_REGNUM);
3681 offset += UNITS_PER_WORD;
3682 gr_saved++;
3684 /* Account for %r3 which is saved in a special place. */
3685 gr_saved++;
3687 /* No frame pointer needed. */
3688 else
3690 offset = local_fsize - actual_fsize;
3692 /* Saving the EH return data registers in the frame is the simplest
3693 way to get the frame unwind information emitted. */
3694 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3696 unsigned int i, regno;
3698 for (i = 0; ; ++i)
3700 regno = EH_RETURN_DATA_REGNO (i);
3701 if (regno == INVALID_REGNUM)
3702 break;
3704 /* If merge_sp_adjust_with_store is nonzero, then we can
3705 optimize the first save. */
3706 if (merge_sp_adjust_with_store)
3708 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3709 merge_sp_adjust_with_store = 0;
3711 else
3712 store_reg (regno, offset, STACK_POINTER_REGNUM);
3713 offset += UNITS_PER_WORD;
3717 for (i = 18; i >= 3; i--)
3718 if (regs_ever_live[i] && ! call_used_regs[i])
3720 /* If merge_sp_adjust_with_store is nonzero, then we can
3721 optimize the first GR save. */
3722 if (merge_sp_adjust_with_store)
3724 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3725 merge_sp_adjust_with_store = 0;
3727 else
3728 store_reg (i, offset, STACK_POINTER_REGNUM);
3729 offset += UNITS_PER_WORD;
3730 gr_saved++;
3733 /* If we wanted to merge the SP adjustment with a GR save, but we never
3734 did any GR saves, then just emit the adjustment here. */
3735 if (merge_sp_adjust_with_store)
3736 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3737 actual_fsize, 1);
3740 /* The hppa calling conventions say that %r19, the pic offset
3741 register, is saved at sp - 32 (in this function's frame)
3742 when generating PIC code. FIXME: What is the correct thing
3743 to do for functions which make no calls and allocate no
3744 frame? Do we need to allocate a frame, or can we just omit
3745 the save? For now we'll just omit the save.
3747 We don't want a note on this insn as the frame marker can
3748 move if there is a dynamic stack allocation. */
3749 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3751 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3753 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3757 /* Align pointer properly (doubleword boundary). */
3758 offset = (offset + 7) & ~7;
3760 /* Floating point register store. */
3761 if (save_fregs)
3763 rtx base;
3765 /* First get the frame or stack pointer to the start of the FP register
3766 save area. */
3767 if (frame_pointer_needed)
3769 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3770 base = frame_pointer_rtx;
3772 else
3774 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3775 base = stack_pointer_rtx;
3778 /* Now actually save the FP registers. */
3779 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3781 if (regs_ever_live[i]
3782 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3784 rtx addr, insn, reg;
3785 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3786 reg = gen_rtx_REG (DFmode, i);
3787 insn = emit_move_insn (addr, reg);
3788 if (DO_FRAME_NOTES)
3790 RTX_FRAME_RELATED_P (insn) = 1;
3791 if (TARGET_64BIT)
3793 rtx mem = gen_rtx_MEM (DFmode,
3794 plus_constant (base, offset));
3795 REG_NOTES (insn)
3796 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3797 gen_rtx_SET (VOIDmode, mem, reg),
3798 REG_NOTES (insn));
3800 else
3802 rtx meml = gen_rtx_MEM (SFmode,
3803 plus_constant (base, offset));
3804 rtx memr = gen_rtx_MEM (SFmode,
3805 plus_constant (base, offset + 4));
3806 rtx regl = gen_rtx_REG (SFmode, i);
3807 rtx regr = gen_rtx_REG (SFmode, i + 1);
3808 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3809 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3810 rtvec vec;
3812 RTX_FRAME_RELATED_P (setl) = 1;
3813 RTX_FRAME_RELATED_P (setr) = 1;
3814 vec = gen_rtvec (2, setl, setr);
3815 REG_NOTES (insn)
3816 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3817 gen_rtx_SEQUENCE (VOIDmode, vec),
3818 REG_NOTES (insn));
3821 offset += GET_MODE_SIZE (DFmode);
3822 fr_saved++;
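/* Note that in the 32-bit case the frame note describes the save as
   two SFmode stores at offset and offset + 4, so the unwinder accounts
   for both words of the doubleword FP register. */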
3828 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3829 Handle case where DISP > 8k by using the add_high_const patterns. */
3831 static void
3832 load_reg (int reg, HOST_WIDE_INT disp, int base)
3834 rtx dest = gen_rtx_REG (word_mode, reg);
3835 rtx basereg = gen_rtx_REG (Pmode, base);
3836 rtx src;
3838 if (VAL_14_BITS_P (disp))
3839 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3840 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3842 rtx delta = GEN_INT (disp);
3843 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3845 emit_move_insn (tmpreg, delta);
3846 if (TARGET_DISABLE_INDEXING)
3848 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3849 src = gen_rtx_MEM (word_mode, tmpreg);
3851 else
3852 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3854 else
3856 rtx delta = GEN_INT (disp);
3857 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3858 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3860 emit_move_insn (tmpreg, high);
3861 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3864 emit_move_insn (dest, src);
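/* For example, a displacement of 40000 does not fit in 14 bits, so on
   a 32-bit target it goes through the HIGH/LO_SUM path above with %r1
   as the temporary (typically an addil/ldw pair). */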
3867 /* Update the total code bytes output to the text section. */
3869 static void
3870 update_total_code_bytes (int nbytes)
3872 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3873 && !IN_NAMED_SECTION_P (cfun->decl))
3875 if (INSN_ADDRESSES_SET_P ())
3877 unsigned long old_total = total_code_bytes;
3879 total_code_bytes += nbytes;
3881 /* Be prepared to handle overflows. */
3882 if (old_total > total_code_bytes)
3883 total_code_bytes = -1;
3885 else
3886 total_code_bytes = -1;
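/* A total of (unsigned long) -1 therefore means the running total has
   overflowed or is unknown, rather than being an exact byte count. */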
3890 /* This function generates the assembly code for function exit.
3891 Args are as for output_function_prologue ().
3893 The function epilogue should not depend on the current stack
3894 pointer! It should use the frame pointer only. This is mandatory
3895 because of alloca; we also take advantage of it to omit stack
3896 adjustments before returning. */
3898 static void
3899 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3901 rtx insn = get_last_insn ();
3903 last_address = 0;
3905 /* hppa_expand_epilogue does the dirty work now. We just need
3906 to output the assembler directives which denote the end
3907 of a function.
3909 To make debuggers happy, emit a nop if the epilogue was completely
3910 eliminated due to a volatile call as the last insn in the
3911 current function. That way the return address (in %r2) will
3912 always point to a valid instruction in the current function. */
3914 /* Get the last real insn. */
3915 if (GET_CODE (insn) == NOTE)
3916 insn = prev_real_insn (insn);
3918 /* If it is a sequence, then look inside. */
3919 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
3920 insn = XVECEXP (PATTERN (insn), 0, 0);
3922 /* If insn is a CALL_INSN, then it must be a call to a volatile
3923 function (otherwise there would be epilogue insns). */
3924 if (insn && GET_CODE (insn) == CALL_INSN)
3926 fputs ("\tnop\n", file);
3927 last_address += 4;
3930 fputs ("\t.EXIT\n\t.PROCEND\n", file);
3932 if (TARGET_SOM && TARGET_GAS)
3934 /* We're done with this subspace except possibly for some additional
3935 debug information. Forget that we are in this subspace to ensure
3936 that the next function is output in its own subspace. */
3937 in_section = NULL;
3940 if (INSN_ADDRESSES_SET_P ())
3942 insn = get_last_nonnote_insn ();
3943 last_address += INSN_ADDRESSES (INSN_UID (insn));
3944 if (INSN_P (insn))
3945 last_address += insn_default_length (insn);
3946 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
3947 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
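/* E.g., with a 4-byte function boundary, a last_address of 0x1235
   rounds up to 0x1238. */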
3950 /* Finally, update the total number of code bytes output so far. */
3951 update_total_code_bytes (last_address);
3954 void
3955 hppa_expand_epilogue (void)
3957 rtx tmpreg;
3958 HOST_WIDE_INT offset;
3959 HOST_WIDE_INT ret_off = 0;
3960 int i;
3961 int merge_sp_adjust_with_load = 0;
3963 /* We will use this often. */
3964 tmpreg = gen_rtx_REG (word_mode, 1);
3966 /* Try to restore RP early to avoid load/use interlocks when
3967 RP gets used in the return (bv) instruction. This appears to still
3968 be necessary even when we schedule the prologue and epilogue. */
3969 if (regs_ever_live [2] || current_function_calls_eh_return)
3971 ret_off = TARGET_64BIT ? -16 : -20;
3972 if (frame_pointer_needed)
3974 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
3975 ret_off = 0;
3977 else
3979 /* No frame pointer, and stack is smaller than 8k. */
3980 if (VAL_14_BITS_P (ret_off - actual_fsize))
3982 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
3983 ret_off = 0;
3988 /* General register restores. */
3989 if (frame_pointer_needed)
3991 offset = local_fsize;
3993 /* If the current function calls __builtin_eh_return, then we need
3994 to restore the saved EH data registers. */
3995 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3997 unsigned int i, regno;
3999 for (i = 0; ; ++i)
4001 regno = EH_RETURN_DATA_REGNO (i);
4002 if (regno == INVALID_REGNUM)
4003 break;
4005 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4006 offset += UNITS_PER_WORD;
4010 for (i = 18; i >= 4; i--)
4011 if (regs_ever_live[i] && ! call_used_regs[i])
4013 load_reg (i, offset, FRAME_POINTER_REGNUM);
4014 offset += UNITS_PER_WORD;
4017 else
4019 offset = local_fsize - actual_fsize;
4021 /* If the current function calls __builtin_eh_return, then we need
4022 to restore the saved EH data registers. */
4023 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4025 unsigned int i, regno;
4027 for (i = 0; ; ++i)
4029 regno = EH_RETURN_DATA_REGNO (i);
4030 if (regno == INVALID_REGNUM)
4031 break;
4033 /* Only for the first load.
4034 merge_sp_adjust_with_load holds the register load
4035 with which we will merge the sp adjustment. */
4036 if (merge_sp_adjust_with_load == 0
4037 && local_fsize == 0
4038 && VAL_14_BITS_P (-actual_fsize))
4039 merge_sp_adjust_with_load = regno;
4040 else
4041 load_reg (regno, offset, STACK_POINTER_REGNUM);
4042 offset += UNITS_PER_WORD;
4046 for (i = 18; i >= 3; i--)
4048 if (regs_ever_live[i] && ! call_used_regs[i])
4050 /* Only for the first load.
4051 merge_sp_adjust_with_load holds the register load
4052 with which we will merge the sp adjustment. */
4053 if (merge_sp_adjust_with_load == 0
4054 && local_fsize == 0
4055 && VAL_14_BITS_P (-actual_fsize))
4056 merge_sp_adjust_with_load = i;
4057 else
4058 load_reg (i, offset, STACK_POINTER_REGNUM);
4059 offset += UNITS_PER_WORD;
4064 /* Align pointer properly (doubleword boundary). */
4065 offset = (offset + 7) & ~7;
4067 /* FP register restores. */
4068 if (save_fregs)
4070 /* Adjust the register to index off of. */
4071 if (frame_pointer_needed)
4072 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4073 else
4074 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4076 /* Actually do the restores now. */
4077 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4078 if (regs_ever_live[i]
4079 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4081 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4082 rtx dest = gen_rtx_REG (DFmode, i);
4083 emit_move_insn (dest, src);
4087 /* Emit a blockage insn here to keep these insns from being moved to
4088 an earlier spot in the epilogue, or into the main instruction stream.
4090 This is necessary as we must not cut the stack back before all the
4091 restores are finished. */
4092 emit_insn (gen_blockage ());
4094 /* Reset stack pointer (and possibly frame pointer). The stack
4095 pointer is initially set to fp + 64 to avoid a race condition. */
4096 if (frame_pointer_needed)
4098 rtx delta = GEN_INT (-64);
4100 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4101 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4103 /* If we were deferring a callee register restore, do it now. */
4104 else if (merge_sp_adjust_with_load)
4106 rtx delta = GEN_INT (-actual_fsize);
4107 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4109 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4111 else if (actual_fsize != 0)
4112 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4113 - actual_fsize, 0);
4115 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4116 frame greater than 8k), do so now. */
4117 if (ret_off != 0)
4118 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4120 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4122 rtx sa = EH_RETURN_STACKADJ_RTX;
4124 emit_insn (gen_blockage ());
4125 emit_insn (TARGET_64BIT
4126 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4127 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4131 rtx
4132 hppa_pic_save_rtx (void)
4134 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4137 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4138 #define NO_DEFERRED_PROFILE_COUNTERS 0
4139 #endif
4141 /* Define heap vector type for funcdef numbers. */
4142 DEF_VEC_I(int);
4143 DEF_VEC_ALLOC_I(int,heap);
4145 /* Vector of funcdef numbers. */
4146 static VEC(int,heap) *funcdef_nos;
4148 /* Output deferred profile counters. */
4149 static void
4150 output_deferred_profile_counters (void)
4152 unsigned int i;
4153 int align, n;
4155 if (VEC_empty (int, funcdef_nos))
4156 return;
4158 switch_to_section (data_section);
4159 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4160 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4162 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4164 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4165 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4168 VEC_free (int, heap, funcdef_nos);
4171 void
4172 hppa_profile_hook (int label_no)
4174 /* We use SImode for the address of the function in both 32 and
4175 64-bit code to avoid having to provide DImode versions of the
4176 lcla2 and load_offset_label_address insn patterns. */
4177 rtx reg = gen_reg_rtx (SImode);
4178 rtx label_rtx = gen_label_rtx ();
4179 rtx begin_label_rtx, call_insn;
4180 char begin_label_name[16];
4182 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4183 label_no);
4184 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4186 if (TARGET_64BIT)
4187 emit_move_insn (arg_pointer_rtx,
4188 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4189 GEN_INT (64)));
4191 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4193 /* The address of the function is loaded into %r25 with an instruction-
4194 relative sequence that avoids the use of relocations. The sequence
4195 is split so that the load_offset_label_address instruction can
4196 occupy the delay slot of the call to _mcount. */
4197 if (TARGET_PA_20)
4198 emit_insn (gen_lcla2 (reg, label_rtx));
4199 else
4200 emit_insn (gen_lcla1 (reg, label_rtx));
4202 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4203 reg, begin_label_rtx, label_rtx));
4205 #if !NO_DEFERRED_PROFILE_COUNTERS
4207 rtx count_label_rtx, addr, r24;
4208 char count_label_name[16];
4210 VEC_safe_push (int, heap, funcdef_nos, label_no);
4211 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4212 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4214 addr = force_reg (Pmode, count_label_rtx);
4215 r24 = gen_rtx_REG (Pmode, 24);
4216 emit_move_insn (r24, addr);
4218 call_insn =
4219 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4220 gen_rtx_SYMBOL_REF (Pmode,
4221 "_mcount")),
4222 GEN_INT (TARGET_64BIT ? 24 : 12)));
4224 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4226 #else
4228 call_insn =
4229 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4230 gen_rtx_SYMBOL_REF (Pmode,
4231 "_mcount")),
4232 GEN_INT (TARGET_64BIT ? 16 : 8)));
4234 #endif
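/* With deferred counters, %r24 carries the address of this function's
   counter word (the "LP" label emitted later by
   output_deferred_profile_counters); otherwise _mcount receives only
   the two standard arguments in %r25 and %r26. */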
4236 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4237 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4239 /* Indicate the _mcount call cannot throw, nor will it execute a
4240 non-local goto. */
4241 REG_NOTES (call_insn)
4242 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4245 /* Fetch the return address for the frame COUNT steps up from
4246 the current frame, after the prologue. FRAMEADDR is the
4247 frame pointer of the COUNT frame.
4249 We want to ignore any export stub remnants here. To handle this,
4250 we examine the code at the return address, and if it is an export
4251 stub, we return a memory rtx for the stub return address stored
4252 at frame-24.
4254 The value returned is used in two different ways:
4256 1. To find a function's caller.
4258 2. To change the return address for a function.
4260 This function handles most instances of case 1; however, it will
4261 fail if there are two levels of stubs to execute on the return
4262 path. The only way I believe that can happen is if the return value
4263 needs a parameter relocation, which never happens for C code.
4265 This function handles most instances of case 2; however, it will
4266 fail if we did not originally have stub code on the return path
4267 but will need stub code on the new return path. This can happen if
4268 the caller & callee are both in the main program, but the new
4269 return location is in a shared library. */
4271 rtx
4272 return_addr_rtx (int count, rtx frameaddr)
4274 rtx label;
4275 rtx rp;
4276 rtx saved_rp;
4277 rtx ins;
4279 if (count != 0)
4280 return NULL_RTX;
4282 rp = get_hard_reg_initial_val (Pmode, 2);
4284 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4285 return rp;
4287 saved_rp = gen_reg_rtx (Pmode);
4288 emit_move_insn (saved_rp, rp);
4290 /* Get pointer to the instruction stream. We have to mask out the
4291 privilege level from the two low order bits of the return address
4292 pointer here so that ins will point to the start of the first
4293 instruction that would have been executed if we returned. */
4294 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4295 label = gen_label_rtx ();
4297 /* Check the instruction stream at the normal return address for the
4298 export stub:
4300 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4301 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4302 0x00011820 | stub+16: mtsp r1,sr0
4303 0xe0400002 | stub+20: be,n 0(sr0,rp)
4305 If it is an export stub, then our return address is really in
4306 -24[frameaddr]. */
4308 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4309 NULL_RTX, SImode, 1);
4310 emit_jump_insn (gen_bne (label));
4312 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4313 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4314 emit_jump_insn (gen_bne (label));
4316 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4317 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4318 emit_jump_insn (gen_bne (label));
4320 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4321 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4323 /* If there is no export stub then just use the value saved from
4324 the return pointer register. */
4326 emit_jump_insn (gen_bne (label));
4328 /* Here we know that our return address points to an export
4329 stub. We don't want to return the address of the export stub,
4330 but rather the return address of the export stub. That return
4331 address is stored at -24[frameaddr]. */
4333 emit_move_insn (saved_rp,
4334 gen_rtx_MEM (Pmode,
4335 memory_address (Pmode,
4336 plus_constant (frameaddr,
4337 -24))));
4339 emit_label (label);
4340 return saved_rp;
4343 /* This is only valid once reload has completed because it depends on
4344 knowing exactly how much (if any) frame there is and...
4346 It's only valid if there is no frame marker to de-allocate and...
4348 It's only valid if %r2 hasn't been saved into the caller's frame
4349 (we're not profiling and %r2 isn't live anywhere). */
4350 int
4351 hppa_can_use_return_insn_p (void)
4353 return (reload_completed
4354 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4355 && ! regs_ever_live[2]
4356 && ! frame_pointer_needed);
4359 void
4360 emit_bcond_fp (enum rtx_code code, rtx operand0)
4362 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4363 gen_rtx_IF_THEN_ELSE (VOIDmode,
4364 gen_rtx_fmt_ee (code,
4365 VOIDmode,
4366 gen_rtx_REG (CCFPmode, 0),
4367 const0_rtx),
4368 gen_rtx_LABEL_REF (VOIDmode, operand0),
4369 pc_rtx)));
4373 rtx
4374 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4376 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4377 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4380 /* Adjust the cost of a scheduling dependency. Return the new cost of
4381 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4383 static int
4384 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4386 enum attr_type attr_type;
4388 /* Don't adjust costs for a pa8000 chip; also don't adjust any
4389 true dependencies, as they are described with bypasses now. */
4390 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4391 return cost;
4393 if (! recog_memoized (insn))
4394 return 0;
4396 attr_type = get_attr_type (insn);
4398 switch (REG_NOTE_KIND (link))
4400 case REG_DEP_ANTI:
4401 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4402 cycles later. */
4404 if (attr_type == TYPE_FPLOAD)
4406 rtx pat = PATTERN (insn);
4407 rtx dep_pat = PATTERN (dep_insn);
4408 if (GET_CODE (pat) == PARALLEL)
4410 /* This happens for the fldXs,mb patterns. */
4411 pat = XVECEXP (pat, 0, 0);
4413 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4414 /* If this happens, we have to extend this to schedule
4415 optimally. Return 0 for now. */
4416 return 0;
4418 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4420 if (! recog_memoized (dep_insn))
4421 return 0;
4422 switch (get_attr_type (dep_insn))
4424 case TYPE_FPALU:
4425 case TYPE_FPMULSGL:
4426 case TYPE_FPMULDBL:
4427 case TYPE_FPDIVSGL:
4428 case TYPE_FPDIVDBL:
4429 case TYPE_FPSQRTSGL:
4430 case TYPE_FPSQRTDBL:
4431 /* A fpload can't be issued until one cycle before a
4432 preceding arithmetic operation has finished if
4433 the target of the fpload is any of the sources
4434 (or destination) of the arithmetic operation. */
4435 return insn_default_latency (dep_insn) - 1;
4437 default:
4438 return 0;
4442 else if (attr_type == TYPE_FPALU)
4444 rtx pat = PATTERN (insn);
4445 rtx dep_pat = PATTERN (dep_insn);
4446 if (GET_CODE (pat) == PARALLEL)
4448 /* This happens for the fldXs,mb patterns. */
4449 pat = XVECEXP (pat, 0, 0);
4451 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4452 /* If this happens, we have to extend this to schedule
4453 optimally. Return 0 for now. */
4454 return 0;
4456 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4458 if (! recog_memoized (dep_insn))
4459 return 0;
4460 switch (get_attr_type (dep_insn))
4462 case TYPE_FPDIVSGL:
4463 case TYPE_FPDIVDBL:
4464 case TYPE_FPSQRTSGL:
4465 case TYPE_FPSQRTDBL:
4466 /* An ALU flop can't be issued until two cycles before a
4467 preceding divide or sqrt operation has finished if
4468 the target of the ALU flop is any of the sources
4469 (or destination) of the divide or sqrt operation. */
4470 return insn_default_latency (dep_insn) - 2;
4472 default:
4473 return 0;
4478 /* For other anti dependencies, the cost is 0. */
4479 return 0;
4481 case REG_DEP_OUTPUT:
4482 /* Output dependency; DEP_INSN writes a register that INSN writes some
4483 cycles later. */
4484 if (attr_type == TYPE_FPLOAD)
4486 rtx pat = PATTERN (insn);
4487 rtx dep_pat = PATTERN (dep_insn);
4488 if (GET_CODE (pat) == PARALLEL)
4490 /* This happens for the fldXs,mb patterns. */
4491 pat = XVECEXP (pat, 0, 0);
4493 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4494 /* If this happens, we have to extend this to schedule
4495 optimally. Return 0 for now. */
4496 return 0;
4498 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4500 if (! recog_memoized (dep_insn))
4501 return 0;
4502 switch (get_attr_type (dep_insn))
4504 case TYPE_FPALU:
4505 case TYPE_FPMULSGL:
4506 case TYPE_FPMULDBL:
4507 case TYPE_FPDIVSGL:
4508 case TYPE_FPDIVDBL:
4509 case TYPE_FPSQRTSGL:
4510 case TYPE_FPSQRTDBL:
4511 /* A fpload can't be issued until one cycle before a
4512 preceding arithmetic operation has finished if
4513 the target of the fpload is the destination of the
4514 arithmetic operation.
4516 Exception: For PA7100LC, PA7200 and PA7300, the cost
4517 is 3 cycles, unless they bundle together. We also
4518 pay the penalty if the second insn is a fpload. */
4519 return insn_default_latency (dep_insn) - 1;
4521 default:
4522 return 0;
4526 else if (attr_type == TYPE_FPALU)
4528 rtx pat = PATTERN (insn);
4529 rtx dep_pat = PATTERN (dep_insn);
4530 if (GET_CODE (pat) == PARALLEL)
4532 /* This happens for the fldXs,mb patterns. */
4533 pat = XVECEXP (pat, 0, 0);
4535 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4536 /* If this happens, we have to extend this to schedule
4537 optimally. Return 0 for now. */
4538 return 0;
4540 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4542 if (! recog_memoized (dep_insn))
4543 return 0;
4544 switch (get_attr_type (dep_insn))
4546 case TYPE_FPDIVSGL:
4547 case TYPE_FPDIVDBL:
4548 case TYPE_FPSQRTSGL:
4549 case TYPE_FPSQRTDBL:
4550 /* An ALU flop can't be issued until two cycles before a
4551 preceding divide or sqrt operation has finished if
4552 the target of the ALU flop is also the target of
4553 the divide or sqrt operation. */
4554 return insn_default_latency (dep_insn) - 2;
4556 default:
4557 return 0;
4562 /* For other output dependencies, the cost is 0. */
4563 return 0;
4565 default:
4566 gcc_unreachable ();
4570 /* Adjust scheduling priorities. We use this to try to keep addil
4571 and the next use of %r1 close together. */
4572 static int
4573 pa_adjust_priority (rtx insn, int priority)
4575 rtx set = single_set (insn);
4576 rtx src, dest;
4577 if (set)
4579 src = SET_SRC (set);
4580 dest = SET_DEST (set);
4581 if (GET_CODE (src) == LO_SUM
4582 && symbolic_operand (XEXP (src, 1), VOIDmode)
4583 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4584 priority >>= 3;
4586 else if (GET_CODE (src) == MEM
4587 && GET_CODE (XEXP (src, 0)) == LO_SUM
4588 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4589 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4590 priority >>= 1;
4592 else if (GET_CODE (dest) == MEM
4593 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4594 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4595 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4596 priority >>= 3;
4598 return priority;
4601 /* The 700 can only issue a single insn at a time.
4602 The 7XXX processors can issue two insns at a time.
4603 The 8000 can issue 4 insns at a time. */
4604 static int
4605 pa_issue_rate (void)
4607 switch (pa_cpu)
4609 case PROCESSOR_700: return 1;
4610 case PROCESSOR_7100: return 2;
4611 case PROCESSOR_7100LC: return 2;
4612 case PROCESSOR_7200: return 2;
4613 case PROCESSOR_7300: return 2;
4614 case PROCESSOR_8000: return 4;
4616 default:
4617 gcc_unreachable ();
4623 /* Return any length adjustment needed by INSN which already has its length
4624 computed as LENGTH. Return zero if no adjustment is necessary.
4626 For the PA: function calls, millicode calls, and backwards short
4627 conditional branches with unfilled delay slots need an adjustment by +1
4628 (to account for the NOP which will be inserted into the instruction stream).
4630 Also compute the length of an inline block move here as it is too
4631 complicated to express as a length attribute in pa.md. */
4632 int
4633 pa_adjust_insn_length (rtx insn, int length)
4635 rtx pat = PATTERN (insn);
4637 /* Jumps inside switch tables which have unfilled delay slots need
4638 adjustment. */
4639 if (GET_CODE (insn) == JUMP_INSN
4640 && GET_CODE (pat) == PARALLEL
4641 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4642 return 4;
4643 /* Millicode insn with an unfilled delay slot. */
4644 else if (GET_CODE (insn) == INSN
4645 && GET_CODE (pat) != SEQUENCE
4646 && GET_CODE (pat) != USE
4647 && GET_CODE (pat) != CLOBBER
4648 && get_attr_type (insn) == TYPE_MILLI)
4649 return 4;
4650 /* Block move pattern. */
4651 else if (GET_CODE (insn) == INSN
4652 && GET_CODE (pat) == PARALLEL
4653 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4654 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4655 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4656 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4657 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4658 return compute_movmem_length (insn) - 4;
4659 /* Block clear pattern. */
4660 else if (GET_CODE (insn) == INSN
4661 && GET_CODE (pat) == PARALLEL
4662 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4663 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4664 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4665 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4666 return compute_clrmem_length (insn) - 4;
4667 /* Conditional branch with an unfilled delay slot. */
4668 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4670 /* Adjust a short backwards conditional with an unfilled delay slot. */
4671 if (GET_CODE (pat) == SET
4672 && length == 4
4673 && ! forward_branch_p (insn))
4674 return 4;
4675 else if (GET_CODE (pat) == PARALLEL
4676 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4677 && length == 4)
4678 return 4;
4679 /* Adjust dbra insn with short backwards conditional branch with
4680 unfilled delay slot -- only for the case where the counter is in a
4681 general register. */
4682 else if (GET_CODE (pat) == PARALLEL
4683 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4684 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4685 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4686 && length == 4
4687 && ! forward_branch_p (insn))
4688 return 4;
4689 else
4690 return 0;
4692 return 0;
4695 /* Print operand X (an rtx) in assembler syntax to file FILE.
4696 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4697 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4699 void
4700 print_operand (FILE *file, rtx x, int code)
4702 switch (code)
4704 case '#':
4705 /* Output a 'nop' if there's nothing for the delay slot. */
4706 if (dbr_sequence_length () == 0)
4707 fputs ("\n\tnop", file);
4708 return;
4709 case '*':
4710 /* Output a nullification completer if there's nothing for the
4711 delay slot or nullification is requested. */
4712 if (dbr_sequence_length () == 0
4713 || (final_sequence
4714 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4715 fputs (",n", file);
4716 return;
4717 case 'R':
4718 /* Print out the second register name of a register pair.
4719 I.e., R (6) => 7. */
4720 fputs (reg_names[REGNO (x) + 1], file);
4721 return;
4722 case 'r':
4723 /* A register or zero. */
4724 if (x == const0_rtx
4725 || (x == CONST0_RTX (DFmode))
4726 || (x == CONST0_RTX (SFmode)))
4728 fputs ("%r0", file);
4729 return;
4731 else
4732 break;
4733 case 'f':
4734 /* A register or zero (floating point). */
4735 if (x == const0_rtx
4736 || (x == CONST0_RTX (DFmode))
4737 || (x == CONST0_RTX (SFmode)))
4739 fputs ("%fr0", file);
4740 return;
4742 else
4743 break;
4744 case 'A':
4746 rtx xoperands[2];
4748 xoperands[0] = XEXP (XEXP (x, 0), 0);
4749 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4750 output_global_address (file, xoperands[1], 0);
4751 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4752 return;
4755 case 'C': /* Plain (C)ondition */
4756 case 'X':
4757 switch (GET_CODE (x))
4759 case EQ:
4760 fputs ("=", file); break;
4761 case NE:
4762 fputs ("<>", file); break;
4763 case GT:
4764 fputs (">", file); break;
4765 case GE:
4766 fputs (">=", file); break;
4767 case GEU:
4768 fputs (">>=", file); break;
4769 case GTU:
4770 fputs (">>", file); break;
4771 case LT:
4772 fputs ("<", file); break;
4773 case LE:
4774 fputs ("<=", file); break;
4775 case LEU:
4776 fputs ("<<=", file); break;
4777 case LTU:
4778 fputs ("<<", file); break;
4779 default:
4780 gcc_unreachable ();
4782 return;
4783 case 'N': /* Condition, (N)egated */
4784 switch (GET_CODE (x))
4786 case EQ:
4787 fputs ("<>", file); break;
4788 case NE:
4789 fputs ("=", file); break;
4790 case GT:
4791 fputs ("<=", file); break;
4792 case GE:
4793 fputs ("<", file); break;
4794 case GEU:
4795 fputs ("<<", file); break;
4796 case GTU:
4797 fputs ("<<=", file); break;
4798 case LT:
4799 fputs (">=", file); break;
4800 case LE:
4801 fputs (">", file); break;
4802 case LEU:
4803 fputs (">>", file); break;
4804 case LTU:
4805 fputs (">>=", file); break;
4806 default:
4807 gcc_unreachable ();
4809 return;
4810 /* For floating point comparisons. Note that the output
4811 predicates are the complement of the desired mode. The
4812 conditions for GT, GE, LT, LE and LTGT cause an invalid
4813 operation exception if the result is unordered and this
4814 exception is enabled in the floating-point status register. */
4815 case 'Y':
4816 switch (GET_CODE (x))
4818 case EQ:
4819 fputs ("!=", file); break;
4820 case NE:
4821 fputs ("=", file); break;
4822 case GT:
4823 fputs ("!>", file); break;
4824 case GE:
4825 fputs ("!>=", file); break;
4826 case LT:
4827 fputs ("!<", file); break;
4828 case LE:
4829 fputs ("!<=", file); break;
4830 case LTGT:
4831 fputs ("!<>", file); break;
4832 case UNLE:
4833 fputs ("!?<=", file); break;
4834 case UNLT:
4835 fputs ("!?<", file); break;
4836 case UNGE:
4837 fputs ("!?>=", file); break;
4838 case UNGT:
4839 fputs ("!?>", file); break;
4840 case UNEQ:
4841 fputs ("!?=", file); break;
4842 case UNORDERED:
4843 fputs ("!?", file); break;
4844 case ORDERED:
4845 fputs ("?", file); break;
4846 default:
4847 gcc_unreachable ();
4849 return;
4850 case 'S': /* Condition, operands are (S)wapped. */
4851 switch (GET_CODE (x))
4853 case EQ:
4854 fputs ("=", file); break;
4855 case NE:
4856 fputs ("<>", file); break;
4857 case GT:
4858 fputs ("<", file); break;
4859 case GE:
4860 fputs ("<=", file); break;
4861 case GEU:
4862 fputs ("<<=", file); break;
4863 case GTU:
4864 fputs ("<<", file); break;
4865 case LT:
4866 fputs (">", file); break;
4867 case LE:
4868 fputs (">=", file); break;
4869 case LEU:
4870 fputs (">>=", file); break;
4871 case LTU:
4872 fputs (">>", file); break;
4873 default:
4874 gcc_unreachable ();
4876 return;
4877 case 'B': /* Condition, (B)oth swapped and negated. */
4878 switch (GET_CODE (x))
4880 case EQ:
4881 fputs ("<>", file); break;
4882 case NE:
4883 fputs ("=", file); break;
4884 case GT:
4885 fputs (">=", file); break;
4886 case GE:
4887 fputs (">", file); break;
4888 case GEU:
4889 fputs (">>", file); break;
4890 case GTU:
4891 fputs (">>=", file); break;
4892 case LT:
4893 fputs ("<=", file); break;
4894 case LE:
4895 fputs ("<", file); break;
4896 case LEU:
4897 fputs ("<<", file); break;
4898 case LTU:
4899 fputs ("<<=", file); break;
4900 default:
4901 gcc_unreachable ();
4903 return;
4904 case 'k':
4905 gcc_assert (GET_CODE (x) == CONST_INT);
4906 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4907 return;
4908 case 'Q':
4909 gcc_assert (GET_CODE (x) == CONST_INT);
4910 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4911 return;
4912 case 'L':
4913 gcc_assert (GET_CODE (x) == CONST_INT);
4914 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4915 return;
4916 case 'O':
4917 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4918 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4919 return;
4920 case 'p':
4921 gcc_assert (GET_CODE (x) == CONST_INT);
4922 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4923 return;
4924 case 'P':
4925 gcc_assert (GET_CODE (x) == CONST_INT);
4926 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4927 return;
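/* Illustrative values for the CONST_INT codes above: 'k' of 3 prints
   -4 (~3); 'Q' of 5 prints 59; 'L' of 8 prints 24; 'O' of 16 prints 4;
   'p' of 5 prints 58; 'P' of 8 prints 23. */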
4928 case 'I':
4929 if (GET_CODE (x) == CONST_INT)
4930 fputs ("i", file);
4931 return;
4932 case 'M':
4933 case 'F':
4934 switch (GET_CODE (XEXP (x, 0)))
4936 case PRE_DEC:
4937 case PRE_INC:
4938 if (ASSEMBLER_DIALECT == 0)
4939 fputs ("s,mb", file);
4940 else
4941 fputs (",mb", file);
4942 break;
4943 case POST_DEC:
4944 case POST_INC:
4945 if (ASSEMBLER_DIALECT == 0)
4946 fputs ("s,ma", file);
4947 else
4948 fputs (",ma", file);
4949 break;
4950 case PLUS:
4951 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4952 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4954 if (ASSEMBLER_DIALECT == 0)
4955 fputs ("x", file);
4957 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4958 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4960 if (ASSEMBLER_DIALECT == 0)
4961 fputs ("x,s", file);
4962 else
4963 fputs (",s", file);
4965 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4966 fputs ("s", file);
4967 break;
4968 default:
4969 if (code == 'F' && ASSEMBLER_DIALECT == 0)
4970 fputs ("s", file);
4971 break;
4973 return;
4974 case 'G':
4975 output_global_address (file, x, 0);
4976 return;
4977 case 'H':
4978 output_global_address (file, x, 1);
4979 return;
4980 case 0: /* Don't do anything special */
4981 break;
4982 case 'Z':
4984 unsigned op[3];
4985 compute_zdepwi_operands (INTVAL (x), op);
4986 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
4987 return;
4989 case 'z':
4991 unsigned op[3];
4992 compute_zdepdi_operands (INTVAL (x), op);
4993 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
4994 return;
4996 case 'c':
4997 /* We can get here from a .vtable_inherit due to our
4998 CONSTANT_ADDRESS_P rejecting perfectly good constant
4999 addresses. */
5000 break;
5001 default:
5002 gcc_unreachable ();
5004 if (GET_CODE (x) == REG)
5006 fputs (reg_names [REGNO (x)], file);
5007 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5009 fputs ("R", file);
5010 return;
5012 if (FP_REG_P (x)
5013 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5014 && (REGNO (x) & 1) == 0)
5015 fputs ("L", file);
5017 else if (GET_CODE (x) == MEM)
5019 int size = GET_MODE_SIZE (GET_MODE (x));
5020 rtx base = NULL_RTX;
5021 switch (GET_CODE (XEXP (x, 0)))
5023 case PRE_DEC:
5024 case POST_DEC:
5025 base = XEXP (XEXP (x, 0), 0);
5026 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5027 break;
5028 case PRE_INC:
5029 case POST_INC:
5030 base = XEXP (XEXP (x, 0), 0);
5031 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5032 break;
5033 case PLUS:
5034 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5035 fprintf (file, "%s(%s)",
5036 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5037 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5038 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5039 fprintf (file, "%s(%s)",
5040 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5041 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5042 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5043 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5045 /* Because the REG_POINTER flag can get lost during reload,
5046 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5047 index and base registers in the combined move patterns. */
5048 rtx base = XEXP (XEXP (x, 0), 1);
5049 rtx index = XEXP (XEXP (x, 0), 0);
5051 fprintf (file, "%s(%s)",
5052 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5054 else
5055 output_address (XEXP (x, 0));
5056 break;
5057 default:
5058 output_address (XEXP (x, 0));
5059 break;
5062 else
5063 output_addr_const (file, x);
5066 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5068 void
5069 output_global_address (FILE *file, rtx x, int round_constant)
5072 /* Imagine (high (const (plus ...))). */
5073 if (GET_CODE (x) == HIGH)
5074 x = XEXP (x, 0);
5076 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5077 output_addr_const (file, x);
5078 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5080 output_addr_const (file, x);
5081 fputs ("-$global$", file);
5083 else if (GET_CODE (x) == CONST)
5085 const char *sep = "";
5086 int offset = 0; /* assembler wants -$global$ at end */
5087 rtx base = NULL_RTX;
5089 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5091 case SYMBOL_REF:
5092 base = XEXP (XEXP (x, 0), 0);
5093 output_addr_const (file, base);
5094 break;
5095 case CONST_INT:
5096 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5097 break;
5098 default:
5099 gcc_unreachable ();
5102 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5104 case SYMBOL_REF:
5105 base = XEXP (XEXP (x, 0), 1);
5106 output_addr_const (file, base);
5107 break;
5108 case CONST_INT:
5109 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5110 break;
5111 default:
5112 gcc_unreachable ();
5115 /* How bogus. The compiler is apparently responsible for
5116 rounding the constant if it uses an LR field selector.
5118 The linker and/or assembler seem a better place since
5119 they have to do this kind of thing already.
5121 If we fail to do this, HP's optimizing linker may eliminate
5122 an addil, but not update the ldw/stw/ldo instruction that
5123 uses the result of the addil. */
5124 if (round_constant)
5125 offset = ((offset + 0x1000) & ~0x1fff);
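/* I.e., round to the nearest multiple of 0x2000; an offset of 0x17ff
   becomes 0x2000, while 0xfff becomes 0. */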
5127 switch (GET_CODE (XEXP (x, 0)))
5129 case PLUS:
5130 if (offset < 0)
5132 offset = -offset;
5133 sep = "-";
5135 else
5136 sep = "+";
5137 break;
5139 case MINUS:
5140 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5141 sep = "-";
5142 break;
5144 default:
5145 gcc_unreachable ();
5148 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5149 fputs ("-$global$", file);
5150 if (offset)
5151 fprintf (file, "%s%d", sep, offset);
5153 else
5154 output_addr_const (file, x);
5157 /* Output boilerplate text to appear at the beginning of the file.
5158 There are several possible versions. */
5159 #define aputs(x) fputs(x, asm_out_file)
5160 static inline void
5161 pa_file_start_level (void)
5163 if (TARGET_64BIT)
5164 aputs ("\t.LEVEL 2.0w\n");
5165 else if (TARGET_PA_20)
5166 aputs ("\t.LEVEL 2.0\n");
5167 else if (TARGET_PA_11)
5168 aputs ("\t.LEVEL 1.1\n");
5169 else
5170 aputs ("\t.LEVEL 1.0\n");
5173 static inline void
5174 pa_file_start_space (int sortspace)
5176 aputs ("\t.SPACE $PRIVATE$");
5177 if (sortspace)
5178 aputs (",SORT=16");
5179 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5180 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5181 "\n\t.SPACE $TEXT$");
5182 if (sortspace)
5183 aputs (",SORT=8");
5184 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5185 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5188 static inline void
5189 pa_file_start_file (int want_version)
5191 if (write_symbols != NO_DEBUG)
5193 output_file_directive (asm_out_file, main_input_filename);
5194 if (want_version)
5195 aputs ("\t.version\t\"01.01\"\n");
5199 static inline void
5200 pa_file_start_mcount (const char *aswhat)
5202 if (profile_flag)
5203 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5206 static void
5207 pa_elf_file_start (void)
5209 pa_file_start_level ();
5210 pa_file_start_mcount ("ENTRY");
5211 pa_file_start_file (0);
5214 static void
5215 pa_som_file_start (void)
5217 pa_file_start_level ();
5218 pa_file_start_space (0);
5219 aputs ("\t.IMPORT $global$,DATA\n"
5220 "\t.IMPORT $$dyncall,MILLICODE\n");
5221 pa_file_start_mcount ("CODE");
5222 pa_file_start_file (0);
5225 static void
5226 pa_linux_file_start (void)
5228 pa_file_start_file (1);
5229 pa_file_start_level ();
5230 pa_file_start_mcount ("CODE");
5233 static void
5234 pa_hpux64_gas_file_start (void)
5236 pa_file_start_level ();
5237 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5238 if (profile_flag)
5239 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5240 #endif
5241 pa_file_start_file (1);
5244 static void
5245 pa_hpux64_hpas_file_start (void)
5247 pa_file_start_level ();
5248 pa_file_start_space (1);
5249 pa_file_start_mcount ("CODE");
5250 pa_file_start_file (0);
5252 #undef aputs
5254 /* Search the deferred plabel list for SYMBOL and return its internal
5255 label. If an entry for SYMBOL is not found, a new entry is created. */
5257 static rtx
5258 get_deferred_plabel (rtx symbol)
5260 const char *fname = XSTR (symbol, 0);
5261 size_t i;
5263 /* See if we have already put this function on the list of deferred
5264 plabels. This list is generally small, so a linear search is not
5265 too ugly. If it proves too slow, replace it with something faster. */
5266 for (i = 0; i < n_deferred_plabels; i++)
5267 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5268 break;
5270 /* If the deferred plabel list is empty, or this entry was not found
5271 on the list, create a new entry on the list. */
5272 if (deferred_plabels == NULL || i == n_deferred_plabels)
5274 tree id;
5276 if (deferred_plabels == 0)
5277 deferred_plabels = (struct deferred_plabel *)
5278 ggc_alloc (sizeof (struct deferred_plabel));
5279 else
5280 deferred_plabels = (struct deferred_plabel *)
5281 ggc_realloc (deferred_plabels,
5282 ((n_deferred_plabels + 1)
5283 * sizeof (struct deferred_plabel)));
5285 i = n_deferred_plabels++;
5286 deferred_plabels[i].internal_label = gen_label_rtx ();
5287 deferred_plabels[i].symbol = symbol;
5289 /* Gross. We have just implicitly taken the address of this
5290 function. Mark it in the same manner as assemble_name. */
5291 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5292 if (id)
5293 mark_referenced (id);
5296 return deferred_plabels[i].internal_label;
5299 static void
5300 output_deferred_plabels (void)
5302 size_t i;
5303 /* If we have deferred plabels, then we need to switch into the data
5304 section and align it to a 4-byte boundary (8-byte for the 64-bit
5305 target) before we output the deferred plabels. */
5306 if (n_deferred_plabels)
5308 switch_to_section (data_section);
5309 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5312 /* Now output the deferred plabels. */
5313 for (i = 0; i < n_deferred_plabels; i++)
5315 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5316 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5317 assemble_integer (deferred_plabels[i].symbol,
5318 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5322 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5323 /* Initialize optabs to point to HPUX long double emulation routines. */
5324 static void
5325 pa_hpux_init_libfuncs (void)
5327 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5328 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5329 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5330 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5331 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5332 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5333 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5334 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5335 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5337 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5338 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5339 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5340 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5341 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5342 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5343 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5345 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5346 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5347 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5348 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5350 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5351 ? "__U_Qfcnvfxt_quad_to_sgl"
5352 : "_U_Qfcnvfxt_quad_to_sgl");
5353 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5354 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5355 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5357 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5358 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5359 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5360 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5362 #endif
5364 /* HP's millicode routines mean something special to the assembler.
5365 Keep track of which ones we have used. */
5367 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5368 static void import_milli (enum millicodes);
5369 static char imported[(int) end1000];
5370 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5371 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5372 #define MILLI_START 10
5374 static void
5375 import_milli (enum millicodes code)
5377 char str[sizeof (import_string)];
5379 if (!imported[(int) code])
5381 imported[(int) code] = 1;
5382 strcpy (str, import_string);
5383 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5384 output_asm_insn (str, 0);
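/* For example, import_milli (mulI) emits ".IMPORT $$mulI,MILLICODE";
   MILLI_START (10) is the index of the "...." placeholder just past
   ".IMPORT $$". */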
5388 /* The register constraints have put the operands and return value in
5389 the proper registers. */
5391 const char *
5392 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5394 import_milli (mulI);
5395 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5398 /* Emit the rtl for doing a division by a constant. */
5400 /* Do magic division millicodes exist for this value? */
5401 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
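/* I.e., dedicated millicode entry points ($$divI_N and $$divU_N) exist
   for N = 3, 5, 6, 7, 9, 10, 12, 14 and 15. */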
5403 /* We'll use an array to keep track of the magic millicodes and
5404 whether or not we've used them already. [n][0] is signed, [n][1] is
5405 unsigned. */
5407 static int div_milli[16][2];
5409 int
5410 emit_hpdiv_const (rtx *operands, int unsignedp)
5412 if (GET_CODE (operands[2]) == CONST_INT
5413 && INTVAL (operands[2]) > 0
5414 && INTVAL (operands[2]) < 16
5415 && magic_milli[INTVAL (operands[2])])
5417 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5419 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5420 emit
5421 (gen_rtx_PARALLEL
5422 (VOIDmode,
5423 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5424 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5425 SImode,
5426 gen_rtx_REG (SImode, 26),
5427 operands[2])),
5428 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5429 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5430 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5431 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5432 gen_rtx_CLOBBER (VOIDmode, ret))));
5433 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5434 return 1;
5436 return 0;
5439 const char *
5440 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5442 int divisor;
5444 /* If the divisor is a constant, try to use one of the special
5445 opcodes. */
5446 if (GET_CODE (operands[0]) == CONST_INT)
5448 static char buf[100];
5449 divisor = INTVAL (operands[0]);
5450 if (!div_milli[divisor][unsignedp])
5452 div_milli[divisor][unsignedp] = 1;
5453 if (unsignedp)
5454 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5455 else
5456 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5458 if (unsignedp)
5460 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5461 INTVAL (operands[0]));
5462 return output_millicode_call (insn,
5463 gen_rtx_SYMBOL_REF (SImode, buf));
5465 else
5467 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5468 INTVAL (operands[0]));
5469 return output_millicode_call (insn,
5470 gen_rtx_SYMBOL_REF (SImode, buf));
5473 /* Divisor isn't a special constant. */
5474 else
5476 if (unsignedp)
5478 import_milli (divU);
5479 return output_millicode_call (insn,
5480 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5482 else
5484 import_milli (divI);
5485 return output_millicode_call (insn,
5486 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5491 /* Output a $$rem millicode to do mod. */
5493 const char *
5494 output_mod_insn (int unsignedp, rtx insn)
5496 if (unsignedp)
5498 import_milli (remU);
5499 return output_millicode_call (insn,
5500 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5502 else
5504 import_milli (remI);
5505 return output_millicode_call (insn,
5506 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5510 void
5511 output_arg_descriptor (rtx call_insn)
5513 const char *arg_regs[4];
5514 enum machine_mode arg_mode;
5515 rtx link;
5516 int i, output_flag = 0;
5517 int regno;
5519 /* We neither need nor want argument location descriptors for the
5520 64-bit runtime environment or the ELF32 environment. */
5521 if (TARGET_64BIT || TARGET_ELF32)
5522 return;
5524 for (i = 0; i < 4; i++)
5525 arg_regs[i] = 0;
5527 /* Specify explicitly that no argument relocations should take place
5528 if using the portable runtime calling conventions. */
5529 if (TARGET_PORTABLE_RUNTIME)
5531 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5532 asm_out_file);
5533 return;
5536 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5537 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5538 link; link = XEXP (link, 1))
5540 rtx use = XEXP (link, 0);
5542 if (! (GET_CODE (use) == USE
5543 && GET_CODE (XEXP (use, 0)) == REG
5544 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5545 continue;
5547 arg_mode = GET_MODE (XEXP (use, 0));
5548 regno = REGNO (XEXP (use, 0));
5549 if (regno >= 23 && regno <= 26)
5551 arg_regs[26 - regno] = "GR";
5552 if (arg_mode == DImode)
5553 arg_regs[25 - regno] = "GR";
5555 else if (regno >= 32 && regno <= 39)
5557 if (arg_mode == SFmode)
5558 arg_regs[(regno - 32) / 2] = "FR";
5559 else
5561 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5562 arg_regs[(regno - 34) / 2] = "FR";
5563 arg_regs[(regno - 34) / 2 + 1] = "FU";
5564 #else
5565 arg_regs[(regno - 34) / 2] = "FU";
5566 arg_regs[(regno - 34) / 2 + 1] = "FR";
5567 #endif
5571 fputs ("\t.CALL ", asm_out_file);
5572 for (i = 0; i < 4; i++)
5574 if (arg_regs[i])
5576 if (output_flag++)
5577 fputc (',', asm_out_file);
5578 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5581 fputc ('\n', asm_out_file);
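/* For example, a call to int f(int, int), with the arguments in %r26
   and %r25, produces ".CALL ARGW0=GR,ARGW1=GR". */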
5584 static enum reg_class
5585 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5586 enum machine_mode mode, secondary_reload_info *sri)
5588 int is_symbolic, regno;
5590 /* Handle the easy stuff first. */
5591 if (class == R1_REGS)
5592 return NO_REGS;
5594 if (REG_P (x))
5596 regno = REGNO (x);
5597 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5598 return NO_REGS;
5600 else
5601 regno = -1;
5603 /* If we have something like (mem (mem (...))), we can safely assume the
5604 inner MEM will end up in a general register after reloading, so there's
5605 no need for a secondary reload. */
5606 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5607 return NO_REGS;
5609 /* Trying to load a constant into a FP register during PIC code
5610 generation requires %r1 as a scratch register. */
5611 if (flag_pic
5612 && GET_MODE_CLASS (mode) == MODE_INT
5613 && FP_REG_CLASS_P (class)
5614 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5616 gcc_assert (mode == SImode || mode == DImode);
5617 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5618 : CODE_FOR_reload_indi_r1);
5619 return NO_REGS;
5622 /* Profiling showed the PA port spends about 1.3% of its compilation
5623 time in true_regnum from calls inside pa_secondary_reload_class. */
5624 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5625 regno = true_regnum (x);
5627 /* Handle out of range displacement for integer mode loads/stores of
5628 FP registers. */
5629 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5630 && GET_MODE_CLASS (mode) == MODE_INT
5631 && FP_REG_CLASS_P (class))
5632 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5634 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5635 return NO_REGS;
5638 /* A SAR<->FP register copy requires a secondary register (GPR) as
5639 well as secondary memory. */
5640 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5641 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5642 || (class == SHIFT_REGS
5643 && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5645 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5646 return NO_REGS;
5649 /* Secondary reloads of symbolic operands require %r1 as a scratch
5650 register when we're generating PIC code and the operand isn't
5651 readonly. */
5652 if (GET_CODE (x) == HIGH)
5653 x = XEXP (x, 0);
5655 /* Profiling has shown GCC spends about 2.6% of its compilation
5656 time in symbolic_operand from calls inside pa_secondary_reload_class.
5657 So, we use an inline copy to avoid useless work. */
5658 switch (GET_CODE (x))
5660 rtx op;
5662 case SYMBOL_REF:
5663 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5664 break;
5665 case LABEL_REF:
5666 is_symbolic = 1;
5667 break;
5668 case CONST:
5669 op = XEXP (x, 0);
5670 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5671 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5672 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5673 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5674 break;
5675 default:
5676 is_symbolic = 0;
5677 break;
5680 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5682 gcc_assert (mode == SImode || mode == DImode);
5683 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5684 : CODE_FOR_reload_indi_r1);
5687 return NO_REGS;
5690 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5691 by invisible reference. As a GCC extension, we also pass anything
5692 with a zero or variable size by reference.
5694 The 64-bit runtime does not describe passing any types by invisible
5695 reference. The internals of GCC can't currently handle passing
5696 empty structures, and zero or variable length arrays when they are
5697 not passed entirely on the stack or by reference. Thus, as a GCC
5698 extension, we pass these types by reference. The HP compiler doesn't
5699 support these types, so hopefully there shouldn't be any compatibility
5700 issues. This may have to be revisited when HP releases a C99 compiler
5701 or updates the ABI. */
5703 static bool
5704 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5705 enum machine_mode mode, tree type,
5706 bool named ATTRIBUTE_UNUSED)
5708 HOST_WIDE_INT size;
5710 if (type)
5711 size = int_size_in_bytes (type);
5712 else
5713 size = GET_MODE_SIZE (mode);
5715 if (TARGET_64BIT)
5716 return size <= 0;
5717 else
5718 return size <= 0 || size > 8;
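/* Thus, a 16-byte struct is passed by reference in the 32-bit runtime
   but by value in the 64-bit runtime; an 8-byte object is passed by
   value in both. */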
5721 enum direction
5722 function_arg_padding (enum machine_mode mode, tree type)
5724 if (mode == BLKmode
5725 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5727 /* Return none if justification is not required. */
5728 if (type
5729 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5730 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5731 return none;
5733 /* The directions set here are ignored when a BLKmode argument larger
5734 than a word is placed in a register. Different code is used for
5735 the stack and registers. This makes it difficult to have a
5736 consistent data representation for both the stack and registers.
5737 For both runtimes, the justification and padding for arguments on
5738 the stack and in registers should be identical. */
5739 if (TARGET_64BIT)
5740 /* The 64-bit runtime specifies left justification for aggregates. */
5741 return upward;
5742 else
5743 /* The 32-bit runtime architecture specifies right justification.
5744 When the argument is passed on the stack, the argument is padded
5745 with garbage on the left. The HP compiler pads with zeros. */
5746 return downward;
5749 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5750 return downward;
5751 else
5752 return none;
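/* For example, a scalar narrower than PARM_BOUNDARY (say HImode) is
   padded downward, while one that exactly fills a parameter word needs
   no padding. */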
5756 /* Do what is necessary for `va_start'. We look at the current function
5757 to determine if stdargs or varargs is used and fill in an initial
5758 va_list. A pointer to this constructor is returned. */
5760 static rtx
5761 hppa_builtin_saveregs (void)
5763 rtx offset, dest;
5764 tree fntype = TREE_TYPE (current_function_decl);
5765 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5766 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5767 != void_type_node)))
5768 ? UNITS_PER_WORD : 0);
5770 if (argadj)
5771 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5772 else
5773 offset = current_function_arg_offset_rtx;
5775 if (TARGET_64BIT)
5777 int i, off;
5779 /* Adjust for varargs/stdarg differences. */
5780 if (argadj)
5781 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5782 else
5783 offset = current_function_arg_offset_rtx;
5785 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5786 from the incoming arg pointer and growing to larger addresses. */
5787 for (i = 26, off = -64; i >= 19; i--, off += 8)
5788 emit_move_insn (gen_rtx_MEM (word_mode,
5789 plus_constant (arg_pointer_rtx, off)),
5790 gen_rtx_REG (word_mode, i));
5792 /* The incoming args pointer points just beyond the flushback area;
5793 normally this is not a serious concern. However, when we are doing
5794 varargs/stdargs we want to make the arg pointer point to the start
5795 of the incoming argument area. */
5796 emit_move_insn (virtual_incoming_args_rtx,
5797 plus_constant (arg_pointer_rtx, -64));
5799 /* Now return a pointer to the first anonymous argument. */
5800 return copy_to_reg (expand_binop (Pmode, add_optab,
5801 virtual_incoming_args_rtx,
5802 offset, 0, 0, OPTAB_LIB_WIDEN));
5805 /* Store general registers on the stack. */
5806 dest = gen_rtx_MEM (BLKmode,
5807 plus_constant (current_function_internal_arg_pointer,
5808 -16));
5809 set_mem_alias_set (dest, get_varargs_alias_set ());
5810 set_mem_align (dest, BITS_PER_WORD);
5811 move_block_from_reg (23, dest, 4);
5813 /* move_block_from_reg will emit code to store the argument registers
5814 individually as scalar stores.
5816 However, other insns may later load from the same addresses for
5817 a structure load (passing a struct to a varargs routine).
5819 The alias code assumes that such aliasing can never happen, so we
5820 have to keep memory referencing insns from moving up beyond the
5821 last argument register store. So we emit a blockage insn here. */
5822 emit_insn (gen_blockage ());
5824 return copy_to_reg (expand_binop (Pmode, add_optab,
5825 current_function_internal_arg_pointer,
5826 offset, 0, 0, OPTAB_LIB_WIDEN));
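/* Sketch of the 64-bit save area laid down by the loop above,
   relative to the incoming arg pointer (8-byte slots):

     -64: %r26  -56: %r25  -48: %r24  -40: %r23
     -32: %r22  -24: %r21  -16: %r20   -8: %r19

   The 32-bit path instead stores %r23..%r26 with move_block_from_reg
   starting 16 bytes below the internal arg pointer. */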
5829 void
5830 hppa_va_start (tree valist, rtx nextarg)
5832 nextarg = expand_builtin_saveregs ();
5833 std_expand_builtin_va_start (valist, nextarg);
5836 static tree
5837 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5839 if (TARGET_64BIT)
5841 /* Args grow upward. We can use the generic routines. */
5842 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5844 else /* !TARGET_64BIT */
5846 tree ptr = build_pointer_type (type);
5847 tree valist_type;
5848 tree t, u;
5849 unsigned int size, ofs;
5850 bool indirect;
5852 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5853 if (indirect)
5855 type = ptr;
5856 ptr = build_pointer_type (type);
5858 size = int_size_in_bytes (type);
5859 valist_type = TREE_TYPE (valist);
5861 /* Args grow down. Not handled by generic routines. */
5863 u = fold_convert (valist_type, size_in_bytes (type));
5864 t = build2 (MINUS_EXPR, valist_type, valist, u);
5866 /* Copied from va-pa.h, but we probably don't need to align to
5867 word size, since we generate and preserve that invariant. */
5868 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5869 t = build2 (BIT_AND_EXPR, valist_type, t, u);
5871 t = build2 (MODIFY_EXPR, valist_type, valist, t);
5873 ofs = (8 - size) % 4;
5874 if (ofs != 0)
5876 u = fold_convert (valist_type, size_int (ofs));
5877 t = build2 (PLUS_EXPR, valist_type, t, u);
5880 t = fold_convert (ptr, t);
5881 t = build_va_arg_indirect_ref (t);
5883 if (indirect)
5884 t = build_va_arg_indirect_ref (t);
5886 return t;
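/* Worked 32-bit examples of the computation above: for a 4-byte int,
   valist = (valist - 4) & -4 and the value is loaded from there; for
   a 2-byte short, valist = (valist - 2) & -4 and the load is offset
   by (8 - 2) % 4 == 2, right justifying the value in its slot; for
   an 8-byte double, valist = (valist - 8) & -8. */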
5890 /* True if MODE is valid for the target. By "valid", we mean able to
5891 be manipulated in non-trivial ways. In particular, this means all
5892 the arithmetic is supported.
5894 Currently, TImode is not valid as the HP 64-bit runtime documentation
5895 doesn't document the alignment and calling conventions for this type.
5896 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5897 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
5899 static bool
5900 pa_scalar_mode_supported_p (enum machine_mode mode)
5902 int precision = GET_MODE_PRECISION (mode);
5904 switch (GET_MODE_CLASS (mode))
5906 case MODE_PARTIAL_INT:
5907 case MODE_INT:
5908 if (precision == CHAR_TYPE_SIZE)
5909 return true;
5910 if (precision == SHORT_TYPE_SIZE)
5911 return true;
5912 if (precision == INT_TYPE_SIZE)
5913 return true;
5914 if (precision == LONG_TYPE_SIZE)
5915 return true;
5916 if (precision == LONG_LONG_TYPE_SIZE)
5917 return true;
5918 return false;
5920 case MODE_FLOAT:
5921 if (precision == FLOAT_TYPE_SIZE)
5922 return true;
5923 if (precision == DOUBLE_TYPE_SIZE)
5924 return true;
5925 if (precision == LONG_DOUBLE_TYPE_SIZE)
5926 return true;
5927 return false;
5929 default:
5930 gcc_unreachable ();
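/* E.g. TImode has a precision of 2 * BITS_PER_WORD (128) in the
   64-bit runtime; LONG_LONG_TYPE_SIZE is only 64, so no case above
   matches and the mode is rejected, as the comment before this
   function explains. */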
5934 /* This routine handles all the normal conditional branch sequences we
5935 might need to generate. It handles compare immediate vs compare
5936 register, nullification of delay slots, varying length branches,
5937 negated branches, and all combinations of the above. It returns the
5938 output template appropriate to the branch described by the given
5939 parameters. */
5941 const char *
5942 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
5944 static char buf[100];
5945 int useskip = 0;
5946 rtx xoperands[5];
5948 /* A conditional branch to the following instruction (e.g. the delay slot)
5949 is asking for a disaster. This can happen when not optimizing and
5950 when jump optimization fails.
5952 While it is usually safe to emit nothing, this can fail if the
5953 preceding instruction is a nullified branch with an empty delay
5954 slot and the same branch target as this branch. We could check
5955 for this but jump optimization should eliminate nop jumps. It
5956 is always safe to emit a nop. */
5957 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5958 return "nop";
5960 /* The doubleword form of the cmpib instruction doesn't have the LEU
5961 and GTU conditions while the cmpb instruction does. Since we accept
5962 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5963 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5964 operands[2] = gen_rtx_REG (DImode, 0);
5965 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
5966 operands[1] = gen_rtx_REG (DImode, 0);
5968 /* If this is a long branch with its delay slot unfilled, set `nullify'
5969 as it can nullify the delay slot and save a nop. */
5970 if (length == 8 && dbr_sequence_length () == 0)
5971 nullify = 1;
5973 /* If this is a short forward conditional branch which did not get
5974 its delay slot filled, the delay slot can still be nullified. */
5975 if (! nullify && length == 4 && dbr_sequence_length () == 0)
5976 nullify = forward_branch_p (insn);
5978 /* A forward branch over a single nullified insn can be done with a
5979 comclr instruction. This avoids a single cycle penalty due to
5980 mis-predicted branch if we fall through (branch not taken). */
5981 if (length == 4
5982 && next_real_insn (insn) != 0
5983 && get_attr_length (next_real_insn (insn)) == 4
5984 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
5985 && nullify)
5986 useskip = 1;
5988 switch (length)
5990 /* All short conditional branches except backwards with an unfilled
5991 delay slot. */
5992 case 4:
5993 if (useskip)
5994 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
5995 else
5996 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5997 if (GET_MODE (operands[1]) == DImode)
5998 strcat (buf, "*");
5999 if (negated)
6000 strcat (buf, "%B3");
6001 else
6002 strcat (buf, "%S3");
6003 if (useskip)
6004 strcat (buf, " %2,%r1,%%r0");
6005 else if (nullify)
6006 strcat (buf, ",n %2,%r1,%0");
6007 else
6008 strcat (buf, " %2,%r1,%0");
6009 break;
6011 /* All long conditionals. Note a short backward branch with an
6012 unfilled delay slot is treated just like a long backward branch
6013 with an unfilled delay slot. */
6014 case 8:
6015 /* Handle weird backwards branch with a filled delay slot
6016 which is nullified. */
6017 if (dbr_sequence_length () != 0
6018 && ! forward_branch_p (insn)
6019 && nullify)
6021 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6022 if (GET_MODE (operands[1]) == DImode)
6023 strcat (buf, "*");
6024 if (negated)
6025 strcat (buf, "%S3");
6026 else
6027 strcat (buf, "%B3");
6028 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6030 /* Handle short backwards branch with an unfilled delay slot.
6031 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6032 taken and untaken branches. */
6033 else if (dbr_sequence_length () == 0
6034 && ! forward_branch_p (insn)
6035 && INSN_ADDRESSES_SET_P ()
6036 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6037 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6039 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6040 if (GET_MODE (operands[1]) == DImode)
6041 strcat (buf, "*");
6042 if (negated)
6043 strcat (buf, "%B3 %2,%r1,%0%#");
6044 else
6045 strcat (buf, "%S3 %2,%r1,%0%#");
6047 else
6049 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6050 if (GET_MODE (operands[1]) == DImode)
6051 strcat (buf, "*");
6052 if (negated)
6053 strcat (buf, "%S3");
6054 else
6055 strcat (buf, "%B3");
6056 if (nullify)
6057 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6058 else
6059 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6061 break;
6063 case 20:
6064 case 28:
6065 xoperands[0] = operands[0];
6066 xoperands[1] = operands[1];
6067 xoperands[2] = operands[2];
6068 xoperands[3] = operands[3];
6070 /* The reversed conditional branch must branch over one additional
6071 instruction if the delay slot is filled. If the delay slot
6072 is empty, the instruction after the reversed condition branch
6073 must be nullified. */
6074 nullify = dbr_sequence_length () == 0;
6075 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
6077 /* Create a reversed conditional branch which branches around
6078 the following insns. */
6079 if (GET_MODE (operands[1]) != DImode)
6081 if (nullify)
6083 if (negated)
6084 strcpy (buf,
6085 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6086 else
6087 strcpy (buf,
6088 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6090 else
6092 if (negated)
6093 strcpy (buf,
6094 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6095 else
6096 strcpy (buf,
6097 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6100 else
6102 if (nullify)
6104 if (negated)
6105 strcpy (buf,
6106 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6107 else
6108 strcpy (buf,
6109 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6111 else
6113 if (negated)
6114 strcpy (buf,
6115 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6116 else
6117 strcpy (buf,
6118 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6122 output_asm_insn (buf, xoperands);
6123 return output_lbranch (operands[0], insn);
6125 default:
6126 gcc_unreachable ();
6128 return buf;
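/* A note on the templates above: the {x|y} braces select the
   assembler dialect, PA 1.x "com*" mnemonics versus PA 2.0 "cmp*"
   mnemonics. For illustration, the short non-nullified case might
   assemble to something like

     comb,= %r4,%r5,L$0012

   with ",n" appended when the delay slot is nullified and "*"
   inserted for the doubleword forms. (Operands and condition here
   are made up for the example.) */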
6131 /* This routine handles long unconditional branches that exceed the
6132 maximum range of a simple branch instruction. */
6134 const char *
6135 output_lbranch (rtx dest, rtx insn)
6137 rtx xoperands[2];
6139 xoperands[0] = dest;
6141 /* First, free up the delay slot. */
6142 if (dbr_sequence_length () != 0)
6144 /* We can't handle a jump in the delay slot. */
6145 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6147 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6148 optimize, 0, NULL);
6150 /* Now delete the delay insn. */
6151 PUT_CODE (NEXT_INSN (insn), NOTE);
6152 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6153 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6156 /* Output an insn to save %r1. The runtime documentation doesn't
6157 specify whether the "Clean Up" slot in the caller's frame can
6158 be clobbered by the callee. It isn't copied by HP's builtin
6159 alloca, so this suggests that it can be clobbered if necessary.
6160 The "Static Link" location is copied by HP builtin alloca, so
6161 we avoid using it. Using the cleanup slot might be a problem
6162 if we have to interoperate with languages that pass cleanup
6163 information. However, it should be possible to handle these
6164 situations with GCC's asm feature.
6166 The "Current RP" slot is reserved for the called procedure, so
6167 we try to use it when we don't have a frame of our own. It's
6168 rather unlikely that we won't have a frame when we need to emit
6169 a very long branch.
6171 Really the way to go long term is a register scavenger; go to
6172 the target of the jump and find a register which we can use
6173 as a scratch to hold the value in %r1. Then, we wouldn't have
6174 to free up the delay slot or clobber a slot that may be needed
6175 for other purposes. */
6176 if (TARGET_64BIT)
6178 if (actual_fsize == 0 && !regs_ever_live[2])
6179 /* Use the return pointer slot in the frame marker. */
6180 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6181 else
6182 /* Use the slot at -40 in the frame marker since HP builtin
6183 alloca doesn't copy it. */
6184 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6186 else
6188 if (actual_fsize == 0 && !regs_ever_live[2])
6189 /* Use the return pointer slot in the frame marker. */
6190 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6191 else
6192 /* Use the "Clean Up" slot in the frame marker. In GCC,
6193 the only other use of this location is for copying a
6194 floating point double argument from a floating-point
6195 register to two general registers. The copy is done
6196 as an "atomic" operation when outputting a call, so it
6197 won't interfere with our using the location here. */
6198 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6201 if (TARGET_PORTABLE_RUNTIME)
6203 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6204 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6205 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6207 else if (flag_pic)
6209 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6210 if (TARGET_SOM || !TARGET_GAS)
6212 xoperands[1] = gen_label_rtx ();
6213 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6214 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6215 CODE_LABEL_NUMBER (xoperands[1]));
6216 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6218 else
6220 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6221 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6223 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6225 else
6226 /* Now output a very long branch to the original target. */
6227 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6229 /* Now restore the value of %r1 in the delay slot. */
6230 if (TARGET_64BIT)
6232 if (actual_fsize == 0 && !regs_ever_live[2])
6233 return "ldd -16(%%r30),%%r1";
6234 else
6235 return "ldd -40(%%r30),%%r1";
6237 else
6239 if (actual_fsize == 0 && !regs_ever_live[2])
6240 return "ldw -20(%%r30),%%r1";
6241 else
6242 return "ldw -12(%%r30),%%r1";
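/* Pieced together, the 32-bit non-PIC case above emits roughly

     stw %r1,-20(%r30)      ; spill %r1 (frameless case)
     ldil L'target,%r1
     be R'target(%sr4,%r1)
     ldw -20(%r30),%r1      ; restore %r1 in the delay slot

   with the PIC and portable-runtime variants substituting their own
   address computations. (Sketch only; "target" stands for %l0.) */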
6246 /* This routine handles all the branch-on-bit conditional branch sequences we
6247 might need to generate. It handles nullification of delay slots,
6248 varying length branches, negated branches and all combinations of the
6249 above. It returns the appropriate output template to emit the branch. */
6251 const char *
6252 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6253 int negated, rtx insn, int which)
6255 static char buf[100];
6256 int useskip = 0;
6258 /* A conditional branch to the following instruction (e.g. the delay slot) is
6259 asking for a disaster. I do not think this can happen as this pattern
6260 is only used when optimizing; jump optimization should eliminate the
6261 jump. But be prepared just in case. */
6263 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6264 return "nop";
6266 /* If this is a long branch with its delay slot unfilled, set `nullify'
6267 as it can nullify the delay slot and save a nop. */
6268 if (length == 8 && dbr_sequence_length () == 0)
6269 nullify = 1;
6271 /* If this is a short forward conditional branch which did not get
6272 its delay slot filled, the delay slot can still be nullified. */
6273 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6274 nullify = forward_branch_p (insn);
6276 /* A forward branch over a single nullified insn can be done with an
6277 extrs instruction. This avoids a single cycle penalty due to
6278 mis-predicted branch if we fall through (branch not taken). */
6280 if (length == 4
6281 && next_real_insn (insn) != 0
6282 && get_attr_length (next_real_insn (insn)) == 4
6283 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6284 && nullify)
6285 useskip = 1;
6287 switch (length)
6290 /* All short conditional branches except backwards with an unfilled
6291 delay slot. */
6292 case 4:
6293 if (useskip)
6294 strcpy (buf, "{extrs,|extrw,s,}");
6295 else
6296 strcpy (buf, "bb,");
6297 if (useskip && GET_MODE (operands[0]) == DImode)
6298 strcpy (buf, "extrd,s,*");
6299 else if (GET_MODE (operands[0]) == DImode)
6300 strcpy (buf, "bb,*");
6301 if ((which == 0 && negated)
6302 || (which == 1 && ! negated))
6303 strcat (buf, ">=");
6304 else
6305 strcat (buf, "<");
6306 if (useskip)
6307 strcat (buf, " %0,%1,1,%%r0");
6308 else if (nullify && negated)
6309 strcat (buf, ",n %0,%1,%3");
6310 else if (nullify && ! negated)
6311 strcat (buf, ",n %0,%1,%2");
6312 else if (! nullify && negated)
6313 strcat (buf, "%0,%1,%3");
6314 else if (! nullify && ! negated)
6315 strcat (buf, " %0,%1,%2");
6316 break;
6318 /* All long conditionals. Note a short backward branch with an
6319 unfilled delay slot is treated just like a long backward branch
6320 with an unfilled delay slot. */
6321 case 8:
6322 /* Handle weird backwards branch with a filled delay slot
6323 which is nullified. */
6324 if (dbr_sequence_length () != 0
6325 && ! forward_branch_p (insn)
6326 && nullify)
6328 strcpy (buf, "bb,");
6329 if (GET_MODE (operands[0]) == DImode)
6330 strcat (buf, "*");
6331 if ((which == 0 && negated)
6332 || (which == 1 && ! negated))
6333 strcat (buf, "<");
6334 else
6335 strcat (buf, ">=");
6336 if (negated)
6337 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6338 else
6339 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6341 /* Handle short backwards branch with an unfilled delay slot.
6342 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6343 taken and untaken branches. */
6344 else if (dbr_sequence_length () == 0
6345 && ! forward_branch_p (insn)
6346 && INSN_ADDRESSES_SET_P ()
6347 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6348 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6350 strcpy (buf, "bb,");
6351 if (GET_MODE (operands[0]) == DImode)
6352 strcat (buf, "*");
6353 if ((which == 0 && negated)
6354 || (which == 1 && ! negated))
6355 strcat (buf, ">=");
6356 else
6357 strcat (buf, "<");
6358 if (negated)
6359 strcat (buf, " %0,%1,%3%#");
6360 else
6361 strcat (buf, " %0,%1,%2%#");
6363 else
6365 strcpy (buf, "{extrs,|extrw,s,}");
6366 if (GET_MODE (operands[0]) == DImode)
6367 strcpy (buf, "extrd,s,*");
6368 if ((which == 0 && negated)
6369 || (which == 1 && ! negated))
6370 strcat (buf, "<");
6371 else
6372 strcat (buf, ">=");
6373 if (nullify && negated)
6374 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6375 else if (nullify && ! negated)
6376 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6377 else if (negated)
6378 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6379 else
6380 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6382 break;
6384 default:
6385 gcc_unreachable ();
6387 return buf;
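/* For illustration, a short branch-on-bit might come out as

     bb,< %r4,5,L$0034

   taken when bit 5 (numbered from the most significant bit) of %r4
   is 1, or "bb,>=" for the bit-clear sense; the useskip variants use
   extrs/extrw,s to nullify the following insn instead of branching.
   (Operands made up for the example.) */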
6390 /* This routine handles all the branch-on-variable-bit conditional branch
6391 sequences we might need to generate. It handles nullification of delay
6392 slots, varying length branches, negated branches and all combinations
6393 of the above. It returns the appropriate output template to emit the
6394 branch. */
6396 const char *
6397 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6398 int negated, rtx insn, int which)
6400 static char buf[100];
6401 int useskip = 0;
6403 /* A conditional branch to the following instruction (e.g. the delay slot) is
6404 asking for a disaster. I do not think this can happen as this pattern
6405 is only used when optimizing; jump optimization should eliminate the
6406 jump. But be prepared just in case. */
6408 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6409 return "nop";
6411 /* If this is a long branch with its delay slot unfilled, set `nullify'
6412 as it can nullify the delay slot and save a nop. */
6413 if (length == 8 && dbr_sequence_length () == 0)
6414 nullify = 1;
6416 /* If this is a short forward conditional branch which did not get
6417 its delay slot filled, the delay slot can still be nullified. */
6418 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6419 nullify = forward_branch_p (insn);
6421 /* A forward branch over a single nullified insn can be done with an
6422 extrs instruction. This avoids a single cycle penalty due to
6423 mis-predicted branch if we fall through (branch not taken). */
6425 if (length == 4
6426 && next_real_insn (insn) != 0
6427 && get_attr_length (next_real_insn (insn)) == 4
6428 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6429 && nullify)
6430 useskip = 1;
6432 switch (length)
6435 /* All short conditional branches except backwards with an unfilled
6436 delay slot. */
6437 case 4:
6438 if (useskip)
6439 strcpy (buf, "{vextrs,|extrw,s,}");
6440 else
6441 strcpy (buf, "{bvb,|bb,}");
6442 if (useskip && GET_MODE (operands[0]) == DImode)
6443 strcpy (buf, "extrd,s,*");
6444 else if (GET_MODE (operands[0]) == DImode)
6445 strcpy (buf, "bb,*");
6446 if ((which == 0 && negated)
6447 || (which == 1 && ! negated))
6448 strcat (buf, ">=");
6449 else
6450 strcat (buf, "<");
6451 if (useskip)
6452 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6453 else if (nullify && negated)
6454 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6455 else if (nullify && ! negated)
6456 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6457 else if (! nullify && negated)
6458 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6459 else if (! nullify && ! negated)
6460 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6461 break;
6463 /* All long conditionals. Note a short backward branch with an
6464 unfilled delay slot is treated just like a long backward branch
6465 with an unfilled delay slot. */
6466 case 8:
6467 /* Handle weird backwards branch with a filled delay slot
6468 which is nullified. */
6469 if (dbr_sequence_length () != 0
6470 && ! forward_branch_p (insn)
6471 && nullify)
6473 strcpy (buf, "{bvb,|bb,}");
6474 if (GET_MODE (operands[0]) == DImode)
6475 strcat (buf, "*");
6476 if ((which == 0 && negated)
6477 || (which == 1 && ! negated))
6478 strcat (buf, "<");
6479 else
6480 strcat (buf, ">=");
6481 if (negated)
6482 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6483 else
6484 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6486 /* Handle short backwards branch with an unfilled delay slot.
6487 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6488 taken and untaken branches. */
6489 else if (dbr_sequence_length () == 0
6490 && ! forward_branch_p (insn)
6491 && INSN_ADDRESSES_SET_P ()
6492 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6493 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6495 strcpy (buf, "{bvb,|bb,}");
6496 if (GET_MODE (operands[0]) == DImode)
6497 strcat (buf, "*");
6498 if ((which == 0 && negated)
6499 || (which == 1 && ! negated))
6500 strcat (buf, ">=");
6501 else
6502 strcat (buf, "<");
6503 if (negated)
6504 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6505 else
6506 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6508 else
6510 strcpy (buf, "{vextrs,|extrw,s,}");
6511 if (GET_MODE (operands[0]) == DImode)
6512 strcpy (buf, "extrd,s,*");
6513 if ((which == 0 && negated)
6514 || (which == 1 && ! negated))
6515 strcat (buf, "<");
6516 else
6517 strcat (buf, ">=");
6518 if (nullify && negated)
6519 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6520 else if (nullify && ! negated)
6521 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6522 else if (negated)
6523 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6524 else
6525 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6527 break;
6529 default:
6530 gcc_unreachable ();
6532 return buf;
6535 /* Return the output template for emitting a dbra type insn.
6537 Note it may perform some output operations on its own before
6538 returning the final output string. */
6539 const char *
6540 output_dbra (rtx *operands, rtx insn, int which_alternative)
6543 /* A conditional branch to the following instruction (e.g. the delay slot) is
6544 asking for a disaster. Be prepared! */
6546 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6548 if (which_alternative == 0)
6549 return "ldo %1(%0),%0";
6550 else if (which_alternative == 1)
6552 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6553 output_asm_insn ("ldw -16(%%r30),%4", operands);
6554 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6555 return "{fldws|fldw} -16(%%r30),%0";
6557 else
6559 output_asm_insn ("ldw %0,%4", operands);
6560 return "ldo %1(%4),%4\n\tstw %4,%0";
6564 if (which_alternative == 0)
6566 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6567 int length = get_attr_length (insn);
6569 /* If this is a long branch with its delay slot unfilled, set `nullify'
6570 as it can nullify the delay slot and save a nop. */
6571 if (length == 8 && dbr_sequence_length () == 0)
6572 nullify = 1;
6574 /* If this is a short forward conditional branch which did not get
6575 its delay slot filled, the delay slot can still be nullified. */
6576 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6577 nullify = forward_branch_p (insn);
6579 switch (length)
6581 case 4:
6582 if (nullify)
6583 return "addib,%C2,n %1,%0,%3";
6584 else
6585 return "addib,%C2 %1,%0,%3";
6587 case 8:
6588 /* Handle weird backwards branch with a filled delay slot
6589 which is nullified. */
6590 if (dbr_sequence_length () != 0
6591 && ! forward_branch_p (insn)
6592 && nullify)
6593 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6594 /* Handle short backwards branch with an unfilled delay slot.
6595 Using an addb;nop rather than addi;bl saves 1 cycle for both
6596 taken and untaken branches. */
6597 else if (dbr_sequence_length () == 0
6598 && ! forward_branch_p (insn)
6599 && INSN_ADDRESSES_SET_P ()
6600 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6601 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6602 return "addib,%C2 %1,%0,%3%#";
6604 /* Handle normal cases. */
6605 if (nullify)
6606 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6607 else
6608 return "addi,%N2 %1,%0,%0\n\tb %3";
6610 default:
6611 gcc_unreachable ();
6615 /* Deal with gross reload from FP register case. */
6616 else if (which_alternative == 1)
6618 /* Move loop counter from FP register to MEM then into a GR,
6619 increment the GR, store the GR into MEM, and finally reload
6620 the FP register from MEM from within the branch's delay slot. */
6621 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6622 operands);
6623 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6624 if (get_attr_length (insn) == 24)
6625 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6626 else
6627 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6629 /* Deal with gross reload from memory case. */
6630 else
6632 /* Reload loop counter from memory; the store back to memory
6633 happens in the branch's delay slot. */
6634 output_asm_insn ("ldw %0,%4", operands);
6635 if (get_attr_length (insn) == 12)
6636 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6637 else
6638 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
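/* For illustration, the common decrement-and-branch case yields
   something like

     addib,> -1,%r3,L$0005

   adding -1 to %r3 and branching while the result is positive; the
   FP-register and memory alternatives bounce the counter through the
   stack slot at -16(%r30) as coded above. (Operands made up.) */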
6642 /* Return the output template for emitting a movb type insn.
6644 Note it may perform some output operations on its own before
6645 returning the final output string. */
6646 const char *
6647 output_movb (rtx *operands, rtx insn, int which_alternative,
6648 int reverse_comparison)
6651 /* A conditional branch to the following instruction (e.g. the delay slot) is
6652 asking for a disaster. Be prepared! */
6654 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6656 if (which_alternative == 0)
6657 return "copy %1,%0";
6658 else if (which_alternative == 1)
6660 output_asm_insn ("stw %1,-16(%%r30)", operands);
6661 return "{fldws|fldw} -16(%%r30),%0";
6663 else if (which_alternative == 2)
6664 return "stw %1,%0";
6665 else
6666 return "mtsar %r1";
6669 /* Support the second variant. */
6670 if (reverse_comparison)
6671 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6673 if (which_alternative == 0)
6675 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6676 int length = get_attr_length (insn);
6678 /* If this is a long branch with its delay slot unfilled, set `nullify'
6679 as it can nullify the delay slot and save a nop. */
6680 if (length == 8 && dbr_sequence_length () == 0)
6681 nullify = 1;
6683 /* If this is a short forward conditional branch which did not get
6684 its delay slot filled, the delay slot can still be nullified. */
6685 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6686 nullify = forward_branch_p (insn);
6688 switch (length)
6690 case 4:
6691 if (nullify)
6692 return "movb,%C2,n %1,%0,%3";
6693 else
6694 return "movb,%C2 %1,%0,%3";
6696 case 8:
6697 /* Handle weird backwards branch with a filled delay slot
6698 which is nullified. */
6699 if (dbr_sequence_length () != 0
6700 && ! forward_branch_p (insn)
6701 && nullify)
6702 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6704 /* Handle short backwards branch with an unfilled delay slot.
6705 Using a movb;nop rather than or;bl saves 1 cycle for both
6706 taken and untaken branches. */
6707 else if (dbr_sequence_length () == 0
6708 && ! forward_branch_p (insn)
6709 && INSN_ADDRESSES_SET_P ()
6710 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6711 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6712 return "movb,%C2 %1,%0,%3%#";
6713 /* Handle normal cases. */
6714 if (nullify)
6715 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6716 else
6717 return "or,%N2 %1,%%r0,%0\n\tb %3";
6719 default:
6720 gcc_unreachable ();
6723 /* Deal with gross reload from FP register case. */
6724 else if (which_alternative == 1)
6726 /* Move loop counter from FP register to MEM then into a GR,
6727 increment the GR, store the GR into MEM, and finally reload
6728 the FP register from MEM from within the branch's delay slot. */
6729 output_asm_insn ("stw %1,-16(%%r30)", operands);
6730 if (get_attr_length (insn) == 12)
6731 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6732 else
6733 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6735 /* Deal with gross reload from memory case. */
6736 else if (which_alternative == 2)
6738 /* Reload loop counter from memory; the store back to memory
6739 happens in the branch's delay slot. */
6740 if (get_attr_length (insn) == 8)
6741 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6742 else
6743 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6745 /* Handle SAR as a destination. */
6746 else
6748 if (get_attr_length (insn) == 8)
6749 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6750 else
6751 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6755 /* Copy any FP arguments in INSN into integer registers. */
6756 static void
6757 copy_fp_args (rtx insn)
6759 rtx link;
6760 rtx xoperands[2];
6762 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6764 int arg_mode, regno;
6765 rtx use = XEXP (link, 0);
6767 if (! (GET_CODE (use) == USE
6768 && GET_CODE (XEXP (use, 0)) == REG
6769 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6770 continue;
6772 arg_mode = GET_MODE (XEXP (use, 0));
6773 regno = REGNO (XEXP (use, 0));
6775 /* Is it a floating point register? */
6776 if (regno >= 32 && regno <= 39)
6778 /* Copy the FP register into an integer register via memory. */
6779 if (arg_mode == SFmode)
6781 xoperands[0] = XEXP (use, 0);
6782 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6783 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6784 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6786 else
6788 xoperands[0] = XEXP (use, 0);
6789 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6790 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6791 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6792 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
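/* Worked instance of the register arithmetic above: an SFmode
   argument in FP hard register 32 is staged through -16(%r30) into
   %r26, the first GR argument register; each later FP register pair
   maps to the next lower GR. DFmode arguments take the DImode path
   and reload both halves of the GR pair. */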
6798 /* Compute length of the FP argument copy sequence for INSN. */
6799 static int
6800 length_fp_args (rtx insn)
6802 int length = 0;
6803 rtx link;
6805 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6807 int arg_mode, regno;
6808 rtx use = XEXP (link, 0);
6810 if (! (GET_CODE (use) == USE
6811 && GET_CODE (XEXP (use, 0)) == REG
6812 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6813 continue;
6815 arg_mode = GET_MODE (XEXP (use, 0));
6816 regno = REGNO (XEXP (use, 0));
6818 /* Is it a floating point register? */
6819 if (regno >= 32 && regno <= 39)
6821 if (arg_mode == SFmode)
6822 length += 8;
6823 else
6824 length += 12;
6828 return length;
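/* E.g. a call with one SFmode and one DFmode FP argument gets
   8 + 12 = 20 bytes of copy code: two 4-byte insns (fstw + ldw) for
   the single, three (fstd + ldw + ldw) for the double, matching
   copy_fp_args above. */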
6831 /* Return the attribute length for the millicode call instruction INSN.
6832 The length must match the code generated by output_millicode_call.
6833 We include the delay slot in the returned length as it is better to
6834 overestimate the length than to underestimate it. */
6836 int
6837 attr_length_millicode_call (rtx insn)
6839 unsigned long distance = -1;
6840 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
6842 if (INSN_ADDRESSES_SET_P ())
6844 distance = (total + insn_current_reference_address (insn));
6845 if (distance < total)
6846 distance = -1;
6849 if (TARGET_64BIT)
6851 if (!TARGET_LONG_CALLS && distance < 7600000)
6852 return 8;
6854 return 20;
6856 else if (TARGET_PORTABLE_RUNTIME)
6857 return 24;
6858 else
6860 if (!TARGET_LONG_CALLS && distance < 240000)
6861 return 8;
6863 if (TARGET_LONG_ABS_CALL && !flag_pic)
6864 return 12;
6866 return 24;
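/* E.g. an in-range 32-bit millicode call such as $$mulI is just
   "{bl|b,l} $$mulI,%r31" plus its delay slot, the 8-byte case above;
   the 24-byte cases cover the PIC and portable-runtime sequences in
   output_millicode_call. ($$mulI named only for illustration.) */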
6870 /* INSN is a function call. It may have an unconditional jump
6871 in its delay slot.
6873 CALL_DEST is the routine we are calling. */
6875 const char *
6876 output_millicode_call (rtx insn, rtx call_dest)
6878 int attr_length = get_attr_length (insn);
6879 int seq_length = dbr_sequence_length ();
6880 int distance;
6881 rtx seq_insn;
6882 rtx xoperands[3];
6884 xoperands[0] = call_dest;
6885 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
6887 /* Handle the common case where we are sure that the branch will
6888 reach the beginning of the $CODE$ subspace. The within-reach
6889 form of the $$sh_func_adrs call has a length of 28. Because
6890 it has an attribute type of multi, it never has a nonzero
6891 sequence length. The length of the $$sh_func_adrs call is the
6892 same as certain out-of-reach PIC calls to other routines. */
6893 if (!TARGET_LONG_CALLS
6894 && ((seq_length == 0
6895 && (attr_length == 12
6896 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
6897 || (seq_length != 0 && attr_length == 8)))
6899 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
6901 else
6903 if (TARGET_64BIT)
6905 /* It might seem that one insn could be saved by accessing
6906 the millicode function using the linkage table. However,
6907 this doesn't work in shared libraries and other dynamically
6908 loaded objects. Using a pc-relative sequence also avoids
6909 problems related to the implicit use of the gp register. */
6910 output_asm_insn ("b,l .+8,%%r1", xoperands);
6912 if (TARGET_GAS)
6914 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
6915 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6917 else
6919 xoperands[1] = gen_label_rtx ();
6920 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6921 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6922 CODE_LABEL_NUMBER (xoperands[1]));
6923 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6926 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
6928 else if (TARGET_PORTABLE_RUNTIME)
6930 /* Pure portable runtime doesn't allow be/ble; we also don't
6931 have PIC support in the assembler/linker, so this sequence
6932 is needed. */
6934 /* Get the address of our target into %r1. */
6935 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6936 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6938 /* Get our return address into %r31. */
6939 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
6940 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
6942 /* Jump to our target address in %r1. */
6943 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6945 else if (!flag_pic)
6947 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6948 if (TARGET_PA_20)
6949 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
6950 else
6951 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
6953 else
6955 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6956 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
6958 if (TARGET_SOM || !TARGET_GAS)
6960 /* The HP assembler can generate relocations for the
6961 difference of two symbols. GAS can do this for a
6962 millicode symbol but not an arbitrary external
6963 symbol when generating SOM output. */
6964 xoperands[1] = gen_label_rtx ();
6965 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6966 CODE_LABEL_NUMBER (xoperands[1]));
6967 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6968 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6970 else
6972 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
6973 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
6974 xoperands);
6977 /* Jump to our target address in %r1. */
6978 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6982 if (seq_length == 0)
6983 output_asm_insn ("nop", xoperands);
6985 /* We are done if there isn't a jump in the delay slot. */
6986 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
6987 return "";
6989 /* This call has an unconditional jump in its delay slot. */
6990 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
6992 /* See if the return address can be adjusted. Use the containing
6993 sequence insn's address. */
6994 if (INSN_ADDRESSES_SET_P ())
6996 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
6997 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
6998 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7000 if (VAL_14_BITS_P (distance))
7002 xoperands[1] = gen_label_rtx ();
7003 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7004 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7005 CODE_LABEL_NUMBER (xoperands[1]));
7007 else
7008 /* ??? This branch may not reach its target. */
7009 output_asm_insn ("nop\n\tb,n %0", xoperands);
7011 else
7012 /* ??? This branch may not reach its target. */
7013 output_asm_insn ("nop\n\tb,n %0", xoperands);
7015 /* Delete the jump. */
7016 PUT_CODE (NEXT_INSN (insn), NOTE);
7017 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7018 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7020 return "";
7023 /* Return the attribute length of the call instruction INSN. The SIBCALL
7024 flag indicates whether INSN is a regular call or a sibling call. The
7025 length returned must be longer than the code actually generated by
7026 output_call. Since branch shortening is done before delay branch
7027 sequencing, there is no way to determine whether or not the delay
7028 slot will be filled during branch shortening. Even when the delay
7029 slot is filled, we may have to add a nop if the delay slot contains
7030 a branch that can't reach its target. Thus, we always have to include
7031 the delay slot in the length estimate. This used to be done in
7032 pa_adjust_insn_length but we do it here now as some sequences always
7033 fill the delay slot and we can save four bytes in the estimate for
7034 these sequences. */
7036 int
7037 attr_length_call (rtx insn, int sibcall)
7039 int local_call;
7040 rtx call_dest;
7041 tree call_decl;
7042 int length = 0;
7043 rtx pat = PATTERN (insn);
7044 unsigned long distance = -1;
7046 if (INSN_ADDRESSES_SET_P ())
7048 unsigned long total;
7050 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7051 distance = (total + insn_current_reference_address (insn));
7052 if (distance < total)
7053 distance = -1;
7056 /* Determine if this is a local call. */
7057 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7058 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7059 else
7060 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7062 call_decl = SYMBOL_REF_DECL (call_dest);
7063 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7065 /* pc-relative branch. */
7066 if (!TARGET_LONG_CALLS
7067 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7068 || distance < 240000))
7069 length += 8;
7071 /* 64-bit plabel sequence. */
7072 else if (TARGET_64BIT && !local_call)
7073 length += sibcall ? 28 : 24;
7075 /* non-pic long absolute branch sequence. */
7076 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7077 length += 12;
7079 /* long pc-relative branch sequence. */
7080 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7081 || (TARGET_64BIT && !TARGET_GAS)
7082 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7084 length += 20;
7086 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7087 length += 8;
7090 /* 32-bit plabel sequence. */
7091 else
7093 length += 32;
7095 if (TARGET_SOM)
7096 length += length_fp_args (insn);
7098 if (flag_pic)
7099 length += 4;
7101 if (!TARGET_PA_20)
7103 if (!sibcall)
7104 length += 8;
7106 if (!TARGET_NO_SPACE_REGS)
7107 length += 8;
7111 return length;
7114 /* INSN is a function call. It may have an unconditional jump
7115 in its delay slot.
7117 CALL_DEST is the routine we are calling. */
7119 const char *
7120 output_call (rtx insn, rtx call_dest, int sibcall)
7122 int delay_insn_deleted = 0;
7123 int delay_slot_filled = 0;
7124 int seq_length = dbr_sequence_length ();
7125 tree call_decl = SYMBOL_REF_DECL (call_dest);
7126 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7127 rtx xoperands[2];
7129 xoperands[0] = call_dest;
7131 /* Handle the common case where we're sure that the branch will reach
7132 the beginning of the "$CODE$" subspace. This is the beginning of
7133 the current function if we are in a named section. */
7134 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7136 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7137 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7139 else
7141 if (TARGET_64BIT && !local_call)
7143 /* ??? As far as I can tell, the HP linker doesn't support the
7144 long pc-relative sequence described in the 64-bit runtime
7145 architecture. So, we use a slightly longer indirect call. */
7146 xoperands[0] = get_deferred_plabel (call_dest);
7147 xoperands[1] = gen_label_rtx ();
7149 /* If this isn't a sibcall, we put the load of %r27 into the
7150 delay slot. We can't do this in a sibcall as we don't
7151 have a second call-clobbered scratch register available. */
7152 if (seq_length != 0
7153 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7154 && !sibcall)
7156 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7157 optimize, 0, NULL);
7159 /* Now delete the delay insn. */
7160 PUT_CODE (NEXT_INSN (insn), NOTE);
7161 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7162 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7163 delay_insn_deleted = 1;
7166 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7167 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7168 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7170 if (sibcall)
7172 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7173 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7174 output_asm_insn ("bve (%%r1)", xoperands);
7176 else
7178 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7179 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7180 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7181 delay_slot_filled = 1;
7184 else
7186 int indirect_call = 0;
7188 /* Emit a long call. There are several different sequences
7189 of increasing length and complexity. In most cases,
7190 they don't allow an instruction in the delay slot. */
7191 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7192 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7193 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7194 && !TARGET_64BIT)
7195 indirect_call = 1;
7197 if (seq_length != 0
7198 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7199 && !sibcall
7200 && (!TARGET_PA_20 || indirect_call))
7202 /* A non-jump insn in the delay slot. By definition we can
7203 emit this insn before the call (and in fact before argument
7204 relocating). */
7205 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7206 NULL);
7208 /* Now delete the delay insn. */
7209 PUT_CODE (NEXT_INSN (insn), NOTE);
7210 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7211 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7212 delay_insn_deleted = 1;
7215 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7217 /* This is the best sequence for making long calls in
7218 non-pic code. Unfortunately, GNU ld doesn't provide
7219 the stub needed for external calls, and GAS's support
7220 for this with the SOM linker is buggy. It is safe
7221 to use this for local calls. */
7222 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7223 if (sibcall)
7224 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7225 else
7227 if (TARGET_PA_20)
7228 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7229 xoperands);
7230 else
7231 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7233 output_asm_insn ("copy %%r31,%%r2", xoperands);
7234 delay_slot_filled = 1;
7237 else
7239 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7240 || (TARGET_64BIT && !TARGET_GAS))
7242 /* The HP assembler and linker can handle relocations
7243 for the difference of two symbols. GAS and the HP
7244 linker can't do this when one of the symbols is
7245 external. */
7246 xoperands[1] = gen_label_rtx ();
7247 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7248 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7249 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7250 CODE_LABEL_NUMBER (xoperands[1]));
7251 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7253 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7255 /* GAS currently can't generate the relocations that
7256 are needed for the SOM linker under HP-UX using this
7257 sequence. The GNU linker doesn't generate the stubs
7258 that are needed for external calls on TARGET_ELF32
7259 with this sequence. For now, we have to use a
7260 longer plabel sequence when using GAS. */
7261 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7262 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7263 xoperands);
7264 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7265 xoperands);
7267 else
7269 /* Emit a long plabel-based call sequence. This is
7270 essentially an inline implementation of $$dyncall.
7271 We don't actually try to call $$dyncall as this is
7272 as difficult as calling the function itself. */
7273 xoperands[0] = get_deferred_plabel (call_dest);
7274 xoperands[1] = gen_label_rtx ();
7276 /* Since the call is indirect, FP arguments in registers
7277 need to be copied to the general registers. Then, the
7278 argument relocation stub will copy them back. */
7279 if (TARGET_SOM)
7280 copy_fp_args (insn);
7282 if (flag_pic)
7284 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7285 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7286 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7288 else
7290 output_asm_insn ("addil LR'%0-$global$,%%r27",
7291 xoperands);
7292 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7293 xoperands);
7296 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7297 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7298 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7299 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7301 if (!sibcall && !TARGET_PA_20)
7303 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7304 if (TARGET_NO_SPACE_REGS)
7305 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7306 else
7307 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7311 if (TARGET_PA_20)
7313 if (sibcall)
7314 output_asm_insn ("bve (%%r1)", xoperands);
7315 else
7317 if (indirect_call)
7319 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7320 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7321 delay_slot_filled = 1;
7323 else
7324 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7327 else
7329 if (!TARGET_NO_SPACE_REGS)
7330 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7331 xoperands);
7333 if (sibcall)
7335 if (TARGET_NO_SPACE_REGS)
7336 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7337 else
7338 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7340 else
7342 if (TARGET_NO_SPACE_REGS)
7343 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7344 else
7345 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7347 if (indirect_call)
7348 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7349 else
7350 output_asm_insn ("copy %%r31,%%r2", xoperands);
7351 delay_slot_filled = 1;
7358 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7359 output_asm_insn ("nop", xoperands);
7361 /* We are done if there isn't a jump in the delay slot. */
7362 if (seq_length == 0
7363 || delay_insn_deleted
7364 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7365 return "";
7367 /* A sibcall should never have a branch in the delay slot. */
7368 gcc_assert (!sibcall);
7370 /* This call has an unconditional jump in its delay slot. */
7371 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7373 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7375 /* See if the return address can be adjusted. Use the containing
7376 sequence insn's address. */
7377 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7378 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7379 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7381 if (VAL_14_BITS_P (distance))
7383 xoperands[1] = gen_label_rtx ();
7384 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7385 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7386 CODE_LABEL_NUMBER (xoperands[1]));
7388 else
7389 output_asm_insn ("nop\n\tb,n %0", xoperands);
7391 else
7392 output_asm_insn ("b,n %0", xoperands);
7394 /* Delete the jump. */
7395 PUT_CODE (NEXT_INSN (insn), NOTE);
7396 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7397 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7399 return "";
7402 /* Return the attribute length of the indirect call instruction INSN.
7403 The length must match the code generated by output_indirect_call.
7404 The returned length includes the delay slot. Currently, the delay
7405 slot of an indirect call sequence is not exposed and it is used by
7406 the sequence itself. */
7408 int
7409 attr_length_indirect_call (rtx insn)
7411 unsigned long distance = -1;
7412 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7414 if (INSN_ADDRESSES_SET_P ())
7416 distance = (total + insn_current_reference_address (insn));
7417 if (distance < total)
7418 distance = -1;
7421 if (TARGET_64BIT)
7422 return 12;
7424 if (TARGET_FAST_INDIRECT_CALLS
7425 || (!TARGET_PORTABLE_RUNTIME
7426 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7427 return 8;
7429 if (flag_pic)
7430 return 24;
7432 if (TARGET_PORTABLE_RUNTIME)
7433 return 20;
7435 /* Out of reach, can use ble. */
7436 return 12;
7439 const char *
7440 output_indirect_call (rtx insn, rtx call_dest)
7442 rtx xoperands[1];
7444 if (TARGET_64BIT)
7446 xoperands[0] = call_dest;
7447 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7448 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7449 return "";
7452 /* First the special case for kernels, level 0 systems, etc. */
7453 if (TARGET_FAST_INDIRECT_CALLS)
7454 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7456 /* Now the normal case -- we can reach $$dyncall directly or
7457 we're sure that we can get there via a long-branch stub.
7459 No need to check target flags as the length uniquely identifies
7460 the remaining cases. */
7461 if (attr_length_indirect_call (insn) == 8)
7463 /* The HP linker substitutes a BLE for millicode calls using
7464 the short PIC PCREL form. Thus, we must use %r31 as the
7465 link register when generating PA 1.x code. */
7466 if (TARGET_PA_20)
7467 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7468 else
7469 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7472 /* Long millicode call, but we are not generating PIC or portable runtime
7473 code. */
7474 if (attr_length_indirect_call (insn) == 12)
7475 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7477 /* Long millicode call for portable runtime. */
7478 if (attr_length_indirect_call (insn) == 20)
7479 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7481 /* We need a long PIC call to $$dyncall. */
7482 xoperands[0] = NULL_RTX;
7483 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7484 if (TARGET_SOM || !TARGET_GAS)
7486 xoperands[0] = gen_label_rtx ();
7487 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7488 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7489 CODE_LABEL_NUMBER (xoperands[0]));
7490 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7492 else
7494 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7495 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7496 xoperands);
7498 output_asm_insn ("blr %%r0,%%r2", xoperands);
7499 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7500 return "";
7503 /* Return the total length of the save and restore instructions needed for
7504 the data linkage table pointer (i.e., the PIC register) across the call
7505 instruction INSN. No-return calls do not require a save and restore.
7506 In addition, we may be able to avoid the save and restore for calls
7507 within the same translation unit. */
7509 int
7510 attr_length_save_restore_dltp (rtx insn)
7512 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7513 return 0;
7515 return 8;
7518 /* In HP-UX 8.0's shared library scheme, special relocations are needed
7519 for function labels if they might be passed to a function
7520 in a shared library (because shared libraries don't live in code
7521 space), and special magic is needed to construct their address. */
7523 void
7524 hppa_encode_label (rtx sym)
7526 const char *str = XSTR (sym, 0);
7527 int len = strlen (str) + 1;
7528 char *newstr, *p;
7530 p = newstr = alloca (len + 1);
7531 *p++ = '@';
7532 strcpy (p, str);
7534 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
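/* For example, the SYMBOL_REF string "foo" becomes "@foo"; the '@'
   prefix is what FUNCTION_NAME_P and pa_strip_name_encoding below
   key on. */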
7537 static void
7538 pa_encode_section_info (tree decl, rtx rtl, int first)
7540 default_encode_section_info (decl, rtl, first);
7542 if (first && TEXT_SPACE_P (decl))
7544 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7545 if (TREE_CODE (decl) == FUNCTION_DECL)
7546 hppa_encode_label (XEXP (rtl, 0));
7550 /* This is sort of the inverse of pa_encode_section_info. */
7552 static const char *
7553 pa_strip_name_encoding (const char *str)
7555 str += (*str == '@');
7556 str += (*str == '*');
7557 return str;
7560 int
7561 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7563 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7566 /* Returns 1 if OP is a function label involved in a simple addition
7567 with a constant. Used to keep certain patterns from matching
7568 during instruction combination. */
7569 int
7570 is_function_label_plus_const (rtx op)
7572 /* Strip off any CONST. */
7573 if (GET_CODE (op) == CONST)
7574 op = XEXP (op, 0);
7576 return (GET_CODE (op) == PLUS
7577 && function_label_operand (XEXP (op, 0), Pmode)
7578 && GET_CODE (XEXP (op, 1)) == CONST_INT);
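/* E.g. this accepts (const (plus (symbol_ref "@foo") (const_int 4)))
   but rejects sums over data symbols, since only encoded function
   labels satisfy function_label_operand. */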
7581 /* Output assembly code for a thunk to FUNCTION. */
7583 static void
7584 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7585 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7586 tree function)
7588 static unsigned int current_thunk_number;
7589 int val_14 = VAL_14_BITS_P (delta);
7590 int nbytes = 0;
7591 char label[16];
7592 rtx xoperands[4];
7594 xoperands[0] = XEXP (DECL_RTL (function), 0);
7595 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7596 xoperands[2] = GEN_INT (delta);
7598 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7599 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7601 /* Output the thunk. We know that the function is in the same
7602 translation unit (i.e., the same space) as the thunk, and that
7603 thunks are output after their method. Thus, we don't need an
7604 external branch to reach the function. With SOM and GAS,
7605 functions and thunks are effectively in different sections.
7606 Thus, we can always use an IA-relative branch and the linker
7607 will add a long branch stub if necessary.
7609 However, we have to be careful when generating PIC code on the
7610 SOM port to ensure that the sequence does not transfer to an
7611 import stub for the target function as this could clobber the
7612 return value saved at SP-24. This would also apply to the
7613 32-bit linux port if the multi-space model is implemented. */
7614 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7615 && !(flag_pic && TREE_PUBLIC (function))
7616 && (TARGET_GAS || last_address < 262132))
7617 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7618 && ((targetm.have_named_sections
7619 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7620 /* The GNU 64-bit linker has rather poor stub management.
7621 So, we use a long branch from thunks that aren't in
7622 the same section as the target function. */
7623 && ((!TARGET_64BIT
7624 && (DECL_SECTION_NAME (thunk_fndecl)
7625 != DECL_SECTION_NAME (function)))
7626 || ((DECL_SECTION_NAME (thunk_fndecl)
7627 == DECL_SECTION_NAME (function))
7628 && last_address < 262132)))
7629 || (!targetm.have_named_sections && last_address < 262132))))
7631 if (!val_14)
7632 output_asm_insn ("addil L'%2,%%r26", xoperands);
7634 output_asm_insn ("b %0", xoperands);
7636 if (val_14)
7638 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7639 nbytes += 8;
7641 else
7643 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7644 nbytes += 12;
7647 else if (TARGET_64BIT)
7649 /* We only have one call-clobbered scratch register, so we can't
7650 make use of the delay slot if delta doesn't fit in 14 bits. */
7651 if (!val_14)
7653 output_asm_insn ("addil L'%2,%%r26", xoperands);
7654 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7657 output_asm_insn ("b,l .+8,%%r1", xoperands);
7659 if (TARGET_GAS)
7661 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7662 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7664 else
7666 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7667 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7670 if (val_14)
7672 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7673 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7674 nbytes += 20;
7676 else
7678 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7679 nbytes += 24;
7682 else if (TARGET_PORTABLE_RUNTIME)
7684 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7685 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7687 if (!val_14)
7688 output_asm_insn ("addil L'%2,%%r26", xoperands);
7690 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7692 if (val_14)
7694 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7695 nbytes += 16;
7697 else
7699 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7700 nbytes += 20;
7703 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7705 /* The function is accessible from outside this module. The only
7706 way to avoid an import stub between the thunk and function is to
7707 call the function directly with an indirect sequence similar to
7708 that used by $$dyncall. This is possible because $$dyncall acts
7709 as the import stub in an indirect call. */
7710 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7711 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7712 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7713 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7714 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7715 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7716 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7717 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7718 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7720 if (!val_14)
7722 output_asm_insn ("addil L'%2,%%r26", xoperands);
7723 nbytes += 4;
7726 if (TARGET_PA_20)
7728 output_asm_insn ("bve (%%r22)", xoperands);
7729 nbytes += 36;
7731 else if (TARGET_NO_SPACE_REGS)
7733 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7734 nbytes += 36;
7736 else
7738 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7739 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7740 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7741 nbytes += 44;
7744 if (val_14)
7745 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7746 else
7747 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7749 else if (flag_pic)
7751 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7753 if (TARGET_SOM || !TARGET_GAS)
7755 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7756 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7758 else
7760 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7761 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7764 if (!val_14)
7765 output_asm_insn ("addil L'%2,%%r26", xoperands);
7767 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7769 if (val_14)
7771 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7772 nbytes += 20;
7774 else
7776 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7777 nbytes += 24;
7780 else
7782 if (!val_14)
7783 output_asm_insn ("addil L'%2,%%r26", xoperands);
7785 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7786 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
7788 if (val_14)
7790 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7791 nbytes += 12;
7793 else
7795 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7796 nbytes += 16;
7800 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7802 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7804 switch_to_section (data_section);
7805 output_asm_insn (".align 4", xoperands);
7806 ASM_OUTPUT_LABEL (file, label);
7807 output_asm_insn (".word P'%0", xoperands);
7809 else if (TARGET_SOM && TARGET_GAS)
7810 in_section = NULL;
7812 current_thunk_number++;
7813 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
7814 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
7815 last_address += nbytes;
7816 update_total_code_bytes (nbytes);
7819 /* Only direct calls to static functions are allowed to be sibling (tail)
7820 call optimized.
7822 This restriction is necessary because some linker generated stubs will
7823 store return pointers into rp' in some cases which might clobber a
7824 live value already in rp'.
7826 In a sibcall the current function and the target function share stack
7827 space. Thus if the path to the current function and the path to the
7828 target function save a value in rp', they save the value into the
7829 same stack slot, which has undesirable consequences.
7831 Because of the deferred binding nature of shared libraries any function
7832 with external scope could be in a different load module and thus require
7833 rp' to be saved when calling that function. So sibcall optimizations
7834 can only be safe for static functions.
7836 Note that GCC never needs return value relocations, so we don't have to
7837 worry about static calls with return value relocations (which require
7838 saving rp').
7840 It is safe to perform a sibcall optimization when the target function
7841 will never return. */
7842 static bool
7843 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7845 if (TARGET_PORTABLE_RUNTIME)
7846 return false;
7848 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
7849 single subspace mode and the call is not indirect. As far as I know,
7850 there is no operating system support for the multiple subspace mode.
7851 It might be possible to support indirect calls if we didn't use
7852 $$dyncall (see the indirect sequence generated in output_call). */
7853 if (TARGET_ELF32)
7854 return (decl != NULL_TREE);
7856 /* Sibcalls are not ok because the arg pointer register is not a fixed
7857 register. This prevents the sibcall optimization from occurring. In
7858 addition, there are problems with stub placement using GNU ld. This
7859 is because a normal sibcall branch uses a 17-bit relocation while
7860 a regular call branch uses a 22-bit relocation. As a result, more
7861 care needs to be taken in the placement of long-branch stubs. */
7862 if (TARGET_64BIT)
7863 return false;
7865 /* Sibcalls are only ok within a translation unit. */
7866 return (decl && !TREE_PUBLIC (decl));
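/* Illustrative source-level example (not compiler code): in

     static int add1 (int x) { return x + 1; }
     int wrap (int x) { return add1 (x); }

   the tail call to add1 may be sibcall optimized on the 32-bit port
   because add1 has static scope, while a call to an extern function
   is rejected by the TREE_PUBLIC test above.  */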
7869 /* ??? Addition is not commutative on the PA due to the weird implicit
7870 space register selection rules for memory addresses. Therefore, we
7871 don't consider a + b == b + a, as this might be inside a MEM. */
7872 static bool
7873 pa_commutative_p (rtx x, int outer_code)
7875 return (COMMUTATIVE_P (x)
7876 && (TARGET_NO_SPACE_REGS
7877 || (outer_code != UNKNOWN && outer_code != MEM)
7878 || GET_CODE (x) != PLUS));
7881 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7882 use in fmpyadd instructions. */
7884 fmpyaddoperands (rtx *operands)
7886 enum machine_mode mode = GET_MODE (operands[0]);
7888 /* Must be a floating point mode. */
7889 if (mode != SFmode && mode != DFmode)
7890 return 0;
7892 /* All modes must be the same. */
7893 if (! (mode == GET_MODE (operands[1])
7894 && mode == GET_MODE (operands[2])
7895 && mode == GET_MODE (operands[3])
7896 && mode == GET_MODE (operands[4])
7897 && mode == GET_MODE (operands[5])))
7898 return 0;
7900 /* All operands must be registers. */
7901 if (! (GET_CODE (operands[1]) == REG
7902 && GET_CODE (operands[2]) == REG
7903 && GET_CODE (operands[3]) == REG
7904 && GET_CODE (operands[4]) == REG
7905 && GET_CODE (operands[5]) == REG))
7906 return 0;
7908 /* Only 2 real operands to the addition. One of the input operands must
7909 be the same as the output operand. */
7910 if (! rtx_equal_p (operands[3], operands[4])
7911 && ! rtx_equal_p (operands[3], operands[5]))
7912 return 0;
7914 /* Inout operand of add cannot conflict with any operands from multiply. */
7915 if (rtx_equal_p (operands[3], operands[0])
7916 || rtx_equal_p (operands[3], operands[1])
7917 || rtx_equal_p (operands[3], operands[2]))
7918 return 0;
7920 /* The multiply result cannot feed into the addition operands. */
7921 if (rtx_equal_p (operands[4], operands[0])
7922 || rtx_equal_p (operands[5], operands[0]))
7923 return 0;
7925 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
7926 if (mode == SFmode
7927 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7928 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7929 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7930 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7931 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
7932 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
7933 return 0;
7935 /* Passed. Operands are suitable for fmpyadd. */
7936 return 1;
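/* Summarizing the checks above: the combined insn performs
   operands[0] = operands[1] * operands[2] in parallel with an add
   whose in/out register (operands[3]) must also appear as operands[4]
   or operands[5], and the multiply result must not feed the add.  */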
7939 #if !defined(USE_COLLECT2)
7940 static void
7941 pa_asm_out_constructor (rtx symbol, int priority)
7943 if (!function_label_operand (symbol, VOIDmode))
7944 hppa_encode_label (symbol);
7946 #ifdef CTORS_SECTION_ASM_OP
7947 default_ctor_section_asm_out_constructor (symbol, priority);
7948 #else
7949 # ifdef TARGET_ASM_NAMED_SECTION
7950 default_named_section_asm_out_constructor (symbol, priority);
7951 # else
7952 default_stabs_asm_out_constructor (symbol, priority);
7953 # endif
7954 #endif
7957 static void
7958 pa_asm_out_destructor (rtx symbol, int priority)
7960 if (!function_label_operand (symbol, VOIDmode))
7961 hppa_encode_label (symbol);
7963 #ifdef DTORS_SECTION_ASM_OP
7964 default_dtor_section_asm_out_destructor (symbol, priority);
7965 #else
7966 # ifdef TARGET_ASM_NAMED_SECTION
7967 default_named_section_asm_out_destructor (symbol, priority);
7968 # else
7969 default_stabs_asm_out_destructor (symbol, priority);
7970 # endif
7971 #endif
7973 #endif
7975 /* This function places uninitialized global data in the bss section.
7976 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
7977 function on the SOM port to prevent uninitialized global data from
7978 being placed in the data section. */
7980 void
7981 pa_asm_output_aligned_bss (FILE *stream,
7982 const char *name,
7983 unsigned HOST_WIDE_INT size,
7984 unsigned int align)
7986 switch_to_section (bss_section);
7987 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7989 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7990 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7991 #endif
7993 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7994 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7995 #endif
7997 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7998 ASM_OUTPUT_LABEL (stream, name);
7999 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8002 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8003 that doesn't allow the alignment of global common storage to be directly
8004 specified. The SOM linker aligns common storage based on the rounded
8005 value of the NUM_BYTES parameter in the .comm directive. It's not
8006 possible to use the .align directive as it doesn't affect the alignment
8007 of the label associated with a .comm directive. */
8009 void
8010 pa_asm_output_aligned_common (FILE *stream,
8011 const char *name,
8012 unsigned HOST_WIDE_INT size,
8013 unsigned int align)
8015 unsigned int max_common_align;
8017 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8018 if (align > max_common_align)
8020 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8021 "for global common data. Using %u",
8022 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8023 align = max_common_align;
8026 switch_to_section (bss_section);
8028 assemble_name (stream, name);
8029 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8030 MAX (size, align / BITS_PER_UNIT));
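/* Illustrative output for a 10 byte common symbol "buf" with 64-bit
   alignment (a hypothetical example):

     buf	.comm 10

   The SOM linker then derives the alignment from the rounded size.  */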
8033 /* We can't use .comm for local common storage as the SOM linker effectively
8034 treats the symbol as universal and uses the same storage for local symbols
8035 with the same name in different object files. The .block directive
8036 reserves an uninitialized block of storage. However, it's not common
8037 storage. Fortunately, GCC never requests common storage with the same
8038 name in any given translation unit. */
8040 void
8041 pa_asm_output_aligned_local (FILE *stream,
8042 const char *name,
8043 unsigned HOST_WIDE_INT size,
8044 unsigned int align)
8046 switch_to_section (bss_section);
8047 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8049 #ifdef LOCAL_ASM_OP
8050 fprintf (stream, "%s", LOCAL_ASM_OP);
8051 assemble_name (stream, name);
8052 fprintf (stream, "\n");
8053 #endif
8055 ASM_OUTPUT_LABEL (stream, name);
8056 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
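/* For a 16 byte local "tmp" with 64-bit alignment this emits, in
   order: ".align 8", the LOCAL_ASM_OP directive when defined, the
   label for "tmp", and ".block 16" (an illustrative example).  */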
8059 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8060 use in fmpysub instructions. */
8062 fmpysuboperands (rtx *operands)
8064 enum machine_mode mode = GET_MODE (operands[0]);
8066 /* Must be a floating point mode. */
8067 if (mode != SFmode && mode != DFmode)
8068 return 0;
8070 /* All modes must be the same. */
8071 if (! (mode == GET_MODE (operands[1])
8072 && mode == GET_MODE (operands[2])
8073 && mode == GET_MODE (operands[3])
8074 && mode == GET_MODE (operands[4])
8075 && mode == GET_MODE (operands[5])))
8076 return 0;
8078 /* All operands must be registers. */
8079 if (! (GET_CODE (operands[1]) == REG
8080 && GET_CODE (operands[2]) == REG
8081 && GET_CODE (operands[3]) == REG
8082 && GET_CODE (operands[4]) == REG
8083 && GET_CODE (operands[5]) == REG))
8084 return 0;
8086 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8087 operation, so operands[4] must be the same as operands[3]. */
8088 if (! rtx_equal_p (operands[3], operands[4]))
8089 return 0;
8091 /* The multiply result cannot feed into the subtraction. */
8092 if (rtx_equal_p (operands[5], operands[0]))
8093 return 0;
8095 /* Inout operand of sub cannot conflict with any operands from multiply. */
8096 if (rtx_equal_p (operands[3], operands[0])
8097 || rtx_equal_p (operands[3], operands[1])
8098 || rtx_equal_p (operands[3], operands[2]))
8099 return 0;
8101 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8102 if (mode == SFmode
8103 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8104 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8105 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8106 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8107 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8108 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8109 return 0;
8111 /* Passed. Operands are suitable for fmpysub. */
8112 return 1;
8115 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8116 constants for shadd instructions. */
8118 shadd_constant_p (int val)
8120 if (val == 2 || val == 4 || val == 8)
8121 return 1;
8122 else
8123 return 0;
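/* These values correspond to the sh1add, sh2add and sh3add
   instructions, which shift the index left by 1, 2 or 3 bits (scaling
   it by 2, 4 or 8) before adding it to the base operand.  */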
8126 /* Return 1 if OP is valid as a base or index register in a
8127 REG+REG address. */
8130 borx_reg_operand (rtx op, enum machine_mode mode)
8132 if (GET_CODE (op) != REG)
8133 return 0;
8135 /* We must reject virtual registers as the only expressions that
8136 can be instantiated are REG and REG+CONST. */
8137 if (op == virtual_incoming_args_rtx
8138 || op == virtual_stack_vars_rtx
8139 || op == virtual_stack_dynamic_rtx
8140 || op == virtual_outgoing_args_rtx
8141 || op == virtual_cfa_rtx)
8142 return 0;
8144 /* While it's always safe to index off the frame pointer, it's not
8145 profitable to do so when the frame pointer is being eliminated. */
8146 if (!reload_completed
8147 && flag_omit_frame_pointer
8148 && !current_function_calls_alloca
8149 && op == frame_pointer_rtx)
8150 return 0;
8152 return register_operand (op, mode);
8155 /* Return 1 if this operand is anything other than a hard register. */
8158 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8160 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8163 /* Return 1 if INSN branches forward. Should be using insn_addresses
8164 to avoid walking through all the insns... */
8165 static int
8166 forward_branch_p (rtx insn)
8168 rtx label = JUMP_LABEL (insn);
8170 while (insn)
8172 if (insn == label)
8173 break;
8174 else
8175 insn = NEXT_INSN (insn);
8178 return (insn == label);
8181 /* Return 1 if OP is an equality comparison, else return 0. */
8183 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8185 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8188 /* Return 1 if INSN is in the delay slot of a call instruction. */
8190 jump_in_call_delay (rtx insn)
8193 if (GET_CODE (insn) != JUMP_INSN)
8194 return 0;
8196 if (PREV_INSN (insn)
8197 && PREV_INSN (PREV_INSN (insn))
8198 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8200 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8202 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8203 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8206 else
8207 return 0;
8210 /* Output an unconditional move and branch insn. */
8212 const char *
8213 output_parallel_movb (rtx *operands, int length)
8215 /* These are the cases in which we win. */
8216 if (length == 4)
8217 return "mov%I1b,tr %1,%0,%2";
8219 /* None of these cases wins, but they don't lose either. */
8220 if (dbr_sequence_length () == 0)
8222 /* Nothing in the delay slot, fake it by putting the combined
8223 insn (the copy or add) in the delay slot of a bl. */
8224 if (GET_CODE (operands[1]) == CONST_INT)
8225 return "b %2\n\tldi %1,%0";
8226 else
8227 return "b %2\n\tcopy %1,%0";
8229 else
8231 /* Something in the delay slot, but we've got a long branch. */
8232 if (GET_CODE (operands[1]) == CONST_INT)
8233 return "ldi %1,%0\n\tb %2";
8234 else
8235 return "copy %1,%0\n\tb %2";
8239 /* Output an unconditional add and branch insn. */
8241 const char *
8242 output_parallel_addb (rtx *operands, int length)
8244 /* To make life easy we want operand0 to be the shared input/output
8245 operand and operand1 to be the readonly operand. */
8246 if (operands[0] == operands[1])
8247 operands[1] = operands[2];
8249 /* These are the cases in which we win. */
8250 if (length == 4)
8251 return "add%I1b,tr %1,%0,%3";
8253 /* None of these cases wins, but they don't lose either. */
8254 if (dbr_sequence_length () == 0)
8256 /* Nothing in the delay slot, fake it by putting the combined
8257 insn (the copy or add) in the delay slot of a bl. */
8258 return "b %3\n\tadd%I1 %1,%0,%0";
8260 else
8262 /* Something in the delay slot, but we've got a long branch. */
8263 return "add%I1 %1,%0,%0\n\tb %3";
8267 /* Return nonzero if INSN (a jump insn) immediately follows a call
8268 to a named function. This is used to avoid filling the delay slot
8269 of the jump since it can usually be eliminated by modifying RP in
8270 the delay slot of the call. */
8273 following_call (rtx insn)
8275 if (! TARGET_JUMP_IN_DELAY)
8276 return 0;
8278 /* Find the previous real insn, skipping NOTEs. */
8279 insn = PREV_INSN (insn);
8280 while (insn && GET_CODE (insn) == NOTE)
8281 insn = PREV_INSN (insn);
8283 /* Check for CALL_INSNs and millicode calls. */
8284 if (insn
8285 && ((GET_CODE (insn) == CALL_INSN
8286 && get_attr_type (insn) != TYPE_DYNCALL)
8287 || (GET_CODE (insn) == INSN
8288 && GET_CODE (PATTERN (insn)) != SEQUENCE
8289 && GET_CODE (PATTERN (insn)) != USE
8290 && GET_CODE (PATTERN (insn)) != CLOBBER
8291 && get_attr_type (insn) == TYPE_MILLI)))
8292 return 1;
8294 return 0;
8297 /* We use this hook to perform a PA specific optimization which is difficult
8298 to do in earlier passes.
8300 We want the delay slots of branches within jump tables to be filled.
8301 None of the compiler passes at the moment even has the notion that a
8302 PA jump table doesn't contain addresses, but instead contains actual
8303 instructions!
8305 Because we actually jump into the table, the addresses of each entry
8306 must stay constant in relation to the beginning of the table (which
8307 itself must stay constant relative to the instruction to jump into
8308 it). I don't believe we can guarantee earlier passes of the compiler
8309 will adhere to those rules.
8311 So, late in the compilation process we find all the jump tables, and
8312 expand them into real code -- e.g. each entry in the jump table vector
8313 will get an appropriate label followed by a jump to the final target.
8315 Reorg and the final jump pass can then optimize these branches and
8316 fill their delay slots. We end up with smaller, more efficient code.
8318 The jump instructions within the table are special; we must be able
8319 to identify them during assembly output (if the jumps don't get filled
8320 we need to emit a nop rather than nullifying the delay slot). We
8321 identify jumps in switch tables by using insns with the attribute
8322 type TYPE_BTABLE_BRANCH.
8324 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8325 insns. This serves two purposes. First, it prevents jump.c from
8326 noticing that the last N entries in the table jump to the instruction
8327 immediately after the table and deleting the jumps. Second, those
8328 insns mark where we should emit .begin_brtab and .end_brtab directives
8329 when using GAS (allows for better link time optimizations). */
8331 static void
8332 pa_reorg (void)
8334 rtx insn;
8336 remove_useless_addtr_insns (1);
8338 if (pa_cpu < PROCESSOR_8000)
8339 pa_combine_instructions ();
8342 /* This is fairly cheap, so always run it if optimizing. */
8343 if (optimize > 0 && !TARGET_BIG_SWITCH)
8345 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8346 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8348 rtx pattern, tmp, location, label;
8349 unsigned int length, i;
8351 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8352 if (GET_CODE (insn) != JUMP_INSN
8353 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8354 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8355 continue;
8357 /* Emit marker for the beginning of the branch table. */
8358 emit_insn_before (gen_begin_brtab (), insn);
8360 pattern = PATTERN (insn);
8361 location = PREV_INSN (insn);
8362 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8364 for (i = 0; i < length; i++)
8366 /* Emit a label before each jump to keep jump.c from
8367 removing this code. */
8368 tmp = gen_label_rtx ();
8369 LABEL_NUSES (tmp) = 1;
8370 emit_label_after (tmp, location);
8371 location = NEXT_INSN (location);
8373 if (GET_CODE (pattern) == ADDR_VEC)
8374 label = XEXP (XVECEXP (pattern, 0, i), 0);
8375 else
8376 label = XEXP (XVECEXP (pattern, 1, i), 0);
8378 tmp = gen_short_jump (label);
8380 /* Emit the jump itself. */
8381 tmp = emit_jump_insn_after (tmp, location);
8382 JUMP_LABEL (tmp) = label;
8383 LABEL_NUSES (label)++;
8384 location = NEXT_INSN (location);
8386 /* Emit a BARRIER after the jump. */
8387 emit_barrier_after (location);
8388 location = NEXT_INSN (location);
8391 /* Emit marker for the end of the branch table. */
8392 emit_insn_before (gen_end_brtab (), location);
8393 location = NEXT_INSN (location);
8394 emit_barrier_after (location);
8396 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8397 delete_insn (insn);
8400 else
8402 /* Still need brtab marker insns. FIXME: the presence of these
8403 markers disables output of the branch table to readonly memory,
8404 and any alignment directives that might be needed. Possibly,
8405 the begin_brtab insn should be output before the label for the
8406 table. This doesn't matter at the moment since the tables are
8407 always output in the text section. */
8408 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8410 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn. */
8411 if (GET_CODE (insn) != JUMP_INSN
8412 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8413 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8414 continue;
8416 /* Now generate markers for the beginning and end of the
8417 branch table. */
8418 emit_insn_before (gen_begin_brtab (), insn);
8419 emit_insn_after (gen_end_brtab (), insn);
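/* A sketch of the expansion performed above (hypothetical labels): a
   two-entry branch table jumping to L10 and L20 is rewritten as

	begin_brtab
     L100:	b L10
     L101:	b L20
	end_brtab

   with a barrier after each jump; reorg can then fill the delay slot
   of each branch individually.  */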
8424 /* The PA has a number of odd instructions which can perform multiple
8425 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8426 it may be profitable to combine two instructions into one instruction
8427 with two outputs. It's not profitable on PA2.0 machines because the
8428 two outputs would take two slots in the reorder buffers.
8430 This routine finds instructions which can be combined and combines
8431 them. We only support some of the potential combinations, and we
8432 only try common ways to find suitable instructions.
8434 * addb can add two registers or a register and a small integer
8435 and jump to a nearby (+-8k) location. Normally the jump to the
8436 nearby location is conditional on the result of the add, but by
8437 using the "true" condition we can make the jump unconditional.
8438 Thus addb can perform two independent operations in one insn.
8440 * movb is similar to addb in that it can perform a reg->reg
8441 or small immediate->reg copy and jump to a nearby (+-8k) location.
8443 * fmpyadd and fmpysub can perform a FP multiply and either an
8444 FP add or FP sub if the operands of the multiply and add/sub are
8445 independent (there are other minor restrictions). Note both
8446 the fmpy and fadd/fsub can in theory move to better spots according
8447 to data dependencies, but for now we require the fmpy stay at a
8448 fixed location.
8450 * Many of the memory operations can perform pre & post updates
8451 of index registers. GCC's pre/post increment/decrement addressing
8452 is far too simple to take advantage of all the possibilities. This
8453 pass may not be suitable since those insns may not be independent.
8455 * comclr can compare two ints or an int and a register, nullify
8456 the following instruction and zero some other register. This
8457 is more difficult to use as it's harder to find an insn which
8458 will generate a comclr than finding something like an unconditional
8459 branch. (conditional moves & long branches create comclr insns).
8461 * Most arithmetic operations can conditionally skip the next
8462 instruction. They can be viewed as "perform this operation
8463 and conditionally jump to this nearby location" (where nearby
8464 is a few insns away). These are difficult to use due to the
8465 branch length restrictions. */
8467 static void
8468 pa_combine_instructions (void)
8470 rtx anchor, new;
8472 /* This can get expensive since the basic algorithm is on the
8473 order of O(n^2) (or worse). Only do it for -O2 or higher
8474 levels of optimization. */
8475 if (optimize < 2)
8476 return;
8478 /* Walk down the list of insns looking for "anchor" insns which
8479 may be combined with "floating" insns. As the name implies,
8480 "anchor" instructions don't move, while "floating" insns may
8481 move around. */
8482 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8483 new = make_insn_raw (new);
8485 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8487 enum attr_pa_combine_type anchor_attr;
8488 enum attr_pa_combine_type floater_attr;
8490 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8491 Also ignore any special USE insns. */
8492 if ((GET_CODE (anchor) != INSN
8493 && GET_CODE (anchor) != JUMP_INSN
8494 && GET_CODE (anchor) != CALL_INSN)
8495 || GET_CODE (PATTERN (anchor)) == USE
8496 || GET_CODE (PATTERN (anchor)) == CLOBBER
8497 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8498 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8499 continue;
8501 anchor_attr = get_attr_pa_combine_type (anchor);
8502 /* See if anchor is an insn suitable for combination. */
8503 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8504 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8505 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8506 && ! forward_branch_p (anchor)))
8508 rtx floater;
8510 for (floater = PREV_INSN (anchor);
8511 floater;
8512 floater = PREV_INSN (floater))
8514 if (GET_CODE (floater) == NOTE
8515 || (GET_CODE (floater) == INSN
8516 && (GET_CODE (PATTERN (floater)) == USE
8517 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8518 continue;
8520 /* Anything except a regular INSN will stop our search. */
8521 if (GET_CODE (floater) != INSN
8522 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8523 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8525 floater = NULL_RTX;
8526 break;
8529 /* See if FLOATER is suitable for combination with the
8530 anchor. */
8531 floater_attr = get_attr_pa_combine_type (floater);
8532 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8533 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8534 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8535 && floater_attr == PA_COMBINE_TYPE_FMPY))
8537 /* If ANCHOR and FLOATER can be combined, then we're
8538 done with this pass. */
8539 if (pa_can_combine_p (new, anchor, floater, 0,
8540 SET_DEST (PATTERN (floater)),
8541 XEXP (SET_SRC (PATTERN (floater)), 0),
8542 XEXP (SET_SRC (PATTERN (floater)), 1)))
8543 break;
8546 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8547 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8549 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8551 if (pa_can_combine_p (new, anchor, floater, 0,
8552 SET_DEST (PATTERN (floater)),
8553 XEXP (SET_SRC (PATTERN (floater)), 0),
8554 XEXP (SET_SRC (PATTERN (floater)), 1)))
8555 break;
8557 else
8559 if (pa_can_combine_p (new, anchor, floater, 0,
8560 SET_DEST (PATTERN (floater)),
8561 SET_SRC (PATTERN (floater)),
8562 SET_SRC (PATTERN (floater))))
8563 break;
8568 /* If we didn't find anything on the backwards scan try forwards. */
8569 if (!floater
8570 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8571 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8573 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8575 if (GET_CODE (floater) == NOTE
8576 || (GET_CODE (floater) == INSN
8577 && (GET_CODE (PATTERN (floater)) == USE
8578 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8580 continue;
8582 /* Anything except a regular INSN will stop our search. */
8583 if (GET_CODE (floater) != INSN
8584 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8585 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8587 floater = NULL_RTX;
8588 break;
8591 /* See if FLOATER is suitable for combination with the
8592 anchor. */
8593 floater_attr = get_attr_pa_combine_type (floater);
8594 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8595 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8596 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8597 && floater_attr == PA_COMBINE_TYPE_FMPY))
8599 /* If ANCHOR and FLOATER can be combined, then we're
8600 done with this pass. */
8601 if (pa_can_combine_p (new, anchor, floater, 1,
8602 SET_DEST (PATTERN (floater)),
8603 XEXP (SET_SRC (PATTERN (floater)),
8604 0),
8605 XEXP (SET_SRC (PATTERN (floater)),
8606 1)))
8607 break;
8612 /* FLOATER will be nonzero if we found a suitable floating
8613 insn for combination with ANCHOR. */
8614 if (floater
8615 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8616 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8618 /* Emit the new instruction and delete the old anchor. */
8619 emit_insn_before (gen_rtx_PARALLEL
8620 (VOIDmode,
8621 gen_rtvec (2, PATTERN (anchor),
8622 PATTERN (floater))),
8623 anchor);
8625 PUT_CODE (anchor, NOTE);
8626 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8627 NOTE_SOURCE_FILE (anchor) = 0;
8629 /* Emit a special USE insn for FLOATER, then delete
8630 the floating insn. */
8631 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8632 delete_insn (floater);
8634 continue;
8636 else if (floater
8637 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8639 rtx temp;
8640 /* Emit the new_jump instruction and delete the old anchor. */
8641 temp
8642 = emit_jump_insn_before (gen_rtx_PARALLEL
8643 (VOIDmode,
8644 gen_rtvec (2, PATTERN (anchor),
8645 PATTERN (floater))),
8646 anchor);
8648 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8649 PUT_CODE (anchor, NOTE);
8650 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8651 NOTE_SOURCE_FILE (anchor) = 0;
8653 /* Emit a special USE insn for FLOATER, then delete
8654 the floating insn. */
8655 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8656 delete_insn (floater);
8657 continue;
8663 static int
8664 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8665 rtx src1, rtx src2)
8667 int insn_code_number;
8668 rtx start, end;
8670 /* Create a PARALLEL with the patterns of ANCHOR and
8671 FLOATER, try to recognize it, then test constraints
8672 for the resulting pattern.
8674 If the pattern doesn't match or the constraints
8675 aren't met keep searching for a suitable floater
8676 insn. */
8677 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8678 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8679 INSN_CODE (new) = -1;
8680 insn_code_number = recog_memoized (new);
8681 if (insn_code_number < 0
8682 || (extract_insn (new), ! constrain_operands (1)))
8683 return 0;
8685 if (reversed)
8687 start = anchor;
8688 end = floater;
8690 else
8692 start = floater;
8693 end = anchor;
8696 /* There are up to three operands to consider: one
8697 output and two inputs.
8699 The output must not be used between FLOATER & ANCHOR
8700 exclusive. The inputs must not be set between
8701 FLOATER and ANCHOR exclusive. */
8703 if (reg_used_between_p (dest, start, end))
8704 return 0;
8706 if (reg_set_between_p (src1, start, end))
8707 return 0;
8709 if (reg_set_between_p (src2, start, end))
8710 return 0;
8712 /* If we get here, then everything is good. */
8713 return 1;
8716 /* Return nonzero if references for INSN are delayed.
8718 Millicode insns are actually function calls with some special
8719 constraints on arguments and register usage.
8721 Millicode calls always expect their arguments in the integer argument
8722 registers, and always return their result in %r29 (ret1). They
8723 are expected to clobber their arguments, %r1, %r29, and the return
8724 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8726 This function tells reorg that the references to arguments and
8727 millicode calls do not appear to happen until after the millicode call.
8728 This allows reorg to put insns which set the argument registers into the
8729 delay slot of the millicode call -- thus they act more like traditional
8730 CALL_INSNs.
8732 Note we cannot consider side effects of the insn to be delayed because
8733 the branch and link insn will clobber the return pointer. If we happened
8734 to use the return pointer in the delay slot of the call, then we lose.
8736 get_attr_type will try to recognize the given insn, so make sure to
8737 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8738 in particular. */
8740 insn_refs_are_delayed (rtx insn)
8742 return ((GET_CODE (insn) == INSN
8743 && GET_CODE (PATTERN (insn)) != SEQUENCE
8744 && GET_CODE (PATTERN (insn)) != USE
8745 && GET_CODE (PATTERN (insn)) != CLOBBER
8746 && get_attr_type (insn) == TYPE_MILLI));
8749 /* On the HP-PA the value is found in register(s) 28(-29), unless
8750 the mode is SF or DF. Then the value is returned in fr4 (32).
8752 This must perform the same promotions as PROMOTE_MODE, else
8753 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8755 Small structures must be returned in a PARALLEL on PA64 in order
8756 to match the HP Compiler ABI. */
8759 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8761 enum machine_mode valmode;
8763 if (AGGREGATE_TYPE_P (valtype))
8765 if (TARGET_64BIT)
8767 /* Aggregates with a size less than or equal to 128 bits are
8768 returned in GR 28(-29). They are left justified. The pad
8769 bits are undefined. Larger aggregates are returned in
8770 memory. */
8771 rtx loc[2];
8772 int i, offset = 0;
8773 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8775 for (i = 0; i < ub; i++)
8777 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8778 gen_rtx_REG (DImode, 28 + i),
8779 GEN_INT (offset));
8780 offset += 8;
8783 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8785 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
8787 /* Aggregates 5 to 8 bytes in size are returned in general
8788 registers r28-r29 in the same manner as other non
8789 floating-point objects. The data is right-justified and
8790 zero-extended to 64 bits. This is opposite to the normal
8791 justification used on big endian targets and requires
8792 special treatment. */
8793 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8794 gen_rtx_REG (DImode, 28), const0_rtx);
8795 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
8799 if ((INTEGRAL_TYPE_P (valtype)
8800 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8801 || POINTER_TYPE_P (valtype))
8802 valmode = word_mode;
8803 else
8804 valmode = TYPE_MODE (valtype);
8806 if (TREE_CODE (valtype) == REAL_TYPE
8807 && !AGGREGATE_TYPE_P (valtype)
8808 && TYPE_MODE (valtype) != TFmode
8809 && !TARGET_SOFT_FLOAT)
8810 return gen_rtx_REG (valmode, 32);
8812 return gen_rtx_REG (valmode, 28);
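/* Illustrative examples of the above: with !TARGET_SOFT_FLOAT a
   "double" result comes back in fr4 (register 32), an "int" or
   pointer in r28, and on TARGET_64BIT a 16 byte aggregate is returned
   as a PARALLEL spanning r28 and r29.  */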
8815 /* Return the location of a parameter that is passed in a register or NULL
8816 if the parameter has any component that is passed in memory.
8818 This is new code and will be pushed into the net sources after
8819 further testing.
8821 ??? We might want to restructure this so that it looks more like other
8822 ports. */
8824 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8825 int named ATTRIBUTE_UNUSED)
8827 int max_arg_words = (TARGET_64BIT ? 8 : 4);
8828 int alignment = 0;
8829 int arg_size;
8830 int fpr_reg_base;
8831 int gpr_reg_base;
8832 rtx retval;
8834 if (mode == VOIDmode)
8835 return NULL_RTX;
8837 arg_size = FUNCTION_ARG_SIZE (mode, type);
8839 /* If this arg would be passed partially or totally on the stack, then
8840 this routine should return zero. pa_arg_partial_bytes will
8841 handle arguments which are split between regs and stack slots if
8842 the ABI mandates split arguments. */
8843 if (! TARGET_64BIT)
8845 /* The 32-bit ABI does not split arguments. */
8846 if (cum->words + arg_size > max_arg_words)
8847 return NULL_RTX;
8849 else
8851 if (arg_size > 1)
8852 alignment = cum->words & 1;
8853 if (cum->words + alignment >= max_arg_words)
8854 return NULL_RTX;
8857 /* The 32bit ABIs and the 64bit ABIs are rather different,
8858 particularly in their handling of FP registers. We might
8859 be able to cleverly share code between them, but I'm not
8860 going to bother in the hope that splitting them up results
8861 in code that is more easily understood. */
8863 if (TARGET_64BIT)
8865 /* Advance the base registers to their current locations.
8867 Remember, gprs grow towards smaller register numbers while
8868 fprs grow to higher register numbers. Also remember that
8869 although FP regs are 32-bit addressable, we pretend that
8870 the registers are 64 bits wide.
8871 gpr_reg_base = 26 - cum->words;
8872 fpr_reg_base = 32 + cum->words;
8874 /* Arguments wider than one word and small aggregates need special
8875 treatment. */
8876 if (arg_size > 1
8877 || mode == BLKmode
8878 || (type && AGGREGATE_TYPE_P (type)))
8880 /* Double-extended precision (80-bit), quad-precision (128-bit)
8881 and aggregates including complex numbers are aligned on
8882 128-bit boundaries. The first eight 64-bit argument slots
8883 are associated one-to-one, with general registers r26
8884 through r19, and also with floating-point registers fr4
8885 through fr11. Arguments larger than one word are always
8886 passed in general registers.
8888 Using a PARALLEL with a word mode register results in left
8889 justified data on a big-endian target. */
8891 rtx loc[8];
8892 int i, offset = 0, ub = arg_size;
8894 /* Align the base register. */
8895 gpr_reg_base -= alignment;
8897 ub = MIN (ub, max_arg_words - cum->words - alignment);
8898 for (i = 0; i < ub; i++)
8900 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8901 gen_rtx_REG (DImode, gpr_reg_base),
8902 GEN_INT (offset));
8903 gpr_reg_base -= 1;
8904 offset += 8;
8907 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
8910 else
8912 /* If the argument is larger than a word, then we know precisely
8913 which registers we must use. */
8914 if (arg_size > 1)
8916 if (cum->words)
8918 gpr_reg_base = 23;
8919 fpr_reg_base = 38;
8921 else
8923 gpr_reg_base = 25;
8924 fpr_reg_base = 34;
8927 /* Structures 5 to 8 bytes in size are passed in the general
8928 registers in the same manner as other non floating-point
8929 objects. The data is right-justified and zero-extended
8930 to 64 bits. This is opposite to the normal justification
8931 used on big endian targets and requires special treatment.
8932 We now define BLOCK_REG_PADDING to pad these objects. */
8933 if (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
8935 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8936 gen_rtx_REG (DImode, gpr_reg_base),
8937 const0_rtx);
8938 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
8941 else
8943 /* We have a single word (32 bits). A simple computation
8944 will get us the register #s we need. */
8945 gpr_reg_base = 26 - cum->words;
8946 fpr_reg_base = 32 + 2 * cum->words;
8950 /* Determine if the argument needs to be passed in both general and
8951 floating point registers. */
8952 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
8953 /* If we are doing soft-float with portable runtime, then there
8954 is no need to worry about FP regs. */
8955 && !TARGET_SOFT_FLOAT
8956 /* The parameter must be some kind of float, else we can just
8957 pass it in integer registers. */
8958 && FLOAT_MODE_P (mode)
8959 /* The target function must not have a prototype. */
8960 && cum->nargs_prototype <= 0
8961 /* libcalls do not need to pass items in both FP and general
8962 registers. */
8963 && type != NULL_TREE
8964 /* All this hair applies to "outgoing" args only. This includes
8965 sibcall arguments set up with FUNCTION_INCOMING_ARG.
8966 && !cum->incoming)
8967 /* Also pass outgoing floating arguments in both registers in indirect
8968 calls with the 32 bit ABI and the HP assembler since there is no
8969 way to specify the argument locations in static functions. */
8970 || (!TARGET_64BIT
8971 && !TARGET_GAS
8972 && !cum->incoming
8973 && cum->indirect
8974 && FLOAT_MODE_P (mode)))
8976 retval
8977 = gen_rtx_PARALLEL
8978 (mode,
8979 gen_rtvec (2,
8980 gen_rtx_EXPR_LIST (VOIDmode,
8981 gen_rtx_REG (mode, fpr_reg_base),
8982 const0_rtx),
8983 gen_rtx_EXPR_LIST (VOIDmode,
8984 gen_rtx_REG (mode, gpr_reg_base),
8985 const0_rtx)));
8987 else
8989 /* See if we should pass this parameter in a general register. */
8990 if (TARGET_SOFT_FLOAT
8991 /* Indirect calls in the normal 32bit ABI require all arguments
8992 to be passed in general registers. */
8993 || (!TARGET_PORTABLE_RUNTIME
8994 && !TARGET_64BIT
8995 && !TARGET_ELF32
8996 && cum->indirect)
8997 /* If the parameter is not a floating point parameter, then
8998 it belongs in GPRs. */
8999 || !FLOAT_MODE_P (mode)
9000 /* Structure with single SFmode field belongs in GPR. */
9001 || (type && AGGREGATE_TYPE_P (type)))
9002 retval = gen_rtx_REG (mode, gpr_reg_base);
9003 else
9004 retval = gen_rtx_REG (mode, fpr_reg_base);
9006 return retval;
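/* Illustrative 32-bit ABI example: the second word-sized integer
   argument is examined with cum->words == 1, so gpr_reg_base is
   26 - 1 and the argument is passed in %r25; arguments wider than a
   word use the fixed bases (25 or 23) selected above.  */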
9010 /* If this arg would be passed totally in registers or totally on the stack,
9011 then this routine should return zero. */
9013 static int
9014 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9015 tree type, bool named ATTRIBUTE_UNUSED)
9017 unsigned int max_arg_words = 8;
9018 unsigned int offset = 0;
9020 if (!TARGET_64BIT)
9021 return 0;
9023 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9024 offset = 1;
9026 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9027 /* Arg fits fully into registers. */
9028 return 0;
9029 else if (cum->words + offset >= max_arg_words)
9030 /* Arg fully on the stack. */
9031 return 0;
9032 else
9033 /* Arg is split. */
9034 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
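/* Worked example (illustrative, TARGET_64BIT): with cum->words == 6
   and a three word argument, 6 + 3 > 8 but 6 < 8, so the argument is
   split and (8 - 6) * UNITS_PER_WORD bytes are passed in the last two
   argument registers while the remaining word goes on the stack.  */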
9038 /* A get_unnamed_section callback for switching to the text section.
9040 This function is only used with SOM. Because we don't support
9041 named subspaces, we can only create a new subspace or switch back
9042 to the default text subspace. */
9044 static void
9045 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9047 gcc_assert (TARGET_SOM);
9048 if (TARGET_GAS)
9050 if (cfun && !cfun->machine->in_nsubspa)
9052 /* We only want to emit a .nsubspa directive once at the
9053 start of the function. */
9054 cfun->machine->in_nsubspa = 1;
9056 /* Create a new subspace for the text. This provides
9057 better stub placement and one-only functions. */
9058 if (cfun->decl
9059 && DECL_ONE_ONLY (cfun->decl)
9060 && !DECL_WEAK (cfun->decl))
9061 output_section_asm_op ("\t.SPACE $TEXT$\n"
9062 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9063 "ACCESS=44,SORT=24,COMDAT");
9064 else
9065 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9066 return;
9068 else
9070 /* There isn't a current function or the body of the current
9071 function has been completed. So, we are changing to the
9072 text section to output debugging information. Do this in
9073 the default text section. We need to forget that we are
9074 in the text section so that varasm.c will call us when
9075 text_section is selected again. */
9076 in_section = NULL;
9079 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9082 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9084 static void
9085 pa_som_asm_init_sections (void)
9087 text_section
9088 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9090 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9091 is not being generated. */
9092 som_readonly_data_section
9093 = get_unnamed_section (0, output_section_asm_op,
9094 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9096 /* When secondary definitions are not supported, SOM makes readonly
9097 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9098 the comdat flag. */
9099 som_one_only_readonly_data_section
9100 = get_unnamed_section (0, output_section_asm_op,
9101 "\t.SPACE $TEXT$\n"
9102 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9103 "ACCESS=0x2c,SORT=16,COMDAT");
9106 /* When secondary definitions are not supported, SOM makes data one-only
9107 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9108 som_one_only_data_section
9109 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
9110 "\t.SPACE $PRIVATE$\n"
9111 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9112 "ACCESS=31,SORT=24,COMDAT");
9114 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9115 which reference data within the $TEXT$ space (for example constant
9116 strings in the $LIT$ subspace).
9118 The assemblers (GAS and HP as) both have problems with handling
9119 the difference of two symbols which is the other correct way to
9120 reference constant data during PIC code generation.
9122 So, there's no way to reference constant data which is in the
9123 $TEXT$ space during PIC generation. Instead place all constant
9124 data into the $PRIVATE$ subspace (this reduces sharing, but it
9125 works correctly). */
9126 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9128 /* We must not have a reference to an external symbol defined in a
9129 shared library in a readonly section, else the SOM linker will
9130 complain.
9132 So, we force exception information into the data section. */
9133 exception_section = data_section;
9136 /* On hpux10, the linker will give an error if we have a reference
9137 in the read-only data section to a symbol defined in a shared
9138 library. Therefore, expressions that might require a reloc can
9139 not be placed in the read-only data section. */
9141 static section *
9142 pa_select_section (tree exp, int reloc,
9143 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9145 if (TREE_CODE (exp) == VAR_DECL
9146 && TREE_READONLY (exp)
9147 && !TREE_THIS_VOLATILE (exp)
9148 && DECL_INITIAL (exp)
9149 && (DECL_INITIAL (exp) == error_mark_node
9150 || TREE_CONSTANT (DECL_INITIAL (exp)))
9151 && !reloc)
9153 if (TARGET_SOM
9154 && DECL_ONE_ONLY (exp)
9155 && !DECL_WEAK (exp))
9156 return som_one_only_readonly_data_section;
9157 else
9158 return readonly_data_section;
9160 else if (CONSTANT_CLASS_P (exp) && !reloc)
9161 return readonly_data_section;
9162 else if (TARGET_SOM
9163 && TREE_CODE (exp) == VAR_DECL
9164 && DECL_ONE_ONLY (exp)
9165 && !DECL_WEAK (exp))
9166 return som_one_only_data_section;
9167 else
9168 return data_section;
9171 static void
9172 pa_globalize_label (FILE *stream, const char *name)
9174 /* We only handle DATA objects here, functions are globalized in
9175 ASM_DECLARE_FUNCTION_NAME. */
9176 if (! FUNCTION_NAME_P (name))
9178 fputs ("\t.EXPORT ", stream);
9179 assemble_name (stream, name);
9180 fputs (",DATA\n", stream);
9184 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9186 static rtx
9187 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9188 int incoming ATTRIBUTE_UNUSED)
9190 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9193 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9195 bool
9196 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
9198 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9199 PA64 ABI says that objects larger than 128 bits are returned in memory.
9200 Note, int_size_in_bytes can return -1 if the size of the object is
9201 variable or larger than the maximum value that can be expressed as
9202 a HOST_WIDE_INT. It can also return zero for an empty type. The
9203 simplest way to handle variable and empty types is to pass them in
9204 memory. This avoids problems in defining the boundaries of argument
9205 slots, allocating registers, etc. */
9206 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9207 || int_size_in_bytes (type) <= 0);
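/* For example (illustrative): a 12 byte struct is returned in memory
   on the 32-bit port (larger than 8 bytes), while the 64-bit port
   returns it in registers since the threshold there is 16 bytes.  */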
9210 /* Structure to hold declaration and name of external symbols that are
9211 emitted by GCC. We generate a vector of these symbols and output them
9212 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9213 This avoids putting out names that are never really used. */
9215 typedef struct extern_symbol GTY(())
9217 tree decl;
9218 const char *name;
9219 } extern_symbol;
9221 /* Define gc'd vector type for extern_symbol. */
9222 DEF_VEC_O(extern_symbol);
9223 DEF_VEC_ALLOC_O(extern_symbol,gc);
9225 /* Vector of extern_symbol pointers. */
9226 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9228 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9229 /* Mark DECL (name NAME) as an external reference (assembler output
9230 file FILE). This saves the names to output at the end of the file
9231 if actually referenced. */
9233 void
9234 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9236 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9238 gcc_assert (file == asm_out_file);
9239 p->decl = decl;
9240 p->name = name;
9243 /* Output text required at the end of an assembler file.
9244 This includes deferred plabels and .import directives for
9245 all external symbols that were actually referenced. */
9247 static void
9248 pa_hpux_file_end (void)
9250 unsigned int i;
9251 extern_symbol *p;
9253 if (!NO_DEFERRED_PROFILE_COUNTERS)
9254 output_deferred_profile_counters ();
9256 output_deferred_plabels ();
9258 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9260 tree decl = p->decl;
9262 if (!TREE_ASM_WRITTEN (decl)
9263 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9264 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9267 VEC_free (extern_symbol, gc, extern_symbols);
9269 #endif
9271 #include "gt-pa.h"