/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
                                           enum machine_mode,
                                           secondary_reload_info *);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
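
/* Example (register names chosen for illustration): given
   -mfixed-range=fr20-fr23,fr28-fr29, the loop above runs twice and marks
   fr20..fr23 and fr28..fr29 both fixed and call-used, so the register
   allocator never assigns them.  MASK_DISABLE_FPREGS is set only when
   every register in FP_REG_FIRST..FP_REG_LAST has ended up fixed.  */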

/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
        pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
        pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
        pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
        pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
        pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
        pa_cpu = PROCESSOR_7300;
      else
        return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}

void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
          || CONST_OK_FOR_LETTER_P (intval, 'N')
          || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
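
/* The letters map to the insns named in the comment above: 'J' accepts
   constants a single ldo can materialize, 'N' those an ldil can, and
   'K' those a zdepi can synthesize; see CONST_OK_FOR_LETTER_P in pa.h
   for the precise tests.  */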

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5 bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
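
/* Worked example: for x = 0xf0, lsb_mask is 0x10 and
   t = ((0xf0 >> 4) + 0x10) & ~0xf = 0x10, a power of two, so the
   constant can be generated with a single zdepi.  For x = 0x101,
   t = (0x10 + 1) & ~0 = 0x11, not a power of two: the set bits span
   too wide a field and we return false.  */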

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
      0....01....1
      1....10....0
      1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
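
/* Worked example: mask = 0xfffffc3f (the 1..10..01..1 pattern) gives
   ~mask = 0x3c0; adding its least significant set bit 0x40 yields 0x400,
   a single bit, so depi/extru can do the job.  For mask = 0x5, ~mask
   plus its low set bit still has several bits set and we return false.  */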

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
                      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
        = gen_const_mem (Pmode,
                         gen_rtx_LO_SUM (Pmode, tmp_reg,
                                         gen_rtx_UNSPEC (Pmode,
                                                         gen_rtvec (1, orig),
                                                         UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1    Round up.
        else
          Y = (<large int> & ~mask)               Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
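
/* Worked example (offsets chosen for illustration): for an SImode
   reference memory (X + 100000), the mask is 0x3fff and
   100000 & 0x3fff = 1696, which is less than half of 0x4000, so we
   round down: Y = 100000 & ~0x3fff = 98304.  We emit Z = X + 98304
   and rewrite the reference as memory (Z + 1696), whose displacement
   fits the 14-bit field of a single load or store.  A nearby reference
   to X + 100004 computes the same Z and becomes memory (Z + 1700),
   letting CSE share the Z computation.  */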

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          orig_base = XEXP (XEXP (x, 0), 1);
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          orig_base = XEXP (x, 1);
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
               (plus (mult (reg) (shadd_const))
                     (const (plus (symbol_ref) (const_int))))

             where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg), this allows more scaled indexed
             addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
           && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
                                 XEXP (operand1, 0)))
          || ((GET_CODE (operand1) == SUBREG
               && GET_CODE (XEXP (operand1, 0)) == MEM
               && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                      ? SFmode : DFmode),
                                     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && ((GET_CODE (operand0) == MEM
                && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                       ? SFmode : DFmode),
                                      XEXP (operand0, 0)))
               || ((GET_CODE (operand0) == SUBREG)
                   && GET_CODE (XEXP (operand0, 0)) == MEM
                   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                          ? SFmode : DFmode),
                                         XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand0, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
                              replace_equiv_address (operand0, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM
               || GET_CODE (operand1) == CONST_INT
               || (GET_CODE (operand1) == REG
                   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             OPERAND0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg,
                          replace_equiv_address (operand1, scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
          || (GET_CODE (operand1) == CONST_INT
              && cint_ok_for_move (INTVAL (operand1)))
          || (operand1 == CONST0_RTX (mode))
          || (GET_CODE (operand1) == HIGH
              && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
          /* Only `general_operands' can come here, so MEM is ok.  */
          || GET_CODE (operand1) == MEM)
        {
          /* Various sets are created during RTL generation which don't
             have the REG_POINTER flag correctly set.  After the CSE pass,
             instruction recognition can fail if we don't consistently
             set this flag when performing register copies.  This should
             also improve the opportunities for creating insns that use
             unscaled indexing.  */
          if (REG_P (operand0) && REG_P (operand1))
            {
              if (REG_POINTER (operand1)
                  && !REG_POINTER (operand0)
                  && !HARD_REGISTER_P (operand0))
                copy_reg_pointer (operand0, operand1);
              else if (REG_POINTER (operand0)
                       && !REG_POINTER (operand1)
                       && !HARD_REGISTER_P (operand1))
                copy_reg_pointer (operand1, operand0);
            }

          /* When MEMs are broken out, the REG_POINTER flag doesn't
             get set.  In some cases, we can set the REG_POINTER flag
             from the declaration for the MEM.  */
          if (REG_P (operand0)
              && GET_CODE (operand1) == MEM
              && !REG_POINTER (operand0))
            {
              tree decl = MEM_EXPR (operand1);

              /* Set the register pointer flag and register alignment
                 if the declaration for this memory reference is a
                 pointer type.  Fortran indirect argument references
                 are ignored.  */
              if (decl
                  && !(flag_argument_noalias > 1
                       && TREE_CODE (decl) == INDIRECT_REF
                       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
                {
                  tree type;

                  /* If this is a COMPONENT_REF, use the FIELD_DECL from
                     tree operand 1.  */
                  if (TREE_CODE (decl) == COMPONENT_REF)
                    decl = TREE_OPERAND (decl, 1);

                  type = TREE_TYPE (decl);
                  if (TREE_CODE (type) == ARRAY_TYPE)
                    type = get_inner_array_type (type);

                  if (POINTER_TYPE_P (type))
                    {
                      int align;

                      type = TREE_TYPE (type);
                      /* Using TYPE_ALIGN_OK is rather conservative as
                         only the ada frontend actually sets it.  */
                      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
                               : BITS_PER_UNIT);
                      mark_reg_pointer (operand0, align);
                    }
                }
            }

          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
          && !(reload_in_progress || reload_completed))
        {
          rtx temp = gen_reg_rtx (DFmode);

          emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
          emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
          return 1;
        }
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
        {
          /* Run this case quickly.  */
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
      if (! (reload_in_progress || reload_completed))
        {
          operands[0] = validize_mem (operand0);
          operands[1] = operand1 = force_reg (mode, operand1);
        }
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
          && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
        {
          ishighonly = 1;
          operand1 = XEXP (operand1, 0);
        }
      if (symbolic_operand (operand1, mode))
        {
          /* Argh.  The assembler and linker can't handle arithmetic
             involving plabels.

             So we force the plabel into memory, load operand0 from
             the memory location, then add in the constant part.  */
          if ((GET_CODE (operand1) == CONST
               && GET_CODE (XEXP (operand1, 0)) == PLUS
               && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
              || function_label_operand (operand1, mode))
            {
              rtx temp, const_part;

              /* Figure out what (if any) scratch register to use.  */
              if (reload_in_progress || reload_completed)
                {
                  scratch_reg = scratch_reg ? scratch_reg : operand0;
                  /* SCRATCH_REG will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  scratch_reg = force_mode (word_mode, scratch_reg);
                }
              else if (flag_pic)
                scratch_reg = gen_reg_rtx (Pmode);

              if (GET_CODE (operand1) == CONST)
                {
                  /* Save away the constant part of the expression.  */
                  const_part = XEXP (XEXP (operand1, 0), 1);
                  gcc_assert (GET_CODE (const_part) == CONST_INT);

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
                }
              else
                {
                  /* No constant part.  */
                  const_part = NULL_RTX;

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, operand1);
                }

              /* Get the address of the memory location.  PIC-ify it if
                 necessary.  */
              temp = XEXP (temp, 0);
              if (flag_pic)
                temp = legitimize_pic_address (temp, mode, scratch_reg);

              /* Put the address of the memory location into our destination
                 register.  */
              operands[1] = temp;
              emit_move_sequence (operands, mode, scratch_reg);

              /* Now load from the memory location into our destination
                 register.  */
              operands[1] = gen_rtx_MEM (Pmode, operands[0]);
              emit_move_sequence (operands, mode, scratch_reg);

              /* And add back in the constant part.  */
              if (const_part != NULL_RTX)
                expand_inc (operand0, const_part);

              return 1;
            }

          if (flag_pic)
            {
              rtx temp;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (Pmode);

              /* (const (plus (symbol) (const_int))) must be forced to
                 memory during/after reload if the const_int will not fit
                 in 14 bits.  */
              if (GET_CODE (operand1) == CONST
                  && GET_CODE (XEXP (operand1, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
                  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
                  && (reload_completed || reload_in_progress)
                  && flag_pic)
                {
                  rtx const_mem = force_const_mem (mode, operand1);
                  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
                                                        mode, temp);
                  operands[1] = replace_equiv_address (const_mem, operands[1]);
                  emit_move_sequence (operands, mode, temp);
                }
              else
                {
                  operands[1] = legitimize_pic_address (operand1, mode, temp);
                  if (REG_P (operand0) && REG_P (operands[1]))
                    copy_reg_pointer (operand0, operands[1]);
                  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
                }
            }
          /* On the HPPA, references to data space are supposed to use dp,
             register 27, but showing it in the RTL inhibits various cse
             and loop optimizations.  */
          else
            {
              rtx temp, set;

              if (reload_in_progress || reload_completed)
                {
                  temp = scratch_reg ? scratch_reg : operand0;
                  /* TEMP will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  temp = force_mode (word_mode, temp);
                }
              else
                temp = gen_reg_rtx (mode);

              /* Loading a SYMBOL_REF into a register makes that register
                 safe to be used as the base in an indexed address.

                 Don't mark hard registers though.  That loses.  */
              if (GET_CODE (operand0) == REG
                  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (operand0, BITS_PER_UNIT);
              if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
                mark_reg_pointer (temp, BITS_PER_UNIT);

              if (ishighonly)
                set = gen_rtx_SET (mode, operand0, temp);
              else
                set = gen_rtx_SET (VOIDmode,
                                   operand0,
                                   gen_rtx_LO_SUM (mode, temp, operand1));

              emit_insn (gen_rtx_SET (VOIDmode,
                                      temp,
                                      gen_rtx_HIGH (mode, operand1)));
              emit_insn (set);
            }
          return 1;
        }
      else if (pa_tls_referenced_p (operand1))
        {
          rtx tmp = operand1;
          rtx addend = NULL;

          if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
            {
              addend = XEXP (XEXP (tmp, 0), 1);
              tmp = XEXP (XEXP (tmp, 0), 0);
            }

          gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
          tmp = legitimize_tls_address (tmp);
          if (addend)
            {
              tmp = gen_rtx_PLUS (mode, tmp, addend);
              tmp = force_operand (tmp, operands[0]);
            }
          operands[1] = tmp;
        }
1850 else if (GET_CODE (operand1) != CONST_INT
1851 || !cint_ok_for_move (INTVAL (operand1)))
1853 rtx insn, temp;
1854 rtx op1 = operand1;
1855 HOST_WIDE_INT value = 0;
1856 HOST_WIDE_INT insv = 0;
1857 int insert = 0;
1859 if (GET_CODE (operand1) == CONST_INT)
1860 value = INTVAL (operand1);
1862 if (TARGET_64BIT
1863 && GET_CODE (operand1) == CONST_INT
1864 && HOST_BITS_PER_WIDE_INT > 32
1865 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1867 HOST_WIDE_INT nval;
1869 /* Extract the low order 32 bits of the value and sign extend.
1870 If the new value is the same as the original value, we can
1871 use the original value as-is. If the new value is
1872 different, we use it and insert the most-significant 32-bits
1873 of the original value into the final result. */
1874 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1875 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
1876 if (value != nval)
1878 #if HOST_BITS_PER_WIDE_INT > 32
1879 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1880 #endif
1881 insert = 1;
1882 value = nval;
1883 operand1 = GEN_INT (nval);
1887 if (reload_in_progress || reload_completed)
1888 temp = scratch_reg ? scratch_reg : operand0;
1889 else
1890 temp = gen_reg_rtx (mode);
1892 /* We don't directly split DImode constants on 32-bit targets
1893 because PLUS uses an 11-bit immediate and the insn sequence
1894 generated is not as efficient as the one using HIGH/LO_SUM. */
1895 if (GET_CODE (operand1) == CONST_INT
1896 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
1897 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1898 && !insert)
1900 /* Directly break constant into high and low parts. This
1901 provides better optimization opportunities because various
1902 passes recognize constants split with PLUS but not LO_SUM.
1903 We use a 14-bit signed low part except when the addition
1904 of 0x4000 to the high part might change the sign of the
1905 high part. */
1906 HOST_WIDE_INT low = value & 0x3fff;
1907 HOST_WIDE_INT high = value & ~ 0x3fff;
1909 if (low >= 0x2000)
1911 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
1912 high += 0x2000;
1913 else
1914 high += 0x4000;
1917 low = value - high;
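/* Worked example (illustrative, not in the original source):
   value = 0x12346789 gives low = 0x2789 and high = 0x12344000.
   Since low >= 0x2000, high is biased to 0x12348000 and low is
   recomputed as value - high = -0x1877, which still fits the
   14-bit signed immediate that the PLUS below becomes.  */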
1919 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
1920 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1922 else
1924 emit_insn (gen_rtx_SET (VOIDmode, temp,
1925 gen_rtx_HIGH (mode, operand1)));
1926 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
1929 insn = emit_move_insn (operands[0], operands[1]);
1931 /* Now insert the most significant 32 bits of the value
1932 into the register. When we don't have a second register
1933 available, it could take up to nine instructions to load
1934 a 64-bit integer constant. Prior to reload, we force
1935 constants that would take more than three instructions
1936 to load to the constant pool. During and after reload,
1937 we have to handle all possible values. */
1938 if (insert)
1940 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
1941 register and the value to be inserted is outside the
1942 range that can be loaded with three depdi instructions. */
1943 if (temp != operand0 && (insv >= 16384 || insv < -16384))
1945 operand1 = GEN_INT (insv);
1947 emit_insn (gen_rtx_SET (VOIDmode, temp,
1948 gen_rtx_HIGH (mode, operand1)));
1949 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
1950 emit_insn (gen_insv (operand0, GEN_INT (32),
1951 const0_rtx, temp));
1953 else
1955 int len = 5, pos = 27;
1957 /* Insert the bits using the depdi instruction. */
1958 while (pos >= 0)
1960 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
1961 HOST_WIDE_INT sign = v5 < 0;
1963 /* Left extend the insertion. */
1964 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
1965 while (pos > 0 && (insv & 1) == sign)
1967 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
1968 len += 1;
1969 pos -= 1;
1972 emit_insn (gen_insv (operand0, GEN_INT (len),
1973 GEN_INT (pos), GEN_INT (v5)));
1975 len = pos > 0 && pos < 5 ? pos : 5;
1976 pos -= len;
1981 set_unique_reg_note (insn, REG_EQUAL, op1);
1983 return 1;
1986 /* Now have insn-emit do whatever it normally does. */
1987 return 0;
1990 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
1991 it will need a link/runtime reloc). */
1994 reloc_needed (tree exp)
1996 int reloc = 0;
1998 switch (TREE_CODE (exp))
2000 case ADDR_EXPR:
2001 return 1;
2003 case PLUS_EXPR:
2004 case MINUS_EXPR:
2005 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2006 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2007 break;
2009 case NOP_EXPR:
2010 case CONVERT_EXPR:
2011 case NON_LVALUE_EXPR:
2012 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2013 break;
2015 case CONSTRUCTOR:
2017 tree value;
2018 unsigned HOST_WIDE_INT ix;
2020 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2021 if (value)
2022 reloc |= reloc_needed (value);
2024 break;
2026 case ERROR_MARK:
2027 break;
2029 default:
2030 break;
2032 return reloc;
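/* For example (illustrative): an initializer such as &x + 4 is a
   PLUS_EXPR over an ADDR_EXPR, so reloc_needed returns 1 for it,
   while a plain INTEGER_CST falls through the default case and
   yields 0.  */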
2035 /* Does operand (which is a symbolic_operand) live in text space?
2036 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2037 will be true. */
2040 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2042 if (GET_CODE (operand) == CONST)
2043 operand = XEXP (XEXP (operand, 0), 0);
2044 if (flag_pic)
2046 if (GET_CODE (operand) == SYMBOL_REF)
2047 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2049 else
2051 if (GET_CODE (operand) == SYMBOL_REF)
2052 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2054 return 1;
2058 /* Return the best assembler insn template
2059 for moving operands[1] into operands[0] as a fullword. */
2060 const char *
2061 singlemove_string (rtx *operands)
2063 HOST_WIDE_INT intval;
2065 if (GET_CODE (operands[0]) == MEM)
2066 return "stw %r1,%0";
2067 if (GET_CODE (operands[1]) == MEM)
2068 return "ldw %1,%0";
2069 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2071 long i;
2072 REAL_VALUE_TYPE d;
2074 gcc_assert (GET_MODE (operands[1]) == SFmode);
2076 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2077 bit pattern. */
2078 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2079 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2081 operands[1] = GEN_INT (i);
2082 /* Fall through to CONST_INT case. */
2084 if (GET_CODE (operands[1]) == CONST_INT)
2086 intval = INTVAL (operands[1]);
2088 if (VAL_14_BITS_P (intval))
2089 return "ldi %1,%0";
2090 else if ((intval & 0x7ff) == 0)
2091 return "ldil L'%1,%0";
2092 else if (zdepi_cint_p (intval))
2093 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2094 else
2095 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2097 return "copy %1,%0";
2101 /* Compute position (in OP[1]) and width (in OP[2])
2102 useful for copying IMM to a register using the zdepi
2103 instruction. Store the immediate value to insert in OP[0]. */
2104 static void
2105 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2107 int lsb, len;
2109 /* Find the least significant set bit in IMM. */
2110 for (lsb = 0; lsb < 32; lsb++)
2112 if ((imm & 1) != 0)
2113 break;
2114 imm >>= 1;
2117 /* Choose variants based on *sign* of the 5-bit field. */
2118 if ((imm & 0x10) == 0)
2119 len = (lsb <= 28) ? 4 : 32 - lsb;
2120 else
2122 /* Find the width of the bitstring in IMM. */
2123 for (len = 5; len < 32; len++)
2125 if ((imm & (1 << len)) == 0)
2126 break;
2129 /* Sign extend IMM as a 5-bit value. */
2130 imm = (imm & 0xf) - 0x10;
2133 op[0] = imm;
2134 op[1] = 31 - lsb;
2135 op[2] = len;
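/* Worked example (illustrative, not in the original source):
   imm = 0x1f0 has lsb = 4; the shifted value 0x1f has bit 4 set, so
   the bitstring scan gives len = 5 and imm sign extends to -1.  The
   result op = {-1, 27, 5} describes "zdepi -1,27,5,%reg", which
   deposits five one bits whose rightmost bit lands at PA bit
   position 27, reproducing 0x1f0.  */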
2138 /* Compute position (in OP[1]) and width (in OP[2])
2139 useful for copying IMM to a register using the depdi,z
2140 instruction. Store the immediate value to insert in OP[0]. */
2141 void
2142 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2144 HOST_WIDE_INT lsb, len;
2146 /* Find the least significant set bit in IMM. */
2147 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2149 if ((imm & 1) != 0)
2150 break;
2151 imm >>= 1;
2154 /* Choose variants based on *sign* of the 5-bit field. */
2155 if ((imm & 0x10) == 0)
2156 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2157 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2158 else
2160 /* Find the width of the bitstring in IMM. */
2161 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2163 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2164 break;
2167 /* Sign extend IMM as a 5-bit value. */
2168 imm = (imm & 0xf) - 0x10;
2171 op[0] = imm;
2172 op[1] = 63 - lsb;
2173 op[2] = len;
2176 /* Output assembler code to perform a doubleword move insn
2177 with operands OPERANDS. */
2179 const char *
2180 output_move_double (rtx *operands)
2182 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2183 rtx latehalf[2];
2184 rtx addreg0 = 0, addreg1 = 0;
2186 /* First classify both operands. */
2188 if (REG_P (operands[0]))
2189 optype0 = REGOP;
2190 else if (offsettable_memref_p (operands[0]))
2191 optype0 = OFFSOP;
2192 else if (GET_CODE (operands[0]) == MEM)
2193 optype0 = MEMOP;
2194 else
2195 optype0 = RNDOP;
2197 if (REG_P (operands[1]))
2198 optype1 = REGOP;
2199 else if (CONSTANT_P (operands[1]))
2200 optype1 = CNSTOP;
2201 else if (offsettable_memref_p (operands[1]))
2202 optype1 = OFFSOP;
2203 else if (GET_CODE (operands[1]) == MEM)
2204 optype1 = MEMOP;
2205 else
2206 optype1 = RNDOP;
2208 /* Check for the cases that the operand constraints are not
2209 supposed to allow to happen. */
2210 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2212 /* Handle copies between general and floating registers. */
2214 if (optype0 == REGOP && optype1 == REGOP
2215 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2217 if (FP_REG_P (operands[0]))
2219 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2220 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2221 return "{fldds|fldd} -16(%%sp),%0";
2223 else
2225 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2226 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2227 return "{ldws|ldw} -12(%%sp),%R0";
2231 /* Handle auto decrementing and incrementing loads and stores
2232 specifically, since the structure of the function doesn't work
2233 for them without major modification. Do this better once this
2234 port is taught about the PA's general inc/dec addressing.
2235 (This was written by tege. Chide him if it doesn't work.) */
2237 if (optype0 == MEMOP)
2239 /* We have to output the address syntax ourselves, since print_operand
2240 doesn't deal with the addresses we want to use. Fix this later. */
2242 rtx addr = XEXP (operands[0], 0);
2243 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2245 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2247 operands[0] = XEXP (addr, 0);
2248 gcc_assert (GET_CODE (operands[1]) == REG
2249 && GET_CODE (operands[0]) == REG);
2251 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2253 /* No overlap between high target register and address
2254 register. (We do this in a non-obvious way to
2255 save a register file writeback) */
2256 if (GET_CODE (addr) == POST_INC)
2257 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2258 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2260 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2262 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2264 operands[0] = XEXP (addr, 0);
2265 gcc_assert (GET_CODE (operands[1]) == REG
2266 && GET_CODE (operands[0]) == REG);
2268 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2269 /* No overlap between high target register and address
2270 register. (We do this in a non-obvious way to save a
2271 register file writeback) */
2272 if (GET_CODE (addr) == PRE_INC)
2273 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2274 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2277 if (optype1 == MEMOP)
2279 /* We have to output the address syntax ourselves, since print_operand
2280 doesn't deal with the addresses we want to use. Fix this later. */
2282 rtx addr = XEXP (operands[1], 0);
2283 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2285 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2287 operands[1] = XEXP (addr, 0);
2288 gcc_assert (GET_CODE (operands[0]) == REG
2289 && GET_CODE (operands[1]) == REG);
2291 if (!reg_overlap_mentioned_p (high_reg, addr))
2293 /* No overlap between high target register and address
2294 register. (We do this in a non-obvious way to
2295 save a register file writeback) */
2296 if (GET_CODE (addr) == POST_INC)
2297 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2298 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2300 else
2302 /* This is an undefined situation. We should load into the
2303 address register *and* update that register. Probably
2304 we don't need to handle this at all. */
2305 if (GET_CODE (addr) == POST_INC)
2306 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2307 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2310 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2312 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2314 operands[1] = XEXP (addr, 0);
2315 gcc_assert (GET_CODE (operands[0]) == REG
2316 && GET_CODE (operands[1]) == REG);
2318 if (!reg_overlap_mentioned_p (high_reg, addr))
2320 /* No overlap between high target register and address
2321 register. (We do this in a non-obvious way to
2322 save a register file writeback) */
2323 if (GET_CODE (addr) == PRE_INC)
2324 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2325 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2327 else
2329 /* This is an undefined situation. We should load into the
2330 address register *and* update that register. Probably
2331 we don't need to handle this at all. */
2332 if (GET_CODE (addr) == PRE_INC)
2333 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2334 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2337 else if (GET_CODE (addr) == PLUS
2338 && GET_CODE (XEXP (addr, 0)) == MULT)
2340 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2342 if (!reg_overlap_mentioned_p (high_reg, addr))
2344 rtx xoperands[4];
2346 xoperands[0] = high_reg;
2347 xoperands[1] = XEXP (addr, 1);
2348 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2349 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2350 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2351 xoperands);
2352 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2354 else
2356 rtx xoperands[4];
2358 xoperands[0] = high_reg;
2359 xoperands[1] = XEXP (addr, 1);
2360 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2361 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2362 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2363 xoperands);
2364 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2369 /* If an operand is an unoffsettable memory ref, find a register
2370 we can increment temporarily to make it refer to the second word. */
2372 if (optype0 == MEMOP)
2373 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2375 if (optype1 == MEMOP)
2376 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2378 /* Ok, we can do one word at a time.
2379 Normally we do the low-numbered word first.
2381 In either case, set up in LATEHALF the operands to use
2382 for the high-numbered word and in some cases alter the
2383 operands in OPERANDS to be suitable for the low-numbered word. */
2385 if (optype0 == REGOP)
2386 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2387 else if (optype0 == OFFSOP)
2388 latehalf[0] = adjust_address (operands[0], SImode, 4);
2389 else
2390 latehalf[0] = operands[0];
2392 if (optype1 == REGOP)
2393 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2394 else if (optype1 == OFFSOP)
2395 latehalf[1] = adjust_address (operands[1], SImode, 4);
2396 else if (optype1 == CNSTOP)
2397 split_double (operands[1], &operands[1], &latehalf[1]);
2398 else
2399 latehalf[1] = operands[1];
2401 /* If the first move would clobber the source of the second one,
2402 do them in the other order.
2404 This can happen in two cases:
2406 mem -> register where the first half of the destination register
2407 is the same register used in the memory's address. Reload
2408 can create such insns.
2410 mem in this case will be either register indirect or register
2411 indirect plus a valid offset.
2413 register -> register move where REGNO(dst) == REGNO(src + 1)
2414 someone (Tim/Tege?) claimed this can happen for parameter loads.
2416 Handle mem -> register case first. */
2417 if (optype0 == REGOP
2418 && (optype1 == MEMOP || optype1 == OFFSOP)
2419 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2420 operands[1], 0))
2422 /* Do the late half first. */
2423 if (addreg1)
2424 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2425 output_asm_insn (singlemove_string (latehalf), latehalf);
2427 /* Then clobber. */
2428 if (addreg1)
2429 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2430 return singlemove_string (operands);
2433 /* Now handle register -> register case. */
2434 if (optype0 == REGOP && optype1 == REGOP
2435 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2437 output_asm_insn (singlemove_string (latehalf), latehalf);
2438 return singlemove_string (operands);
2441 /* Normal case: do the two words, low-numbered first. */
2443 output_asm_insn (singlemove_string (operands), operands);
2445 /* Make any unoffsettable addresses point at high-numbered word. */
2446 if (addreg0)
2447 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2448 if (addreg1)
2449 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2451 /* Do that word. */
2452 output_asm_insn (singlemove_string (latehalf), latehalf);
2454 /* Undo the adds we just did. */
2455 if (addreg0)
2456 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2457 if (addreg1)
2458 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2460 return "";
2463 const char *
2464 output_fp_move_double (rtx *operands)
2466 if (FP_REG_P (operands[0]))
2468 if (FP_REG_P (operands[1])
2469 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2470 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2471 else
2472 output_asm_insn ("fldd%F1 %1,%0", operands);
2474 else if (FP_REG_P (operands[1]))
2476 output_asm_insn ("fstd%F0 %1,%0", operands);
2478 else
2480 rtx xoperands[2];
2482 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2484 /* This is a pain. You have to be prepared to deal with an
2485 arbitrary address here, including pre/post increment/decrement,
2487 so avoid this in the MD. */
2488 gcc_assert (GET_CODE (operands[0]) == REG);
2490 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2491 xoperands[0] = operands[0];
2492 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2494 return "";
2497 /* Return a REG that occurs in ADDR with coefficient 1.
2498 ADDR can be effectively incremented by incrementing REG. */
2500 static rtx
2501 find_addr_reg (rtx addr)
2503 while (GET_CODE (addr) == PLUS)
2505 if (GET_CODE (XEXP (addr, 0)) == REG)
2506 addr = XEXP (addr, 0);
2507 else if (GET_CODE (XEXP (addr, 1)) == REG)
2508 addr = XEXP (addr, 1);
2509 else if (CONSTANT_P (XEXP (addr, 0)))
2510 addr = XEXP (addr, 1);
2511 else if (CONSTANT_P (XEXP (addr, 1)))
2512 addr = XEXP (addr, 0);
2513 else
2514 gcc_unreachable ();
2516 gcc_assert (GET_CODE (addr) == REG);
2517 return addr;
2520 /* Emit code to perform a block move.
2522 OPERANDS[0] is the destination pointer as a REG, clobbered.
2523 OPERANDS[1] is the source pointer as a REG, clobbered.
2524 OPERANDS[2] is a register for temporary storage.
2525 OPERANDS[3] is a register for temporary storage.
2526 OPERANDS[4] is the size as a CONST_INT
2527 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2528 OPERANDS[6] is another temporary register. */
2530 const char *
2531 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2533 int align = INTVAL (operands[5]);
2534 unsigned long n_bytes = INTVAL (operands[4]);
2536 /* We can't move more than a word at a time because the PA
2537 has no longer integer move insns. (Could use fp mem ops?) */
2538 if (align > (TARGET_64BIT ? 8 : 4))
2539 align = (TARGET_64BIT ? 8 : 4);
2541 /* Note that we know each loop below will execute at least twice
2542 (else we would have open-coded the copy). */
2543 switch (align)
2545 case 8:
2546 /* Pre-adjust the loop counter. */
2547 operands[4] = GEN_INT (n_bytes - 16);
2548 output_asm_insn ("ldi %4,%2", operands);
2550 /* Copying loop. */
2551 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2552 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2553 output_asm_insn ("std,ma %3,8(%0)", operands);
2554 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2555 output_asm_insn ("std,ma %6,8(%0)", operands);
2557 /* Handle the residual. There could be up to 15 bytes of
2558 residual to copy! */
2559 if (n_bytes % 16 != 0)
2561 operands[4] = GEN_INT (n_bytes % 8);
2562 if (n_bytes % 16 >= 8)
2563 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2564 if (n_bytes % 8 != 0)
2565 output_asm_insn ("ldd 0(%1),%6", operands);
2566 if (n_bytes % 16 >= 8)
2567 output_asm_insn ("std,ma %3,8(%0)", operands);
2568 if (n_bytes % 8 != 0)
2569 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2571 return "";
2573 case 4:
2574 /* Pre-adjust the loop counter. */
2575 operands[4] = GEN_INT (n_bytes - 8);
2576 output_asm_insn ("ldi %4,%2", operands);
2578 /* Copying loop. */
2579 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2580 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2581 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2582 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2583 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2585 /* Handle the residual. There could be up to 7 bytes of
2586 residual to copy! */
2587 if (n_bytes % 8 != 0)
2589 operands[4] = GEN_INT (n_bytes % 4);
2590 if (n_bytes % 8 >= 4)
2591 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2592 if (n_bytes % 4 != 0)
2593 output_asm_insn ("ldw 0(%1),%6", operands);
2594 if (n_bytes % 8 >= 4)
2595 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2596 if (n_bytes % 4 != 0)
2597 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2599 return "";
2601 case 2:
2602 /* Pre-adjust the loop counter. */
2603 operands[4] = GEN_INT (n_bytes - 4);
2604 output_asm_insn ("ldi %4,%2", operands);
2606 /* Copying loop. */
2607 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2608 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2609 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2610 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2611 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2613 /* Handle the residual. */
2614 if (n_bytes % 4 != 0)
2616 if (n_bytes % 4 >= 2)
2617 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2618 if (n_bytes % 2 != 0)
2619 output_asm_insn ("ldb 0(%1),%6", operands);
2620 if (n_bytes % 4 >= 2)
2621 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2622 if (n_bytes % 2 != 0)
2623 output_asm_insn ("stb %6,0(%0)", operands);
2625 return "";
2627 case 1:
2628 /* Pre-adjust the loop counter. */
2629 operands[4] = GEN_INT (n_bytes - 2);
2630 output_asm_insn ("ldi %4,%2", operands);
2632 /* Copying loop. */
2633 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2634 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2635 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2636 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2637 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2639 /* Handle the residual. */
2640 if (n_bytes % 2 != 0)
2642 output_asm_insn ("ldb 0(%1),%3", operands);
2643 output_asm_insn ("stb %3,0(%0)", operands);
2645 return "";
2647 default:
2648 gcc_unreachable ();
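/* A sketch (illustrative, not in the original source) of the code the
   align == 4 case above emits for n_bytes = 22, one assembler dialect
   shown:

       ldi 14,%2            ; 22 - 8, the pre-adjusted counter
       ldws,ma 4(%1),%3     ; loop copies 8 bytes per iteration
       ldws,ma 4(%1),%6
       stws,ma %3,4(%0)
       addib,>= -8,%2,.-12
       stws,ma %6,4(%0)
       ldws,ma 4(%1),%3     ; 6 residual bytes: one full word ...
       ldw 0(%1),%6
       stws,ma %3,4(%0)
       stbys,e %6,2(%0)     ; ... plus a 2-byte store-bytes tail  */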
2652 /* Count the number of insns necessary to handle this block move.
2654 Basic structure is the same as output_block_move, except that we
2655 count insns rather than emit them. */
2657 static int
2658 compute_movmem_length (rtx insn)
2660 rtx pat = PATTERN (insn);
2661 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2662 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2663 unsigned int n_insns = 0;
2665 /* We can't move more than a word at a time because the PA
2666 has no longer integer move insns. (Could use fp mem ops?) */
2667 if (align > (TARGET_64BIT ? 8 : 4))
2668 align = (TARGET_64BIT ? 8 : 4);
2670 /* The basic copying loop. */
2671 n_insns = 6;
2673 /* Residuals. */
2674 if (n_bytes % (2 * align) != 0)
2676 if ((n_bytes % (2 * align)) >= align)
2677 n_insns += 2;
2679 if ((n_bytes % align) != 0)
2680 n_insns += 2;
2683 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2684 return n_insns * 4;
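/* For instance (illustrative): align = 4 and n_bytes = 22 gives the
   6-insn loop, plus 2 insns for the word residual (22 % 8 >= 4) and
   2 for the byte tail (22 % 4 != 0), i.e. 10 insns = 40 bytes,
   matching the sequence output_block_move emits.  */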
2687 /* Emit code to perform a block clear.
2689 OPERANDS[0] is the destination pointer as a REG, clobbered.
2690 OPERANDS[1] is a register for temporary storage.
2691 OPERANDS[2] is the size as a CONST_INT
2692 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2694 const char *
2695 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2697 int align = INTVAL (operands[3]);
2698 unsigned long n_bytes = INTVAL (operands[2]);
2700 /* We can't clear more than a word at a time because the PA
2701 has no longer integer move insns. */
2702 if (align > (TARGET_64BIT ? 8 : 4))
2703 align = (TARGET_64BIT ? 8 : 4);
2705 /* Note that we know each loop below will execute at least twice
2706 (else we would have open-coded the clear). */
2707 switch (align)
2709 case 8:
2710 /* Pre-adjust the loop counter. */
2711 operands[2] = GEN_INT (n_bytes - 16);
2712 output_asm_insn ("ldi %2,%1", operands);
2714 /* Loop. */
2715 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2716 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2717 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2719 /* Handle the residual. There could be up to 15 bytes of
2720 residual to clear! */
2721 if (n_bytes % 16 != 0)
2723 operands[2] = GEN_INT (n_bytes % 8);
2724 if (n_bytes % 16 >= 8)
2725 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2726 if (n_bytes % 8 != 0)
2727 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2729 return "";
2731 case 4:
2732 /* Pre-adjust the loop counter. */
2733 operands[2] = GEN_INT (n_bytes - 8);
2734 output_asm_insn ("ldi %2,%1", operands);
2736 /* Loop. */
2737 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2738 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2739 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2741 /* Handle the residual. There could be up to 7 bytes of
2742 residual to clear! */
2743 if (n_bytes % 8 != 0)
2745 operands[2] = GEN_INT (n_bytes % 4);
2746 if (n_bytes % 8 >= 4)
2747 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2748 if (n_bytes % 4 != 0)
2749 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2751 return "";
2753 case 2:
2754 /* Pre-adjust the loop counter. */
2755 operands[2] = GEN_INT (n_bytes - 4);
2756 output_asm_insn ("ldi %2,%1", operands);
2758 /* Loop. */
2759 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2760 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2761 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2763 /* Handle the residual. */
2764 if (n_bytes % 4 != 0)
2766 if (n_bytes % 4 >= 2)
2767 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2768 if (n_bytes % 2 != 0)
2769 output_asm_insn ("stb %%r0,0(%0)", operands);
2771 return "";
2773 case 1:
2774 /* Pre-adjust the loop counter. */
2775 operands[2] = GEN_INT (n_bytes - 2);
2776 output_asm_insn ("ldi %2,%1", operands);
2778 /* Loop. */
2779 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2780 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2781 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2783 /* Handle the residual. */
2784 if (n_bytes % 2 != 0)
2785 output_asm_insn ("stb %%r0,0(%0)", operands);
2787 return "";
2789 default:
2790 gcc_unreachable ();
2794 /* Count the number of insns necessary to handle this block clear.
2796 Basic structure is the same as output_block_clear, except that we
2797 count insns rather than emit them. */
2799 static int
2800 compute_clrmem_length (rtx insn)
2802 rtx pat = PATTERN (insn);
2803 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2804 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2805 unsigned int n_insns = 0;
2807 /* We can't clear more than a word at a time because the PA
2808 has no longer integer move insns. */
2809 if (align > (TARGET_64BIT ? 8 : 4))
2810 align = (TARGET_64BIT ? 8 : 4);
2812 /* The basic loop. */
2813 n_insns = 4;
2815 /* Residuals. */
2816 if (n_bytes % (2 * align) != 0)
2818 if ((n_bytes % (2 * align)) >= align)
2819 n_insns++;
2821 if ((n_bytes % align) != 0)
2822 n_insns++;
2825 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2826 return n_insns * 4;
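/* For instance (illustrative): align = 4 and n_bytes = 22 gives the
   4-insn loop, plus one insn for the word residual and one for the
   byte tail, i.e. 6 insns = 24 bytes.  */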
2830 const char *
2831 output_and (rtx *operands)
2833 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2835 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2836 int ls0, ls1, ms0, p, len;
2838 for (ls0 = 0; ls0 < 32; ls0++)
2839 if ((mask & (1 << ls0)) == 0)
2840 break;
2842 for (ls1 = ls0; ls1 < 32; ls1++)
2843 if ((mask & (1 << ls1)) != 0)
2844 break;
2846 for (ms0 = ls1; ms0 < 32; ms0++)
2847 if ((mask & (1 << ms0)) == 0)
2848 break;
2850 gcc_assert (ms0 == 32);
2852 if (ls1 == 32)
2854 len = ls0;
2856 gcc_assert (len);
2858 operands[2] = GEN_INT (len);
2859 return "{extru|extrw,u} %1,31,%2,%0";
2861 else
2863 /* We could use this `depi' for the case above as well, but `depi'
2864 requires one more register file access than an `extru'. */
2866 p = 31 - ls0;
2867 len = ls1 - ls0;
2869 operands[2] = GEN_INT (p);
2870 operands[3] = GEN_INT (len);
2871 return "{depi|depwi} 0,%2,%3,%0";
2874 else
2875 return "and %1,%2,%0";
2878 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2879 storing the result in operands[0]. */
2880 const char *
2881 output_64bit_and (rtx *operands)
2883 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2885 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2886 int ls0, ls1, ms0, p, len;
2888 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2889 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2890 break;
2892 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2893 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2894 break;
2896 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2897 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2898 break;
2900 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2902 if (ls1 == HOST_BITS_PER_WIDE_INT)
2904 len = ls0;
2906 gcc_assert (len);
2908 operands[2] = GEN_INT (len);
2909 return "extrd,u %1,63,%2,%0";
2911 else
2913 /* We could use this `depdi' for the case above as well, but `depdi'
2914 requires one more register file access than an `extrd,u'. */
2916 p = 63 - ls0;
2917 len = ls1 - ls0;
2919 operands[2] = GEN_INT (p);
2920 operands[3] = GEN_INT (len);
2921 return "depdi 0,%2,%3,%0";
2924 else
2925 return "and %1,%2,%0";
2928 const char *
2929 output_ior (rtx *operands)
2931 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2932 int bs0, bs1, p, len;
2934 if (INTVAL (operands[2]) == 0)
2935 return "copy %1,%0";
2937 for (bs0 = 0; bs0 < 32; bs0++)
2938 if ((mask & (1 << bs0)) != 0)
2939 break;
2941 for (bs1 = bs0; bs1 < 32; bs1++)
2942 if ((mask & (1 << bs1)) == 0)
2943 break;
2945 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2947 p = 31 - bs0;
2948 len = bs1 - bs0;
2950 operands[2] = GEN_INT (p);
2951 operands[3] = GEN_INT (len);
2952 return "{depi|depwi} -1,%2,%3,%0";
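/* Worked example (illustrative): mask = 0x00000f00 gives bs0 = 8 and
   bs1 = 12, hence "depi -1,23,4,%0", which forces the 4-bit field at
   conventional bit positions 8..11 to all ones.  */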
2955 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
2956 storing the result in operands[0]. */
2957 const char *
2958 output_64bit_ior (rtx *operands)
2960 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2961 int bs0, bs1, p, len;
2963 if (INTVAL (operands[2]) == 0)
2964 return "copy %1,%0";
2966 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
2967 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
2968 break;
2970 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
2971 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
2972 break;
2974 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
2975 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2977 p = 63 - bs0;
2978 len = bs1 - bs0;
2980 operands[2] = GEN_INT (p);
2981 operands[3] = GEN_INT (len);
2982 return "depdi -1,%2,%3,%0";
2985 /* Target hook for assembling integer objects. This code handles
2986 aligned SI and DI integers specially since function references
2987 must be preceded by P%. */
2989 static bool
2990 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
2992 if (size == UNITS_PER_WORD
2993 && aligned_p
2994 && function_label_operand (x, VOIDmode))
2996 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
2997 output_addr_const (asm_out_file, x);
2998 fputc ('\n', asm_out_file);
2999 return true;
3001 return default_assemble_integer (x, size, aligned_p);
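/* For example (illustrative): a word-sized, aligned reference to a
   function foo is emitted as ".word P%foo" (".dword P%foo" for 8-byte
   words), letting the linker substitute a function descriptor where
   one is required.  */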
3004 /* Output an ascii string. */
3005 void
3006 output_ascii (FILE *file, const char *p, int size)
3008 int i;
3009 int chars_output;
3010 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3012 /* The HP assembler can only take strings of 256 characters at one
3013 time. This is a limitation on input line length, *not* the
3014 length of the string. Sigh. Even worse, it seems that the
3015 restriction is in number of input characters (see \xnn &
3016 \whatever). So we have to do this very carefully. */
3018 fputs ("\t.STRING \"", file);
3020 chars_output = 0;
3021 for (i = 0; i < size; i += 4)
3023 int co = 0;
3024 int io = 0;
3025 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3027 register unsigned int c = (unsigned char) p[i + io];
3029 if (c == '\"' || c == '\\')
3030 partial_output[co++] = '\\';
3031 if (c >= ' ' && c < 0177)
3032 partial_output[co++] = c;
3033 else
3035 unsigned int hexd;
3036 partial_output[co++] = '\\';
3037 partial_output[co++] = 'x';
3038 hexd = c / 16 - 0 + '0';
3039 if (hexd > '9')
3040 hexd -= '9' - 'a' + 1;
3041 partial_output[co++] = hexd;
3042 hexd = c % 16 - 0 + '0';
3043 if (hexd > '9')
3044 hexd -= '9' - 'a' + 1;
3045 partial_output[co++] = hexd;
3048 if (chars_output + co > 243)
3050 fputs ("\"\n\t.STRING \"", file);
3051 chars_output = 0;
3053 fwrite (partial_output, 1, (size_t) co, file);
3054 chars_output += co;
3055 co = 0;
3057 fputs ("\"\n", file);
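/* For instance (illustrative): output_ascii (file, "hi\n", 3) emits

       .STRING "hi\x0a"

   with the newline rendered as a \xNN escape; longer strings are
   split across multiple .STRING directives to respect the input
   line limit described above.  */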
3060 /* Try to rewrite floating point comparisons & branches to avoid
3061 useless add,tr insns.
3063 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3064 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3065 first attempt to remove useless add,tr insns. It is zero
3066 for the second pass as reorg sometimes leaves bogus REG_DEAD
3067 notes lying around.
3069 When CHECK_NOTES is zero we can only eliminate add,tr insns
3070 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3071 instructions. */
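/* An illustrative sketch (not from the original): the pass looks for
   (set (reg:CCFP 0) (COMP ...)) whose result feeds a branch of the
   form (if_then_else (ne (reg 0) 0) (pc) (label_ref ...)), i.e. a
   reversed branch.  It flips COMP via reverse_condition_maybe_unordered
   and swaps the two if_then_else arms, turning the add,tr pair into an
   ordinary conditional branch.  */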
3072 static void
3073 remove_useless_addtr_insns (int check_notes)
3075 rtx insn;
3076 static int pass = 0;
3078 /* This is fairly cheap, so always run it when optimizing. */
3079 if (optimize > 0)
3081 int fcmp_count = 0;
3082 int fbranch_count = 0;
3084 /* Walk all the insns in this function looking for fcmp & fbranch
3085 instructions. Keep track of how many of each we find. */
3086 for (insn = get_insns (); insn; insn = next_insn (insn))
3088 rtx tmp;
3090 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3091 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3092 continue;
3094 tmp = PATTERN (insn);
3096 /* It must be a set. */
3097 if (GET_CODE (tmp) != SET)
3098 continue;
3100 /* If the destination is CCFP, then we've found an fcmp insn. */
3101 tmp = SET_DEST (tmp);
3102 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3104 fcmp_count++;
3105 continue;
3108 tmp = PATTERN (insn);
3109 /* If this is an fbranch instruction, bump the fbranch counter. */
3110 if (GET_CODE (tmp) == SET
3111 && SET_DEST (tmp) == pc_rtx
3112 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3113 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3114 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3115 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3117 fbranch_count++;
3118 continue;
3123 /* Find all floating point compare + branch insns. If possible,
3124 reverse the comparison & the branch to avoid add,tr insns. */
3125 for (insn = get_insns (); insn; insn = next_insn (insn))
3127 rtx tmp, next;
3129 /* Ignore anything that isn't an INSN. */
3130 if (GET_CODE (insn) != INSN)
3131 continue;
3133 tmp = PATTERN (insn);
3135 /* It must be a set. */
3136 if (GET_CODE (tmp) != SET)
3137 continue;
3139 /* The destination must be CCFP, which is register zero. */
3140 tmp = SET_DEST (tmp);
3141 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3142 continue;
3144 /* INSN should be a set of CCFP.
3146 See if the result of this insn is used in a reversed FP
3147 conditional branch. If so, reverse our condition and
3148 the branch. Doing so avoids useless add,tr insns. */
3149 next = next_insn (insn);
3150 while (next)
3152 /* Jumps, calls and labels stop our search. */
3153 if (GET_CODE (next) == JUMP_INSN
3154 || GET_CODE (next) == CALL_INSN
3155 || GET_CODE (next) == CODE_LABEL)
3156 break;
3158 /* As does another fcmp insn. */
3159 if (GET_CODE (next) == INSN
3160 && GET_CODE (PATTERN (next)) == SET
3161 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3162 && REGNO (SET_DEST (PATTERN (next))) == 0)
3163 break;
3165 next = next_insn (next);
3168 /* Is NEXT a branch? */
3169 if (next
3170 && GET_CODE (next) == JUMP_INSN)
3172 rtx pattern = PATTERN (next);
3174 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3175 and CCFP dies, then reverse our conditional and the branch
3176 to avoid the add,tr. */
3177 if (GET_CODE (pattern) == SET
3178 && SET_DEST (pattern) == pc_rtx
3179 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3180 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3181 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3182 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3183 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3184 && (fcmp_count == fbranch_count
3185 || (check_notes
3186 && find_regno_note (next, REG_DEAD, 0))))
3188 /* Reverse the branch. */
3189 tmp = XEXP (SET_SRC (pattern), 1);
3190 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3191 XEXP (SET_SRC (pattern), 2) = tmp;
3192 INSN_CODE (next) = -1;
3194 /* Reverse our condition. */
3195 tmp = PATTERN (insn);
3196 PUT_CODE (XEXP (tmp, 1),
3197 (reverse_condition_maybe_unordered
3198 (GET_CODE (XEXP (tmp, 1)))));
3204 pass = !pass;
3208 /* You may have trouble believing this, but this is the 32-bit HP-PA
3209 stack layout. Wow.
3211 Offset Contents
3213 Variable arguments (optional; any number may be allocated)
3215 SP-(4*(N+9)) arg word N
3217 SP-56 arg word 5
3218 SP-52 arg word 4
3220 Fixed arguments (must be allocated; may remain unused)
3222 SP-48 arg word 3
3223 SP-44 arg word 2
3224 SP-40 arg word 1
3225 SP-36 arg word 0
3227 Frame Marker
3229 SP-32 External Data Pointer (DP)
3230 SP-28 External sr4
3231 SP-24 External/stub RP (RP')
3232 SP-20 Current RP
3233 SP-16 Static Link
3234 SP-12 Clean up
3235 SP-8 Calling Stub RP (RP'')
3236 SP-4 Previous SP
3238 Top of Frame
3240 SP-0 Stack Pointer (points to next available address)
3244 /* This function saves registers as follows. Registers marked with ' are
3245 this function's registers (as opposed to the previous function's).
3246 If a frame_pointer isn't needed, r4 is saved as a general register;
3247 the space for the frame pointer is still allocated, though, to keep
3248 things simple.
3251 Top of Frame
3253 SP (FP') Previous FP
3254 SP + 4 Alignment filler (sigh)
3255 SP + 8 Space for locals reserved here.
3259 SP + n All call saved registers used.
3263 SP + o All call saved fp registers used.
3267 SP + p (SP') points to next available address.
3271 /* Global variables set by output_function_prologue(). */
3272 /* Size of frame. Need to know this to emit return insns from
3273 leaf procedures. */
3274 static HOST_WIDE_INT actual_fsize, local_fsize;
3275 static int save_fregs;
3277 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3278 Handle case where DISP > 8k by using the add_high_const patterns.
3280 Note that in the DISP > 8k case, we will leave the high part of the
3281 address in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
3283 static void
3284 store_reg (int reg, HOST_WIDE_INT disp, int base)
3286 rtx insn, dest, src, basereg;
3288 src = gen_rtx_REG (word_mode, reg);
3289 basereg = gen_rtx_REG (Pmode, base);
3290 if (VAL_14_BITS_P (disp))
3292 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3293 insn = emit_move_insn (dest, src);
3295 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3297 rtx delta = GEN_INT (disp);
3298 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3300 emit_move_insn (tmpreg, delta);
3301 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3302 if (DO_FRAME_NOTES)
3304 REG_NOTES (insn)
3305 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3306 gen_rtx_SET (VOIDmode, tmpreg,
3307 gen_rtx_PLUS (Pmode, basereg, delta)),
3308 REG_NOTES (insn));
3309 RTX_FRAME_RELATED_P (insn) = 1;
3311 dest = gen_rtx_MEM (word_mode, tmpreg);
3312 insn = emit_move_insn (dest, src);
3314 else
3316 rtx delta = GEN_INT (disp);
3317 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3318 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3320 emit_move_insn (tmpreg, high);
3321 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3322 insn = emit_move_insn (dest, src);
3323 if (DO_FRAME_NOTES)
3325 REG_NOTES (insn)
3326 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3327 gen_rtx_SET (VOIDmode,
3328 gen_rtx_MEM (word_mode,
3329 gen_rtx_PLUS (word_mode, basereg,
3330 delta)),
3331 src),
3332 REG_NOTES (insn));
3336 if (DO_FRAME_NOTES)
3337 RTX_FRAME_RELATED_P (insn) = 1;
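/* A sketch (illustrative, not in the original source) of what store_reg
   emits for a 32-bit displacement outside the 14-bit range:

       addil L'disp,%base      ; high part, result in %r1
       stw %src,R'disp(%r1)    ; LO_SUM store

   whereas a small displacement is a single "stw %src,disp(%base)".  */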
3340 /* Emit RTL to store REG at the memory location specified by BASE and then
3341 add MOD to BASE. MOD must be <= 8k. */
3343 static void
3344 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3346 rtx insn, basereg, srcreg, delta;
3348 gcc_assert (VAL_14_BITS_P (mod));
3350 basereg = gen_rtx_REG (Pmode, base);
3351 srcreg = gen_rtx_REG (word_mode, reg);
3352 delta = GEN_INT (mod);
3354 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3355 if (DO_FRAME_NOTES)
3357 RTX_FRAME_RELATED_P (insn) = 1;
3359 /* RTX_FRAME_RELATED_P must be set on each frame related set
3360 in a parallel with more than one element. */
3361 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3362 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3366 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3367 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3368 whether to add a frame note or not.
3370 In the DISP > 8k case, we leave the high part of the address in %r1.
3371 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3373 static void
3374 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3376 rtx insn;
3378 if (VAL_14_BITS_P (disp))
3380 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3381 plus_constant (gen_rtx_REG (Pmode, base), disp));
3383 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3385 rtx basereg = gen_rtx_REG (Pmode, base);
3386 rtx delta = GEN_INT (disp);
3387 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3389 emit_move_insn (tmpreg, delta);
3390 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3391 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3392 if (DO_FRAME_NOTES)
3393 REG_NOTES (insn)
3394 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3395 gen_rtx_SET (VOIDmode, tmpreg,
3396 gen_rtx_PLUS (Pmode, basereg, delta)),
3397 REG_NOTES (insn));
3399 else
3401 rtx basereg = gen_rtx_REG (Pmode, base);
3402 rtx delta = GEN_INT (disp);
3403 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3405 emit_move_insn (tmpreg,
3406 gen_rtx_PLUS (Pmode, basereg,
3407 gen_rtx_HIGH (Pmode, delta)));
3408 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3409 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3412 if (DO_FRAME_NOTES && note)
3413 RTX_FRAME_RELATED_P (insn) = 1;
3416 HOST_WIDE_INT
3417 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3419 int freg_saved = 0;
3420 int i, j;
3422 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3423 be consistent with the rounding and size calculation done here.
3424 Change them at the same time. */
3426 /* We do our own stack alignment. First, round the size of the
3427 stack locals up to a word boundary. */
3428 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3430 /* Space for previous frame pointer + filler. If any frame is
3431 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3432 waste some space here for the sake of HP compatibility. The
3433 first slot is only used when the frame pointer is needed. */
3434 if (size || frame_pointer_needed)
3435 size += STARTING_FRAME_OFFSET;
3437 /* If the current function calls __builtin_eh_return, then we need
3438 to allocate stack space for registers that will hold data for
3439 the exception handler. */
3440 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3442 unsigned int i;
3444 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3445 continue;
3446 size += i * UNITS_PER_WORD;
3449 /* Account for space used by the callee general register saves. */
3450 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3451 if (regs_ever_live[i])
3452 size += UNITS_PER_WORD;
3454 /* Account for space used by the callee floating point register saves. */
3455 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3456 if (regs_ever_live[i]
3457 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3459 freg_saved = 1;
3461 /* We always save both halves of the FP register, so always
3462 increment the frame size by 8 bytes. */
3463 size += 8;
3466 /* If any of the floating registers are saved, account for the
3467 alignment needed for the floating point register save block. */
3468 if (freg_saved)
3470 size = (size + 7) & ~7;
3471 if (fregs_live)
3472 *fregs_live = 1;
3475 /* The various ABIs include space for the outgoing parameters in the
3476 size of the current function's stack frame. We don't need to align
3477 for the outgoing arguments as their alignment is set by the final
3478 rounding for the frame as a whole. */
3479 size += current_function_outgoing_args_size;
3481 /* Allocate space for the fixed frame marker. This space must be
3482 allocated for any function that makes calls or allocates
3483 stack space. */
3484 if (!current_function_is_leaf || size)
3485 size += TARGET_64BIT ? 48 : 32;
3487 /* Finally, round to the preferred stack boundary. */
3488 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3489 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3492 /* Generate the assembly code for function entry. FILE is a stdio
3493 stream to output the code to. SIZE is an int: how many units of
3494 temporary storage to allocate.
3496 Refer to the array `regs_ever_live' to determine which registers to
3497 save; `regs_ever_live[I]' is nonzero if register number I is ever
3498 used in the function. This function is responsible for knowing
3499 which registers should not be saved even if used. */
3501 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3502 of memory. If any fpu reg is used in the function, we allocate
3503 such a block here, at the bottom of the frame, just in case it's needed.
3505 If this function is a leaf procedure, then we may choose not
3506 to do a "save" insn. The decision about whether or not
3507 to do this is made in regclass.c. */
3509 static void
3510 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3512 /* The function's label and associated .PROC must never be
3513 separated and must be output *after* any profiling declarations
3514 to avoid changing spaces/subspaces within a procedure. */
3515 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3516 fputs ("\t.PROC\n", file);
3518 /* hppa_expand_prologue does the dirty work now. We just need
3519 to output the assembler directives which denote the start
3520 of a function. */
3521 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3522 if (regs_ever_live[2])
3523 fputs (",CALLS,SAVE_RP", file);
3524 else
3525 fputs (",NO_CALLS", file);
3527 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3528 at the beginning of the frame and that it is used as the frame
3529 pointer for the frame. We do this because our current frame
3530 layout doesn't conform to that specified in the HP runtime
3531 documentation and we need a way to indicate to programs such as
3532 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3533 isn't used by HP compilers but is supported by the assembler.
3534 However, SAVE_SP is supposed to indicate that the previous stack
3535 pointer has been saved in the frame marker. */
3536 if (frame_pointer_needed)
3537 fputs (",SAVE_SP", file);
3539 /* Pass on information about the number of callee register saves
3540 performed in the prologue.
3542 The compiler is supposed to pass the highest register number
3543 saved, the assembler then has to adjust that number before
3544 entering it into the unwind descriptor (to account for any
3545 caller saved registers with lower register numbers than the
3546 first callee saved register). */
3547 if (gr_saved)
3548 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3550 if (fr_saved)
3551 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3553 fputs ("\n\t.ENTRY\n", file);
3555 remove_useless_addtr_insns (0);
3558 void
3559 hppa_expand_prologue (void)
3561 int merge_sp_adjust_with_store = 0;
3562 HOST_WIDE_INT size = get_frame_size ();
3563 HOST_WIDE_INT offset;
3564 int i;
3565 rtx insn, tmpreg;
3567 gr_saved = 0;
3568 fr_saved = 0;
3569 save_fregs = 0;
3571 /* Compute total size for frame pointer, filler, locals and rounding to
3572 the next word boundary. Similar code appears in compute_frame_size
3573 and must be changed in tandem with this code. */
3574 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3575 if (local_fsize || frame_pointer_needed)
3576 local_fsize += STARTING_FRAME_OFFSET;
3578 actual_fsize = compute_frame_size (size, &save_fregs);
3580 /* Compute a few things we will use often. */
3581 tmpreg = gen_rtx_REG (word_mode, 1);
3583 /* Save RP first. The calling conventions manual states RP will
3584 always be stored into the caller's frame at sp - 20 or sp - 16
3585 depending on which ABI is in use. */
3586 if (regs_ever_live[2] || current_function_calls_eh_return)
3587 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3589 /* Allocate the local frame and set up the frame pointer if needed. */
3590 if (actual_fsize != 0)
3592 if (frame_pointer_needed)
3594 /* Copy the old frame pointer temporarily into %r1. Set up the
3595 new stack pointer, then store away the saved old frame pointer
3596 into the stack at sp and at the same time update the stack
3597 pointer by actual_fsize bytes. Two versions, first
3598 handles small (<8k) frames. The second handles large (>=8k)
3599 frames. */
3600 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3601 if (DO_FRAME_NOTES)
3602 RTX_FRAME_RELATED_P (insn) = 1;
3604 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3605 if (DO_FRAME_NOTES)
3606 RTX_FRAME_RELATED_P (insn) = 1;
3608 if (VAL_14_BITS_P (actual_fsize))
3609 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3610 else
3612 /* It is incorrect to store the saved frame pointer at *sp,
3613 then increment sp (writes beyond the current stack boundary).
3615 So instead use stwm to store at *sp and post-increment the
3616 stack pointer as an atomic operation. Then increment sp to
3617 finish allocating the new frame. */
3618 HOST_WIDE_INT adjust1 = 8192 - 64;
3619 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3621 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3622 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3623 adjust2, 1);
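/* For instance (illustrative): actual_fsize = 20000 gives
   adjust1 = 8128 and adjust2 = 11872, i.e. an atomic store of %r1
   with post-modify of %sp by 8128, followed by a plain pointer
   adjustment of 11872.  */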
3626 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3627 we need to store the previous stack pointer (frame pointer)
3628 into the frame marker on targets that use the HP unwind
3629 library. This allows the HP unwind library to be used to
3630 unwind GCC frames. However, we are not fully compatible
3631 with the HP library because our frame layout differs from
3632 that specified in the HP runtime specification.
3634 We don't want a frame note on this instruction as the frame
3635 marker moves during dynamic stack allocation.
3637 This instruction also serves as a blockage to prevent
3638 register spills from being scheduled before the stack
3639 pointer is raised. This is necessary as we store
3640 registers using the frame pointer as a base register,
3641 and the frame pointer is set before sp is raised. */
3642 if (TARGET_HPUX_UNWIND_LIBRARY)
3644 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3645 GEN_INT (TARGET_64BIT ? -8 : -4));
3647 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3648 frame_pointer_rtx);
3650 else
3651 emit_insn (gen_blockage ());
3653 /* No frame pointer needed. */
3654 else
3656 /* In some cases we can perform the first callee register save
3657 and allocating the stack frame at the same time. If so, just
3658 make a note of it and defer allocating the frame until saving
3659 the callee registers. */
3660 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3661 merge_sp_adjust_with_store = 1;
3662 /* Cannot optimize. Adjust the stack frame by actual_fsize
3663 bytes. */
3664 else
3665 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3666 actual_fsize, 1);
3670 /* Normal register save.
3672 Do not save the frame pointer in the frame_pointer_needed case. It
3673 was done earlier. */
3674 if (frame_pointer_needed)
3676 offset = local_fsize;
3678 /* Saving the EH return data registers in the frame is the simplest
3679 way to get the frame unwind information emitted. We put them
3680 just before the general registers. */
3681 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3683 unsigned int i, regno;
3685 for (i = 0; ; ++i)
3687 regno = EH_RETURN_DATA_REGNO (i);
3688 if (regno == INVALID_REGNUM)
3689 break;
3691 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3692 offset += UNITS_PER_WORD;
3696 for (i = 18; i >= 4; i--)
3697 if (regs_ever_live[i] && ! call_used_regs[i])
3699 store_reg (i, offset, FRAME_POINTER_REGNUM);
3700 offset += UNITS_PER_WORD;
3701 gr_saved++;
3703 /* Account for %r3 which is saved in a special place. */
3704 gr_saved++;
3706 /* No frame pointer needed. */
3707 else
3709 offset = local_fsize - actual_fsize;
3711 /* Saving the EH return data registers in the frame is the simplest
3712 way to get the frame unwind information emitted. */
3713 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3715 unsigned int i, regno;
3717 for (i = 0; ; ++i)
3719 regno = EH_RETURN_DATA_REGNO (i);
3720 if (regno == INVALID_REGNUM)
3721 break;
3723 /* If merge_sp_adjust_with_store is nonzero, then we can
3724 optimize the first save. */
3725 if (merge_sp_adjust_with_store)
3727 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3728 merge_sp_adjust_with_store = 0;
3730 else
3731 store_reg (regno, offset, STACK_POINTER_REGNUM);
3732 offset += UNITS_PER_WORD;
3736 for (i = 18; i >= 3; i--)
3737 if (regs_ever_live[i] && ! call_used_regs[i])
3739 /* If merge_sp_adjust_with_store is nonzero, then we can
3740 optimize the first GR save. */
3741 if (merge_sp_adjust_with_store)
3743 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3744 merge_sp_adjust_with_store = 0;
3746 else
3747 store_reg (i, offset, STACK_POINTER_REGNUM);
3748 offset += UNITS_PER_WORD;
3749 gr_saved++;
3752 /* If we wanted to merge the SP adjustment with a GR save, but we never
3753 did any GR saves, then just emit the adjustment here. */
3754 if (merge_sp_adjust_with_store)
3755 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3756 actual_fsize, 1);
3759 /* The hppa calling conventions say that %r19, the pic offset
3760 register, is saved at sp - 32 (in this function's frame)
3761 when generating PIC code. FIXME: What is the correct thing
3762 to do for functions which make no calls and allocate no
3763 frame? Do we need to allocate a frame, or can we just omit
3764 the save? For now we'll just omit the save.
3766 We don't want a note on this insn as the frame marker can
3767 move if there is a dynamic stack allocation. */
3768 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3770 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3772 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3776 /* Align pointer properly (doubleword boundary). */
3777 offset = (offset + 7) & ~7;
3779 /* Floating point register store. */
3780 if (save_fregs)
3782 rtx base;
3784 /* First get the frame or stack pointer to the start of the FP register
3785 save area. */
3786 if (frame_pointer_needed)
3788 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3789 base = frame_pointer_rtx;
3791 else
3793 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3794 base = stack_pointer_rtx;
3797 /* Now actually save the FP registers. */
3798 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3800 if (regs_ever_live[i]
3801 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3803 rtx addr, insn, reg;
3804 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3805 reg = gen_rtx_REG (DFmode, i);
3806 insn = emit_move_insn (addr, reg);
3807 if (DO_FRAME_NOTES)
3809 RTX_FRAME_RELATED_P (insn) = 1;
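	      /* Attach a frame-related note describing where the value
		 was saved: as a single DFmode store in 64-bit code, or
		 as a pair of SFmode stores of the left and right word
		 halves of the FP register in 32-bit code.  */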
3810 if (TARGET_64BIT)
3812 rtx mem = gen_rtx_MEM (DFmode,
3813 plus_constant (base, offset));
3814 REG_NOTES (insn)
3815 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3816 gen_rtx_SET (VOIDmode, mem, reg),
3817 REG_NOTES (insn));
3819 else
3821 rtx meml = gen_rtx_MEM (SFmode,
3822 plus_constant (base, offset));
3823 rtx memr = gen_rtx_MEM (SFmode,
3824 plus_constant (base, offset + 4));
3825 rtx regl = gen_rtx_REG (SFmode, i);
3826 rtx regr = gen_rtx_REG (SFmode, i + 1);
3827 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3828 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3829 rtvec vec;
3831 RTX_FRAME_RELATED_P (setl) = 1;
3832 RTX_FRAME_RELATED_P (setr) = 1;
3833 vec = gen_rtvec (2, setl, setr);
3834 REG_NOTES (insn)
3835 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3836 gen_rtx_SEQUENCE (VOIDmode, vec),
3837 REG_NOTES (insn));
3840 offset += GET_MODE_SIZE (DFmode);
3841 fr_saved++;
3847 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3848 Handle the case where DISP > 8k by using the add_high_const patterns. */
3850 static void
3851 load_reg (int reg, HOST_WIDE_INT disp, int base)
3853 rtx dest = gen_rtx_REG (word_mode, reg);
3854 rtx basereg = gen_rtx_REG (Pmode, base);
3855 rtx src;
3857 if (VAL_14_BITS_P (disp))
3858 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3859 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3861 rtx delta = GEN_INT (disp);
3862 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3864 emit_move_insn (tmpreg, delta);
3865 if (TARGET_DISABLE_INDEXING)
3867 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3868 src = gen_rtx_MEM (word_mode, tmpreg);
3870 else
3871 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3873 else
3875 rtx delta = GEN_INT (disp);
3876 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3877 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3879 emit_move_insn (tmpreg, high);
3880 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3883 emit_move_insn (dest, src);
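/* Roughly, the sequences load_reg produces, assuming 32-bit code and
   base register %r30 (illustrative only):

       DISP fits in 14 bits:    ldw DISP(%r30),%reg
       otherwise:               addil L'DISP,%r30
				ldw R'DISP(%r1),%reg

   In 64-bit code a displacement wider than 32 bits is instead first
   materialized in %r1 and used as (or added to) the address.  */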
3886 /* Update the total code bytes output to the text section. */
3888 static void
3889 update_total_code_bytes (int nbytes)
3891 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3892 && !IN_NAMED_SECTION_P (cfun->decl))
3894 if (INSN_ADDRESSES_SET_P ())
3896 unsigned long old_total = total_code_bytes;
3898 total_code_bytes += nbytes;
3900 /* Be prepared to handle overflows. */
3901 if (old_total > total_code_bytes)
3902 total_code_bytes = -1;
3904 else
3905 total_code_bytes = -1;
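/* Example of the overflow handling above: total_code_bytes is
   unsigned, so if adding NBYTES wraps past ULONG_MAX the new total
   compares less than OLD_TOTAL and we saturate to -1, meaning
   "unknown".  */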
3909 /* This function generates the assembly code for function exit.
3910 Args are as for output_function_prologue ().
3912 The function epilogue should not depend on the current stack
3913 pointer! It should use the frame pointer only. This is mandatory
3914 because of alloca; we also take advantage of it to omit stack
3915 adjustments before returning. */
3917 static void
3918 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3920 rtx insn = get_last_insn ();
3922 last_address = 0;
3924 /* hppa_expand_epilogue does the dirty work now. We just need
3925 to output the assembler directives which denote the end
3926 of a function.
3928 To make debuggers happy, emit a nop if the epilogue was completely
3929 eliminated due to a volatile call as the last insn in the
3930 current function. That way the return address (in %r2) will
3931 always point to a valid instruction in the current function. */
3933 /* Get the last real insn. */
3934 if (GET_CODE (insn) == NOTE)
3935 insn = prev_real_insn (insn);
3937 /* If it is a sequence, then look inside. */
3938 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
3939 insn = XVECEXP (PATTERN (insn), 0, 0);
3941 /* If insn is a CALL_INSN, then it must be a call to a volatile
3942 function (otherwise there would be epilogue insns). */
3943 if (insn && GET_CODE (insn) == CALL_INSN)
3945 fputs ("\tnop\n", file);
3946 last_address += 4;
3949 fputs ("\t.EXIT\n\t.PROCEND\n", file);
3951 if (TARGET_SOM && TARGET_GAS)
3953 /* We are done with this subspace except possibly for some additional
3954 debug information. Forget that we are in this subspace to ensure
3955 that the next function is output in its own subspace. */
3956 in_section = NULL;
3957 cfun->machine->in_nsubspa = 2;
3960 if (INSN_ADDRESSES_SET_P ())
3962 insn = get_last_nonnote_insn ();
3963 last_address += INSN_ADDRESSES (INSN_UID (insn));
3964 if (INSN_P (insn))
3965 last_address += insn_default_length (insn);
3966 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
3967 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
3970 /* Finally, update the total number of code bytes output so far. */
3971 update_total_code_bytes (last_address);
3974 void
3975 hppa_expand_epilogue (void)
3977 rtx tmpreg;
3978 HOST_WIDE_INT offset;
3979 HOST_WIDE_INT ret_off = 0;
3980 int i;
3981 int merge_sp_adjust_with_load = 0;
3983 /* We will use this often. */
3984 tmpreg = gen_rtx_REG (word_mode, 1);
3986 /* Try to restore RP early to avoid load/use interlocks when
3987 RP gets used in the return (bv) instruction. This appears to still
3988 be necessary even when we schedule the prologue and epilogue. */
3989 if (regs_ever_live [2] || current_function_calls_eh_return)
3991 ret_off = TARGET_64BIT ? -16 : -20;
3992 if (frame_pointer_needed)
3994 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
3995 ret_off = 0;
3997 else
3999 /* No frame pointer, and stack is smaller than 8k. */
4000 if (VAL_14_BITS_P (ret_off - actual_fsize))
4002 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4003 ret_off = 0;
4008 /* General register restores. */
4009 if (frame_pointer_needed)
4011 offset = local_fsize;
4013 /* If the current function calls __builtin_eh_return, then we need
4014 to restore the saved EH data registers. */
4015 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4017 unsigned int i, regno;
4019 for (i = 0; ; ++i)
4021 regno = EH_RETURN_DATA_REGNO (i);
4022 if (regno == INVALID_REGNUM)
4023 break;
4025 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4026 offset += UNITS_PER_WORD;
4030 for (i = 18; i >= 4; i--)
4031 if (regs_ever_live[i] && ! call_used_regs[i])
4033 load_reg (i, offset, FRAME_POINTER_REGNUM);
4034 offset += UNITS_PER_WORD;
4037 else
4039 offset = local_fsize - actual_fsize;
4041 /* If the current function calls __builtin_eh_return, then we need
4042 to restore the saved EH data registers. */
4043 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4045 unsigned int i, regno;
4047 for (i = 0; ; ++i)
4049 regno = EH_RETURN_DATA_REGNO (i);
4050 if (regno == INVALID_REGNUM)
4051 break;
4053 /* Only for the first load.
4054 merge_sp_adjust_with_load holds the register load
4055 with which we will merge the sp adjustment. */
4056 if (merge_sp_adjust_with_load == 0
4057 && local_fsize == 0
4058 && VAL_14_BITS_P (-actual_fsize))
4059 merge_sp_adjust_with_load = regno;
4060 else
4061 load_reg (regno, offset, STACK_POINTER_REGNUM);
4062 offset += UNITS_PER_WORD;
4066 for (i = 18; i >= 3; i--)
4068 if (regs_ever_live[i] && ! call_used_regs[i])
4070 /* Only for the first load.
4071 merge_sp_adjust_with_load holds the register load
4072 with which we will merge the sp adjustment. */
4073 if (merge_sp_adjust_with_load == 0
4074 && local_fsize == 0
4075 && VAL_14_BITS_P (-actual_fsize))
4076 merge_sp_adjust_with_load = i;
4077 else
4078 load_reg (i, offset, STACK_POINTER_REGNUM);
4079 offset += UNITS_PER_WORD;
4084 /* Align pointer properly (doubleword boundary). */
4085 offset = (offset + 7) & ~7;
4087 /* FP register restores. */
4088 if (save_fregs)
4090 /* Adjust the register to index off of. */
4091 if (frame_pointer_needed)
4092 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4093 else
4094 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4096 /* Actually do the restores now. */
4097 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4098 if (regs_ever_live[i]
4099 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4101 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4102 rtx dest = gen_rtx_REG (DFmode, i);
4103 emit_move_insn (dest, src);
4107 /* Emit a blockage insn here to keep these insns from being moved to
4108 an earlier spot in the epilogue, or into the main instruction stream.
4110 This is necessary as we must not cut the stack back before all the
4111 restores are finished. */
4112 emit_insn (gen_blockage ());
4114 /* Reset stack pointer (and possibly frame pointer). The stack
4115 pointer is initially set to fp + 64 to avoid a race condition. */
4116 if (frame_pointer_needed)
4118 rtx delta = GEN_INT (-64);
4120 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4121 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4123 /* If we were deferring a callee register restore, do it now. */
4124 else if (merge_sp_adjust_with_load)
4126 rtx delta = GEN_INT (-actual_fsize);
4127 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4129 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4131 else if (actual_fsize != 0)
4132 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4133 - actual_fsize, 0);
4135 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4136 frame greater than 8k), do so now. */
4137 if (ret_off != 0)
4138 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4140 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4142 rtx sa = EH_RETURN_STACKADJ_RTX;
4144 emit_insn (gen_blockage ());
4145 emit_insn (TARGET_64BIT
4146 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4147 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4151 rtx
4152 hppa_pic_save_rtx (void)
4154 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4157 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4158 #define NO_DEFERRED_PROFILE_COUNTERS 0
4159 #endif
4162 /* Vector of funcdef numbers. */
4163 static VEC(int,heap) *funcdef_nos;
4165 /* Output deferred profile counters. */
4166 static void
4167 output_deferred_profile_counters (void)
4169 unsigned int i;
4170 int align, n;
4172 if (VEC_empty (int, funcdef_nos))
4173 return;
4175 switch_to_section (data_section);
4176 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4177 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4179 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4181 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4182 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4185 VEC_free (int, heap, funcdef_nos);
4188 void
4189 hppa_profile_hook (int label_no)
4191 /* We use SImode for the address of the function in both 32 and
4192 64-bit code to avoid having to provide DImode versions of the
4193 lcla2 and load_offset_label_address insn patterns. */
4194 rtx reg = gen_reg_rtx (SImode);
4195 rtx label_rtx = gen_label_rtx ();
4196 rtx begin_label_rtx, call_insn;
4197 char begin_label_name[16];
4199 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4200 label_no);
4201 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4203 if (TARGET_64BIT)
4204 emit_move_insn (arg_pointer_rtx,
4205 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4206 GEN_INT (64)));
4208 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4210 /* The address of the function is loaded into %r25 with an instruction-
4211 relative sequence that avoids the use of relocations. The sequence
4212 is split so that the load_offset_label_address instruction can
4213 occupy the delay slot of the call to _mcount. */
4214 if (TARGET_PA_20)
4215 emit_insn (gen_lcla2 (reg, label_rtx));
4216 else
4217 emit_insn (gen_lcla1 (reg, label_rtx));
4219 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4220 reg, begin_label_rtx, label_rtx));
4222 #if !NO_DEFERRED_PROFILE_COUNTERS
4224 rtx count_label_rtx, addr, r24;
4225 char count_label_name[16];
4227 VEC_safe_push (int, heap, funcdef_nos, label_no);
4228 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4229 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4231 addr = force_reg (Pmode, count_label_rtx);
4232 r24 = gen_rtx_REG (Pmode, 24);
4233 emit_move_insn (r24, addr);
4235 call_insn =
4236 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4237 gen_rtx_SYMBOL_REF (Pmode,
4238 "_mcount")),
4239 GEN_INT (TARGET_64BIT ? 24 : 12)));
4241 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4243 #else
4245 call_insn =
4246 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4247 gen_rtx_SYMBOL_REF (Pmode,
4248 "_mcount")),
4249 GEN_INT (TARGET_64BIT ? 16 : 8)));
4251 #endif
4253 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4254 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4256 /* Indicate the _mcount call cannot throw, nor will it execute a
4257 non-local goto. */
4258 REG_NOTES (call_insn)
4259 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4262 /* Fetch the return address for the frame COUNT steps up from
4263 the current frame, after the prologue. FRAMEADDR is the
4264 frame pointer of the COUNT frame.
4266 We want to ignore any export stub remnants here. To handle this,
4267 we examine the code at the return address, and if it is an export
4268 stub, we return a memory rtx for the stub return address stored
4269 at frame-24.
4271 The value returned is used in two different ways:
4273 1. To find a function's caller.
4275 2. To change the return address for a function.
4277 This function handles most instances of case 1; however, it will
4278 fail if there are two levels of stubs to execute on the return
4279 path. The only way I believe that can happen is if the return value
4280 needs a parameter relocation, which never happens for C code.
4282 This function handles most instances of case 2; however, it will
4283 fail if we did not originally have stub code on the return path
4284 but will need stub code on the new return path. This can happen if
4285 the caller & callee are both in the main program, but the new
4286 return location is in a shared library. */
4288 rtx
4289 return_addr_rtx (int count, rtx frameaddr)
4291 rtx label;
4292 rtx rp;
4293 rtx saved_rp;
4294 rtx ins;
4296 if (count != 0)
4297 return NULL_RTX;
4299 rp = get_hard_reg_initial_val (Pmode, 2);
4301 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4302 return rp;
4304 saved_rp = gen_reg_rtx (Pmode);
4305 emit_move_insn (saved_rp, rp);
4307 /* Get pointer to the instruction stream. We have to mask out the
4308 privilege level from the two low order bits of the return address
4309 pointer here so that ins will point to the start of the first
4310 instruction that would have been executed if we returned. */
4311 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4312 label = gen_label_rtx ();
4314 /* Check the instruction stream at the normal return address for the
4315 export stub:
4317 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4318 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4319 0x00011820 | stub+16: mtsp r1,sr0
4320 0xe0400002 | stub+20: be,n 0(sr0,rp)
4322 If it is an export stub, then our return address is really in
4323 -24[frameaddr]. */
4325 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4326 NULL_RTX, SImode, 1);
4327 emit_jump_insn (gen_bne (label));
4329 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4330 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4331 emit_jump_insn (gen_bne (label));
4333 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4334 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4335 emit_jump_insn (gen_bne (label));
4337 /* 0xe0400002 must be specified as -532676606 so that it won't be
4338 rejected as an invalid immediate operand on 64-bit hosts. */
4339 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4340 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
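  /* A quick check of that constant: 0xe0400002 is 3762290690, and
     3762290690 - 2^32 = -532676606, so the negative decimal form
     denotes the same 32-bit pattern.  */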
4342 /* If there is no export stub then just use the value saved from
4343 the return pointer register. */
4345 emit_jump_insn (gen_bne (label));
4347 /* Here we know that our return address points to an export
4348 stub. We don't want to return the address of the export stub,
4349 but rather the return address of the export stub. That return
4350 address is stored at -24[frameaddr]. */
4352 emit_move_insn (saved_rp,
4353 gen_rtx_MEM (Pmode,
4354 memory_address (Pmode,
4355 plus_constant (frameaddr,
4356 -24))));
4358 emit_label (label);
4359 return saved_rp;
4362 /* This is only valid once reload has completed because it depends on
4363 knowing exactly how much (if any) frame there is and...
4365 It's only valid if there is no frame marker to de-allocate and...
4367 It's only valid if %r2 hasn't been saved into the caller's frame
4368 (we're not profiling and %r2 isn't live anywhere). */
4369 int
4370 hppa_can_use_return_insn_p (void)
4372 return (reload_completed
4373 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4374 && ! regs_ever_live[2]
4375 && ! frame_pointer_needed);
4378 void
4379 emit_bcond_fp (enum rtx_code code, rtx operand0)
4381 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4382 gen_rtx_IF_THEN_ELSE (VOIDmode,
4383 gen_rtx_fmt_ee (code,
4384 VOIDmode,
4385 gen_rtx_REG (CCFPmode, 0),
4386 const0_rtx),
4387 gen_rtx_LABEL_REF (VOIDmode, operand0),
4388 pc_rtx)));
4392 rtx
4393 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4395 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4396 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4399 /* Adjust the cost of a scheduling dependency. Return the new cost of
4400 the dependency LINK of INSN on DEP_INSN. COST is the current cost. */
4402 static int
4403 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4405 enum attr_type attr_type;
4407 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4408 true dependencies as they are described with bypasses now. */
4409 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4410 return cost;
4412 if (! recog_memoized (insn))
4413 return 0;
4415 attr_type = get_attr_type (insn);
4417 switch (REG_NOTE_KIND (link))
4419 case REG_DEP_ANTI:
4420 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4421 cycles later. */
4423 if (attr_type == TYPE_FPLOAD)
4425 rtx pat = PATTERN (insn);
4426 rtx dep_pat = PATTERN (dep_insn);
4427 if (GET_CODE (pat) == PARALLEL)
4429 /* This happens for the fldXs,mb patterns. */
4430 pat = XVECEXP (pat, 0, 0);
4432 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4433 /* If this happens, we have to extend this to schedule
4434 optimally. Return 0 for now. */
4435 return 0;
4437 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4439 if (! recog_memoized (dep_insn))
4440 return 0;
4441 switch (get_attr_type (dep_insn))
4443 case TYPE_FPALU:
4444 case TYPE_FPMULSGL:
4445 case TYPE_FPMULDBL:
4446 case TYPE_FPDIVSGL:
4447 case TYPE_FPDIVDBL:
4448 case TYPE_FPSQRTSGL:
4449 case TYPE_FPSQRTDBL:
4450 /* An fpload can't be issued until one cycle before a
4451 preceding arithmetic operation has finished if
4452 the target of the fpload is any of the sources
4453 (or destination) of the arithmetic operation. */
4454 return insn_default_latency (dep_insn) - 1;
4456 default:
4457 return 0;
4461 else if (attr_type == TYPE_FPALU)
4463 rtx pat = PATTERN (insn);
4464 rtx dep_pat = PATTERN (dep_insn);
4465 if (GET_CODE (pat) == PARALLEL)
4467 /* This happens for the fldXs,mb patterns. */
4468 pat = XVECEXP (pat, 0, 0);
4470 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4471 /* If this happens, we have to extend this to schedule
4472 optimally. Return 0 for now. */
4473 return 0;
4475 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4477 if (! recog_memoized (dep_insn))
4478 return 0;
4479 switch (get_attr_type (dep_insn))
4481 case TYPE_FPDIVSGL:
4482 case TYPE_FPDIVDBL:
4483 case TYPE_FPSQRTSGL:
4484 case TYPE_FPSQRTDBL:
4485 /* An ALU flop can't be issued until two cycles before a
4486 preceding divide or sqrt operation has finished if
4487 the target of the ALU flop is any of the sources
4488 (or destination) of the divide or sqrt operation. */
4489 return insn_default_latency (dep_insn) - 2;
4491 default:
4492 return 0;
4497 /* For other anti dependencies, the cost is 0. */
4498 return 0;
4500 case REG_DEP_OUTPUT:
4501 /* Output dependency; DEP_INSN writes a register that INSN writes some
4502 cycles later. */
4503 if (attr_type == TYPE_FPLOAD)
4505 rtx pat = PATTERN (insn);
4506 rtx dep_pat = PATTERN (dep_insn);
4507 if (GET_CODE (pat) == PARALLEL)
4509 /* This happens for the fldXs,mb patterns. */
4510 pat = XVECEXP (pat, 0, 0);
4512 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4513 /* If this happens, we have to extend this to schedule
4514 optimally. Return 0 for now. */
4515 return 0;
4517 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4519 if (! recog_memoized (dep_insn))
4520 return 0;
4521 switch (get_attr_type (dep_insn))
4523 case TYPE_FPALU:
4524 case TYPE_FPMULSGL:
4525 case TYPE_FPMULDBL:
4526 case TYPE_FPDIVSGL:
4527 case TYPE_FPDIVDBL:
4528 case TYPE_FPSQRTSGL:
4529 case TYPE_FPSQRTDBL:
4530 /* An fpload can't be issued until one cycle before a
4531 preceding arithmetic operation has finished if
4532 the target of the fpload is the destination of the
4533 arithmetic operation.
4535 Exception: For PA7100LC, PA7200 and PA7300, the cost
4536 is 3 cycles, unless they bundle together. We also
4537 pay the penalty if the second insn is an fpload. */
4538 return insn_default_latency (dep_insn) - 1;
4540 default:
4541 return 0;
4545 else if (attr_type == TYPE_FPALU)
4547 rtx pat = PATTERN (insn);
4548 rtx dep_pat = PATTERN (dep_insn);
4549 if (GET_CODE (pat) == PARALLEL)
4551 /* This happens for the fldXs,mb patterns. */
4552 pat = XVECEXP (pat, 0, 0);
4554 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4555 /* If this happens, we have to extend this to schedule
4556 optimally. Return 0 for now. */
4557 return 0;
4559 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4561 if (! recog_memoized (dep_insn))
4562 return 0;
4563 switch (get_attr_type (dep_insn))
4565 case TYPE_FPDIVSGL:
4566 case TYPE_FPDIVDBL:
4567 case TYPE_FPSQRTSGL:
4568 case TYPE_FPSQRTDBL:
4569 /* An ALU flop can't be issued until two cycles before a
4570 preceding divide or sqrt operation has finished if
4571 the target of the ALU flop is also the target of
4572 the divide or sqrt operation. */
4573 return insn_default_latency (dep_insn) - 2;
4575 default:
4576 return 0;
4581 /* For other output dependencies, the cost is 0. */
4582 return 0;
4584 default:
4585 gcc_unreachable ();
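/* Worked example for the cases above: if a preceding fpalu insn with
   a default latency of (say) 3 cycles uses %fr4 and INSN is an fpload
   that overwrites %fr4, the anti-dependence cost becomes 3 - 1 = 2,
   i.e. the load may issue one cycle before the arithmetic finishes.
   The register and latency here are illustrative.  */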
4589 /* Adjust scheduling priorities. We use this to try to keep addil
4590 and the next use of %r1 close together. */
4591 static int
4592 pa_adjust_priority (rtx insn, int priority)
4594 rtx set = single_set (insn);
4595 rtx src, dest;
4596 if (set)
4598 src = SET_SRC (set);
4599 dest = SET_DEST (set);
4600 if (GET_CODE (src) == LO_SUM
4601 && symbolic_operand (XEXP (src, 1), VOIDmode)
4602 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4603 priority >>= 3;
4605 else if (GET_CODE (src) == MEM
4606 && GET_CODE (XEXP (src, 0)) == LO_SUM
4607 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4608 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4609 priority >>= 1;
4611 else if (GET_CODE (dest) == MEM
4612 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4613 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4614 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4615 priority >>= 3;
4617 return priority;
4620 /* The 700 can only issue a single insn at a time.
4621 The 7XXX processors can issue two insns at a time.
4622 The 8000 can issue four insns at a time. */
4623 static int
4624 pa_issue_rate (void)
4626 switch (pa_cpu)
4628 case PROCESSOR_700: return 1;
4629 case PROCESSOR_7100: return 2;
4630 case PROCESSOR_7100LC: return 2;
4631 case PROCESSOR_7200: return 2;
4632 case PROCESSOR_7300: return 2;
4633 case PROCESSOR_8000: return 4;
4635 default:
4636 gcc_unreachable ();
4642 /* Return any length adjustment needed by INSN which already has its length
4643 computed as LENGTH. Return zero if no adjustment is necessary.
4645 For the PA: function calls, millicode calls, and backwards short
4646 conditional branches with unfilled delay slots need a 4-byte adjustment
4647 (to account for the NOP which will be inserted into the instruction stream).
4649 Also compute the length of an inline block move here as it is too
4650 complicated to express as a length attribute in pa.md. */
4651 int
4652 pa_adjust_insn_length (rtx insn, int length)
4654 rtx pat = PATTERN (insn);
4656 /* Jumps inside switch tables which have unfilled delay slots need
4657 adjustment. */
4658 if (GET_CODE (insn) == JUMP_INSN
4659 && GET_CODE (pat) == PARALLEL
4660 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4661 return 4;
4662 /* Millicode insn with an unfilled delay slot. */
4663 else if (GET_CODE (insn) == INSN
4664 && GET_CODE (pat) != SEQUENCE
4665 && GET_CODE (pat) != USE
4666 && GET_CODE (pat) != CLOBBER
4667 && get_attr_type (insn) == TYPE_MILLI)
4668 return 4;
4669 /* Block move pattern. */
4670 else if (GET_CODE (insn) == INSN
4671 && GET_CODE (pat) == PARALLEL
4672 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4673 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4674 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4675 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4676 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4677 return compute_movmem_length (insn) - 4;
4678 /* Block clear pattern. */
4679 else if (GET_CODE (insn) == INSN
4680 && GET_CODE (pat) == PARALLEL
4681 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4682 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4683 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4684 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4685 return compute_clrmem_length (insn) - 4;
4686 /* Conditional branch with an unfilled delay slot. */
4687 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4689 /* Adjust a short backwards conditional with an unfilled delay slot. */
4690 if (GET_CODE (pat) == SET
4691 && length == 4
4692 && ! forward_branch_p (insn))
4693 return 4;
4694 else if (GET_CODE (pat) == PARALLEL
4695 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4696 && length == 4)
4697 return 4;
4698 /* Adjust dbra insn with short backwards conditional branch with
4699 unfilled delay slot -- only for the case where the counter is in a
4700 general register. */
4701 else if (GET_CODE (pat) == PARALLEL
4702 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4703 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4704 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4705 && length == 4
4706 && ! forward_branch_p (insn))
4707 return 4;
4708 else
4709 return 0;
4711 return 0;
4714 /* Print operand X (an rtx) in assembler syntax to file FILE.
4715 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4716 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4718 void
4719 print_operand (FILE *file, rtx x, int code)
4721 switch (code)
4723 case '#':
4724 /* Output a 'nop' if there's nothing for the delay slot. */
4725 if (dbr_sequence_length () == 0)
4726 fputs ("\n\tnop", file);
4727 return;
4728 case '*':
4729 /* Output a nullification completer if there's nothing for the
4730 delay slot or nullification is requested. */
4731 if (dbr_sequence_length () == 0 ||
4732 (final_sequence &&
4733 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4734 fputs (",n", file);
4735 return;
4736 case 'R':
4737 /* Print out the second register name of a register pair.
4738 I.e., R (6) => 7. */
4739 fputs (reg_names[REGNO (x) + 1], file);
4740 return;
4741 case 'r':
4742 /* A register or zero. */
4743 if (x == const0_rtx
4744 || (x == CONST0_RTX (DFmode))
4745 || (x == CONST0_RTX (SFmode)))
4747 fputs ("%r0", file);
4748 return;
4750 else
4751 break;
4752 case 'f':
4753 /* A register or zero (floating point). */
4754 if (x == const0_rtx
4755 || (x == CONST0_RTX (DFmode))
4756 || (x == CONST0_RTX (SFmode)))
4758 fputs ("%fr0", file);
4759 return;
4761 else
4762 break;
4763 case 'A':
4765 rtx xoperands[2];
4767 xoperands[0] = XEXP (XEXP (x, 0), 0);
4768 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4769 output_global_address (file, xoperands[1], 0);
4770 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4771 return;
4774 case 'C': /* Plain (C)ondition */
4775 case 'X':
4776 switch (GET_CODE (x))
4778 case EQ:
4779 fputs ("=", file); break;
4780 case NE:
4781 fputs ("<>", file); break;
4782 case GT:
4783 fputs (">", file); break;
4784 case GE:
4785 fputs (">=", file); break;
4786 case GEU:
4787 fputs (">>=", file); break;
4788 case GTU:
4789 fputs (">>", file); break;
4790 case LT:
4791 fputs ("<", file); break;
4792 case LE:
4793 fputs ("<=", file); break;
4794 case LEU:
4795 fputs ("<<=", file); break;
4796 case LTU:
4797 fputs ("<<", file); break;
4798 default:
4799 gcc_unreachable ();
4801 return;
4802 case 'N': /* Condition, (N)egated */
4803 switch (GET_CODE (x))
4805 case EQ:
4806 fputs ("<>", file); break;
4807 case NE:
4808 fputs ("=", file); break;
4809 case GT:
4810 fputs ("<=", file); break;
4811 case GE:
4812 fputs ("<", file); break;
4813 case GEU:
4814 fputs ("<<", file); break;
4815 case GTU:
4816 fputs ("<<=", file); break;
4817 case LT:
4818 fputs (">=", file); break;
4819 case LE:
4820 fputs (">", file); break;
4821 case LEU:
4822 fputs (">>", file); break;
4823 case LTU:
4824 fputs (">>=", file); break;
4825 default:
4826 gcc_unreachable ();
4828 return;
4829 /* For floating point comparisons. Note that the output
4830 predicates are the complement of the desired condition. The
4831 conditions for GT, GE, LT, LE and LTGT cause an invalid
4832 operation exception if the result is unordered and this
4833 exception is enabled in the floating-point status register. */
4834 case 'Y':
4835 switch (GET_CODE (x))
4837 case EQ:
4838 fputs ("!=", file); break;
4839 case NE:
4840 fputs ("=", file); break;
4841 case GT:
4842 fputs ("!>", file); break;
4843 case GE:
4844 fputs ("!>=", file); break;
4845 case LT:
4846 fputs ("!<", file); break;
4847 case LE:
4848 fputs ("!<=", file); break;
4849 case LTGT:
4850 fputs ("!<>", file); break;
4851 case UNLE:
4852 fputs ("!?<=", file); break;
4853 case UNLT:
4854 fputs ("!?<", file); break;
4855 case UNGE:
4856 fputs ("!?>=", file); break;
4857 case UNGT:
4858 fputs ("!?>", file); break;
4859 case UNEQ:
4860 fputs ("!?=", file); break;
4861 case UNORDERED:
4862 fputs ("!?", file); break;
4863 case ORDERED:
4864 fputs ("?", file); break;
4865 default:
4866 gcc_unreachable ();
4868 return;
4869 case 'S': /* Condition, operands are (S)wapped. */
4870 switch (GET_CODE (x))
4872 case EQ:
4873 fputs ("=", file); break;
4874 case NE:
4875 fputs ("<>", file); break;
4876 case GT:
4877 fputs ("<", file); break;
4878 case GE:
4879 fputs ("<=", file); break;
4880 case GEU:
4881 fputs ("<<=", file); break;
4882 case GTU:
4883 fputs ("<<", file); break;
4884 case LT:
4885 fputs (">", file); break;
4886 case LE:
4887 fputs (">=", file); break;
4888 case LEU:
4889 fputs (">>=", file); break;
4890 case LTU:
4891 fputs (">>", file); break;
4892 default:
4893 gcc_unreachable ();
4895 return;
4896 case 'B': /* Condition, (B)oth swapped and negate. */
4897 switch (GET_CODE (x))
4899 case EQ:
4900 fputs ("<>", file); break;
4901 case NE:
4902 fputs ("=", file); break;
4903 case GT:
4904 fputs (">=", file); break;
4905 case GE:
4906 fputs (">", file); break;
4907 case GEU:
4908 fputs (">>", file); break;
4909 case GTU:
4910 fputs (">>=", file); break;
4911 case LT:
4912 fputs ("<=", file); break;
4913 case LE:
4914 fputs ("<", file); break;
4915 case LEU:
4916 fputs ("<<", file); break;
4917 case LTU:
4918 fputs ("<<=", file); break;
4919 default:
4920 gcc_unreachable ();
4922 return;
4923 case 'k':
4924 gcc_assert (GET_CODE (x) == CONST_INT);
4925 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4926 return;
4927 case 'Q':
4928 gcc_assert (GET_CODE (x) == CONST_INT);
4929 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4930 return;
4931 case 'L':
4932 gcc_assert (GET_CODE (x) == CONST_INT);
4933 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4934 return;
4935 case 'O':
4936 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4937 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4938 return;
4939 case 'p':
4940 gcc_assert (GET_CODE (x) == CONST_INT);
4941 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4942 return;
4943 case 'P':
4944 gcc_assert (GET_CODE (x) == CONST_INT);
4945 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4946 return;
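    /* Worked examples for the CONST_INT cases above, with x == 3:
       %k prints -4 (~3), %Q prints 61 (64 - 3), %L prints 29 (32 - 3),
       %p prints 60 (63 - 3), and %P prints 28 (31 - 3).  These convert
       a bit count into the complementary left/right bit position used
       by the shift and deposit instructions.  */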
4947 case 'I':
4948 if (GET_CODE (x) == CONST_INT)
4949 fputs ("i", file);
4950 return;
4951 case 'M':
4952 case 'F':
4953 switch (GET_CODE (XEXP (x, 0)))
4955 case PRE_DEC:
4956 case PRE_INC:
4957 if (ASSEMBLER_DIALECT == 0)
4958 fputs ("s,mb", file);
4959 else
4960 fputs (",mb", file);
4961 break;
4962 case POST_DEC:
4963 case POST_INC:
4964 if (ASSEMBLER_DIALECT == 0)
4965 fputs ("s,ma", file);
4966 else
4967 fputs (",ma", file);
4968 break;
4969 case PLUS:
4970 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4971 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4973 if (ASSEMBLER_DIALECT == 0)
4974 fputs ("x", file);
4976 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4977 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4979 if (ASSEMBLER_DIALECT == 0)
4980 fputs ("x,s", file);
4981 else
4982 fputs (",s", file);
4984 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4985 fputs ("s", file);
4986 break;
4987 default:
4988 if (code == 'F' && ASSEMBLER_DIALECT == 0)
4989 fputs ("s", file);
4990 break;
4992 return;
4993 case 'G':
4994 output_global_address (file, x, 0);
4995 return;
4996 case 'H':
4997 output_global_address (file, x, 1);
4998 return;
4999 case 0: /* Don't do anything special */
5000 break;
5001 case 'Z':
5003 unsigned op[3];
5004 compute_zdepwi_operands (INTVAL (x), op);
5005 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5006 return;
5008 case 'z':
5010 unsigned op[3];
5011 compute_zdepdi_operands (INTVAL (x), op);
5012 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5013 return;
5015 case 'c':
5016 /* We can get here from a .vtable_inherit due to our
5017 CONSTANT_ADDRESS_P rejecting perfectly good constant
5018 addresses. */
5019 break;
5020 default:
5021 gcc_unreachable ();
5023 if (GET_CODE (x) == REG)
5025 fputs (reg_names [REGNO (x)], file);
5026 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5028 fputs ("R", file);
5029 return;
5031 if (FP_REG_P (x)
5032 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5033 && (REGNO (x) & 1) == 0)
5034 fputs ("L", file);
5036 else if (GET_CODE (x) == MEM)
5038 int size = GET_MODE_SIZE (GET_MODE (x));
5039 rtx base = NULL_RTX;
5040 switch (GET_CODE (XEXP (x, 0)))
5042 case PRE_DEC:
5043 case POST_DEC:
5044 base = XEXP (XEXP (x, 0), 0);
5045 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5046 break;
5047 case PRE_INC:
5048 case POST_INC:
5049 base = XEXP (XEXP (x, 0), 0);
5050 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5051 break;
5052 case PLUS:
5053 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5054 fprintf (file, "%s(%s)",
5055 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5056 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5057 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5058 fprintf (file, "%s(%s)",
5059 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5060 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5061 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5062 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5064 /* Because the REG_POINTER flag can get lost during reload,
5065 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5066 index and base registers in the combined move patterns. */
5067 rtx base = XEXP (XEXP (x, 0), 1);
5068 rtx index = XEXP (XEXP (x, 0), 0);
5070 fprintf (file, "%s(%s)",
5071 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5073 else
5074 output_address (XEXP (x, 0));
5075 break;
5076 default:
5077 output_address (XEXP (x, 0));
5078 break;
5081 else
5082 output_addr_const (file, x);
5085 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5087 void
5088 output_global_address (FILE *file, rtx x, int round_constant)
5091 /* Imagine (high (const (plus ...))). */
5092 if (GET_CODE (x) == HIGH)
5093 x = XEXP (x, 0);
5095 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5096 output_addr_const (file, x);
5097 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5099 output_addr_const (file, x);
5100 fputs ("-$global$", file);
5102 else if (GET_CODE (x) == CONST)
5104 const char *sep = "";
5105 int offset = 0; /* assembler wants -$global$ at end */
5106 rtx base = NULL_RTX;
5108 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5110 case SYMBOL_REF:
5111 base = XEXP (XEXP (x, 0), 0);
5112 output_addr_const (file, base);
5113 break;
5114 case CONST_INT:
5115 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5116 break;
5117 default:
5118 gcc_unreachable ();
5121 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5123 case SYMBOL_REF:
5124 base = XEXP (XEXP (x, 0), 1);
5125 output_addr_const (file, base);
5126 break;
5127 case CONST_INT:
5128 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5129 break;
5130 default:
5131 gcc_unreachable ();
5134 /* How bogus. The compiler is apparently responsible for
5135 rounding the constant if it uses an LR field selector.
5137 The linker and/or assembler seem a better place since
5138 they have to do this kind of thing already.
5140 If we fail to do this, HP's optimizing linker may eliminate
5141 an addil, but not update the ldw/stw/ldo instruction that
5142 uses the result of the addil. */
5143 if (round_constant)
5144 offset = ((offset + 0x1000) & ~0x1fff);
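      /* For example, an offset of 0x1234 is rounded to 0x2000 and an
	 offset of 0x800 to 0: offsets become multiples of 0x2000 so
	 that the remainder always fits the signed 14-bit displacement
	 of the instruction consuming %r1.  */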
5146 switch (GET_CODE (XEXP (x, 0)))
5148 case PLUS:
5149 if (offset < 0)
5151 offset = -offset;
5152 sep = "-";
5154 else
5155 sep = "+";
5156 break;
5158 case MINUS:
5159 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5160 sep = "-";
5161 break;
5163 default:
5164 gcc_unreachable ();
5167 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5168 fputs ("-$global$", file);
5169 if (offset)
5170 fprintf (file, "%s%d", sep, offset);
5172 else
5173 output_addr_const (file, x);
5176 /* Output boilerplate text to appear at the beginning of the file.
5177 There are several possible versions. */
5178 #define aputs(x) fputs(x, asm_out_file)
5179 static inline void
5180 pa_file_start_level (void)
5182 if (TARGET_64BIT)
5183 aputs ("\t.LEVEL 2.0w\n");
5184 else if (TARGET_PA_20)
5185 aputs ("\t.LEVEL 2.0\n");
5186 else if (TARGET_PA_11)
5187 aputs ("\t.LEVEL 1.1\n");
5188 else
5189 aputs ("\t.LEVEL 1.0\n");
5192 static inline void
5193 pa_file_start_space (int sortspace)
5195 aputs ("\t.SPACE $PRIVATE$");
5196 if (sortspace)
5197 aputs (",SORT=16");
5198 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5199 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5200 "\n\t.SPACE $TEXT$");
5201 if (sortspace)
5202 aputs (",SORT=8");
5203 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5204 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5207 static inline void
5208 pa_file_start_file (int want_version)
5210 if (write_symbols != NO_DEBUG)
5212 output_file_directive (asm_out_file, main_input_filename);
5213 if (want_version)
5214 aputs ("\t.version\t\"01.01\"\n");
5218 static inline void
5219 pa_file_start_mcount (const char *aswhat)
5221 if (profile_flag)
5222 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5225 static void
5226 pa_elf_file_start (void)
5228 pa_file_start_level ();
5229 pa_file_start_mcount ("ENTRY");
5230 pa_file_start_file (0);
5233 static void
5234 pa_som_file_start (void)
5236 pa_file_start_level ();
5237 pa_file_start_space (0);
5238 aputs ("\t.IMPORT $global$,DATA\n"
5239 "\t.IMPORT $$dyncall,MILLICODE\n");
5240 pa_file_start_mcount ("CODE");
5241 pa_file_start_file (0);
5244 static void
5245 pa_linux_file_start (void)
5247 pa_file_start_file (1);
5248 pa_file_start_level ();
5249 pa_file_start_mcount ("CODE");
5252 static void
5253 pa_hpux64_gas_file_start (void)
5255 pa_file_start_level ();
5256 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5257 if (profile_flag)
5258 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5259 #endif
5260 pa_file_start_file (1);
5263 static void
5264 pa_hpux64_hpas_file_start (void)
5266 pa_file_start_level ();
5267 pa_file_start_space (1);
5268 pa_file_start_mcount ("CODE");
5269 pa_file_start_file (0);
5271 #undef aputs
5273 /* Search the deferred plabel list for SYMBOL and return its internal
5274 label. If an entry for SYMBOL is not found, a new entry is created. */
5276 static rtx
5277 get_deferred_plabel (rtx symbol)
5279 const char *fname = XSTR (symbol, 0);
5280 size_t i;
5282 /* See if we have already put this function on the list of deferred
5283 plabels. This list is generally small, so a linear search is not
5284 too ugly. If it proves too slow, replace it with something faster. */
5285 for (i = 0; i < n_deferred_plabels; i++)
5286 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5287 break;
5289 /* If the deferred plabel list is empty, or this entry was not found
5290 on the list, create a new entry on the list. */
5291 if (deferred_plabels == NULL || i == n_deferred_plabels)
5293 tree id;
5295 if (deferred_plabels == 0)
5296 deferred_plabels = (struct deferred_plabel *)
5297 ggc_alloc (sizeof (struct deferred_plabel));
5298 else
5299 deferred_plabels = (struct deferred_plabel *)
5300 ggc_realloc (deferred_plabels,
5301 ((n_deferred_plabels + 1)
5302 * sizeof (struct deferred_plabel)));
5304 i = n_deferred_plabels++;
5305 deferred_plabels[i].internal_label = gen_label_rtx ();
5306 deferred_plabels[i].symbol = symbol;
5308 /* Gross. We have just implicitly taken the address of this
5309 function. Mark it in the same manner as assemble_name. */
5310 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5311 if (id)
5312 mark_referenced (id);
5315 return deferred_plabels[i].internal_label;
5318 static void
5319 output_deferred_plabels (void)
5321 size_t i;
5323 /* If we have some deferred plabels, then we need to switch into the
5324 data or readonly data section, and align it to a 4-byte (8-byte for
5325 the 64-bit runtime) boundary before outputting the deferred plabels. */
5326 if (n_deferred_plabels)
5328 switch_to_section (flag_pic ? data_section : readonly_data_section);
5329 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5332 /* Now output the deferred plabels. */
5333 for (i = 0; i < n_deferred_plabels; i++)
5335 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5336 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5337 assemble_integer (deferred_plabels[i].symbol,
5338 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
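/* A sketch of the output for one 32-bit entry (the label syntax and
   integer directive vary with the target assembler):

       L$42:
	       .word	foo

   i.e. an internal label followed by a pointer-sized word holding the
   function's address.  */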
5342 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5343 /* Initialize optabs to point to HPUX long double emulation routines. */
5344 static void
5345 pa_hpux_init_libfuncs (void)
5347 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5348 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5349 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5350 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5351 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5352 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5353 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5354 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5355 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5357 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5358 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5359 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5360 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5361 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5362 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5363 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5365 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5366 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5367 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5368 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5370 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5371 ? "__U_Qfcnvfxt_quad_to_sgl"
5372 : "_U_Qfcnvfxt_quad_to_sgl");
5373 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5374 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5375 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5377 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5378 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5379 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5380 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5382 #endif
5384 /* HP's millicode routines mean something special to the assembler.
5385 Keep track of which ones we have used. */
5387 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5388 static void import_milli (enum millicodes);
5389 static char imported[(int) end1000];
5390 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5391 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5392 #define MILLI_START 10
5394 static void
5395 import_milli (enum millicodes code)
5397 char str[sizeof (import_string)];
5399 if (!imported[(int) code])
5401 imported[(int) code] = 1;
5402 strcpy (str, import_string);
5403 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5404 output_asm_insn (str, 0);
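/* For example, import_milli (mulI) copies "mulI" over the four dots
   (MILLI_START is the index just past ".IMPORT $$") and emits

       .IMPORT $$mulI,MILLICODE

   The imported[] array guarantees each routine is imported at most
   once per file.  */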
5408 /* The register constraints have put the operands and return value in
5409 the proper registers. */
5411 const char *
5412 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5414 import_milli (mulI);
5415 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5418 /* Emit the RTL for doing a division by a constant. */
5420 /* Do magic division millicodes exist for this value? */
5421 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
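/* That is, divisors 3, 5, 6, 7, 9, 10, 12, 14 and 15 have dedicated
   millicode entry points ($$divI_<n> and $$divU_<n>, as used by
   output_div_insn below); other divisors go through the generic
   $$divI/$$divU routines.  */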
5423 /* We'll use an array to keep track of the magic millicodes and
5424 whether or not we've used them already. [n][0] is signed, [n][1] is
5425 unsigned. */
5427 static int div_milli[16][2];
5429 int
5430 emit_hpdiv_const (rtx *operands, int unsignedp)
5432 if (GET_CODE (operands[2]) == CONST_INT
5433 && INTVAL (operands[2]) > 0
5434 && INTVAL (operands[2]) < 16
5435 && magic_milli[INTVAL (operands[2])])
5437 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5439 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5440 emit
5441 (gen_rtx_PARALLEL
5442 (VOIDmode,
5443 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5444 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5445 SImode,
5446 gen_rtx_REG (SImode, 26),
5447 operands[2])),
5448 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5449 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5450 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5451 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5452 gen_rtx_CLOBBER (VOIDmode, ret))));
5453 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5454 return 1;
5456 return 0;
5459 const char *
5460 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5462 int divisor;
5464 /* If the divisor is a constant, try to use one of the special
5465 opcodes. */
5466 if (GET_CODE (operands[0]) == CONST_INT)
5468 static char buf[100];
5469 divisor = INTVAL (operands[0]);
5470 if (!div_milli[divisor][unsignedp])
5472 div_milli[divisor][unsignedp] = 1;
5473 if (unsignedp)
5474 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5475 else
5476 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5478 if (unsignedp)
5480 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5481 INTVAL (operands[0]));
5482 return output_millicode_call (insn,
5483 gen_rtx_SYMBOL_REF (SImode, buf));
5485 else
5487 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5488 INTVAL (operands[0]));
5489 return output_millicode_call (insn,
5490 gen_rtx_SYMBOL_REF (SImode, buf));
5493 /* Divisor isn't a special constant. */
5494 else
5496 if (unsignedp)
5498 import_milli (divU);
5499 return output_millicode_call (insn,
5500 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5502 else
5504 import_milli (divI);
5505 return output_millicode_call (insn,
5506 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5511 /* Output a $$rem millicode to do mod. */
5513 const char *
5514 output_mod_insn (int unsignedp, rtx insn)
5516 if (unsignedp)
5518 import_milli (remU);
5519 return output_millicode_call (insn,
5520 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5522 else
5524 import_milli (remI);
5525 return output_millicode_call (insn,
5526 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5530 void
5531 output_arg_descriptor (rtx call_insn)
5533 const char *arg_regs[4];
5534 enum machine_mode arg_mode;
5535 rtx link;
5536 int i, output_flag = 0;
5537 int regno;
5539 /* We neither need nor want argument location descriptors for the
5540 64-bit runtime environment or the ELF32 environment. */
5541 if (TARGET_64BIT || TARGET_ELF32)
5542 return;
5544 for (i = 0; i < 4; i++)
5545 arg_regs[i] = 0;
5547 /* Specify explicitly that no argument relocations should take place
5548 if using the portable runtime calling conventions. */
5549 if (TARGET_PORTABLE_RUNTIME)
5551 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5552 asm_out_file);
5553 return;
5556 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5557 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5558 link; link = XEXP (link, 1))
5560 rtx use = XEXP (link, 0);
5562 if (! (GET_CODE (use) == USE
5563 && GET_CODE (XEXP (use, 0)) == REG
5564 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5565 continue;
5567 arg_mode = GET_MODE (XEXP (use, 0));
5568 regno = REGNO (XEXP (use, 0));
5569 if (regno >= 23 && regno <= 26)
5571 arg_regs[26 - regno] = "GR";
5572 if (arg_mode == DImode)
5573 arg_regs[25 - regno] = "GR";
5575 else if (regno >= 32 && regno <= 39)
5577 if (arg_mode == SFmode)
5578 arg_regs[(regno - 32) / 2] = "FR";
5579 else
5581 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5582 arg_regs[(regno - 34) / 2] = "FR";
5583 arg_regs[(regno - 34) / 2 + 1] = "FU";
5584 #else
5585 arg_regs[(regno - 34) / 2] = "FU";
5586 arg_regs[(regno - 34) / 2 + 1] = "FR";
5587 #endif
5591 fputs ("\t.CALL ", asm_out_file);
5592 for (i = 0; i < 4; i++)
5594 if (arg_regs[i])
5596 if (output_flag++)
5597 fputc (',', asm_out_file);
5598 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5601 fputc ('\n', asm_out_file);
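/* For illustration: a call passing one int in %r26 produces
   "\t.CALL ARGW0=GR"; if instead the USE recorded is a DImode value
   in %r25, both ARGW0 and ARGW1 are marked GR.  */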
5604 static enum reg_class
5605 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5606 enum machine_mode mode, secondary_reload_info *sri)
5608 int is_symbolic, regno;
5610 /* Handle the easy stuff first. */
5611 if (class == R1_REGS)
5612 return NO_REGS;
5614 if (REG_P (x))
5616 regno = REGNO (x);
5617 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5618 return NO_REGS;
5620 else
5621 regno = -1;
5623 /* If we have something like (mem (mem (...))), we can safely assume the
5624 inner MEM will end up in a general register after reloading, so there's
5625 no need for a secondary reload. */
5626 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5627 return NO_REGS;
5629 /* Trying to load a constant into a FP register during PIC code
5630 generation requires %r1 as a scratch register. */
5631 if (flag_pic
5632 && (mode == SImode || mode == DImode)
5633 && FP_REG_CLASS_P (class)
5634 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5636 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5637 : CODE_FOR_reload_indi_r1);
5638 return NO_REGS;
5641 /* Profiling showed the PA port spends about 1.3% of its compilation
5642 time in true_regnum from calls inside pa_secondary_reload_class. */
5643 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5644 regno = true_regnum (x);
5646 /* Handle out of range displacement for integer mode loads/stores of
5647 FP registers. */
5648 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5649 && GET_MODE_CLASS (mode) == MODE_INT
5650 && FP_REG_CLASS_P (class))
5651 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5653 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5654 return NO_REGS;
5657 /* A SAR<->FP register copy requires a secondary register (GPR) as
5658 well as secondary memory. */
5659 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5660 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5661 || (class == SHIFT_REGS
5662 && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5664 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5665 return NO_REGS;
5668 /* Secondary reloads of symbolic operands require %r1 as a scratch
5669 register when we're generating PIC code and the operand isn't
5670 readonly. */
5671 if (GET_CODE (x) == HIGH)
5672 x = XEXP (x, 0);
5674 /* Profiling has shown GCC spends about 2.6% of its compilation
5675 time in symbolic_operand from calls inside pa_secondary_reload_class.
5676 So, we use an inline copy to avoid useless work. */
5677 switch (GET_CODE (x))
5679 rtx op;
5681 case SYMBOL_REF:
5682 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5683 break;
5684 case LABEL_REF:
5685 is_symbolic = 1;
5686 break;
5687 case CONST:
5688 op = XEXP (x, 0);
5689 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5690 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5691 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5692 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5693 break;
5694 default:
5695 is_symbolic = 0;
5696 break;
5699 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5701 gcc_assert (mode == SImode || mode == DImode);
5702 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5703 : CODE_FOR_reload_indi_r1);
5706 return NO_REGS;
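/* For example, reloading the address of a non-readonly symbol while
   generating PIC code falls into the case above: reload is handed the
   reload_insi_r1/reload_indi_r1 pattern, which supplies %r1 as the
   scratch register.  */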
5709 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5710 by invisible reference. As a GCC extension, we also pass anything
5711 with a zero or variable size by reference.
5713 The 64-bit runtime does not describe passing any types by invisible
5714 reference. The internals of GCC can't currently handle passing
5715 empty structures, and zero or variable length arrays when they are
5716 not passed entirely on the stack or by reference. Thus, as a GCC
5717 extension, we pass these types by reference. The HP compiler doesn't
5718 support these types, so hopefully there shouldn't be any compatibility
5719 issues. This may have to be revisited when HP releases a C99 compiler
5720 or updates the ABI. */
5722 static bool
5723 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5724 enum machine_mode mode, tree type,
5725 bool named ATTRIBUTE_UNUSED)
5727 HOST_WIDE_INT size;
5729 if (type)
5730 size = int_size_in_bytes (type);
5731 else
5732 size = GET_MODE_SIZE (mode);
5734 if (TARGET_64BIT)
5735 return size <= 0;
5736 else
5737 return size <= 0 || size > 8;
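/* For example, a 12-byte struct is passed by reference in the 32-bit
   runtime (size > 8) but by value in the 64-bit runtime, while a
   zero-sized type is passed by reference in both.  */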
5740 enum direction
5741 function_arg_padding (enum machine_mode mode, tree type)
5743 if (mode == BLKmode
5744 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5746 /* Return none if justification is not required. */
5747 if (type
5748 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5749 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5750 return none;
5752 /* The directions set here are ignored when a BLKmode argument larger
5753 than a word is placed in a register. Different code is used for
5754 the stack and registers. This makes it difficult to have a
5755 consistent data representation for both the stack and registers.
5756 For both runtimes, the justification and padding for arguments on
5757 the stack and in registers should be identical. */
5758 if (TARGET_64BIT)
5759 /* The 64-bit runtime specifies left justification for aggregates. */
5760 return upward;
5761 else
5762 /* The 32-bit runtime architecture specifies right justification.
5763 When the argument is passed on the stack, the argument is padded
5764 with garbage on the left. The HP compiler pads with zeros. */
5765 return downward;
5768 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5769 return downward;
5770 else
5771 return none;
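/* For instance, a 3-byte BLKmode struct is padded downward (right
   justified) by the 32-bit runtime but upward (left justified) by the
   64-bit runtime, while a scalar at least as wide as PARM_BOUNDARY
   needs no justification and gets `none'.  */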
5775 /* Do what is necessary for `va_start'. We look at the current function
5776 to determine if stdargs or varargs is used and fill in an initial
5777 va_list. A pointer to this constructor is returned. */
5779 static rtx
5780 hppa_builtin_saveregs (void)
5782 rtx offset, dest;
5783 tree fntype = TREE_TYPE (current_function_decl);
5784 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5785 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5786 != void_type_node)))
5787 ? UNITS_PER_WORD : 0);
5789 if (argadj)
5790 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5791 else
5792 offset = current_function_arg_offset_rtx;
5794 if (TARGET_64BIT)
5796 int i, off;
5798 /* Adjust for varargs/stdarg differences. */
5799 if (argadj)
5800 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5801 else
5802 offset = current_function_arg_offset_rtx;
5804 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5805 from the incoming arg pointer and growing to larger addresses. */
5806 for (i = 26, off = -64; i >= 19; i--, off += 8)
5807 emit_move_insn (gen_rtx_MEM (word_mode,
5808 plus_constant (arg_pointer_rtx, off)),
5809 gen_rtx_REG (word_mode, i));
5811 /* The incoming args pointer points just beyond the flushback area;
5812 normally this is not a serious concern. However, when we are doing
5813 varargs/stdargs we want to make the arg pointer point to the start
5814 of the incoming argument area. */
5815 emit_move_insn (virtual_incoming_args_rtx,
5816 plus_constant (arg_pointer_rtx, -64));
5818 /* Now return a pointer to the first anonymous argument. */
5819 return copy_to_reg (expand_binop (Pmode, add_optab,
5820 virtual_incoming_args_rtx,
5821 offset, 0, 0, OPTAB_LIB_WIDEN));
5824 /* Store general registers on the stack. */
5825 dest = gen_rtx_MEM (BLKmode,
5826 plus_constant (current_function_internal_arg_pointer,
5827 -16));
5828 set_mem_alias_set (dest, get_varargs_alias_set ());
5829 set_mem_align (dest, BITS_PER_WORD);
5830 move_block_from_reg (23, dest, 4);
5832 /* move_block_from_reg will emit code to store the argument registers
5833 individually as scalar stores.
5835 However, other insns may later load from the same addresses for
5836 a structure load (passing a struct to a varargs routine).
5838 The alias code assumes that such aliasing can never happen, so we
5839 have to keep memory referencing insns from moving up beyond the
5840 last argument register store. So we emit a blockage insn here. */
5841 emit_insn (gen_blockage ());
5843 return copy_to_reg (expand_binop (Pmode, add_optab,
5844 current_function_internal_arg_pointer,
5845 offset, 0, 0, OPTAB_LIB_WIDEN));
5848 void
5849 hppa_va_start (tree valist, rtx nextarg)
5851 nextarg = expand_builtin_saveregs ();
5852 std_expand_builtin_va_start (valist, nextarg);
5855 static tree
5856 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5858 if (TARGET_64BIT)
5860 /* Args grow upward. We can use the generic routines. */
5861 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5863 else /* !TARGET_64BIT */
5865 tree ptr = build_pointer_type (type);
5866 tree valist_type;
5867 tree t, u;
5868 unsigned int size, ofs;
5869 bool indirect;
5871 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5872 if (indirect)
5874 type = ptr;
5875 ptr = build_pointer_type (type);
5877 size = int_size_in_bytes (type);
5878 valist_type = TREE_TYPE (valist);
5880 /* Args grow down. Not handled by generic routines. */
5882 u = fold_convert (valist_type, size_in_bytes (type));
5883 t = build2 (MINUS_EXPR, valist_type, valist, u);
5885 /* Copied from va-pa.h, but we probably don't need to align to
5886 word size, since we generate and preserve that invariant. */
5887 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5888 t = build2 (BIT_AND_EXPR, valist_type, t, u);
5890 t = build2 (MODIFY_EXPR, valist_type, valist, t);
5892 ofs = (8 - size) % 4;
5893 if (ofs != 0)
5895 u = fold_convert (valist_type, size_int (ofs));
5896 t = build2 (PLUS_EXPR, valist_type, t, u);
5899 t = fold_convert (ptr, t);
5900 t = build_va_arg_indirect_ref (t);
5902 if (indirect)
5903 t = build_va_arg_indirect_ref (t);
5905 return t;
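/* A sketch of the 32-bit case above: fetching a 4-byte int amounts to
   valist = (valist - 4) & ~3 followed by a load from the new address,
   and fetching an 8-byte double to valist = (valist - 8) & ~7; an
   argument passed by reference instead loads a pointer this way and
   then dereferences it.  */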
5909 /* True if MODE is valid for the target. By "valid", we mean able to
5910 be manipulated in non-trivial ways. In particular, this means all
5911 the arithmetic is supported.
5913 Currently, TImode is not valid as the HP 64-bit runtime documentation
5914 doesn't document the alignment and calling conventions for this type.
5915 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5916 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
5918 static bool
5919 pa_scalar_mode_supported_p (enum machine_mode mode)
5921 int precision = GET_MODE_PRECISION (mode);
5923 switch (GET_MODE_CLASS (mode))
5925 case MODE_PARTIAL_INT:
5926 case MODE_INT:
5927 if (precision == CHAR_TYPE_SIZE)
5928 return true;
5929 if (precision == SHORT_TYPE_SIZE)
5930 return true;
5931 if (precision == INT_TYPE_SIZE)
5932 return true;
5933 if (precision == LONG_TYPE_SIZE)
5934 return true;
5935 if (precision == LONG_LONG_TYPE_SIZE)
5936 return true;
5937 return false;
5939 case MODE_FLOAT:
5940 if (precision == FLOAT_TYPE_SIZE)
5941 return true;
5942 if (precision == DOUBLE_TYPE_SIZE)
5943 return true;
5944 if (precision == LONG_DOUBLE_TYPE_SIZE)
5945 return true;
5946 return false;
5948 case MODE_DECIMAL_FLOAT:
5949 return false;
5951 default:
5952 gcc_unreachable ();
5956 /* This routine handles all the normal conditional branch sequences we
5957 might need to generate. It handles compare immediate vs compare
5958 register, nullification of delay slots, varying length branches,
5959 negated branches, and all combinations of the above. It returns the
5960 output template appropriate for emitting the branch described by the
5961 given parameters. */
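/* As a concrete example, a short form of the output might be
   "{comb,=|cmpb,=} %r26,%r25,L$0003" (register numbers and label made
   up for illustration); the {...|...} alternatives let final assembly
   select between the old comb mnemonic and the PA 2.0 cmpb one.  */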
5963 const char *
5964 output_cbranch (rtx *operands, int negated, rtx insn)
5966 static char buf[100];
5967 int useskip = 0;
5968 int nullify = INSN_ANNULLED_BRANCH_P (insn);
5969 int length = get_attr_length (insn);
5970 int xdelay;
5972 /* A conditional branch to the following instruction (e.g. the delay slot)
5973 is asking for a disaster. This can happen when not optimizing and
5974 when jump optimization fails.
5976 While it is usually safe to emit nothing, this can fail if the
5977 preceding instruction is a nullified branch with an empty delay
5978 slot and the same branch target as this branch. We could check
5979 for this but jump optimization should eliminate nop jumps. It
5980 is always safe to emit a nop. */
5981 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5982 return "nop";
5984 /* The doubleword form of the cmpib instruction doesn't have the LEU
5985 and GTU conditions while the cmpb instruction does. Since we accept
5986 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5987 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5988 operands[2] = gen_rtx_REG (DImode, 0);
5989 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
5990 operands[1] = gen_rtx_REG (DImode, 0);
5992 /* If this is a long branch with its delay slot unfilled, set `nullify'
5993 as it can nullify the delay slot and save a nop. */
5994 if (length == 8 && dbr_sequence_length () == 0)
5995 nullify = 1;
5997 /* If this is a short forward conditional branch which did not get
5998 its delay slot filled, the delay slot can still be nullified. */
5999 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6000 nullify = forward_branch_p (insn);
6002 /* A forward branch over a single nullified insn can be done with a
6003 comclr instruction. This avoids a single cycle penalty due to a
6004 mispredicted branch if we fall through (branch not taken). */
6005 if (length == 4
6006 && next_real_insn (insn) != 0
6007 && get_attr_length (next_real_insn (insn)) == 4
6008 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6009 && nullify)
6010 useskip = 1;
6012 switch (length)
6014 /* All short conditional branches except backwards with an unfilled
6015 delay slot. */
6016 case 4:
6017 if (useskip)
6018 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6019 else
6020 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6021 if (GET_MODE (operands[1]) == DImode)
6022 strcat (buf, "*");
6023 if (negated)
6024 strcat (buf, "%B3");
6025 else
6026 strcat (buf, "%S3");
6027 if (useskip)
6028 strcat (buf, " %2,%r1,%%r0");
6029 else if (nullify)
6030 strcat (buf, ",n %2,%r1,%0");
6031 else
6032 strcat (buf, " %2,%r1,%0");
6033 break;
6035 /* All long conditionals. Note a short backward branch with an
6036 unfilled delay slot is treated just like a long backward branch
6037 with an unfilled delay slot. */
6038 case 8:
6039 /* Handle weird backwards branch with a filled delay slot
6040 which is nullified. */
6041 if (dbr_sequence_length () != 0
6042 && ! forward_branch_p (insn)
6043 && nullify)
6045 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6046 if (GET_MODE (operands[1]) == DImode)
6047 strcat (buf, "*");
6048 if (negated)
6049 strcat (buf, "%S3");
6050 else
6051 strcat (buf, "%B3");
6052 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6054 /* Handle short backwards branch with an unfilled delay slot.
6055 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6056 taken and untaken branches. */
6057 else if (dbr_sequence_length () == 0
6058 && ! forward_branch_p (insn)
6059 && INSN_ADDRESSES_SET_P ()
6060 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6061 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6063 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6064 if (GET_MODE (operands[1]) == DImode)
6065 strcat (buf, "*");
6066 if (negated)
6067 strcat (buf, "%B3 %2,%r1,%0%#");
6068 else
6069 strcat (buf, "%S3 %2,%r1,%0%#");
6071 else
6073 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6074 if (GET_MODE (operands[1]) == DImode)
6075 strcat (buf, "*");
6076 if (negated)
6077 strcat (buf, "%S3");
6078 else
6079 strcat (buf, "%B3");
6080 if (nullify)
6081 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6082 else
6083 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6085 break;
6087 default:
6088 /* The reversed conditional branch must branch over one additional
6089 instruction if the delay slot is filled and needs to be extracted
6090 by output_lbranch. If the delay slot is empty or this is a
6091 nullified forward branch, the instruction after the reversed
6092 conditional branch must be nullified. */
6093 if (dbr_sequence_length () == 0
6094 || (nullify && forward_branch_p (insn)))
6096 nullify = 1;
6097 xdelay = 0;
6098 operands[4] = GEN_INT (length);
6100 else
6102 xdelay = 1;
6103 operands[4] = GEN_INT (length + 4);
6106 /* Create a reversed conditional branch which branches around
6107 the following insns. */
6108 if (GET_MODE (operands[1]) != DImode)
6110 if (nullify)
6112 if (negated)
6113 strcpy (buf,
6114 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6115 else
6116 strcpy (buf,
6117 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6119 else
6121 if (negated)
6122 strcpy (buf,
6123 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6124 else
6125 strcpy (buf,
6126 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6129 else
6131 if (nullify)
6133 if (negated)
6134 strcpy (buf,
6135 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6136 else
6137 strcpy (buf,
6138 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6140 else
6142 if (negated)
6143 strcpy (buf,
6144 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6145 else
6146 strcpy (buf,
6147 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6151 output_asm_insn (buf, operands);
6152 return output_lbranch (operands[0], insn, xdelay);
6154 return buf;
6157 /* This routine handles output of long unconditional branches that
6158 exceed the maximum range of a simple branch instruction. Since
6159 we don't have a register available for the branch, we save register
6160 %r1 in the frame marker, load the branch destination DEST into %r1,
6161 execute the branch, and restore %r1 in the delay slot of the branch.
6163 Since long branches may have an insn in the delay slot and the
6164 delay slot is used to restore %r1, we in general need to extract
6165 this insn and execute it before the branch. However, to facilitate
6166 use of this function by conditional branches, we also provide an
6167 option to not extract the delay insn so that it will be emitted
6168 after the long branch. So, if there is an insn in the delay slot,
6169 it is extracted if XDELAY is nonzero.
6171 The lengths of the various long-branch sequences are 20, 16 and 24
6172 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
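/* Schematically, the 16 byte non-PIC sequence emitted here is roughly

	stw %r1,-20(%r30)	; save %r1 in the frame marker
	ldil L'target,%r1
	be R'target(%sr4,%r1)
	ldw -20(%r30),%r1	; restore %r1 in the delay slot

   with slot -12(%r30) used instead when the return pointer slot is
   already in use.  */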
6174 const char *
6175 output_lbranch (rtx dest, rtx insn, int xdelay)
6177 rtx xoperands[2];
6179 xoperands[0] = dest;
6181 /* First, free up the delay slot. */
6182 if (xdelay && dbr_sequence_length () != 0)
6184 /* We can't handle a jump in the delay slot. */
6185 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6187 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6188 optimize, 0, NULL);
6190 /* Now delete the delay insn. */
6191 PUT_CODE (NEXT_INSN (insn), NOTE);
6192 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6193 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6196 /* Output an insn to save %r1. The runtime documentation doesn't
6197 specify whether the "Clean Up" slot in the caller's frame can
6198 be clobbered by the callee. It isn't copied by HP's builtin
6199 alloca, so this suggests that it can be clobbered if necessary.
6200 The "Static Link" location is copied by HP builtin alloca, so
6201 we avoid using it. Using the cleanup slot might be a problem
6202 if we have to interoperate with languages that pass cleanup
6203 information. However, it should be possible to handle these
6204 situations with GCC's asm feature.
6206 The "Current RP" slot is reserved for the called procedure, so
6207 we try to use it when we don't have a frame of our own. It's
6208 rather unlikely that we won't have a frame when we need to emit
6209 a very long branch.
6211 Really the way to go long term is a register scavenger; go to
6212 the target of the jump and find a register which we can use
6213 as a scratch to hold the value in %r1. Then, we wouldn't have
6214 to free up the delay slot or clobber a slot that may be needed
6215 for other purposes. */
6216 if (TARGET_64BIT)
6218 if (actual_fsize == 0 && !regs_ever_live[2])
6219 /* Use the return pointer slot in the frame marker. */
6220 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6221 else
6222 /* Use the slot at -40 in the frame marker since HP builtin
6223 alloca doesn't copy it. */
6224 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6226 else
6228 if (actual_fsize == 0 && !regs_ever_live[2])
6229 /* Use the return pointer slot in the frame marker. */
6230 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6231 else
6232 /* Use the "Clean Up" slot in the frame marker. In GCC,
6233 the only other use of this location is for copying a
6234 floating point double argument from a floating-point
6235 register to two general registers. The copy is done
6236 as an "atomic" operation when outputting a call, so it
6237 won't interfere with our using the location here. */
6238 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6241 if (TARGET_PORTABLE_RUNTIME)
6243 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6244 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6245 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6247 else if (flag_pic)
6249 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6250 if (TARGET_SOM || !TARGET_GAS)
6252 xoperands[1] = gen_label_rtx ();
6253 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6254 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6255 CODE_LABEL_NUMBER (xoperands[1]));
6256 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6258 else
6260 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6261 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6263 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6265 else
6266 /* Now output a very long branch to the original target. */
6267 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6269 /* Now restore the value of %r1 in the delay slot. */
6270 if (TARGET_64BIT)
6272 if (actual_fsize == 0 && !regs_ever_live[2])
6273 return "ldd -16(%%r30),%%r1";
6274 else
6275 return "ldd -40(%%r30),%%r1";
6277 else
6279 if (actual_fsize == 0 && !regs_ever_live[2])
6280 return "ldw -20(%%r30),%%r1";
6281 else
6282 return "ldw -12(%%r30),%%r1";
6286 /* This routine handles all the branch-on-bit conditional branch sequences we
6287 might need to generate. It handles nullification of delay slots,
6288 varying length branches, negated branches and all combinations of the
6289 above. It returns the appropriate output template to emit the branch. */
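/* A short example of the output (operands invented for illustration):
   "bb,< %r4,5,L$0017" branches to L$0017 when bit 5 of %r4 is 1
   (bits are numbered from the most significant end on the PA), while
   "bb,>=" branches when the bit is 0.  */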
6291 const char *
6292 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6294 static char buf[100];
6295 int useskip = 0;
6296 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6297 int length = get_attr_length (insn);
6298 int xdelay;
6300 /* A conditional branch to the following instruction (e.g. the delay slot) is
6301 asking for a disaster. I do not think this can happen as this pattern
6302 is only used when optimizing; jump optimization should eliminate the
6303 jump. But be prepared just in case. */
6305 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6306 return "nop";
6308 /* If this is a long branch with its delay slot unfilled, set `nullify'
6309 as it can nullify the delay slot and save a nop. */
6310 if (length == 8 && dbr_sequence_length () == 0)
6311 nullify = 1;
6313 /* If this is a short forward conditional branch which did not get
6314 its delay slot filled, the delay slot can still be nullified. */
6315 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6316 nullify = forward_branch_p (insn);
6318 /* A forward branch over a single nullified insn can be done with an
6319 extrs instruction. This avoids a single cycle penalty due to a
6320 mispredicted branch if we fall through (branch not taken). */
6322 if (length == 4
6323 && next_real_insn (insn) != 0
6324 && get_attr_length (next_real_insn (insn)) == 4
6325 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6326 && nullify)
6327 useskip = 1;
6329 switch (length)
6332 /* All short conditional branches except backwards with an unfilled
6333 delay slot. */
6334 case 4:
6335 if (useskip)
6336 strcpy (buf, "{extrs,|extrw,s,}");
6337 else
6338 strcpy (buf, "bb,");
6339 if (useskip && GET_MODE (operands[0]) == DImode)
6340 strcpy (buf, "extrd,s,*");
6341 else if (GET_MODE (operands[0]) == DImode)
6342 strcpy (buf, "bb,*");
6343 if ((which == 0 && negated)
6344 || (which == 1 && ! negated))
6345 strcat (buf, ">=");
6346 else
6347 strcat (buf, "<");
6348 if (useskip)
6349 strcat (buf, " %0,%1,1,%%r0");
6350 else if (nullify && negated)
6351 strcat (buf, ",n %0,%1,%3");
6352 else if (nullify && ! negated)
6353 strcat (buf, ",n %0,%1,%2");
6354 else if (! nullify && negated)
6355 strcat (buf, "%0,%1,%3");
6356 else if (! nullify && ! negated)
6357 strcat (buf, " %0,%1,%2");
6358 break;
6360 /* All long conditionals. Note a short backward branch with an
6361 unfilled delay slot is treated just like a long backward branch
6362 with an unfilled delay slot. */
6363 case 8:
6364 /* Handle weird backwards branch with a filled delay slot
6365 which is nullified. */
6366 if (dbr_sequence_length () != 0
6367 && ! forward_branch_p (insn)
6368 && nullify)
6370 strcpy (buf, "bb,");
6371 if (GET_MODE (operands[0]) == DImode)
6372 strcat (buf, "*");
6373 if ((which == 0 && negated)
6374 || (which == 1 && ! negated))
6375 strcat (buf, "<");
6376 else
6377 strcat (buf, ">=");
6378 if (negated)
6379 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6380 else
6381 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6383 /* Handle short backwards branch with an unfilled delay slot.
6384 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6385 taken and untaken branches. */
6386 else if (dbr_sequence_length () == 0
6387 && ! forward_branch_p (insn)
6388 && INSN_ADDRESSES_SET_P ()
6389 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6390 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6392 strcpy (buf, "bb,");
6393 if (GET_MODE (operands[0]) == DImode)
6394 strcat (buf, "*");
6395 if ((which == 0 && negated)
6396 || (which == 1 && ! negated))
6397 strcat (buf, ">=");
6398 else
6399 strcat (buf, "<");
6400 if (negated)
6401 strcat (buf, " %0,%1,%3%#");
6402 else
6403 strcat (buf, " %0,%1,%2%#");
6405 else
6407 if (GET_MODE (operands[0]) == DImode)
6408 strcpy (buf, "extrd,s,*");
6409 else
6410 strcpy (buf, "{extrs,|extrw,s,}");
6411 if ((which == 0 && negated)
6412 || (which == 1 && ! negated))
6413 strcat (buf, "<");
6414 else
6415 strcat (buf, ">=");
6416 if (nullify && negated)
6417 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6418 else if (nullify && ! negated)
6419 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6420 else if (negated)
6421 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6422 else
6423 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6425 break;
6427 default:
6428 /* The reversed conditional branch must branch over one additional
6429 instruction if the delay slot is filled and needs to be extracted
6430 by output_lbranch. If the delay slot is empty or this is a
6431 nullified forward branch, the instruction after the reversed
6432 conditional branch must be nullified. */
6433 if (dbr_sequence_length () == 0
6434 || (nullify && forward_branch_p (insn)))
6436 nullify = 1;
6437 xdelay = 0;
6438 operands[4] = GEN_INT (length);
6440 else
6442 xdelay = 1;
6443 operands[4] = GEN_INT (length + 4);
6446 if (GET_MODE (operands[0]) == DImode)
6447 strcpy (buf, "bb,*");
6448 else
6449 strcpy (buf, "bb,");
6450 if ((which == 0 && negated)
6451 || (which == 1 && !negated))
6452 strcat (buf, "<");
6453 else
6454 strcat (buf, ">=");
6455 if (nullify)
6456 strcat (buf, ",n %0,%1,.+%4");
6457 else
6458 strcat (buf, " %0,%1,.+%4");
6459 output_asm_insn (buf, operands);
6460 return output_lbranch (negated ? operands[3] : operands[2],
6461 insn, xdelay);
6463 return buf;
6466 /* This routine handles all the branch-on-variable-bit conditional branch
6467 sequences we might need to generate. It handles nullification of delay
6468 slots, varying length branches, negated branches and all combinations
6469 of the above. It returns the appropriate output template to emit the
6470 branch. */
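/* A short example of the output (operands invented for illustration):
   "bvb,< %r4,L$0021" branches when the bit of %r4 selected by the
   shift amount register is 1; the corresponding PA 2.0 form is
   "bb,< %r4,%sar,L$0021".  */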
6472 const char *
6473 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6475 static char buf[100];
6476 int useskip = 0;
6477 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6478 int length = get_attr_length (insn);
6479 int xdelay;
6481 /* A conditional branch to the following instruction (e.g. the delay slot) is
6482 asking for a disaster. I do not think this can happen as this pattern
6483 is only used when optimizing; jump optimization should eliminate the
6484 jump. But be prepared just in case. */
6486 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6487 return "nop";
6489 /* If this is a long branch with its delay slot unfilled, set `nullify'
6490 as it can nullify the delay slot and save a nop. */
6491 if (length == 8 && dbr_sequence_length () == 0)
6492 nullify = 1;
6494 /* If this is a short forward conditional branch which did not get
6495 its delay slot filled, the delay slot can still be nullified. */
6496 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6497 nullify = forward_branch_p (insn);
6499 /* A forward branch over a single nullified insn can be done with an
6500 extrs instruction. This avoids a single cycle penalty due to a
6501 mispredicted branch if we fall through (branch not taken). */
6503 if (length == 4
6504 && next_real_insn (insn) != 0
6505 && get_attr_length (next_real_insn (insn)) == 4
6506 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6507 && nullify)
6508 useskip = 1;
6510 switch (length)
6513 /* All short conditional branches except backwards with an unfilled
6514 delay slot. */
6515 case 4:
6516 if (useskip)
6517 strcpy (buf, "{vextrs,|extrw,s,}");
6518 else
6519 strcpy (buf, "{bvb,|bb,}");
6520 if (useskip && GET_MODE (operands[0]) == DImode)
6521 strcpy (buf, "extrd,s,*");
6522 else if (GET_MODE (operands[0]) == DImode)
6523 strcpy (buf, "bb,*");
6524 if ((which == 0 && negated)
6525 || (which == 1 && ! negated))
6526 strcat (buf, ">=");
6527 else
6528 strcat (buf, "<");
6529 if (useskip)
6530 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6531 else if (nullify && negated)
6532 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6533 else if (nullify && ! negated)
6534 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6535 else if (! nullify && negated)
6536 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6537 else if (! nullify && ! negated)
6538 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6539 break;
6541 /* All long conditionals. Note a short backward branch with an
6542 unfilled delay slot is treated just like a long backward branch
6543 with an unfilled delay slot. */
6544 case 8:
6545 /* Handle weird backwards branch with a filled delay slot
6546 which is nullified. */
6547 if (dbr_sequence_length () != 0
6548 && ! forward_branch_p (insn)
6549 && nullify)
6551 strcpy (buf, "{bvb,|bb,}");
6552 if (GET_MODE (operands[0]) == DImode)
6553 strcat (buf, "*");
6554 if ((which == 0 && negated)
6555 || (which == 1 && ! negated))
6556 strcat (buf, "<");
6557 else
6558 strcat (buf, ">=");
6559 if (negated)
6560 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6561 else
6562 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6564 /* Handle short backwards branch with an unfilled delay slot.
6565 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6566 taken and untaken branches. */
6567 else if (dbr_sequence_length () == 0
6568 && ! forward_branch_p (insn)
6569 && INSN_ADDRESSES_SET_P ()
6570 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6571 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6573 strcpy (buf, "{bvb,|bb,}");
6574 if (GET_MODE (operands[0]) == DImode)
6575 strcat (buf, "*");
6576 if ((which == 0 && negated)
6577 || (which == 1 && ! negated))
6578 strcat (buf, ">=");
6579 else
6580 strcat (buf, "<");
6581 if (negated)
6582 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6583 else
6584 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6586 else
6588 strcpy (buf, "{vextrs,|extrw,s,}");
6589 if (GET_MODE (operands[0]) == DImode)
6590 strcpy (buf, "extrd,s,*");
6591 if ((which == 0 && negated)
6592 || (which == 1 && ! negated))
6593 strcat (buf, "<");
6594 else
6595 strcat (buf, ">=");
6596 if (nullify && negated)
6597 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6598 else if (nullify && ! negated)
6599 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6600 else if (negated)
6601 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6602 else
6603 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6605 break;
6607 default:
6608 /* The reversed conditional branch must branch over one additional
6609 instruction if the delay slot is filled and needs to be extracted
6610 by output_lbranch. If the delay slot is empty or this is a
6611 nullified forward branch, the instruction after the reversed
6612 conditional branch must be nullified. */
6613 if (dbr_sequence_length () == 0
6614 || (nullify && forward_branch_p (insn)))
6616 nullify = 1;
6617 xdelay = 0;
6618 operands[4] = GEN_INT (length);
6620 else
6622 xdelay = 1;
6623 operands[4] = GEN_INT (length + 4);
6626 if (GET_MODE (operands[0]) == DImode)
6627 strcpy (buf, "bb,*");
6628 else
6629 strcpy (buf, "{bvb,|bb,}");
6630 if ((which == 0 && negated)
6631 || (which == 1 && !negated))
6632 strcat (buf, "<");
6633 else
6634 strcat (buf, ">=");
6635 if (nullify)
6636 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6637 else
6638 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6639 output_asm_insn (buf, operands);
6640 return output_lbranch (negated ? operands[3] : operands[2],
6641 insn, xdelay);
6643 return buf;
6646 /* Return the output template for emitting a dbra type insn.
6648 Note it may perform some output operations on its own before
6649 returning the final output string. */
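/* A short example of the output (operands invented for illustration):
   "addib,> -1,%r3,L$0005" adds -1 to %r3 and branches back to L$0005
   while the result is still positive -- a decrement-and-branch loop
   in a single instruction.  */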
6650 const char *
6651 output_dbra (rtx *operands, rtx insn, int which_alternative)
6653 int length = get_attr_length (insn);
6655 /* A conditional branch to the following instruction (e.g. the delay slot) is
6656 asking for a disaster. Be prepared! */
6658 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6660 if (which_alternative == 0)
6661 return "ldo %1(%0),%0";
6662 else if (which_alternative == 1)
6664 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6665 output_asm_insn ("ldw -16(%%r30),%4", operands);
6666 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6667 return "{fldws|fldw} -16(%%r30),%0";
6669 else
6671 output_asm_insn ("ldw %0,%4", operands);
6672 return "ldo %1(%4),%4\n\tstw %4,%0";
6676 if (which_alternative == 0)
6678 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6679 int xdelay;
6681 /* If this is a long branch with its delay slot unfilled, set `nullify'
6682 as it can nullify the delay slot and save a nop. */
6683 if (length == 8 && dbr_sequence_length () == 0)
6684 nullify = 1;
6686 /* If this is a short forward conditional branch which did not get
6687 its delay slot filled, the delay slot can still be nullified. */
6688 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6689 nullify = forward_branch_p (insn);
6691 switch (length)
6693 case 4:
6694 if (nullify)
6695 return "addib,%C2,n %1,%0,%3";
6696 else
6697 return "addib,%C2 %1,%0,%3";
6699 case 8:
6700 /* Handle weird backwards branch with a filled delay slot
6701 which is nullified. */
6702 if (dbr_sequence_length () != 0
6703 && ! forward_branch_p (insn)
6704 && nullify)
6705 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6706 /* Handle short backwards branch with an unfilled delay slot.
6707 Using an addb;nop rather than addi;bl saves 1 cycle for both
6708 taken and untaken branches. */
6709 else if (dbr_sequence_length () == 0
6710 && ! forward_branch_p (insn)
6711 && INSN_ADDRESSES_SET_P ()
6712 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6713 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6714 return "addib,%C2 %1,%0,%3%#";
6716 /* Handle normal cases. */
6717 if (nullify)
6718 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6719 else
6720 return "addi,%N2 %1,%0,%0\n\tb %3";
6722 default:
6723 /* The reversed conditional branch must branch over one additional
6724 instruction if the delay slot is filled and needs to be extracted
6725 by output_lbranch. If the delay slot is empty or this is a
6726 nullified forward branch, the instruction after the reversed
6727 conditional branch must be nullified. */
6728 if (dbr_sequence_length () == 0
6729 || (nullify && forward_branch_p (insn)))
6731 nullify = 1;
6732 xdelay = 0;
6733 operands[4] = GEN_INT (length);
6735 else
6737 xdelay = 1;
6738 operands[4] = GEN_INT (length + 4);
6741 if (nullify)
6742 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6743 else
6744 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6746 return output_lbranch (operands[3], insn, xdelay);
6750 /* Deal with gross reload from FP register case. */
6751 else if (which_alternative == 1)
6753 /* Move loop counter from FP register to MEM then into a GR,
6754 increment the GR, store the GR into MEM, and finally reload
6755 the FP register from MEM from within the branch's delay slot. */
6756 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6757 operands);
6758 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6759 if (length == 24)
6760 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6761 else if (length == 28)
6762 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6763 else
6765 operands[5] = GEN_INT (length - 16);
6766 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6767 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6768 return output_lbranch (operands[3], insn, 0);
6771 /* Deal with gross reload from memory case. */
6772 else
6774 /* Reload loop counter from memory, the store back to memory
6775 happens in the branch's delay slot. */
6776 output_asm_insn ("ldw %0,%4", operands);
6777 if (length == 12)
6778 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6779 else if (length == 16)
6780 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6781 else
6783 operands[5] = GEN_INT (length - 4);
6784 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6785 return output_lbranch (operands[3], insn, 0);
6790 /* Return the output template for emitting a movb type insn.
6792 Note it may perform some output operations on its own before
6793 returning the final output string. */
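/* A short example of the output (operands invented for illustration):
   "movb,= %r5,%r3,L$0009" copies %r5 into %r3 and branches to L$0009
   when the copied value is zero.  */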
6794 const char *
6795 output_movb (rtx *operands, rtx insn, int which_alternative,
6796 int reverse_comparison)
6798 int length = get_attr_length (insn);
6800 /* A conditional branch to the following instruction (e.g. the delay slot) is
6801 asking for a disaster. Be prepared! */
6803 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6805 if (which_alternative == 0)
6806 return "copy %1,%0";
6807 else if (which_alternative == 1)
6809 output_asm_insn ("stw %1,-16(%%r30)", operands);
6810 return "{fldws|fldw} -16(%%r30),%0";
6812 else if (which_alternative == 2)
6813 return "stw %1,%0";
6814 else
6815 return "mtsar %r1";
6818 /* Support the second variant. */
6819 if (reverse_comparison)
6820 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6822 if (which_alternative == 0)
6824 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6825 int xdelay;
6827 /* If this is a long branch with its delay slot unfilled, set `nullify'
6828 as it can nullify the delay slot and save a nop. */
6829 if (length == 8 && dbr_sequence_length () == 0)
6830 nullify = 1;
6832 /* If this is a short forward conditional branch which did not get
6833 its delay slot filled, the delay slot can still be nullified. */
6834 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6835 nullify = forward_branch_p (insn);
6837 switch (length)
6839 case 4:
6840 if (nullify)
6841 return "movb,%C2,n %1,%0,%3";
6842 else
6843 return "movb,%C2 %1,%0,%3";
6845 case 8:
6846 /* Handle weird backwards branch with a filled delay slot
6847 which is nullified. */
6848 if (dbr_sequence_length () != 0
6849 && ! forward_branch_p (insn)
6850 && nullify)
6851 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6853 /* Handle short backwards branch with an unfilled delay slot.
6854 Using a movb;nop rather than or;bl saves 1 cycle for both
6855 taken and untaken branches. */
6856 else if (dbr_sequence_length () == 0
6857 && ! forward_branch_p (insn)
6858 && INSN_ADDRESSES_SET_P ()
6859 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6860 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6861 return "movb,%C2 %1,%0,%3%#";
6862 /* Handle normal cases. */
6863 if (nullify)
6864 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6865 else
6866 return "or,%N2 %1,%%r0,%0\n\tb %3";
6868 default:
6869 /* The reversed conditional branch must branch over one additional
6870 instruction if the delay slot is filled and needs to be extracted
6871 by output_lbranch. If the delay slot is empty or this is a
6872 nullified forward branch, the instruction after the reversed
6873 conditional branch must be nullified. */
6874 if (dbr_sequence_length () == 0
6875 || (nullify && forward_branch_p (insn)))
6877 nullify = 1;
6878 xdelay = 0;
6879 operands[4] = GEN_INT (length);
6881 else
6883 xdelay = 1;
6884 operands[4] = GEN_INT (length + 4);
6887 if (nullify)
6888 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
6889 else
6890 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
6892 return output_lbranch (operands[3], insn, xdelay);
6895 /* Deal with gross reload for FP destination register case. */
6896 else if (which_alternative == 1)
6898 /* Move source register to MEM, perform the branch test, then
6899 finally load the FP register from MEM from within the branch's
6900 delay slot. */
6901 output_asm_insn ("stw %1,-16(%%r30)", operands);
6902 if (length == 12)
6903 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6904 else if (length == 16)
6905 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6906 else
6908 operands[4] = GEN_INT (length - 4);
6909 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
6910 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6911 return output_lbranch (operands[3], insn, 0);
6914 /* Deal with gross reload from memory case. */
6915 else if (which_alternative == 2)
6917 /* Reload loop counter from memory, the store back to memory
6918 happens in the branch's delay slot. */
6919 if (length == 8)
6920 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6921 else if (length == 12)
6922 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6923 else
6925 operands[4] = GEN_INT (length);
6926 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
6927 operands);
6928 return output_lbranch (operands[3], insn, 0);
6931 /* Handle SAR as a destination. */
6932 else
6934 if (length == 8)
6935 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6936 else if (length == 12)
6937 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6938 else
6940 operands[4] = GEN_INT (length);
6941 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
6942 operands);
6943 return output_lbranch (operands[3], insn, 0);
6948 /* Copy any FP arguments in INSN into integer registers. */
6949 static void
6950 copy_fp_args (rtx insn)
6952 rtx link;
6953 rtx xoperands[2];
6955 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6957 int arg_mode, regno;
6958 rtx use = XEXP (link, 0);
6960 if (! (GET_CODE (use) == USE
6961 && GET_CODE (XEXP (use, 0)) == REG
6962 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6963 continue;
6965 arg_mode = GET_MODE (XEXP (use, 0));
6966 regno = REGNO (XEXP (use, 0));
6968 /* Is it a floating point register? */
6969 if (regno >= 32 && regno <= 39)
6971 /* Copy the FP register into an integer register via memory. */
6972 if (arg_mode == SFmode)
6974 xoperands[0] = XEXP (use, 0);
6975 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6976 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6977 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6979 else
6981 xoperands[0] = XEXP (use, 0);
6982 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6983 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6984 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6985 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
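/* There is no direct copy path between the floating-point and the
   general registers on the PA, which is why each argument above is
   bounced through the stack: stored from the FP register and then
   reloaded into the matching general argument register.  */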
6991 /* Compute length of the FP argument copy sequence for INSN. */
6992 static int
6993 length_fp_args (rtx insn)
6995 int length = 0;
6996 rtx link;
6998 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7000 int arg_mode, regno;
7001 rtx use = XEXP (link, 0);
7003 if (! (GET_CODE (use) == USE
7004 && GET_CODE (XEXP (use, 0)) == REG
7005 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7006 continue;
7008 arg_mode = GET_MODE (XEXP (use, 0));
7009 regno = REGNO (XEXP (use, 0));
7011 /* Is it a floating point register? */
7012 if (regno >= 32 && regno <= 39)
7014 if (arg_mode == SFmode)
7015 length += 8;
7016 else
7017 length += 12;
7021 return length;
7024 /* Return the attribute length for the millicode call instruction INSN.
7025 The length must match the code generated by output_millicode_call.
7026 We include the delay slot in the returned length as it is better to
7027 overestimate the length than to underestimate it. */
7029 int
7030 attr_length_millicode_call (rtx insn)
7032 unsigned long distance = -1;
7033 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7035 if (INSN_ADDRESSES_SET_P ())
7037 distance = (total + insn_current_reference_address (insn));
7038 if (distance < total)
7039 distance = -1;
7042 if (TARGET_64BIT)
7044 if (!TARGET_LONG_CALLS && distance < 7600000)
7045 return 8;
7047 return 20;
7049 else if (TARGET_PORTABLE_RUNTIME)
7050 return 24;
7051 else
7053 if (!TARGET_LONG_CALLS && distance < 240000)
7054 return 8;
7056 if (TARGET_LONG_ABS_CALL && !flag_pic)
7057 return 12;
7059 return 24;
7063 /* INSN is a function call. It may have an unconditional jump
7064 in its delay slot.
7066 CALL_DEST is the routine we are calling. */
7068 const char *
7069 output_millicode_call (rtx insn, rtx call_dest)
7071 int attr_length = get_attr_length (insn);
7072 int seq_length = dbr_sequence_length ();
7073 int distance;
7074 rtx seq_insn;
7075 rtx xoperands[3];
7077 xoperands[0] = call_dest;
7078 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7080 /* Handle the common case where we are sure that the branch will
7081 reach the beginning of the $CODE$ subspace. The within-reach
7082 form of the $$sh_func_adrs call has a length of 28. Because
7083 it has an attribute type of multi, it never has a nonzero
7084 sequence length. The length of the $$sh_func_adrs call is the same
7085 as that of certain out-of-reach PIC calls to other routines. */
7086 if (!TARGET_LONG_CALLS
7087 && ((seq_length == 0
7088 && (attr_length == 12
7089 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7090 || (seq_length != 0 && attr_length == 8)))
7092 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7094 else
7096 if (TARGET_64BIT)
7098 /* It might seem that one insn could be saved by accessing
7099 the millicode function using the linkage table. However,
7100 this doesn't work in shared libraries and other dynamically
7101 loaded objects. Using a pc-relative sequence also avoids
7102 problems related to the implicit use of the gp register. */
7103 output_asm_insn ("b,l .+8,%%r1", xoperands);
7105 if (TARGET_GAS)
7107 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7108 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7110 else
7112 xoperands[1] = gen_label_rtx ();
7113 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7114 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7115 CODE_LABEL_NUMBER (xoperands[1]));
7116 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7119 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7121 else if (TARGET_PORTABLE_RUNTIME)
7123 /* Pure portable runtime doesn't allow be/ble; we also don't
7124 have PIC support in the assembler/linker, so this sequence
7125 is needed. */
7127 /* Get the address of our target into %r1. */
7128 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7129 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7131 /* Get our return address into %r31. */
7132 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7133 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7135 /* Jump to our target address in %r1. */
7136 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7138 else if (!flag_pic)
7140 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7141 if (TARGET_PA_20)
7142 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7143 else
7144 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7146 else
7148 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7149 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7151 if (TARGET_SOM || !TARGET_GAS)
7153 /* The HP assembler can generate relocations for the
7154 difference of two symbols. GAS can do this for a
7155 millicode symbol but not an arbitrary external
7156 symbol when generating SOM output. */
7157 xoperands[1] = gen_label_rtx ();
7158 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7159 CODE_LABEL_NUMBER (xoperands[1]));
7160 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7161 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7163 else
7165 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7166 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7167 xoperands);
7170 /* Jump to our target address in %r1. */
7171 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7175 if (seq_length == 0)
7176 output_asm_insn ("nop", xoperands);
7178 /* We are done if there isn't a jump in the delay slot. */
7179 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7180 return "";
7182 /* This call has an unconditional jump in its delay slot. */
7183 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7185 /* See if the return address can be adjusted. Use the containing
7186 sequence insn's address. */
7187 if (INSN_ADDRESSES_SET_P ())
7189 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7190 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7191 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7193 if (VAL_14_BITS_P (distance))
7195 xoperands[1] = gen_label_rtx ();
7196 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7197 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7198 CODE_LABEL_NUMBER (xoperands[1]));
7200 else
7201 /* ??? This branch may not reach its target. */
7202 output_asm_insn ("nop\n\tb,n %0", xoperands);
7204 else
7205 /* ??? This branch may not reach its target. */
7206 output_asm_insn ("nop\n\tb,n %0", xoperands);
7208 /* Delete the jump. */
7209 PUT_CODE (NEXT_INSN (insn), NOTE);
7210 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7211 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7213 return "";
7216 /* Return the attribute length of the call instruction INSN. The SIBCALL
7217 flag indicates whether INSN is a regular call or a sibling call. The
7218 length returned must be longer than the code actually generated by
7219 output_call. Since branch shortening is done before delay branch
7220 sequencing, there is no way to determine whether or not the delay
7221 slot will be filled during branch shortening. Even when the delay
7222 slot is filled, we may have to add a nop if the delay slot contains
7223 a branch that can't reach its target. Thus, we always have to include
7224 the delay slot in the length estimate. This used to be done in
7225 pa_adjust_insn_length but we do it here now as some sequences always
7226 fill the delay slot and we can save four bytes in the estimate for
7227 these sequences. */
7229 int
7230 attr_length_call (rtx insn, int sibcall)
7232 int local_call;
7233 rtx call_dest;
7234 tree call_decl;
7235 int length = 0;
7236 rtx pat = PATTERN (insn);
7237 unsigned long distance = -1;
7239 if (INSN_ADDRESSES_SET_P ())
7241 unsigned long total;
7243 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7244 distance = (total + insn_current_reference_address (insn));
7245 if (distance < total)
7246 distance = -1;
7249 /* Determine if this is a local call. */
7250 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7251 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7252 else
7253 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7255 call_decl = SYMBOL_REF_DECL (call_dest);
7256 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7258 /* pc-relative branch. */
7259 if (!TARGET_LONG_CALLS
7260 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7261 || distance < 240000))
7262 length += 8;
7264 /* 64-bit plabel sequence. */
7265 else if (TARGET_64BIT && !local_call)
7266 length += sibcall ? 28 : 24;
7268 /* non-pic long absolute branch sequence. */
7269 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7270 length += 12;
7272 /* long pc-relative branch sequence. */
7273 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7274 || (TARGET_64BIT && !TARGET_GAS)
7275 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7277 length += 20;
7279 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7280 length += 8;
7283 /* 32-bit plabel sequence. */
7284 else
7286 length += 32;
7288 if (TARGET_SOM)
7289 length += length_fp_args (insn);
7291 if (flag_pic)
7292 length += 4;
7294 if (!TARGET_PA_20)
7296 if (!sibcall)
7297 length += 8;
7299 if (!TARGET_NO_SPACE_REGS)
7300 length += 8;
7304 return length;
7307 /* INSN is a function call. It may have an unconditional jump
7308 in its delay slot.
7310 CALL_DEST is the routine we are calling. */
7312 const char *
7313 output_call (rtx insn, rtx call_dest, int sibcall)
7315 int delay_insn_deleted = 0;
7316 int delay_slot_filled = 0;
7317 int seq_length = dbr_sequence_length ();
7318 tree call_decl = SYMBOL_REF_DECL (call_dest);
7319 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7320 rtx xoperands[2];
7322 xoperands[0] = call_dest;
7324 /* Handle the common case where we're sure that the branch will reach
7325 the beginning of the "$CODE$" subspace. This is the beginning of
7326 the current function if we are in a named section. */
7327 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7329 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7330 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7332 else
7334 if (TARGET_64BIT && !local_call)
7336 /* ??? As far as I can tell, the HP linker doesn't support the
7337 long pc-relative sequence described in the 64-bit runtime
7338 architecture. So, we use a slightly longer indirect call. */
7339 xoperands[0] = get_deferred_plabel (call_dest);
7340 xoperands[1] = gen_label_rtx ();
7342 /* If this isn't a sibcall, we put the load of %r27 into the
7343 delay slot. We can't do this in a sibcall as we don't
7344 have a second call-clobbered scratch register available. */
7345 if (seq_length != 0
7346 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7347 && !sibcall)
7349 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7350 optimize, 0, NULL);
7352 /* Now delete the delay insn. */
7353 PUT_CODE (NEXT_INSN (insn), NOTE);
7354 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7355 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7356 delay_insn_deleted = 1;
7359 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7360 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7361 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7363 if (sibcall)
7365 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7366 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7367 output_asm_insn ("bve (%%r1)", xoperands);
7369 else
7371 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7372 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7373 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7374 delay_slot_filled = 1;
7377 else
7379 int indirect_call = 0;
7381 /* Emit a long call. There are several different sequences
7382 of increasing length and complexity. In most cases,
7383 they don't allow an instruction in the delay slot. */
7384 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7385 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7386 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7387 && !TARGET_64BIT)
7388 indirect_call = 1;
7390 if (seq_length != 0
7391 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7392 && !sibcall
7393 && (!TARGET_PA_20 || indirect_call))
7395 /* A non-jump insn in the delay slot. By definition we can
7396 emit this insn before the call (and in fact before argument
7397 relocation). */
7398 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7399 NULL);
7401 /* Now delete the delay insn. */
7402 PUT_CODE (NEXT_INSN (insn), NOTE);
7403 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7404 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7405 delay_insn_deleted = 1;
7408 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7410 /* This is the best sequence for making long calls in
7411 non-pic code. Unfortunately, GNU ld doesn't provide
7412 the stub needed for external calls, and GAS's support
7413 for this with the SOM linker is buggy. It is safe
7414 to use this for local calls. */
7415 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7416 if (sibcall)
7417 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7418 else
7420 if (TARGET_PA_20)
7421 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7422 xoperands);
7423 else
7424 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7426 output_asm_insn ("copy %%r31,%%r2", xoperands);
7427 delay_slot_filled = 1;
7430 else
7432 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7433 || (TARGET_64BIT && !TARGET_GAS))
7435 /* The HP assembler and linker can handle relocations
7436 for the difference of two symbols. GAS and the HP
7437 linker can't do this when one of the symbols is
7438 external. */
7439 xoperands[1] = gen_label_rtx ();
7440 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7441 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7442 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7443 CODE_LABEL_NUMBER (xoperands[1]));
7444 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7446 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7448 /* GAS currently can't generate the relocations that
7449 are needed for the SOM linker under HP-UX using this
7450 sequence. The GNU linker doesn't generate the stubs
7451 that are needed for external calls on TARGET_ELF32
7452 with this sequence. For now, we have to use a
7453 longer plabel sequence when using GAS. */
7454 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7455 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7456 xoperands);
7457 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7458 xoperands);
7460 else
7462 /* Emit a long plabel-based call sequence. This is
7463 essentially an inline implementation of $$dyncall.
7464 We don't actually try to call $$dyncall as this is
7465 as difficult as calling the function itself. */
7466 xoperands[0] = get_deferred_plabel (call_dest);
7467 xoperands[1] = gen_label_rtx ();
7469 /* Since the call is indirect, FP arguments in registers
7470 need to be copied to the general registers. Then, the
7471 argument relocation stub will copy them back. */
7472 if (TARGET_SOM)
7473 copy_fp_args (insn);
7475 if (flag_pic)
7477 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7478 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7479 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7481 else
7483 output_asm_insn ("addil LR'%0-$global$,%%r27",
7484 xoperands);
7485 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7486 xoperands);
7489 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7490 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7491 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7492 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7494 if (!sibcall && !TARGET_PA_20)
7496 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7497 if (TARGET_NO_SPACE_REGS)
7498 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7499 else
7500 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7504 if (TARGET_PA_20)
7506 if (sibcall)
7507 output_asm_insn ("bve (%%r1)", xoperands);
7508 else
7510 if (indirect_call)
7512 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7513 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7514 delay_slot_filled = 1;
7516 else
7517 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7520 else
7522 if (!TARGET_NO_SPACE_REGS)
7523 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7524 xoperands);
7526 if (sibcall)
7528 if (TARGET_NO_SPACE_REGS)
7529 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7530 else
7531 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7533 else
7535 if (TARGET_NO_SPACE_REGS)
7536 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7537 else
7538 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7540 if (indirect_call)
7541 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7542 else
7543 output_asm_insn ("copy %%r31,%%r2", xoperands);
7544 delay_slot_filled = 1;
7551 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7552 output_asm_insn ("nop", xoperands);
7554 /* We are done if there isn't a jump in the delay slot. */
7555 if (seq_length == 0
7556 || delay_insn_deleted
7557 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7558 return "";
7560 /* A sibcall should never have a branch in the delay slot. */
7561 gcc_assert (!sibcall);
7563 /* This call has an unconditional jump in its delay slot. */
7564 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7566 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7568 /* See if the return address can be adjusted. Use the containing
7569 sequence insn's address. */
7570 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7571 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7572 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7574 if (VAL_14_BITS_P (distance))
7576 xoperands[1] = gen_label_rtx ();
7577 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7578 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7579 CODE_LABEL_NUMBER (xoperands[1]));
7581 else
7582 output_asm_insn ("nop\n\tb,n %0", xoperands);
7584 else
7585 output_asm_insn ("b,n %0", xoperands);
7587 /* Delete the jump. */
7588 PUT_CODE (NEXT_INSN (insn), NOTE);
7589 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7590 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7592 return "";
7595 /* Return the attribute length of the indirect call instruction INSN.
7596    The length must match the code generated by output_indirect_call.
7597 The returned length includes the delay slot. Currently, the delay
7598 slot of an indirect call sequence is not exposed and it is used by
7599 the sequence itself. */
7602 attr_length_indirect_call (rtx insn)
7604 unsigned long distance = -1;
7605 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7607 if (INSN_ADDRESSES_SET_P ())
7609 distance = (total + insn_current_reference_address (insn));
7610 if (distance < total)
7611 distance = -1;
7614 if (TARGET_64BIT)
7615 return 12;
7617 if (TARGET_FAST_INDIRECT_CALLS
7618 || (!TARGET_PORTABLE_RUNTIME
7619 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7620 || distance < 240000)))
7621 return 8;
7623 if (flag_pic)
7624 return 24;
7626 if (TARGET_PORTABLE_RUNTIME)
7627 return 20;
7629 /* Out of reach, can use ble. */
7630 return 12;
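   /* A reader's sketch of how these lengths map onto the sequences in
      output_indirect_call below (each PA instruction is 4 bytes and the
      delay slot is counted): 8 is the two-insn {b,l|bl} $$dyncall plus
      copy; 12 is ldil/ble/copy; 20 is the five-insn portable runtime
      sequence; 24 is the six-insn long PIC pc-relative sequence.  */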
7633 const char *
7634 output_indirect_call (rtx insn, rtx call_dest)
7636 rtx xoperands[1];
7638 if (TARGET_64BIT)
7640 xoperands[0] = call_dest;
7641 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7642 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7643 return "";
7646 /* First the special case for kernels, level 0 systems, etc. */
7647 if (TARGET_FAST_INDIRECT_CALLS)
7648 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7650 /* Now the normal case -- we can reach $$dyncall directly or
7651 we're sure that we can get there via a long-branch stub.
7653 No need to check target flags as the length uniquely identifies
7654 the remaining cases. */
7655 if (attr_length_indirect_call (insn) == 8)
7657 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7658 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7659 variant of the B,L instruction can't be used on the SOM target. */
7660 if (TARGET_PA_20 && !TARGET_SOM)
7661 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7662 else
7663 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7666 /* Long millicode call, but we are not generating PIC or portable runtime
7667 code. */
7668 if (attr_length_indirect_call (insn) == 12)
7669 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7671 /* Long millicode call for portable runtime. */
7672 if (attr_length_indirect_call (insn) == 20)
7673 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7675 /* We need a long PIC call to $$dyncall. */
7676 xoperands[0] = NULL_RTX;
7677 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7678 if (TARGET_SOM || !TARGET_GAS)
7680 xoperands[0] = gen_label_rtx ();
7681 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7682 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7683 CODE_LABEL_NUMBER (xoperands[0]));
7684 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7686 else
7688 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7689 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7690 xoperands);
7692 output_asm_insn ("blr %%r0,%%r2", xoperands);
7693 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7694 return "";
7697 /* Return the total length of the save and restore instructions needed for
7698 the data linkage table pointer (i.e., the PIC register) across the call
7699 instruction INSN. No-return calls do not require a save and restore.
7700 In addition, we may be able to avoid the save and restore for calls
7701 within the same translation unit. */
7704 attr_length_save_restore_dltp (rtx insn)
7706 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7707 return 0;
7709 return 8;
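   /* The 8 bytes presumably cover one 4-byte instruction to save the
      PIC register (%r19) before the call and one 4-byte instruction
      to restore it afterwards.  */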
7712 /* In HPUX 8.0's shared library scheme, special relocations are needed
7713 for function labels if they might be passed to a function
7714 in a shared library (because shared libraries don't live in code
7715 space), and special magic is needed to construct their address. */
7717 void
7718 hppa_encode_label (rtx sym)
7720 const char *str = XSTR (sym, 0);
7721 int len = strlen (str) + 1;
7722 char *newstr, *p;
7724 p = newstr = alloca (len + 1);
7725 *p++ = '@';
7726 strcpy (p, str);
7728 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7731 static void
7732 pa_encode_section_info (tree decl, rtx rtl, int first)
7734 default_encode_section_info (decl, rtl, first);
7736 if (first && TEXT_SPACE_P (decl))
7738 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7739 if (TREE_CODE (decl) == FUNCTION_DECL)
7740 hppa_encode_label (XEXP (rtl, 0));
7744 /* This is sort of the inverse of pa_encode_section_info.  */
7746 static const char *
7747 pa_strip_name_encoding (const char *str)
7749 str += (*str == '@');
7750 str += (*str == '*');
7751 return str;
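/* For example, the encoded function name "@foo" strips to "foo", as does
   "@*foo", where '*' is GCC's marker for a verbatim assembler name.  */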
7755 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7757 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7760 /* Returns 1 if OP is a function label involved in a simple addition
7761 with a constant. Used to keep certain patterns from matching
7762 during instruction combination. */
7764 is_function_label_plus_const (rtx op)
7766 /* Strip off any CONST. */
7767 if (GET_CODE (op) == CONST)
7768 op = XEXP (op, 0);
7770 return (GET_CODE (op) == PLUS
7771 && function_label_operand (XEXP (op, 0), Pmode)
7772 && GET_CODE (XEXP (op, 1)) == CONST_INT);
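/* For example, this matches RTL of the form
   (const (plus (symbol_ref "@foo") (const_int 4))).  */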
7775 /* Output assembly code for a thunk to FUNCTION. */
7777 static void
7778 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7779 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7780 tree function)
7782 static unsigned int current_thunk_number;
7783 int val_14 = VAL_14_BITS_P (delta);
7784 int nbytes = 0;
7785 char label[16];
7786 rtx xoperands[4];
7788 xoperands[0] = XEXP (DECL_RTL (function), 0);
7789 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7790 xoperands[2] = GEN_INT (delta);
7792 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7793 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7795 /* Output the thunk. We know that the function is in the same
7796 translation unit (i.e., the same space) as the thunk, and that
7797 thunks are output after their method. Thus, we don't need an
7798 external branch to reach the function. With SOM and GAS,
7799 functions and thunks are effectively in different sections.
7800    Thus, we can always use an IA-relative branch and the linker
7801 will add a long branch stub if necessary.
7803 However, we have to be careful when generating PIC code on the
7804 SOM port to ensure that the sequence does not transfer to an
7805 import stub for the target function as this could clobber the
7806 return value saved at SP-24. This would also apply to the
7807 32-bit linux port if the multi-space model is implemented. */
7808 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7809 && !(flag_pic && TREE_PUBLIC (function))
7810 && (TARGET_GAS || last_address < 262132))
7811 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7812 && ((targetm.have_named_sections
7813 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7814 /* The GNU 64-bit linker has rather poor stub management.
7815 So, we use a long branch from thunks that aren't in
7816 the same section as the target function. */
7817 && ((!TARGET_64BIT
7818 && (DECL_SECTION_NAME (thunk_fndecl)
7819 != DECL_SECTION_NAME (function)))
7820 || ((DECL_SECTION_NAME (thunk_fndecl)
7821 == DECL_SECTION_NAME (function))
7822 && last_address < 262132)))
7823 || (!targetm.have_named_sections && last_address < 262132))))
7825 if (!val_14)
7826 output_asm_insn ("addil L'%2,%%r26", xoperands);
7828 output_asm_insn ("b %0", xoperands);
7830 if (val_14)
7832 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7833 nbytes += 8;
7835 else
7837 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7838 nbytes += 12;
7841 else if (TARGET_64BIT)
7843 /* We only have one call-clobbered scratch register, so we can't
7844 make use of the delay slot if delta doesn't fit in 14 bits. */
7845 if (!val_14)
7847 output_asm_insn ("addil L'%2,%%r26", xoperands);
7848 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7851 output_asm_insn ("b,l .+8,%%r1", xoperands);
7853 if (TARGET_GAS)
7855 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7856 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7858 else
7860 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7861 	  output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7862 	  output_asm_insn ("ldo R'%0-%1-%3(%%r1),%%r1", xoperands);
7864 if (val_14)
7866 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7867 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7868 nbytes += 20;
7870 else
7872 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7873 nbytes += 24;
7876 else if (TARGET_PORTABLE_RUNTIME)
7878 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7879 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7881 if (!val_14)
7882 output_asm_insn ("addil L'%2,%%r26", xoperands);
7884 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7886 if (val_14)
7888 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7889 nbytes += 16;
7891 else
7893 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7894 nbytes += 20;
7897 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7899 /* The function is accessible from outside this module. The only
7900 way to avoid an import stub between the thunk and function is to
7901 call the function directly with an indirect sequence similar to
7902 that used by $$dyncall. This is possible because $$dyncall acts
7903 as the import stub in an indirect call. */
7904 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7905 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7906 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7907 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7908 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7909 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7910 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7911 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7912 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7914 if (!val_14)
7916 output_asm_insn ("addil L'%2,%%r26", xoperands);
7917 nbytes += 4;
7920 if (TARGET_PA_20)
7922 output_asm_insn ("bve (%%r22)", xoperands);
7923 nbytes += 36;
7925 else if (TARGET_NO_SPACE_REGS)
7927 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7928 nbytes += 36;
7930 else
7932 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7933 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7934 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7935 nbytes += 44;
7938 if (val_14)
7939 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7940 else
7941 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7943 else if (flag_pic)
7945 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7947 if (TARGET_SOM || !TARGET_GAS)
7949 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7950 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7952 else
7954 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7955 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7958 if (!val_14)
7959 output_asm_insn ("addil L'%2,%%r26", xoperands);
7961 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7963 if (val_14)
7965 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7966 nbytes += 20;
7968 else
7970 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7971 nbytes += 24;
7974 else
7976 if (!val_14)
7977 output_asm_insn ("addil L'%2,%%r26", xoperands);
7979 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7980 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
7982 if (val_14)
7984 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7985 nbytes += 12;
7987 else
7989 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7990 nbytes += 16;
7994 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7996 if (TARGET_SOM && TARGET_GAS)
7998       /* We're done with this subspace except possibly for some additional
7999 debug information. Forget that we are in this subspace to ensure
8000 that the next function is output in its own subspace. */
8001 in_section = NULL;
8002 cfun->machine->in_nsubspa = 2;
8005 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8007 switch_to_section (data_section);
8008 output_asm_insn (".align 4", xoperands);
8009 ASM_OUTPUT_LABEL (file, label);
8010 output_asm_insn (".word P'%0", xoperands);
8013 current_thunk_number++;
8014 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8015 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8016 last_address += nbytes;
8017 update_total_code_bytes (nbytes);
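/* For instance, in the simplest case above (short branch, delta fits in
   14 bits) the emitted thunk body is just

	b	function
	ldo	delta(%r26),%r26

   adjusting the incoming this pointer in %r26 from the delay slot.  */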
8020 /* Only direct calls to static functions are allowed to be sibling (tail)
8021 call optimized.
8023 This restriction is necessary because some linker generated stubs will
8024 store return pointers into rp' in some cases which might clobber a
8025 live value already in rp'.
8027 In a sibcall the current function and the target function share stack
8028 space. Thus if the path to the current function and the path to the
8029 target function save a value in rp', they save the value into the
8030 same stack slot, which has undesirable consequences.
8032 Because of the deferred binding nature of shared libraries any function
8033 with external scope could be in a different load module and thus require
8034 rp' to be saved when calling that function. So sibcall optimizations
8035    can only be safe for static functions.
8037 Note that GCC never needs return value relocations, so we don't have to
8038 worry about static calls with return value relocations (which require
8039 saving rp').
8041 It is safe to perform a sibcall optimization when the target function
8042 will never return. */
8043 static bool
8044 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8046 if (TARGET_PORTABLE_RUNTIME)
8047 return false;
8049   /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8050 single subspace mode and the call is not indirect. As far as I know,
8051 there is no operating system support for the multiple subspace mode.
8052 It might be possible to support indirect calls if we didn't use
8053 $$dyncall (see the indirect sequence generated in output_call). */
8054 if (TARGET_ELF32)
8055 return (decl != NULL_TREE);
8057 /* Sibcalls are not ok because the arg pointer register is not a fixed
8058 register. This prevents the sibcall optimization from occurring. In
8059 addition, there are problems with stub placement using GNU ld. This
8060 is because a normal sibcall branch uses a 17-bit relocation while
8061 a regular call branch uses a 22-bit relocation. As a result, more
8062 care needs to be taken in the placement of long-branch stubs. */
8063 if (TARGET_64BIT)
8064 return false;
8066 /* Sibcalls are only ok within a translation unit. */
8067 return (decl && !TREE_PUBLIC (decl));
8070 /* ??? Addition is not commutative on the PA due to the weird implicit
8071 space register selection rules for memory addresses. Therefore, we
8072 don't consider a + b == b + a, as this might be inside a MEM. */
8073 static bool
8074 pa_commutative_p (rtx x, int outer_code)
8076 return (COMMUTATIVE_P (x)
8077 && (TARGET_NO_SPACE_REGS
8078 || (outer_code != UNKNOWN && outer_code != MEM)
8079 || GET_CODE (x) != PLUS));
8082 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8083 use in fmpyadd instructions. */
8085 fmpyaddoperands (rtx *operands)
8087 enum machine_mode mode = GET_MODE (operands[0]);
8089 /* Must be a floating point mode. */
8090 if (mode != SFmode && mode != DFmode)
8091 return 0;
8093 /* All modes must be the same. */
8094 if (! (mode == GET_MODE (operands[1])
8095 && mode == GET_MODE (operands[2])
8096 && mode == GET_MODE (operands[3])
8097 && mode == GET_MODE (operands[4])
8098 && mode == GET_MODE (operands[5])))
8099 return 0;
8101 /* All operands must be registers. */
8102 if (! (GET_CODE (operands[1]) == REG
8103 && GET_CODE (operands[2]) == REG
8104 && GET_CODE (operands[3]) == REG
8105 && GET_CODE (operands[4]) == REG
8106 && GET_CODE (operands[5]) == REG))
8107 return 0;
8109 /* Only 2 real operands to the addition. One of the input operands must
8110 be the same as the output operand. */
8111 if (! rtx_equal_p (operands[3], operands[4])
8112 && ! rtx_equal_p (operands[3], operands[5]))
8113 return 0;
8115 /* Inout operand of add cannot conflict with any operands from multiply. */
8116 if (rtx_equal_p (operands[3], operands[0])
8117 || rtx_equal_p (operands[3], operands[1])
8118 || rtx_equal_p (operands[3], operands[2]))
8119 return 0;
8121   /* The multiply result cannot feed into the addition operands.  */
8122 if (rtx_equal_p (operands[4], operands[0])
8123 || rtx_equal_p (operands[5], operands[0]))
8124 return 0;
8126 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8127 if (mode == SFmode
8128 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8129 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8130 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8131 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8132 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8133 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8134 return 0;
8136 /* Passed. Operands are suitable for fmpyadd. */
8137 return 1;
8140 #if !defined(USE_COLLECT2)
8141 static void
8142 pa_asm_out_constructor (rtx symbol, int priority)
8144 if (!function_label_operand (symbol, VOIDmode))
8145 hppa_encode_label (symbol);
8147 #ifdef CTORS_SECTION_ASM_OP
8148 default_ctor_section_asm_out_constructor (symbol, priority);
8149 #else
8150 # ifdef TARGET_ASM_NAMED_SECTION
8151 default_named_section_asm_out_constructor (symbol, priority);
8152 # else
8153 default_stabs_asm_out_constructor (symbol, priority);
8154 # endif
8155 #endif
8158 static void
8159 pa_asm_out_destructor (rtx symbol, int priority)
8161 if (!function_label_operand (symbol, VOIDmode))
8162 hppa_encode_label (symbol);
8164 #ifdef DTORS_SECTION_ASM_OP
8165 default_dtor_section_asm_out_destructor (symbol, priority);
8166 #else
8167 # ifdef TARGET_ASM_NAMED_SECTION
8168 default_named_section_asm_out_destructor (symbol, priority);
8169 # else
8170 default_stabs_asm_out_destructor (symbol, priority);
8171 # endif
8172 #endif
8174 #endif
8176 /* This function places uninitialized global data in the bss section.
8177 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8178 function on the SOM port to prevent uninitialized global data from
8179 being placed in the data section. */
8181 void
8182 pa_asm_output_aligned_bss (FILE *stream,
8183 const char *name,
8184 unsigned HOST_WIDE_INT size,
8185 unsigned int align)
8187 switch_to_section (bss_section);
8188 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8190 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8191 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8192 #endif
8194 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8195 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8196 #endif
8198 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8199 ASM_OUTPUT_LABEL (stream, name);
8200 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8203 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8204 that doesn't allow the alignment of global common storage to be directly
8205 specified. The SOM linker aligns common storage based on the rounded
8206 value of the NUM_BYTES parameter in the .comm directive. It's not
8207 possible to use the .align directive as it doesn't affect the alignment
8208 of the label associated with a .comm directive. */
8210 void
8211 pa_asm_output_aligned_common (FILE *stream,
8212 const char *name,
8213 unsigned HOST_WIDE_INT size,
8214 unsigned int align)
8216 unsigned int max_common_align;
8218 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8219 if (align > max_common_align)
8221 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8222 "for global common data. Using %u",
8223 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8224 align = max_common_align;
8227 switch_to_section (bss_section);
8229 assemble_name (stream, name);
8230 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8231 MAX (size, align / BITS_PER_UNIT));
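   /* For example, a 16-byte object with 32-byte alignment is emitted as

	sym	.comm 32

      so the linker rounds the storage and the alignment up together.  */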
8234 /* We can't use .comm for local common storage as the SOM linker effectively
8235 treats the symbol as universal and uses the same storage for local symbols
8236 with the same name in different object files. The .block directive
8237 reserves an uninitialized block of storage. However, it's not common
8238 storage. Fortunately, GCC never requests common storage with the same
8239 name in any given translation unit. */
8241 void
8242 pa_asm_output_aligned_local (FILE *stream,
8243 const char *name,
8244 unsigned HOST_WIDE_INT size,
8245 unsigned int align)
8247 switch_to_section (bss_section);
8248 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8250 #ifdef LOCAL_ASM_OP
8251 fprintf (stream, "%s", LOCAL_ASM_OP);
8252 assemble_name (stream, name);
8253 fprintf (stream, "\n");
8254 #endif
8256 ASM_OUTPUT_LABEL (stream, name);
8257 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8260 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8261 use in fmpysub instructions. */
8263 fmpysuboperands (rtx *operands)
8265 enum machine_mode mode = GET_MODE (operands[0]);
8267 /* Must be a floating point mode. */
8268 if (mode != SFmode && mode != DFmode)
8269 return 0;
8271 /* All modes must be the same. */
8272 if (! (mode == GET_MODE (operands[1])
8273 && mode == GET_MODE (operands[2])
8274 && mode == GET_MODE (operands[3])
8275 && mode == GET_MODE (operands[4])
8276 && mode == GET_MODE (operands[5])))
8277 return 0;
8279 /* All operands must be registers. */
8280 if (! (GET_CODE (operands[1]) == REG
8281 && GET_CODE (operands[2]) == REG
8282 && GET_CODE (operands[3]) == REG
8283 && GET_CODE (operands[4]) == REG
8284 && GET_CODE (operands[5]) == REG))
8285 return 0;
8287 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8288    operation, so operands[4] must be the same as operands[3].  */
8289 if (! rtx_equal_p (operands[3], operands[4]))
8290 return 0;
8292   /* The multiply result cannot feed into the subtraction.  */
8293 if (rtx_equal_p (operands[5], operands[0]))
8294 return 0;
8296 /* Inout operand of sub cannot conflict with any operands from multiply. */
8297 if (rtx_equal_p (operands[3], operands[0])
8298 || rtx_equal_p (operands[3], operands[1])
8299 || rtx_equal_p (operands[3], operands[2]))
8300 return 0;
8302 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8303 if (mode == SFmode
8304 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8305 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8306 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8307 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8308 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8309 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8310 return 0;
8312 /* Passed. Operands are suitable for fmpysub. */
8313 return 1;
8316 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8317 constants for shadd instructions. */
8319 shadd_constant_p (int val)
8321 if (val == 2 || val == 4 || val == 8)
8322 return 1;
8323 else
8324 return 0;
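/* These values correspond to the sh1add, sh2add and sh3add shift-and-add
   instructions, so, for example, x * 4 + y can be computed by a single
   "sh2add %x,%y,%t" (t = (x << 2) + y).  */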
8327 /* Return 1 if OP is valid as a base or index register in a
8328 REG+REG address. */
8331 borx_reg_operand (rtx op, enum machine_mode mode)
8333 if (GET_CODE (op) != REG)
8334 return 0;
8336 /* We must reject virtual registers as the only expressions that
8337 can be instantiated are REG and REG+CONST. */
8338 if (op == virtual_incoming_args_rtx
8339 || op == virtual_stack_vars_rtx
8340 || op == virtual_stack_dynamic_rtx
8341 || op == virtual_outgoing_args_rtx
8342 || op == virtual_cfa_rtx)
8343 return 0;
8345 /* While it's always safe to index off the frame pointer, it's not
8346 profitable to do so when the frame pointer is being eliminated. */
8347 if (!reload_completed
8348 && flag_omit_frame_pointer
8349 && !current_function_calls_alloca
8350 && op == frame_pointer_rtx)
8351 return 0;
8353 return register_operand (op, mode);
8356 /* Return 1 if this operand is anything other than a hard register. */
8359 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8361 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8364 /* Return 1 if INSN branches forward. Should be using insn_addresses
8365 to avoid walking through all the insns... */
8366 static int
8367 forward_branch_p (rtx insn)
8369 rtx label = JUMP_LABEL (insn);
8371 while (insn)
8373 if (insn == label)
8374 break;
8375 else
8376 insn = NEXT_INSN (insn);
8379 return (insn == label);
8382 /* Return 1 if OP is an equality comparison, else return 0. */
8384 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8386 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8389 /* Return 1 if INSN is in the delay slot of a call instruction. */
8391 jump_in_call_delay (rtx insn)
8394 if (GET_CODE (insn) != JUMP_INSN)
8395 return 0;
8397 if (PREV_INSN (insn)
8398 && PREV_INSN (PREV_INSN (insn))
8399 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8401 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8403 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8404 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8407 else
8408 return 0;
8411 /* Output an unconditional move and branch insn. */
8413 const char *
8414 output_parallel_movb (rtx *operands, rtx insn)
8416 int length = get_attr_length (insn);
8418 /* These are the cases in which we win. */
8419 if (length == 4)
8420 return "mov%I1b,tr %1,%0,%2";
8422 /* None of the following cases win, but they don't lose either. */
8423 if (length == 8)
8425 if (dbr_sequence_length () == 0)
8427 /* Nothing in the delay slot, fake it by putting the combined
8428 insn (the copy or add) in the delay slot of a bl. */
8429 if (GET_CODE (operands[1]) == CONST_INT)
8430 return "b %2\n\tldi %1,%0";
8431 else
8432 return "b %2\n\tcopy %1,%0";
8434 else
8436 /* Something in the delay slot, but we've got a long branch. */
8437 if (GET_CODE (operands[1]) == CONST_INT)
8438 return "ldi %1,%0\n\tb %2";
8439 else
8440 return "copy %1,%0\n\tb %2";
8444 if (GET_CODE (operands[1]) == CONST_INT)
8445 output_asm_insn ("ldi %1,%0", operands);
8446 else
8447 output_asm_insn ("copy %1,%0", operands);
8448 return output_lbranch (operands[2], insn, 1);
8451 /* Output an unconditional add and branch insn. */
8453 const char *
8454 output_parallel_addb (rtx *operands, rtx insn)
8456 int length = get_attr_length (insn);
8458 /* To make life easy we want operand0 to be the shared input/output
8459 operand and operand1 to be the readonly operand. */
8460 if (operands[0] == operands[1])
8461 operands[1] = operands[2];
8463 /* These are the cases in which we win. */
8464 if (length == 4)
8465 return "add%I1b,tr %1,%0,%3";
8467 /* None of the following cases win, but they don't lose either. */
8468 if (length == 8)
8470 if (dbr_sequence_length () == 0)
8471 /* Nothing in the delay slot, fake it by putting the combined
8472 insn (the copy or add) in the delay slot of a bl. */
8473 return "b %3\n\tadd%I1 %1,%0,%0";
8474 else
8475 /* Something in the delay slot, but we've got a long branch. */
8476 return "add%I1 %1,%0,%0\n\tb %3";
8479 output_asm_insn ("add%I1 %1,%0,%0", operands);
8480 return output_lbranch (operands[3], insn, 1);
8483 /* Return nonzero if INSN (a jump insn) immediately follows a call
8484 to a named function. This is used to avoid filling the delay slot
8485 of the jump since it can usually be eliminated by modifying RP in
8486 the delay slot of the call. */
8489 following_call (rtx insn)
8491 if (! TARGET_JUMP_IN_DELAY)
8492 return 0;
8494 /* Find the previous real insn, skipping NOTEs. */
8495 insn = PREV_INSN (insn);
8496 while (insn && GET_CODE (insn) == NOTE)
8497 insn = PREV_INSN (insn);
8499 /* Check for CALL_INSNs and millicode calls. */
8500 if (insn
8501 && ((GET_CODE (insn) == CALL_INSN
8502 && get_attr_type (insn) != TYPE_DYNCALL)
8503 || (GET_CODE (insn) == INSN
8504 && GET_CODE (PATTERN (insn)) != SEQUENCE
8505 && GET_CODE (PATTERN (insn)) != USE
8506 && GET_CODE (PATTERN (insn)) != CLOBBER
8507 && get_attr_type (insn) == TYPE_MILLI)))
8508 return 1;
8510 return 0;
8513 /* We use this hook to perform a PA specific optimization which is difficult
8514 to do in earlier passes.
8516 We want the delay slots of branches within jump tables to be filled.
8517 None of the compiler passes at the moment even has the notion that a
8518 PA jump table doesn't contain addresses, but instead contains actual
8519 instructions!
8521 Because we actually jump into the table, the addresses of each entry
8522 must stay constant in relation to the beginning of the table (which
8523 itself must stay constant relative to the instruction to jump into
8524 it). I don't believe we can guarantee earlier passes of the compiler
8525 will adhere to those rules.
8527 So, late in the compilation process we find all the jump tables, and
8528 expand them into real code -- e.g. each entry in the jump table vector
8529 will get an appropriate label followed by a jump to the final target.
8531 Reorg and the final jump pass can then optimize these branches and
8532 fill their delay slots. We end up with smaller, more efficient code.
8534 The jump instructions within the table are special; we must be able
8535 to identify them during assembly output (if the jumps don't get filled
8536    we need to emit a nop rather than nullifying the delay slot).  We
8537 identify jumps in switch tables by using insns with the attribute
8538 type TYPE_BTABLE_BRANCH.
8540 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8541 insns. This serves two purposes, first it prevents jump.c from
8542 noticing that the last N entries in the table jump to the instruction
8543 immediately after the table and deleting the jumps. Second, those
8544 insns mark where we should emit .begin_brtab and .end_brtab directives
8545 when using GAS (allows for better link time optimizations). */
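/* As a concrete sketch, a table entry that used to hold the address of
   L$odd is expanded into

	L$1:	b	L$odd
		nop

   where the delay slot (shown unfilled as a nop) is what reorg and the
   final jump pass can later improve.  */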
8547 static void
8548 pa_reorg (void)
8550 rtx insn;
8552 remove_useless_addtr_insns (1);
8554 if (pa_cpu < PROCESSOR_8000)
8555 pa_combine_instructions ();
8558 /* This is fairly cheap, so always run it if optimizing. */
8559 if (optimize > 0 && !TARGET_BIG_SWITCH)
8561 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8562 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8564 rtx pattern, tmp, location, label;
8565 unsigned int length, i;
8567 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8568 if (GET_CODE (insn) != JUMP_INSN
8569 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8570 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8571 continue;
8573 /* Emit marker for the beginning of the branch table. */
8574 emit_insn_before (gen_begin_brtab (), insn);
8576 pattern = PATTERN (insn);
8577 location = PREV_INSN (insn);
8578 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8580 for (i = 0; i < length; i++)
8582 /* Emit a label before each jump to keep jump.c from
8583 removing this code. */
8584 tmp = gen_label_rtx ();
8585 LABEL_NUSES (tmp) = 1;
8586 emit_label_after (tmp, location);
8587 location = NEXT_INSN (location);
8589 if (GET_CODE (pattern) == ADDR_VEC)
8590 label = XEXP (XVECEXP (pattern, 0, i), 0);
8591 else
8592 label = XEXP (XVECEXP (pattern, 1, i), 0);
8594 tmp = gen_short_jump (label);
8596 /* Emit the jump itself. */
8597 tmp = emit_jump_insn_after (tmp, location);
8598 JUMP_LABEL (tmp) = label;
8599 LABEL_NUSES (label)++;
8600 location = NEXT_INSN (location);
8602 /* Emit a BARRIER after the jump. */
8603 emit_barrier_after (location);
8604 location = NEXT_INSN (location);
8607 /* Emit marker for the end of the branch table. */
8608 emit_insn_before (gen_end_brtab (), location);
8609 location = NEXT_INSN (location);
8610 emit_barrier_after (location);
8612 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8613 delete_insn (insn);
8616 else
8618 /* Still need brtab marker insns. FIXME: the presence of these
8619 markers disables output of the branch table to readonly memory,
8620 and any alignment directives that might be needed. Possibly,
8621 the begin_brtab insn should be output before the label for the
8622 table. This doesn't matter at the moment since the tables are
8623 always output in the text section. */
8624 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8626 	  /* Find an ADDR_VEC or ADDR_DIFF_VEC insn.  */
8627 if (GET_CODE (insn) != JUMP_INSN
8628 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8629 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8630 continue;
8632 /* Now generate markers for the beginning and end of the
8633 branch table. */
8634 emit_insn_before (gen_begin_brtab (), insn);
8635 emit_insn_after (gen_end_brtab (), insn);
8640 /* The PA has a number of odd instructions which can perform multiple
8641 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8642 it may be profitable to combine two instructions into one instruction
8643    with two outputs.  It's not profitable on PA2.0 machines because the
8644 two outputs would take two slots in the reorder buffers.
8646 This routine finds instructions which can be combined and combines
8647 them. We only support some of the potential combinations, and we
8648 only try common ways to find suitable instructions.
8650 * addb can add two registers or a register and a small integer
8651 and jump to a nearby (+-8k) location. Normally the jump to the
8652 nearby location is conditional on the result of the add, but by
8653 using the "true" condition we can make the jump unconditional.
8654 Thus addb can perform two independent operations in one insn.
8656 * movb is similar to addb in that it can perform a reg->reg
8657    or small immediate->reg copy and jump to a nearby (+-8k) location.
8659 * fmpyadd and fmpysub can perform a FP multiply and either an
8660 FP add or FP sub if the operands of the multiply and add/sub are
8661 independent (there are other minor restrictions). Note both
8662 the fmpy and fadd/fsub can in theory move to better spots according
8663 to data dependencies, but for now we require the fmpy stay at a
8664 fixed location.
8666 * Many of the memory operations can perform pre & post updates
8667 of index registers. GCC's pre/post increment/decrement addressing
8668 is far too simple to take advantage of all the possibilities. This
8669 pass may not be suitable since those insns may not be independent.
8671 * comclr can compare two ints or an int and a register, nullify
8672 the following instruction and zero some other register. This
8673 is more difficult to use as it's harder to find an insn which
8674 will generate a comclr than finding something like an unconditional
8675 branch. (conditional moves & long branches create comclr insns).
8677 * Most arithmetic operations can conditionally skip the next
8678 instruction. They can be viewed as "perform this operation
8679 and conditionally jump to this nearby location" (where nearby
8680    is an insn away).  These are difficult to use due to the
8681 branch length restrictions. */
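/* As an illustration of the addb case, an independent add and a nearby
   unconditional branch such as

	add	%r25,%r26,%r26
	b	L$target

   can be replaced by the single instruction

	addb,tr	%r25,%r26,L$target

   where the "tr" (true) completer makes the branch unconditional.  */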
8683 static void
8684 pa_combine_instructions (void)
8686 rtx anchor, new;
8688 /* This can get expensive since the basic algorithm is on the
8689 order of O(n^2) (or worse). Only do it for -O2 or higher
8690 levels of optimization. */
8691 if (optimize < 2)
8692 return;
8694 /* Walk down the list of insns looking for "anchor" insns which
8695 may be combined with "floating" insns. As the name implies,
8696 "anchor" instructions don't move, while "floating" insns may
8697 move around. */
8698 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8699 new = make_insn_raw (new);
8701 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8703 enum attr_pa_combine_type anchor_attr;
8704 enum attr_pa_combine_type floater_attr;
8706 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8707 Also ignore any special USE insns. */
8708 if ((GET_CODE (anchor) != INSN
8709 && GET_CODE (anchor) != JUMP_INSN
8710 && GET_CODE (anchor) != CALL_INSN)
8711 || GET_CODE (PATTERN (anchor)) == USE
8712 || GET_CODE (PATTERN (anchor)) == CLOBBER
8713 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8714 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8715 continue;
8717 anchor_attr = get_attr_pa_combine_type (anchor);
8718 /* See if anchor is an insn suitable for combination. */
8719 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8720 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8721 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8722 && ! forward_branch_p (anchor)))
8724 rtx floater;
8726 for (floater = PREV_INSN (anchor);
8727 floater;
8728 floater = PREV_INSN (floater))
8730 if (GET_CODE (floater) == NOTE
8731 || (GET_CODE (floater) == INSN
8732 && (GET_CODE (PATTERN (floater)) == USE
8733 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8734 continue;
8736 /* Anything except a regular INSN will stop our search. */
8737 if (GET_CODE (floater) != INSN
8738 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8739 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8741 floater = NULL_RTX;
8742 break;
8745 /* See if FLOATER is suitable for combination with the
8746 anchor. */
8747 floater_attr = get_attr_pa_combine_type (floater);
8748 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8749 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8750 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8751 && floater_attr == PA_COMBINE_TYPE_FMPY))
8753 /* If ANCHOR and FLOATER can be combined, then we're
8754 done with this pass. */
8755 if (pa_can_combine_p (new, anchor, floater, 0,
8756 SET_DEST (PATTERN (floater)),
8757 XEXP (SET_SRC (PATTERN (floater)), 0),
8758 XEXP (SET_SRC (PATTERN (floater)), 1)))
8759 break;
8762 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8763 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8765 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8767 if (pa_can_combine_p (new, anchor, floater, 0,
8768 SET_DEST (PATTERN (floater)),
8769 XEXP (SET_SRC (PATTERN (floater)), 0),
8770 XEXP (SET_SRC (PATTERN (floater)), 1)))
8771 break;
8773 else
8775 if (pa_can_combine_p (new, anchor, floater, 0,
8776 SET_DEST (PATTERN (floater)),
8777 SET_SRC (PATTERN (floater)),
8778 SET_SRC (PATTERN (floater))))
8779 break;
8784 /* If we didn't find anything on the backwards scan try forwards. */
8785 if (!floater
8786 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8787 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8789 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8791 if (GET_CODE (floater) == NOTE
8792 || (GET_CODE (floater) == INSN
8793 && (GET_CODE (PATTERN (floater)) == USE
8794 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8796 continue;
8798 /* Anything except a regular INSN will stop our search. */
8799 if (GET_CODE (floater) != INSN
8800 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8801 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8803 floater = NULL_RTX;
8804 break;
8807 /* See if FLOATER is suitable for combination with the
8808 anchor. */
8809 floater_attr = get_attr_pa_combine_type (floater);
8810 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8811 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8812 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8813 && floater_attr == PA_COMBINE_TYPE_FMPY))
8815 /* If ANCHOR and FLOATER can be combined, then we're
8816 done with this pass. */
8817 if (pa_can_combine_p (new, anchor, floater, 1,
8818 SET_DEST (PATTERN (floater)),
8819 	                              XEXP (SET_SRC (PATTERN (floater)),
8820 	                                    0),
8821 XEXP (SET_SRC (PATTERN (floater)),
8822 1)))
8823 break;
8828 /* FLOATER will be nonzero if we found a suitable floating
8829 insn for combination with ANCHOR. */
8830 if (floater
8831 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8832 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8834 /* Emit the new instruction and delete the old anchor. */
8835 emit_insn_before (gen_rtx_PARALLEL
8836 (VOIDmode,
8837 gen_rtvec (2, PATTERN (anchor),
8838 PATTERN (floater))),
8839 anchor);
8841 PUT_CODE (anchor, NOTE);
8842 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8843 NOTE_SOURCE_FILE (anchor) = 0;
8845 /* Emit a special USE insn for FLOATER, then delete
8846 the floating insn. */
8847 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8848 delete_insn (floater);
8850 continue;
8852 else if (floater
8853 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8855 rtx temp;
8856 /* Emit the new_jump instruction and delete the old anchor. */
8857 temp
8858 = emit_jump_insn_before (gen_rtx_PARALLEL
8859 (VOIDmode,
8860 gen_rtvec (2, PATTERN (anchor),
8861 PATTERN (floater))),
8862 anchor);
8864 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8865 PUT_CODE (anchor, NOTE);
8866 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8867 NOTE_SOURCE_FILE (anchor) = 0;
8869 /* Emit a special USE insn for FLOATER, then delete
8870 the floating insn. */
8871 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8872 delete_insn (floater);
8873 continue;
8879 static int
8880 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8881 rtx src1, rtx src2)
8883 int insn_code_number;
8884 rtx start, end;
8886 /* Create a PARALLEL with the patterns of ANCHOR and
8887 FLOATER, try to recognize it, then test constraints
8888 for the resulting pattern.
8890 If the pattern doesn't match or the constraints
8891 aren't met keep searching for a suitable floater
8892 insn. */
8893 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8894 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8895 INSN_CODE (new) = -1;
8896 insn_code_number = recog_memoized (new);
8897 if (insn_code_number < 0
8898 || (extract_insn (new), ! constrain_operands (1)))
8899 return 0;
8901 if (reversed)
8903 start = anchor;
8904 end = floater;
8906 else
8908 start = floater;
8909 end = anchor;
8912   /* There are up to three operands to consider: one
8913 output and two inputs.
8915 The output must not be used between FLOATER & ANCHOR
8916 exclusive. The inputs must not be set between
8917 FLOATER and ANCHOR exclusive. */
8919 if (reg_used_between_p (dest, start, end))
8920 return 0;
8922 if (reg_set_between_p (src1, start, end))
8923 return 0;
8925 if (reg_set_between_p (src2, start, end))
8926 return 0;
8928 /* If we get here, then everything is good. */
8929 return 1;
8932 /* Return nonzero if references for INSN are delayed.
8934 Millicode insns are actually function calls with some special
8935 constraints on arguments and register usage.
8937 Millicode calls always expect their arguments in the integer argument
8938 registers, and always return their result in %r29 (ret1). They
8939 are expected to clobber their arguments, %r1, %r29, and the return
8940 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8942 This function tells reorg that the references to arguments and
8943 millicode calls do not appear to happen until after the millicode call.
8944 This allows reorg to put insns which set the argument registers into the
8945 delay slot of the millicode call -- thus they act more like traditional
8946 CALL_INSNs.
8948 Note we cannot consider side effects of the insn to be delayed because
8949 the branch and link insn will clobber the return pointer. If we happened
8950 to use the return pointer in the delay slot of the call, then we lose.
8952 get_attr_type will try to recognize the given insn, so make sure to
8953 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8954 in particular. */
8956 insn_refs_are_delayed (rtx insn)
8958 return ((GET_CODE (insn) == INSN
8959 && GET_CODE (PATTERN (insn)) != SEQUENCE
8960 && GET_CODE (PATTERN (insn)) != USE
8961 && GET_CODE (PATTERN (insn)) != CLOBBER
8962 && get_attr_type (insn) == TYPE_MILLI));
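/* For instance, the divide millicode routine $$divI conventionally takes
   its dividend in %r26 and its divisor in %r25 and, per the above,
   returns the quotient in %r29.  */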
8965 /* On the HP-PA the value is found in register(s) 28(-29), unless
8966 the mode is SF or DF. Then the value is returned in fr4 (32).
8968 This must perform the same promotions as PROMOTE_MODE, else
8969 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8971 Small structures must be returned in a PARALLEL on PA64 in order
8972 to match the HP Compiler ABI. */
8975 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8977 enum machine_mode valmode;
8979 if (AGGREGATE_TYPE_P (valtype)
8980 || TREE_CODE (valtype) == COMPLEX_TYPE
8981 || TREE_CODE (valtype) == VECTOR_TYPE)
8983 if (TARGET_64BIT)
8985 /* Aggregates with a size less than or equal to 128 bits are
8986 returned in GR 28(-29). They are left justified. The pad
8987 bits are undefined. Larger aggregates are returned in
8988 memory. */
8989 rtx loc[2];
8990 int i, offset = 0;
8991 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8993 for (i = 0; i < ub; i++)
8995 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8996 gen_rtx_REG (DImode, 28 + i),
8997 GEN_INT (offset));
8998 offset += 8;
9001 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9003 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9005 /* Aggregates 5 to 8 bytes in size are returned in general
9006 registers r28-r29 in the same manner as other non
9007 floating-point objects. The data is right-justified and
9008 zero-extended to 64 bits. This is opposite to the normal
9009 justification used on big endian targets and requires
9010 special treatment. */
9011 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9012 gen_rtx_REG (DImode, 28), const0_rtx);
9013 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9017 if ((INTEGRAL_TYPE_P (valtype)
9018 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
9019 || POINTER_TYPE_P (valtype))
9020 valmode = word_mode;
9021 else
9022 valmode = TYPE_MODE (valtype);
9024 if (TREE_CODE (valtype) == REAL_TYPE
9025 && !AGGREGATE_TYPE_P (valtype)
9026 && TYPE_MODE (valtype) != TFmode
9027 && !TARGET_SOFT_FLOAT)
9028 return gen_rtx_REG (valmode, 32);
9030 return gen_rtx_REG (valmode, 28);
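/* Thus, with the 32-bit ABI, an int result comes back in (reg:SI 28),
   i.e. %r28, while a double comes back in (reg:DF 32), i.e. %fr4,
   unless soft float is in effect.  */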
9033 /* Return the location of a parameter that is passed in a register or NULL
9034 if the parameter has any component that is passed in memory.
9036    This is new code and will be pushed into the net sources after
9037 further testing.
9039 ??? We might want to restructure this so that it looks more like other
9040 ports. */
9042 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9043 int named ATTRIBUTE_UNUSED)
9045 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9046 int alignment = 0;
9047 int arg_size;
9048 int fpr_reg_base;
9049 int gpr_reg_base;
9050 rtx retval;
9052 if (mode == VOIDmode)
9053 return NULL_RTX;
9055 arg_size = FUNCTION_ARG_SIZE (mode, type);
9057 /* If this arg would be passed partially or totally on the stack, then
9058 this routine should return zero. pa_arg_partial_bytes will
9059 handle arguments which are split between regs and stack slots if
9060 the ABI mandates split arguments. */
9061 if (!TARGET_64BIT)
9063 /* The 32-bit ABI does not split arguments. */
9064 if (cum->words + arg_size > max_arg_words)
9065 return NULL_RTX;
9067 else
9069 if (arg_size > 1)
9070 alignment = cum->words & 1;
9071 if (cum->words + alignment >= max_arg_words)
9072 return NULL_RTX;
9075 /* The 32bit ABIs and the 64bit ABIs are rather different,
9076 particularly in their handling of FP registers. We might
9077 be able to cleverly share code between them, but I'm not
9078 going to bother in the hope that splitting them up results
9079 in code that is more easily understood. */
9081 if (TARGET_64BIT)
9083 /* Advance the base registers to their current locations.
9085 Remember, gprs grow towards smaller register numbers while
9086 fprs grow to higher register numbers. Also remember that
9087 although FP regs are 32-bit addressable, we pretend that
9088    the registers are 64 bits wide.  */
9089 gpr_reg_base = 26 - cum->words;
9090 fpr_reg_base = 32 + cum->words;
9092 /* Arguments wider than one word and small aggregates need special
9093 treatment. */
9094 if (arg_size > 1
9095 || mode == BLKmode
9096 || (type && (AGGREGATE_TYPE_P (type)
9097 || TREE_CODE (type) == COMPLEX_TYPE
9098 || TREE_CODE (type) == VECTOR_TYPE)))
9100 /* Double-extended precision (80-bit), quad-precision (128-bit)
9101 and aggregates including complex numbers are aligned on
9102 128-bit boundaries. The first eight 64-bit argument slots
9103 are associated one-to-one, with general registers r26
9104 through r19, and also with floating-point registers fr4
9105 through fr11. Arguments larger than one word are always
9106 passed in general registers.
9108 Using a PARALLEL with a word mode register results in left
9109 justified data on a big-endian target. */
9111 rtx loc[8];
9112 int i, offset = 0, ub = arg_size;
9114 /* Align the base register. */
9115 gpr_reg_base -= alignment;
9117 ub = MIN (ub, max_arg_words - cum->words - alignment);
9118 for (i = 0; i < ub; i++)
9120 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9121 gen_rtx_REG (DImode, gpr_reg_base),
9122 GEN_INT (offset));
9123 gpr_reg_base -= 1;
9124 offset += 8;
9127 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9130 else
9132 /* If the argument is larger than a word, then we know precisely
9133 which registers we must use. */
9134 if (arg_size > 1)
9136 if (cum->words)
9138 gpr_reg_base = 23;
9139 fpr_reg_base = 38;
9141 else
9143 gpr_reg_base = 25;
9144 fpr_reg_base = 34;
9147 /* Structures 5 to 8 bytes in size are passed in the general
9148 registers in the same manner as other non floating-point
9149 objects. The data is right-justified and zero-extended
9150 to 64 bits. This is opposite to the normal justification
9151 used on big endian targets and requires special treatment.
9152 We now define BLOCK_REG_PADDING to pad these objects.
9153 Aggregates, complex and vector types are passed in the same
9154 manner as structures. */
9155 if (mode == BLKmode
9156 || (type && (AGGREGATE_TYPE_P (type)
9157 || TREE_CODE (type) == COMPLEX_TYPE
9158 || TREE_CODE (type) == VECTOR_TYPE)))
9160 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9161 gen_rtx_REG (DImode, gpr_reg_base),
9162 const0_rtx);
9163 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9166 else
9168 /* We have a single word (32 bits). A simple computation
9169 will get us the register #s we need. */
9170 gpr_reg_base = 26 - cum->words;
9171 fpr_reg_base = 32 + 2 * cum->words;
9175 /* Determine if the argument needs to be passed in both general and
9176 floating point registers. */
9177 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9178 /* If we are doing soft-float with portable runtime, then there
9179 is no need to worry about FP regs. */
9180 && !TARGET_SOFT_FLOAT
9181 /* The parameter must be some kind of scalar float, else we just
9182 pass it in integer registers. */
9183 && GET_MODE_CLASS (mode) == MODE_FLOAT
9184 /* The target function must not have a prototype. */
9185 && cum->nargs_prototype <= 0
9186 /* libcalls do not need to pass items in both FP and general
9187 registers. */
9188 && type != NULL_TREE
9189 /* All this hair applies to "outgoing" args only. This includes
9190 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9191 && !cum->incoming)
9192 /* Also pass outgoing floating arguments in both registers in indirect
9193 calls with the 32 bit ABI and the HP assembler since there is no
9194    way to specify the argument locations in static functions.  */
9195 || (!TARGET_64BIT
9196 && !TARGET_GAS
9197 && !cum->incoming
9198 && cum->indirect
9199 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9201 retval
9202 = gen_rtx_PARALLEL
9203 (mode,
9204 gen_rtvec (2,
9205 gen_rtx_EXPR_LIST (VOIDmode,
9206 gen_rtx_REG (mode, fpr_reg_base),
9207 const0_rtx),
9208 gen_rtx_EXPR_LIST (VOIDmode,
9209 gen_rtx_REG (mode, gpr_reg_base),
9210 const0_rtx)));
9212 else
9214 /* See if we should pass this parameter in a general register. */
9215 if (TARGET_SOFT_FLOAT
9216 /* Indirect calls in the normal 32bit ABI require all arguments
9217 to be passed in general registers. */
9218 || (!TARGET_PORTABLE_RUNTIME
9219 && !TARGET_64BIT
9220 && !TARGET_ELF32
9221 && cum->indirect)
9222 /* If the parameter is not a scalar floating-point parameter,
9223 then it belongs in GPRs. */
9224 || GET_MODE_CLASS (mode) != MODE_FLOAT
9225 /* Structure with single SFmode field belongs in GPR. */
9226 || (type && AGGREGATE_TYPE_P (type)))
9227 retval = gen_rtx_REG (mode, gpr_reg_base);
9228 else
9229 retval = gen_rtx_REG (mode, fpr_reg_base);
9231 return retval;
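/* For example, with the 32-bit ABI the third word-sized argument
   (cum->words == 2) lands in GR 24 (26 - 2), or in FR 36 (32 + 2 * 2)
   in the cases above where FP registers are used at all.  */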
9235 /* If this arg would be passed totally in registers or totally on the stack,
9236 then this routine should return zero. */
9238 static int
9239 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9240 tree type, bool named ATTRIBUTE_UNUSED)
9242 unsigned int max_arg_words = 8;
9243 unsigned int offset = 0;
9245 if (!TARGET_64BIT)
9246 return 0;
9248 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9249 offset = 1;
9251 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9252 /* Arg fits fully into registers. */
9253 return 0;
9254 else if (cum->words + offset >= max_arg_words)
9255 /* Arg fully on the stack. */
9256 return 0;
9257 else
9258 /* Arg is split. */
9259 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
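/* Worked example for the 64-bit ABI: with cum->words == 6 and a
   three-word argument, 6 + 3 > 8 while 6 < 8, so the argument is split
   and (8 - 6) * 8 = 16 bytes of it are passed in registers.  */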
9263 /* A get_unnamed_section callback for switching to the text section.
9265 This function is only used with SOM. Because we don't support
9266 named subspaces, we can only create a new subspace or switch back
9267 to the default text subspace. */
9269 static void
9270 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9272 gcc_assert (TARGET_SOM);
9273 if (TARGET_GAS)
9275 if (cfun && !cfun->machine->in_nsubspa)
9277 /* We only want to emit a .nsubspa directive once at the
9278 start of the function. */
9279 cfun->machine->in_nsubspa = 1;
9281 /* Create a new subspace for the text. This provides
9282 better stub placement and one-only functions. */
9283 if (cfun->decl
9284 && DECL_ONE_ONLY (cfun->decl)
9285 && !DECL_WEAK (cfun->decl))
9287 output_section_asm_op ("\t.SPACE $TEXT$\n"
9288 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9289 "ACCESS=44,SORT=24,COMDAT");
9290 return;
9293 else
9295 /* There isn't a current function or the body of the current
9296 function has been completed. So, we are changing to the
9297 text section to output debugging information. Thus, we
9298 need to forget that we are in the text section so that
9299 varasm.c will call us when text_section is selected again. */
9300 gcc_assert (!cfun || cfun->machine->in_nsubspa == 2);
9301 in_section = NULL;
9303 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9304 return;
9306 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9309 /* A get_unnamed_section callback for switching to comdat data
9310 sections. This function is only used with SOM. */
9312 static void
9313 som_output_comdat_data_section_asm_op (const void *data)
9315 in_section = NULL;
9316 output_section_asm_op (data);
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols, which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead, place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
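/* Editorial summary of the layout established above: code goes to
   $TEXT$/$CODE$, read-only data to $TEXT$/$LIT$ (or to the writable
   data section when generating PIC code, per the FIXME), one-only
   read-only data to a comdat $LIT$ subspace, and one-only data to a
   comdat $DATA$ subspace in $PRIVATE$.  Exception data is likewise
   forced into the data section.  */
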
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc
   cannot be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
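/* Worked example (editorial note): for a declaration like

     static const char msg[] = "hello";

   EXP is a TREE_READONLY VAR_DECL with a constant initializer and no
   reloc, so the function returns readonly_data_section; on SOM with
   -fPIC that section is the writable data section, per the FIXME in
   pa_som_asm_init_sections.  */
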
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here; functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

bool
pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  /* The SOM ABI says that objects larger than 64 bits are returned in
     memory.  The PA64 ABI says that objects larger than 128 bits are
     returned in memory.  Note that int_size_in_bytes can return -1 if
     the size of the object is variable or larger than the maximum value
     that can be expressed as a HOST_WIDE_INT.  It can also return zero
     for an empty type.  The simplest way to handle variable and empty
     types is to pass them in memory.  This avoids problems in defining
     the boundaries of argument slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
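/* Worked example (editorial note): a 12-byte struct is returned in
   memory on 32-bit targets (12 > 8) but in registers under the PA64
   ABI (12 <= 16).  A variable-sized type makes int_size_in_bytes
   return -1, and an empty type makes it return 0, so the second test
   forces both into memory.  */
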
/* Structure to hold the declaration and name of external symbols that
   are emitted by GCC.  We generate a vector of these symbols and output
   them at the end of the file if and only if SYMBOL_REF_REFERENCED_P is
   true.  This avoids putting out names that are never really used.  */

typedef struct extern_symbol GTY(())
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol objects.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol *p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
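/* Editorial note: ASM_OUTPUT_EXTERNAL_REAL is target-defined (assumed
   here to emit an import directive, the counterpart of the .EXPORT
   written by pa_globalize_label).  Only symbols that were never
   defined in this translation unit and whose SYMBOL_REF was actually
   referenced get a directive, so unused declarations produce no
   output.  */
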
#endif

#include "gt-pa.h"