/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
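/* Editor's note, not in the original source: the bypass fires only when
   the bypassed value and the stored value have the same width.  For
   example, an insn computing a DFmode (8-byte) result that feeds the
   source of a DFmode fpstore qualifies, while a 4-byte SFmode result
   feeding an 8-byte store does not, since GET_MODE_SIZE differs.  */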
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx, bool);
static bool hppa_rtx_costs (rtx, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
					   enum machine_mode,
					   secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;
/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
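/* Editor's sketch of the parsing above, not in the original source:
   given -mfixed-range=%fr4-%fr31, one loop iteration sees str = "%fr4"
   and dash + 1 = "%fr31", and marks that whole range fixed and
   call-used.  Because %fr4-%fr31 covers every allocatable FP register
   on this target, the final scan then also sets MASK_DISABLE_FPREGS.
   Several ranges may be comma-separated, as in
   -mfixed-range=%fr4-%fr15,%fr20-%fr31.  */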
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}
void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX_11
  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
#endif
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return GGC_CNEW (machine_function);
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi can be used.  */
  return (VAL_14_BITS_P (ival)
	  || ldil_cint_p (ival)
	  || zdepi_cint_p (ival));
}
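/* Editor's worked examples, not in the original source: 0x1fff fits in
   14 bits, so ldo can load it; 0x12345000 has its low 11 bits clear and
   does not change sign between 32 and 64 bits, so ldil can; 0x01f00000
   is a 5-bit field shifted into place, so zdepi can.  0x12345678 fails
   all three tests and needs a two-insn sequence instead.  */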
/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}
/* True iff the operand OP can be used as the destination operand of
   an integer store.  This also implies the operand could be used as
   the source operand of an integer load.  Symbolic, lo_sum and indexed
   memory operands are not allowed.  We accept reloading pseudos and
   other memory operands.  */
int
integer_store_memory_operand (rtx op, enum machine_mode mode)
{
  return ((reload_in_progress
	   && REG_P (op)
	   && REGNO (op) >= FIRST_PSEUDO_REGISTER
	   && reg_renumber [REGNO (op)] < 0)
	  || (GET_CODE (op) == MEM
	      && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
	      && !symbolic_memory_operand (op, VOIDmode)
	      && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
	      && !IS_INDEX_ADDR_P (XEXP (op, 0))));
}
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
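/* Editor's worked example, not in the original source: the mask keeps
   bit 31 and above plus the low 11 bits of IVAL.  For ival = 0x7f000000,
   x is 0 and a single ldil works.  For ival = 0x7f000800, x is 0x800,
   which is neither of the two accepted values, so ldil alone cannot
   build it.  */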
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
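/* Editor's worked example of the trick above, not in the original
   source.  For x = 0xff0 (a contiguous run of ones): lsb_mask = 0x10,
   x >> 4 = 0xff, so t = (0xff + 0x10) & ~0xf = 0x100, a power of two,
   and zdepi works.  For x = 0x101: lsb_mask = 0x1 and
   t = (0x10 + 0x1) & ~0x0 = 0x11, not a power of two; the two set bits
   are too far apart for a sign-extended 5-bit immediate, so zdepi
   cannot generate it.  */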
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
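/* Editor's worked examples, not in the original source.  and_mask_p:
   for mask = 0x7ff (0...01...1), ~mask plus its lowest set bit wraps
   to 0, which passes the power-of-two test; for mask = 0xf0f, ~mask
   plus its lowest set bit still has scattered bits, so the test fails.
   ior_mask_p: mask = 0xff0 gives 0xff0 + 0x10 = 0x1000, a power of two,
   so a single depi of ones can compute (reg | 0xff0).  */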
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, mode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
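/* Editor's sketch of the common case above, not in the original source:
   for a data SYMBOL_REF sym under -fpic, the emitted sequence is
   roughly

     (set (reg tmp) (plus (reg pic) (high (symbol_ref sym))))
     (set (reg R)
	  (mem (lo_sum (reg tmp) (unspec [sym] UNSPEC_DLTIND14R))))

   i.e. the address of sym is fetched from its DLT slot relative to the
   PIC register, and the final load carries a REG_EQUAL note of sym so
   later passes can still see what was loaded.  */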
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
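/* Editor's sketch, not in the original source: for the initial-exec
   model the sequence built above is roughly

     tp  = <thread pointer>             ; gen_tp_load
     tmp = <IE offset slot for addr>    ; gen_tie_load or gen_tie_load_pic
     ret = tp + tmp

   while local-exec computes ret directly from tp with gen_tle_load, and
   the two dynamic models obtain the address via __tls_get_addr.  */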
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
	     (plus (mult (reg) (shadd_const))
		   (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
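/* Editor's worked example of the rounding above, not in the original
   source: for an integer-mode reference to X + 0x12345, mask = 0x3fff
   and 0x12345 & 0x3fff = 0x2345 >= 0x2000, so we round up:
   Y = (0x12345 & ~0x3fff) + 0x4000 = 0x14000.  The reference becomes
   memory ((X + 0x14000) + (0x12345 - 0x14000)); the residual
   displacement -0x1cbb fits in 14 bits, and the base X + 0x14000 can
   be CSEd with neighboring references.  */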
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}
1410 /* Emit insns to move operands[1] into operands[0].
1412 Return 1 if we have written out everything that needs to be done to
1413 do the move. Otherwise, return 0 and the caller will emit the move
1414 normally.
1416 Note SCRATCH_REG may not be in the proper mode depending on how it
1417 will be used. This routine is responsible for creating a new copy
1418 of SCRATCH_REG in the proper mode. */
1421 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
1423 register rtx operand0 = operands[0];
1424 register rtx operand1 = operands[1];
1425 register rtx tem;
1427 /* We can only handle indexed addresses in the destination operand
1428 of floating point stores. Thus, we need to break out indexed
1429 addresses from the destination operand. */
1430 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1432 gcc_assert (can_create_pseudo_p ());
1434 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1435 operand0 = replace_equiv_address (operand0, tem);
1438 /* On targets with non-equivalent space registers, break out unscaled
1439 indexed addresses from the source operand before the final CSE.
1440 We have to do this because the REG_POINTER flag is not correctly
1441 carried through various optimization passes and CSE may substitute
1442 a pseudo without the pointer set for one with the pointer set. As
1443 a result, we loose various opportunities to create insns with
1444 unscaled indexed addresses. */
1445 if (!TARGET_NO_SPACE_REGS
1446 && !cse_not_expected
1447 && GET_CODE (operand1) == MEM
1448 && GET_CODE (XEXP (operand1, 0)) == PLUS
1449 && REG_P (XEXP (XEXP (operand1, 0), 0))
1450 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1451 operand1
1452 = replace_equiv_address (operand1,
1453 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1455 if (scratch_reg
1456 && reload_in_progress && GET_CODE (operand0) == REG
1457 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1458 operand0 = reg_equiv_mem[REGNO (operand0)];
1459 else if (scratch_reg
1460 && reload_in_progress && GET_CODE (operand0) == SUBREG
1461 && GET_CODE (SUBREG_REG (operand0)) == REG
1462 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1464 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1465 the code which tracks sets/uses for delete_output_reload. */
1466 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1467 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
1468 SUBREG_BYTE (operand0));
1469 operand0 = alter_subreg (&temp);
1472 if (scratch_reg
1473 && reload_in_progress && GET_CODE (operand1) == REG
1474 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1475 operand1 = reg_equiv_mem[REGNO (operand1)];
1476 else if (scratch_reg
1477 && reload_in_progress && GET_CODE (operand1) == SUBREG
1478 && GET_CODE (SUBREG_REG (operand1)) == REG
1479 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1481 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1482 the code which tracks sets/uses for delete_output_reload. */
1483 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1484 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
1485 SUBREG_BYTE (operand1));
1486 operand1 = alter_subreg (&temp);
1489 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1490 && ((tem = find_replacement (&XEXP (operand0, 0)))
1491 != XEXP (operand0, 0)))
1492 operand0 = replace_equiv_address (operand0, tem);
1494 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1495 && ((tem = find_replacement (&XEXP (operand1, 0)))
1496 != XEXP (operand1, 0)))
1497 operand1 = replace_equiv_address (operand1, tem);
1499 /* Handle secondary reloads for loads/stores of FP registers from
1500 REG+D addresses where D does not fit in 5 or 14 bits, including
1501 (subreg (mem (addr))) cases. */
1502 if (scratch_reg
1503 && fp_reg_operand (operand0, mode)
1504 && ((GET_CODE (operand1) == MEM
1505 && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
1506 XEXP (operand1, 0)))
1507 || ((GET_CODE (operand1) == SUBREG
1508 && GET_CODE (XEXP (operand1, 0)) == MEM
1509 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1510 ? SFmode : DFmode),
1511 XEXP (XEXP (operand1, 0), 0))))))
1513 if (GET_CODE (operand1) == SUBREG)
1514 operand1 = XEXP (operand1, 0);
1516 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1517 it in WORD_MODE regardless of what mode it was originally given
1518 to us. */
1519 scratch_reg = force_mode (word_mode, scratch_reg);
1521 /* D might not fit in 14 bits either; for such cases load D into
1522 scratch reg. */
1523 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
1525 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1526 emit_move_insn (scratch_reg,
1527 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1528 Pmode,
1529 XEXP (XEXP (operand1, 0), 0),
1530 scratch_reg));
1532 else
1533 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1534 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1535 replace_equiv_address (operand1, scratch_reg)));
1536 return 1;
1538 else if (scratch_reg
1539 && fp_reg_operand (operand1, mode)
1540 && ((GET_CODE (operand0) == MEM
1541 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1542 ? SFmode : DFmode),
1543 XEXP (operand0, 0)))
1544 || ((GET_CODE (operand0) == SUBREG)
1545 && GET_CODE (XEXP (operand0, 0)) == MEM
1546 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1547 ? SFmode : DFmode),
1548 XEXP (XEXP (operand0, 0), 0)))))
1550 if (GET_CODE (operand0) == SUBREG)
1551 operand0 = XEXP (operand0, 0);
1553 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1554 it in WORD_MODE regardless of what mode it was originally given
1555 to us. */
1556 scratch_reg = force_mode (word_mode, scratch_reg);
1558 /* D might not fit in 14 bits either; for such cases load D into
1559 scratch reg. */
1560 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
1562 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1563 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1564 0)),
1565 Pmode,
1566 XEXP (XEXP (operand0, 0),
1568 scratch_reg));
1570 else
1571 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1572 emit_insn (gen_rtx_SET (VOIDmode,
1573 replace_equiv_address (operand0, scratch_reg),
1574 operand1));
1575 return 1;
1577 /* Handle secondary reloads for loads of FP registers from constant
1578 expressions by forcing the constant into memory.
1580 Use scratch_reg to hold the address of the memory location.
1582 The proper fix is to change PREFERRED_RELOAD_CLASS to return
1583 NO_REGS when presented with a const_int and a register class
1584 containing only FP registers. Doing so unfortunately creates
1585 more problems than it solves. Fix this for 2.5. */
1586 else if (scratch_reg
1587 && CONSTANT_P (operand1)
1588 && fp_reg_operand (operand0, mode))
1590 rtx const_mem, xoperands[2];
1592 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1593 it in WORD_MODE regardless of what mode it was originally given
1594 to us. */
1595 scratch_reg = force_mode (word_mode, scratch_reg);
1597 /* Force the constant into memory and put the address of the
1598 memory location into scratch_reg. */
1599 const_mem = force_const_mem (mode, operand1);
1600 xoperands[0] = scratch_reg;
1601 xoperands[1] = XEXP (const_mem, 0);
1602 emit_move_sequence (xoperands, Pmode, 0);
1604 /* Now load the destination register. */
1605 emit_insn (gen_rtx_SET (mode, operand0,
1606 replace_equiv_address (const_mem, scratch_reg)));
1607 return 1;
1609 /* Handle secondary reloads for SAR. These occur when trying to load
1610 the SAR from memory, FP register, or with a constant. */
1611 else if (scratch_reg
1612 && GET_CODE (operand0) == REG
1613 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1614 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1615 && (GET_CODE (operand1) == MEM
1616 || GET_CODE (operand1) == CONST_INT
1617 || (GET_CODE (operand1) == REG
1618 && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
1620 /* D might not fit in 14 bits either; for such cases load D into
1621 scratch reg. */
1622 if (GET_CODE (operand1) == MEM
1623 && !memory_address_p (Pmode, XEXP (operand1, 0)))
1625 /* We are reloading the address into the scratch register, so we
1626 want to make sure the scratch register is a full register. */
1627 scratch_reg = force_mode (word_mode, scratch_reg);
1629 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1630 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1631 0)),
1632 Pmode,
1633 XEXP (XEXP (operand1, 0),
1635 scratch_reg));
1637 /* Now we are going to load the scratch register from memory,
1638 we want to load it in the same width as the original MEM,
1639 which must be the same as the width of the ultimate destination,
1640 OPERAND0. */
1641 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1643 emit_move_insn (scratch_reg,
1644 replace_equiv_address (operand1, scratch_reg));
1646 else
1648 /* We want to load the scratch register using the same mode as
1649 the ultimate destination. */
1650 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1652 emit_move_insn (scratch_reg, operand1);
1655 /* And emit the insn to set the ultimate destination. We know that
1656 the scratch register has the same mode as the destination at this
1657 point. */
1658 emit_move_insn (operand0, scratch_reg);
1659 return 1;
1661 /* Handle the most common case: storing into a register. */
1662 else if (register_operand (operand0, mode))
1664 if (register_operand (operand1, mode)
1665 || (GET_CODE (operand1) == CONST_INT
1666 && cint_ok_for_move (INTVAL (operand1)))
1667 || (operand1 == CONST0_RTX (mode))
1668 || (GET_CODE (operand1) == HIGH
1669 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1670 /* Only `general_operands' can come here, so MEM is ok. */
1671 || GET_CODE (operand1) == MEM)
1673 /* Various sets are created during RTL generation which don't
1674 have the REG_POINTER flag correctly set. After the CSE pass,
1675 instruction recognition can fail if we don't consistently
1676 set this flag when performing register copies. This should
1677 also improve the opportunities for creating insns that use
1678 unscaled indexing. */
1679 if (REG_P (operand0) && REG_P (operand1))
1681 if (REG_POINTER (operand1)
1682 && !REG_POINTER (operand0)
1683 && !HARD_REGISTER_P (operand0))
1684 copy_reg_pointer (operand0, operand1);
1685 else if (REG_POINTER (operand0)
1686 && !REG_POINTER (operand1)
1687 && !HARD_REGISTER_P (operand1))
1688 copy_reg_pointer (operand1, operand0);
1691 /* When MEMs are broken out, the REG_POINTER flag doesn't
1692 get set. In some cases, we can set the REG_POINTER flag
1693 from the declaration for the MEM. */
1694 if (REG_P (operand0)
1695 && GET_CODE (operand1) == MEM
1696 && !REG_POINTER (operand0))
1698 tree decl = MEM_EXPR (operand1);
1700 /* Set the register pointer flag and register alignment
1701 if the declaration for this memory reference is a
1702 pointer type. Fortran indirect argument references
1703 are ignored. */
1704 if (decl
1705 && !(flag_argument_noalias > 1
1706 && TREE_CODE (decl) == INDIRECT_REF
1707 && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
1709 tree type;
1711 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1712 tree operand 1. */
1713 if (TREE_CODE (decl) == COMPONENT_REF)
1714 decl = TREE_OPERAND (decl, 1);
1716 type = TREE_TYPE (decl);
1717 type = strip_array_types (type);
1719 if (POINTER_TYPE_P (type))
1721 int align;
1723 type = TREE_TYPE (type);
1724 /* Using TYPE_ALIGN_OK is rather conservative as
1725 only the ada frontend actually sets it. */
1726 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1727 : BITS_PER_UNIT);
1728 mark_reg_pointer (operand0, align);
1733 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1734 return 1;
1737 else if (GET_CODE (operand0) == MEM)
1739 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1740 && !(reload_in_progress || reload_completed))
1742 rtx temp = gen_reg_rtx (DFmode);
1744 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1745 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1746 return 1;
1748 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1750 /* Run this case quickly. */
1751 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1752 return 1;
1754 if (! (reload_in_progress || reload_completed))
1756 operands[0] = validize_mem (operand0);
1757 operands[1] = operand1 = force_reg (mode, operand1);
1761 /* Simplify the source if we need to.
1762 Note we do have to handle function labels here, even though we do
1763 not consider them legitimate constants. Loop optimizations can
1764 call the emit_move_xxx with one as a source. */
1765 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1766 || function_label_operand (operand1, mode)
1767 || (GET_CODE (operand1) == HIGH
1768 && symbolic_operand (XEXP (operand1, 0), mode)))
1770 int ishighonly = 0;
1772 if (GET_CODE (operand1) == HIGH)
1774 ishighonly = 1;
1775 operand1 = XEXP (operand1, 0);
1777 if (symbolic_operand (operand1, mode))
1779 /* Argh. The assembler and linker can't handle arithmetic
1780 involving plabels.
1782 So we force the plabel into memory, load operand0 from
1783 the memory location, then add in the constant part. */
1784 if ((GET_CODE (operand1) == CONST
1785 && GET_CODE (XEXP (operand1, 0)) == PLUS
1786 && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
1787 || function_label_operand (operand1, mode))
1789 rtx temp, const_part;
1791 /* Figure out what (if any) scratch register to use. */
1792 if (reload_in_progress || reload_completed)
1794 scratch_reg = scratch_reg ? scratch_reg : operand0;
1795 /* SCRATCH_REG will hold an address and maybe the actual
1796 data. We want it in WORD_MODE regardless of what mode it
1797 was originally given to us. */
1798 scratch_reg = force_mode (word_mode, scratch_reg);
1800 else if (flag_pic)
1801 scratch_reg = gen_reg_rtx (Pmode);
1803 if (GET_CODE (operand1) == CONST)
1805 /* Save away the constant part of the expression. */
1806 const_part = XEXP (XEXP (operand1, 0), 1);
1807 gcc_assert (GET_CODE (const_part) == CONST_INT);
1809 /* Force the function label into memory. */
1810 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1812 else
1814 /* No constant part. */
1815 const_part = NULL_RTX;
1817 /* Force the function label into memory. */
1818 temp = force_const_mem (mode, operand1);
1822 /* Get the address of the memory location. PIC-ify it if
1823 necessary. */
1824 temp = XEXP (temp, 0);
1825 if (flag_pic)
1826 temp = legitimize_pic_address (temp, mode, scratch_reg);
1828 /* Put the address of the memory location into our destination
1829 register. */
1830 operands[1] = temp;
1831 emit_move_sequence (operands, mode, scratch_reg);
1833 /* Now load from the memory location into our destination
1834 register. */
1835 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1836 emit_move_sequence (operands, mode, scratch_reg);
1838 /* And add back in the constant part. */
1839 if (const_part != NULL_RTX)
1840 expand_inc (operand0, const_part);
1842 return 1;
1845 if (flag_pic)
1847 rtx temp;
1849 if (reload_in_progress || reload_completed)
1851 temp = scratch_reg ? scratch_reg : operand0;
1852 /* TEMP will hold an address and maybe the actual
1853 data. We want it in WORD_MODE regardless of the mode
1854 in which it was originally given to us. */
1855 temp = force_mode (word_mode, temp);
1857 else
1858 temp = gen_reg_rtx (Pmode);
1860 /* (const (plus (symbol) (const_int))) must be forced to
1861 memory during/after reload if the const_int will not fit
1862 in 14 bits. */
1863 if (GET_CODE (operand1) == CONST
1864 && GET_CODE (XEXP (operand1, 0)) == PLUS
1865 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
1866 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
1867 && (reload_completed || reload_in_progress)
1868 && flag_pic)
1870 rtx const_mem = force_const_mem (mode, operand1);
1871 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
1872 mode, temp);
1873 operands[1] = replace_equiv_address (const_mem, operands[1]);
1874 emit_move_sequence (operands, mode, temp);
1876 else
1878 operands[1] = legitimize_pic_address (operand1, mode, temp);
1879 if (REG_P (operand0) && REG_P (operands[1]))
1880 copy_reg_pointer (operand0, operands[1]);
1881 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
1884 /* On the HPPA, references to data space are supposed to use dp,
1885 register 27, but showing it in the RTL inhibits various cse
1886 and loop optimizations. */
1887 else
1889 rtx temp, set;
1891 if (reload_in_progress || reload_completed)
1893 temp = scratch_reg ? scratch_reg : operand0;
1894 /* TEMP will hold an address and maybe the actual
1895 data. We want it in WORD_MODE regardless of the mode
1896 in which it was originally given to us. */
1897 temp = force_mode (word_mode, temp);
1899 else
1900 temp = gen_reg_rtx (mode);
1902 /* Loading a SYMBOL_REF into a register makes that register
1903 safe to be used as the base in an indexed address.
1905 Don't mark hard registers though. That loses. */
1906 if (GET_CODE (operand0) == REG
1907 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1908 mark_reg_pointer (operand0, BITS_PER_UNIT);
1909 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
1910 mark_reg_pointer (temp, BITS_PER_UNIT);
1912 if (ishighonly)
1913 set = gen_rtx_SET (mode, operand0, temp);
1914 else
1915 set = gen_rtx_SET (VOIDmode,
1916 operand0,
1917 gen_rtx_LO_SUM (mode, temp, operand1));
1919 emit_insn (gen_rtx_SET (VOIDmode,
1920 temp,
1921 gen_rtx_HIGH (mode, operand1)));
1922 emit_insn (set);
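/* For example, loading the address of a symbol foo this way typically
   assembles to "ldil L'foo,%t" followed by "ldo R'foo(%t),%dest", much
   like the constant template in singlemove_string below; when
   ishighonly is set, only the ldil result is copied to the
   destination.  */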
1925 return 1;
1927 else if (pa_tls_referenced_p (operand1))
1929 rtx tmp = operand1;
1930 rtx addend = NULL;
1932 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
1934 addend = XEXP (XEXP (tmp, 0), 1);
1935 tmp = XEXP (XEXP (tmp, 0), 0);
1938 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
1939 tmp = legitimize_tls_address (tmp);
1940 if (addend)
1942 tmp = gen_rtx_PLUS (mode, tmp, addend);
1943 tmp = force_operand (tmp, operands[0]);
1945 operands[1] = tmp;
1947 else if (GET_CODE (operand1) != CONST_INT
1948 || !cint_ok_for_move (INTVAL (operand1)))
1950 rtx insn, temp;
1951 rtx op1 = operand1;
1952 HOST_WIDE_INT value = 0;
1953 HOST_WIDE_INT insv = 0;
1954 int insert = 0;
1956 if (GET_CODE (operand1) == CONST_INT)
1957 value = INTVAL (operand1);
1959 if (TARGET_64BIT
1960 && GET_CODE (operand1) == CONST_INT
1961 && HOST_BITS_PER_WIDE_INT > 32
1962 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1964 HOST_WIDE_INT nval;
1966 /* Extract the low order 32 bits of the value and sign extend.
1967 If the new value is the same as the original value, we can
1968 use the original value as-is. If the new value is
1969 different, we use it and insert the most significant 32 bits
1970 of the original value into the final result. */
1971 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1972 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
1973 if (value != nval)
1975 #if HOST_BITS_PER_WIDE_INT > 32
1976 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1977 #endif
1978 insert = 1;
1979 value = nval;
1980 operand1 = GEN_INT (nval);
1984 if (reload_in_progress || reload_completed)
1985 temp = scratch_reg ? scratch_reg : operand0;
1986 else
1987 temp = gen_reg_rtx (mode);
1989 /* We don't directly split DImode constants on 32-bit targets
1990 because PLUS uses an 11-bit immediate and the insn sequence
1991 generated is not as efficient as the one using HIGH/LO_SUM. */
1992 if (GET_CODE (operand1) == CONST_INT
1993 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
1994 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1995 && !insert)
1997 /* Directly break the constant into high and low parts. This
1998 provides better optimization opportunities because various
1999 passes recognize constants split with PLUS but not LO_SUM.
2000 We use a 14-bit signed low part except when the addition
2001 of 0x4000 to the high part might change the sign of the
2002 high part. */
2003 HOST_WIDE_INT low = value & 0x3fff;
2004 HOST_WIDE_INT high = value & ~ 0x3fff;
2006 if (low >= 0x2000)
2008 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2009 high += 0x2000;
2010 else
2011 high += 0x4000;
2014 low = value - high;
2016 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2017 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2019 else
2021 emit_insn (gen_rtx_SET (VOIDmode, temp,
2022 gen_rtx_HIGH (mode, operand1)));
2023 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
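/* A worked example of the PLUS split above: for value = 0x12345,
   low = value & 0x3fff = 0x2345, which does not fit in 14 signed
   bits, so high becomes 0x10000 + 0x4000 = 0x14000 and
   low = 0x12345 - 0x14000 = -7355, which does.  The move then
   typically expands to "ldil L'0x14000,%t" followed by
   "ldo -7355(%t),%dest".  */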
2026 insn = emit_move_insn (operands[0], operands[1]);
2028 /* Now insert the most significant 32 bits of the value
2029 into the register. When we don't have a second register
2030 available, it could take up to nine instructions to load
2031 a 64-bit integer constant. Prior to reload, we force
2032 constants that would take more than three instructions
2033 to load to the constant pool. During and after reload,
2034 we have to handle all possible values. */
2035 if (insert)
2037 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2038 register and the value to be inserted is outside the
2039 range that can be loaded with three depdi instructions. */
2040 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2042 operand1 = GEN_INT (insv);
2044 emit_insn (gen_rtx_SET (VOIDmode, temp,
2045 gen_rtx_HIGH (mode, operand1)));
2046 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2047 emit_insn (gen_insv (operand0, GEN_INT (32),
2048 const0_rtx, temp));
2050 else
2052 int len = 5, pos = 27;
2054 /* Insert the bits using the depdi instruction. */
2055 while (pos >= 0)
2057 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2058 HOST_WIDE_INT sign = v5 < 0;
2060 /* Left extend the insertion. */
2061 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2062 while (pos > 0 && (insv & 1) == sign)
2064 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2065 len += 1;
2066 pos -= 1;
2069 emit_insn (gen_insv (operand0, GEN_INT (len),
2070 GEN_INT (pos), GEN_INT (v5)));
2072 len = pos > 0 && pos < 5 ? pos : 5;
2073 pos -= len;
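/* A worked example of the loop above: if insv = 7, the first
   iteration computes v5 = 7 and sign = 0, shifts insv to zero, and
   the left-extension loop then absorbs all of the remaining zero
   bits, leaving len = 32 and pos = 0.  A single gen_insv then
   deposits 7 into the upper 32 bits of operand0 with one depdi.  */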
2078 set_unique_reg_note (insn, REG_EQUAL, op1);
2080 return 1;
2083 /* Now have insn-emit do whatever it normally does. */
2084 return 0;
2087 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2088 it will need a link/runtime reloc). */
2090 static int
2091 reloc_needed (tree exp)
2093 int reloc = 0;
2095 switch (TREE_CODE (exp))
2097 case ADDR_EXPR:
2098 return 1;
2100 case POINTER_PLUS_EXPR:
2101 case PLUS_EXPR:
2102 case MINUS_EXPR:
2103 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2104 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2105 break;
2107 CASE_CONVERT:
2108 case NON_LVALUE_EXPR:
2109 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2110 break;
2112 case CONSTRUCTOR:
2114 tree value;
2115 unsigned HOST_WIDE_INT ix;
2117 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2118 if (value)
2119 reloc |= reloc_needed (value);
2121 break;
2123 case ERROR_MARK:
2124 break;
2126 default:
2127 break;
2129 return reloc;
2132 /* Does operand (which is a symbolic_operand) live in text space?
2133 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2134 will be true. */
2136 int
2137 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2139 if (GET_CODE (operand) == CONST)
2140 operand = XEXP (XEXP (operand, 0), 0);
2141 if (flag_pic)
2143 if (GET_CODE (operand) == SYMBOL_REF)
2144 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2146 else
2148 if (GET_CODE (operand) == SYMBOL_REF)
2149 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2151 return 1;
2155 /* Return the best assembler insn template
2156 for moving operands[1] into operands[0] as a fullword. */
2157 const char *
2158 singlemove_string (rtx *operands)
2160 HOST_WIDE_INT intval;
2162 if (GET_CODE (operands[0]) == MEM)
2163 return "stw %r1,%0";
2164 if (GET_CODE (operands[1]) == MEM)
2165 return "ldw %1,%0";
2166 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2168 long i;
2169 REAL_VALUE_TYPE d;
2171 gcc_assert (GET_MODE (operands[1]) == SFmode);
2173 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2174 bit pattern. */
2175 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2176 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2178 operands[1] = GEN_INT (i);
2179 /* Fall through to CONST_INT case. */
2181 if (GET_CODE (operands[1]) == CONST_INT)
2183 intval = INTVAL (operands[1]);
2185 if (VAL_14_BITS_P (intval))
2186 return "ldi %1,%0";
2187 else if ((intval & 0x7ff) == 0)
2188 return "ldil L'%1,%0";
2189 else if (zdepi_cint_p (intval))
2190 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2191 else
2192 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2194 return "copy %1,%0";
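/* Some examples of the template selection above: 4660 fits in 14
   signed bits and yields "ldi 4660,%0"; 0x7f000 has eleven low zero
   bits and yields "ldil L'0x7f000,%0"; 0x3fc0 (a run of ones in bits
   6-13) is matched by zdepi_cint_p and yields the zdepi form; an
   arbitrary constant such as 0x12345 falls through to the
   two-instruction ldil/ldo sequence.  */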
2198 /* Compute position (in OP[1]) and width (in OP[2])
2199 useful for copying IMM to a register using the zdepi
2200 instructions. Store the immediate value to insert in OP[0]. */
2201 static void
2202 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2204 int lsb, len;
2206 /* Find the least significant set bit in IMM. */
2207 for (lsb = 0; lsb < 32; lsb++)
2209 if ((imm & 1) != 0)
2210 break;
2211 imm >>= 1;
2214 /* Choose variants based on *sign* of the 5-bit field. */
2215 if ((imm & 0x10) == 0)
2216 len = (lsb <= 28) ? 4 : 32 - lsb;
2217 else
2219 /* Find the width of the bitstring in IMM. */
2220 for (len = 5; len < 32; len++)
2222 if ((imm & (1 << len)) == 0)
2223 break;
2226 /* Sign extend IMM as a 5-bit value. */
2227 imm = (imm & 0xf) - 0x10;
2230 op[0] = imm;
2231 op[1] = 31 - lsb;
2232 op[2] = len;
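/* A worked example: imm = 0x00fff000 has its least significant set
   bit at 12, so lsb = 12; the shifted value 0xfff has bit 4 set, the
   width scan gives len = 12, and imm sign extends to -1.  The result
   {-1, 19, 12} corresponds to "zdepi -1,19,12", which deposits twelve
   ones into bits 12-23.  */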
2235 /* Compute position (in OP[1]) and width (in OP[2])
2236 useful for copying IMM to a register using the depdi,z
2237 instructions. Store the immediate value to insert in OP[0]. */
2238 void
2239 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2241 HOST_WIDE_INT lsb, len;
2243 /* Find the least significant set bit in IMM. */
2244 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2246 if ((imm & 1) != 0)
2247 break;
2248 imm >>= 1;
2251 /* Choose variants based on *sign* of the 5-bit field. */
2252 if ((imm & 0x10) == 0)
2253 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2254 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2255 else
2257 /* Find the width of the bitstring in IMM. */
2258 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2260 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2261 break;
2264 /* Sign extend IMM as a 5-bit value. */
2265 imm = (imm & 0xf) - 0x10;
2268 op[0] = imm;
2269 op[1] = 63 - lsb;
2270 op[2] = len;
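/* Likewise for the 64-bit form: imm = 0xff00 gives lsb = 8, len = 8
   and a sign extended value of -1, i.e. {-1, 55, 8}, a single
   "depdi,z -1,55,8" depositing eight ones into bits 8-15.  */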
2273 /* Output assembler code to perform a doubleword move insn
2274 with operands OPERANDS. */
2276 const char *
2277 output_move_double (rtx *operands)
2279 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2280 rtx latehalf[2];
2281 rtx addreg0 = 0, addreg1 = 0;
2283 /* First classify both operands. */
2285 if (REG_P (operands[0]))
2286 optype0 = REGOP;
2287 else if (offsettable_memref_p (operands[0]))
2288 optype0 = OFFSOP;
2289 else if (GET_CODE (operands[0]) == MEM)
2290 optype0 = MEMOP;
2291 else
2292 optype0 = RNDOP;
2294 if (REG_P (operands[1]))
2295 optype1 = REGOP;
2296 else if (CONSTANT_P (operands[1]))
2297 optype1 = CNSTOP;
2298 else if (offsettable_memref_p (operands[1]))
2299 optype1 = OFFSOP;
2300 else if (GET_CODE (operands[1]) == MEM)
2301 optype1 = MEMOP;
2302 else
2303 optype1 = RNDOP;
2305 /* Check for cases that the operand constraints are not
2306 supposed to allow. */
2307 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2309 /* Handle copies between general and floating registers. */
2311 if (optype0 == REGOP && optype1 == REGOP
2312 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2314 if (FP_REG_P (operands[0]))
2316 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2317 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2318 return "{fldds|fldd} -16(%%sp),%0";
2320 else
2322 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2323 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2324 return "{ldws|ldw} -12(%%sp),%R0";
2328 /* Handle auto decrementing and incrementing loads and stores
2329 specifically, since the structure of the function doesn't work
2330 for them without major modification. Do it better when we teach
2331 this port about the general inc/dec addressing of the PA.
2332 (This was written by tege. Chide him if it doesn't work.) */
2334 if (optype0 == MEMOP)
2336 /* We have to output the address syntax ourselves, since print_operand
2337 doesn't deal with the addresses we want to use. Fix this later. */
2339 rtx addr = XEXP (operands[0], 0);
2340 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2342 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2344 operands[0] = XEXP (addr, 0);
2345 gcc_assert (GET_CODE (operands[1]) == REG
2346 && GET_CODE (operands[0]) == REG);
2348 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2350 /* No overlap between high target register and address
2351 register. (We do this in a non-obvious way to
2352 save a register file writeback) */
2353 if (GET_CODE (addr) == POST_INC)
2354 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2355 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2357 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2359 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2361 operands[0] = XEXP (addr, 0);
2362 gcc_assert (GET_CODE (operands[1]) == REG
2363 && GET_CODE (operands[0]) == REG);
2365 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2366 /* No overlap between high target register and address
2367 register. (We do this in a non-obvious way to save a
2368 register file writeback) */
2369 if (GET_CODE (addr) == PRE_INC)
2370 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2371 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2374 if (optype1 == MEMOP)
2376 /* We have to output the address syntax ourselves, since print_operand
2377 doesn't deal with the addresses we want to use. Fix this later. */
2379 rtx addr = XEXP (operands[1], 0);
2380 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2382 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2384 operands[1] = XEXP (addr, 0);
2385 gcc_assert (GET_CODE (operands[0]) == REG
2386 && GET_CODE (operands[1]) == REG);
2388 if (!reg_overlap_mentioned_p (high_reg, addr))
2390 /* No overlap between high target register and address
2391 register. (We do this in a non-obvious way to
2392 save a register file writeback) */
2393 if (GET_CODE (addr) == POST_INC)
2394 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2395 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2397 else
2399 /* This is an undefined situation. We should load into the
2400 address register *and* update that register. Probably
2401 we don't need to handle this at all. */
2402 if (GET_CODE (addr) == POST_INC)
2403 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2404 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2407 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2409 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2411 operands[1] = XEXP (addr, 0);
2412 gcc_assert (GET_CODE (operands[0]) == REG
2413 && GET_CODE (operands[1]) == REG);
2415 if (!reg_overlap_mentioned_p (high_reg, addr))
2417 /* No overlap between high target register and address
2418 register. (We do this in a non-obvious way to
2419 save a register file writeback) */
2420 if (GET_CODE (addr) == PRE_INC)
2421 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2422 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2424 else
2426 /* This is an undefined situation. We should load into the
2427 address register *and* update that register. Probably
2428 we don't need to handle this at all. */
2429 if (GET_CODE (addr) == PRE_INC)
2430 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2431 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2434 else if (GET_CODE (addr) == PLUS
2435 && GET_CODE (XEXP (addr, 0)) == MULT)
2437 rtx xoperands[4];
2438 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2440 if (!reg_overlap_mentioned_p (high_reg, addr))
2442 xoperands[0] = high_reg;
2443 xoperands[1] = XEXP (addr, 1);
2444 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2445 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2446 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2447 xoperands);
2448 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2450 else
2452 xoperands[0] = high_reg;
2453 xoperands[1] = XEXP (addr, 1);
2454 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2455 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2456 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2457 xoperands);
2458 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2463 /* If an operand is an unoffsettable memory ref, find a register
2464 we can increment temporarily to make it refer to the second word. */
2466 if (optype0 == MEMOP)
2467 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2469 if (optype1 == MEMOP)
2470 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2472 /* Ok, we can do one word at a time.
2473 Normally we do the low-numbered word first.
2475 In either case, set up in LATEHALF the operands to use
2476 for the high-numbered word and in some cases alter the
2477 operands in OPERANDS to be suitable for the low-numbered word. */
2479 if (optype0 == REGOP)
2480 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2481 else if (optype0 == OFFSOP)
2482 latehalf[0] = adjust_address (operands[0], SImode, 4);
2483 else
2484 latehalf[0] = operands[0];
2486 if (optype1 == REGOP)
2487 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2488 else if (optype1 == OFFSOP)
2489 latehalf[1] = adjust_address (operands[1], SImode, 4);
2490 else if (optype1 == CNSTOP)
2491 split_double (operands[1], &operands[1], &latehalf[1]);
2492 else
2493 latehalf[1] = operands[1];
2495 /* If the first move would clobber the source of the second one,
2496 do them in the other order.
2498 This can happen in two cases:
2500 mem -> register where the first half of the destination register
2501 is the same register used in the memory's address. Reload
2502 can create such insns.
2504 mem in this case will be either register indirect or register
2505 indirect plus a valid offset.
2507 register -> register move where REGNO(dst) == REGNO(src) + 1.
2508 Someone (Tim/Tege?) claimed this can happen for parameter loads.
2510 Handle mem -> register case first. */
2511 if (optype0 == REGOP
2512 && (optype1 == MEMOP || optype1 == OFFSOP)
2513 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2514 operands[1], 0))
2516 /* Do the late half first. */
2517 if (addreg1)
2518 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2519 output_asm_insn (singlemove_string (latehalf), latehalf);
2521 /* Then clobber. */
2522 if (addreg1)
2523 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2524 return singlemove_string (operands);
2527 /* Now handle register -> register case. */
2528 if (optype0 == REGOP && optype1 == REGOP
2529 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2531 output_asm_insn (singlemove_string (latehalf), latehalf);
2532 return singlemove_string (operands);
2535 /* Normal case: do the two words, low-numbered first. */
2537 output_asm_insn (singlemove_string (operands), operands);
2539 /* Make any unoffsettable addresses point at high-numbered word. */
2540 if (addreg0)
2541 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2542 if (addreg1)
2543 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2545 /* Do that word. */
2546 output_asm_insn (singlemove_string (latehalf), latehalf);
2548 /* Undo the adds we just did. */
2549 if (addreg0)
2550 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2551 if (addreg1)
2552 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2554 return "";
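/* For example, moving the register pair (%r4,%r5) into (%r5,%r6)
   satisfies REGNO (dst) == REGNO (src) + 1, so the late half is
   copied first (%r5 into %r6) before %r4 is copied into %r5,
   preserving the second word of the source.  */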
2557 const char *
2558 output_fp_move_double (rtx *operands)
2560 if (FP_REG_P (operands[0]))
2562 if (FP_REG_P (operands[1])
2563 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2564 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2565 else
2566 output_asm_insn ("fldd%F1 %1,%0", operands);
2568 else if (FP_REG_P (operands[1]))
2570 output_asm_insn ("fstd%F0 %1,%0", operands);
2572 else
2574 rtx xoperands[2];
2576 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2578 /* This is a pain. You have to be prepared to deal with an
2579 arbitrary address here including pre/post increment/decrement,
2581 so avoid this in the MD. */
2582 gcc_assert (GET_CODE (operands[0]) == REG);
2584 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2585 xoperands[0] = operands[0];
2586 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2588 return "";
2591 /* Return a REG that occurs in ADDR with coefficient 1.
2592 ADDR can be effectively incremented by incrementing REG. */
2594 static rtx
2595 find_addr_reg (rtx addr)
2597 while (GET_CODE (addr) == PLUS)
2599 if (GET_CODE (XEXP (addr, 0)) == REG)
2600 addr = XEXP (addr, 0);
2601 else if (GET_CODE (XEXP (addr, 1)) == REG)
2602 addr = XEXP (addr, 1);
2603 else if (CONSTANT_P (XEXP (addr, 0)))
2604 addr = XEXP (addr, 1);
2605 else if (CONSTANT_P (XEXP (addr, 1)))
2606 addr = XEXP (addr, 0);
2607 else
2608 gcc_unreachable ();
2610 gcc_assert (GET_CODE (addr) == REG);
2611 return addr;
2614 /* Emit code to perform a block move.
2616 OPERANDS[0] is the destination pointer as a REG, clobbered.
2617 OPERANDS[1] is the source pointer as a REG, clobbered.
2618 OPERANDS[2] is a register for temporary storage.
2619 OPERANDS[3] is a register for temporary storage.
2620 OPERANDS[4] is the size as a CONST_INT
2621 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2622 OPERANDS[6] is another temporary register. */
2624 const char *
2625 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2627 int align = INTVAL (operands[5]);
2628 unsigned long n_bytes = INTVAL (operands[4]);
2630 /* We can't move more than a word at a time because the PA
2631 has no integer move insns longer than a word. (Could use fp mem ops?) */
2632 if (align > (TARGET_64BIT ? 8 : 4))
2633 align = (TARGET_64BIT ? 8 : 4);
2635 /* Note that we know each loop below will execute at least twice
2636 (else we would have open-coded the copy). */
2637 switch (align)
2639 case 8:
2640 /* Pre-adjust the loop counter. */
2641 operands[4] = GEN_INT (n_bytes - 16);
2642 output_asm_insn ("ldi %4,%2", operands);
2644 /* Copying loop. */
2645 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2646 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2647 output_asm_insn ("std,ma %3,8(%0)", operands);
2648 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2649 output_asm_insn ("std,ma %6,8(%0)", operands);
2651 /* Handle the residual. There could be up to 15 bytes of
2652 residual to copy! */
2653 if (n_bytes % 16 != 0)
2655 operands[4] = GEN_INT (n_bytes % 8);
2656 if (n_bytes % 16 >= 8)
2657 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2658 if (n_bytes % 8 != 0)
2659 output_asm_insn ("ldd 0(%1),%6", operands);
2660 if (n_bytes % 16 >= 8)
2661 output_asm_insn ("std,ma %3,8(%0)", operands);
2662 if (n_bytes % 8 != 0)
2663 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2665 return "";
2667 case 4:
2668 /* Pre-adjust the loop counter. */
2669 operands[4] = GEN_INT (n_bytes - 8);
2670 output_asm_insn ("ldi %4,%2", operands);
2672 /* Copying loop. */
2673 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2674 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2675 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2676 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2677 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2679 /* Handle the residual. There could be up to 7 bytes of
2680 residual to copy! */
2681 if (n_bytes % 8 != 0)
2683 operands[4] = GEN_INT (n_bytes % 4);
2684 if (n_bytes % 8 >= 4)
2685 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2686 if (n_bytes % 4 != 0)
2687 output_asm_insn ("ldw 0(%1),%6", operands);
2688 if (n_bytes % 8 >= 4)
2689 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2690 if (n_bytes % 4 != 0)
2691 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2693 return "";
2695 case 2:
2696 /* Pre-adjust the loop counter. */
2697 operands[4] = GEN_INT (n_bytes - 4);
2698 output_asm_insn ("ldi %4,%2", operands);
2700 /* Copying loop. */
2701 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2702 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2703 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2704 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2705 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2707 /* Handle the residual. */
2708 if (n_bytes % 4 != 0)
2710 if (n_bytes % 4 >= 2)
2711 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2712 if (n_bytes % 2 != 0)
2713 output_asm_insn ("ldb 0(%1),%6", operands);
2714 if (n_bytes % 4 >= 2)
2715 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2716 if (n_bytes % 2 != 0)
2717 output_asm_insn ("stb %6,0(%0)", operands);
2719 return "";
2721 case 1:
2722 /* Pre-adjust the loop counter. */
2723 operands[4] = GEN_INT (n_bytes - 2);
2724 output_asm_insn ("ldi %4,%2", operands);
2726 /* Copying loop. */
2727 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2728 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2729 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2730 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2731 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2733 /* Handle the residual. */
2734 if (n_bytes % 2 != 0)
2736 output_asm_insn ("ldb 0(%1),%3", operands);
2737 output_asm_insn ("stb %3,0(%0)", operands);
2739 return "";
2741 default:
2742 gcc_unreachable ();
2746 /* Count the number of insns necessary to handle this block move.
2748 Basic structure is the same as output_block_move, except that we
2749 count insns rather than emit them. */
2751 static int
2752 compute_movmem_length (rtx insn)
2754 rtx pat = PATTERN (insn);
2755 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2756 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2757 unsigned int n_insns = 0;
2759 /* We can't move more than a word at a time because the PA
2760 has no integer move insns longer than a word. (Could use fp mem ops?) */
2761 if (align > (TARGET_64BIT ? 8 : 4))
2762 align = (TARGET_64BIT ? 8 : 4);
2764 /* The basic copying loop. */
2765 n_insns = 6;
2767 /* Residuals. */
2768 if (n_bytes % (2 * align) != 0)
2770 if ((n_bytes % (2 * align)) >= align)
2771 n_insns += 2;
2773 if ((n_bytes % align) != 0)
2774 n_insns += 2;
2777 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2778 return n_insns * 4;
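/* A worked example: for align = 4 and n_bytes = 22 the copying loop
   accounts for the base 6 insns; the 22 % 8 = 6 byte residual adds 2
   insns for the aligned word and 2 for the 2 trailing bytes, giving
   10 insns, i.e. a length of 40 bytes.  */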
2781 /* Emit code to perform a block clear.
2783 OPERANDS[0] is the destination pointer as a REG, clobbered.
2784 OPERANDS[1] is a register for temporary storage.
2785 OPERANDS[2] is the size as a CONST_INT
2786 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2788 const char *
2789 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2791 int align = INTVAL (operands[3]);
2792 unsigned long n_bytes = INTVAL (operands[2]);
2794 /* We can't clear more than a word at a time because the PA
2795 has no integer move insns longer than a word. */
2796 if (align > (TARGET_64BIT ? 8 : 4))
2797 align = (TARGET_64BIT ? 8 : 4);
2799 /* Note that we know each loop below will execute at least twice
2800 (else we would have open-coded the clear). */
2801 switch (align)
2803 case 8:
2804 /* Pre-adjust the loop counter. */
2805 operands[2] = GEN_INT (n_bytes - 16);
2806 output_asm_insn ("ldi %2,%1", operands);
2808 /* Loop. */
2809 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2810 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2811 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2813 /* Handle the residual. There could be up to 15 bytes of
2814 residual to clear! */
2815 if (n_bytes % 16 != 0)
2817 operands[2] = GEN_INT (n_bytes % 8);
2818 if (n_bytes % 16 >= 8)
2819 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2820 if (n_bytes % 8 != 0)
2821 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2823 return "";
2825 case 4:
2826 /* Pre-adjust the loop counter. */
2827 operands[2] = GEN_INT (n_bytes - 8);
2828 output_asm_insn ("ldi %2,%1", operands);
2830 /* Loop. */
2831 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2832 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2833 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2835 /* Handle the residual. There could be up to 7 bytes of
2836 residual to clear! */
2837 if (n_bytes % 8 != 0)
2839 operands[2] = GEN_INT (n_bytes % 4);
2840 if (n_bytes % 8 >= 4)
2841 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2842 if (n_bytes % 4 != 0)
2843 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2845 return "";
2847 case 2:
2848 /* Pre-adjust the loop counter. */
2849 operands[2] = GEN_INT (n_bytes - 4);
2850 output_asm_insn ("ldi %2,%1", operands);
2852 /* Loop. */
2853 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2854 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2855 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2857 /* Handle the residual. */
2858 if (n_bytes % 4 != 0)
2860 if (n_bytes % 4 >= 2)
2861 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2862 if (n_bytes % 2 != 0)
2863 output_asm_insn ("stb %%r0,0(%0)", operands);
2865 return "";
2867 case 1:
2868 /* Pre-adjust the loop counter. */
2869 operands[2] = GEN_INT (n_bytes - 2);
2870 output_asm_insn ("ldi %2,%1", operands);
2872 /* Loop. */
2873 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2874 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2875 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2877 /* Handle the residual. */
2878 if (n_bytes % 2 != 0)
2879 output_asm_insn ("stb %%r0,0(%0)", operands);
2881 return "";
2883 default:
2884 gcc_unreachable ();
2888 /* Count the number of insns necessary to handle this block clear.
2890 Basic structure is the same as output_block_clear, except that we
2891 count insns rather than emit them. */
2893 static int
2894 compute_clrmem_length (rtx insn)
2896 rtx pat = PATTERN (insn);
2897 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2898 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2899 unsigned int n_insns = 0;
2901 /* We can't clear more than a word at a time because the PA
2902 has no integer move insns longer than a word. */
2903 if (align > (TARGET_64BIT ? 8 : 4))
2904 align = (TARGET_64BIT ? 8 : 4);
2906 /* The basic loop. */
2907 n_insns = 4;
2909 /* Residuals. */
2910 if (n_bytes % (2 * align) != 0)
2912 if ((n_bytes % (2 * align)) >= align)
2913 n_insns++;
2915 if ((n_bytes % align) != 0)
2916 n_insns++;
2919 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2920 return n_insns * 4;
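/* A worked example: for align = 4 and n_bytes = 22 the clearing loop
   accounts for the base 4 insns, plus 1 insn for the 22 % 8 >= 4
   aligned word and 1 for the 2 trailing bytes, giving 6 insns, i.e. a
   length of 24 bytes.  */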
2924 const char *
2925 output_and (rtx *operands)
2927 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2929 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2930 int ls0, ls1, ms0, p, len;
2932 for (ls0 = 0; ls0 < 32; ls0++)
2933 if ((mask & (1 << ls0)) == 0)
2934 break;
2936 for (ls1 = ls0; ls1 < 32; ls1++)
2937 if ((mask & (1 << ls1)) != 0)
2938 break;
2940 for (ms0 = ls1; ms0 < 32; ms0++)
2941 if ((mask & (1 << ms0)) == 0)
2942 break;
2944 gcc_assert (ms0 == 32);
2946 if (ls1 == 32)
2948 len = ls0;
2950 gcc_assert (len);
2952 operands[2] = GEN_INT (len);
2953 return "{extru|extrw,u} %1,31,%2,%0";
2955 else
2957 /* We could use this `depi' for the case above as well, but `depi'
2958 requires one more register file access than an `extru'. */
2960 p = 31 - ls0;
2961 len = ls1 - ls0;
2963 operands[2] = GEN_INT (p);
2964 operands[3] = GEN_INT (len);
2965 return "{depi|depwi} 0,%2,%3,%0";
2968 else
2969 return "and %1,%2,%0";
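/* For example, a mask of 0x000000ff gives ls0 = 8 and ls1 = 32, so
   the low eight bits are selected with "extru %1,31,8,%0", while
   0xffff00ff gives ls0 = 8 and ls1 = 16 and the hole is cleared with
   "depi 0,23,8,%0".  */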
2972 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2973 storing the result in operands[0]. */
2974 const char *
2975 output_64bit_and (rtx *operands)
2977 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2979 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2980 int ls0, ls1, ms0, p, len;
2982 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2983 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2984 break;
2986 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2987 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2988 break;
2990 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2991 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2992 break;
2994 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2996 if (ls1 == HOST_BITS_PER_WIDE_INT)
2998 len = ls0;
3000 gcc_assert (len);
3002 operands[2] = GEN_INT (len);
3003 return "extrd,u %1,63,%2,%0";
3005 else
3007 /* We could use this `depdi' for the case above as well, but `depdi'
3008 requires one more register file access than an `extrd,u'. */
3010 p = 63 - ls0;
3011 len = ls1 - ls0;
3013 operands[2] = GEN_INT (p);
3014 operands[3] = GEN_INT (len);
3015 return "depdi 0,%2,%3,%0";
3018 else
3019 return "and %1,%2,%0";
3022 const char *
3023 output_ior (rtx *operands)
3025 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3026 int bs0, bs1, p, len;
3028 if (INTVAL (operands[2]) == 0)
3029 return "copy %1,%0";
3031 for (bs0 = 0; bs0 < 32; bs0++)
3032 if ((mask & (1 << bs0)) != 0)
3033 break;
3035 for (bs1 = bs0; bs1 < 32; bs1++)
3036 if ((mask & (1 << bs1)) == 0)
3037 break;
3039 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3041 p = 31 - bs0;
3042 len = bs1 - bs0;
3044 operands[2] = GEN_INT (p);
3045 operands[3] = GEN_INT (len);
3046 return "{depi|depwi} -1,%2,%3,%0";
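/* For example, a mask of 0x00000f00 gives bs0 = 8 and bs1 = 12, so
   p = 23 and len = 4, and the insn emitted is "depi -1,23,4,%0",
   which sets bits 8-11 of the destination.  */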
3049 /* Return a string to perform a bitwise inclusive-or of operands[1]
3050 with operands[2], storing the result in operands[0]. */
3051 const char *
3052 output_64bit_ior (rtx *operands)
3054 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3055 int bs0, bs1, p, len;
3057 if (INTVAL (operands[2]) == 0)
3058 return "copy %1,%0";
3060 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3061 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3062 break;
3064 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3065 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3066 break;
3068 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3069 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3071 p = 63 - bs0;
3072 len = bs1 - bs0;
3074 operands[2] = GEN_INT (p);
3075 operands[3] = GEN_INT (len);
3076 return "depdi -1,%2,%3,%0";
3079 /* Target hook for assembling integer objects. This code handles
3080 aligned SI and DI integers specially since function references
3081 must be preceded by P%. */
3083 static bool
3084 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3086 if (size == UNITS_PER_WORD
3087 && aligned_p
3088 && function_label_operand (x, VOIDmode))
3090 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3091 output_addr_const (asm_out_file, x);
3092 fputc ('\n', asm_out_file);
3093 return true;
3095 return default_assemble_integer (x, size, aligned_p);
3098 /* Output an ascii string. */
3099 void
3100 output_ascii (FILE *file, const char *p, int size)
3102 int i;
3103 int chars_output;
3104 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3106 /* The HP assembler can only take strings of 256 characters at one
3107 time. This is a limitation on input line length, *not* the
3108 length of the string. Sigh. Even worse, it seems that the
3109 restriction is in number of input characters (see \xnn &
3110 \whatever). So we have to do this very carefully. */
3112 fputs ("\t.STRING \"", file);
3114 chars_output = 0;
3115 for (i = 0; i < size; i += 4)
3117 int co = 0;
3118 int io = 0;
3119 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3121 register unsigned int c = (unsigned char) p[i + io];
3123 if (c == '\"' || c == '\\')
3124 partial_output[co++] = '\\';
3125 if (c >= ' ' && c < 0177)
3126 partial_output[co++] = c;
3127 else
3129 unsigned int hexd;
3130 partial_output[co++] = '\\';
3131 partial_output[co++] = 'x';
3132 hexd = c / 16 - 0 + '0';
3133 if (hexd > '9')
3134 hexd -= '9' - 'a' + 1;
3135 partial_output[co++] = hexd;
3136 hexd = c % 16 - 0 + '0';
3137 if (hexd > '9')
3138 hexd -= '9' - 'a' + 1;
3139 partial_output[co++] = hexd;
3142 if (chars_output + co > 243)
3144 fputs ("\"\n\t.STRING \"", file);
3145 chars_output = 0;
3147 fwrite (partial_output, 1, (size_t) co, file);
3148 chars_output += co;
3149 co = 0;
3151 fputs ("\"\n", file);
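/* For example, the four input bytes 'H', 'i', '"', '\n' are emitted
   as

	.STRING "Hi\"\x0a"

   with the quote escaped and the newline written as a hex escape.  */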
3154 /* Try to rewrite floating point comparisons & branches to avoid
3155 useless add,tr insns.
3157 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3158 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3159 first attempt to remove useless add,tr insns. It is zero
3160 for the second pass as reorg sometimes leaves bogus REG_DEAD
3161 notes lying around.
3163 When CHECK_NOTES is zero we can only eliminate add,tr insns
3164 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3165 instructions. */
3166 static void
3167 remove_useless_addtr_insns (int check_notes)
3169 rtx insn;
3170 static int pass = 0;
3172 /* This is fairly cheap, so always run it when optimizing. */
3173 if (optimize > 0)
3175 int fcmp_count = 0;
3176 int fbranch_count = 0;
3178 /* Walk all the insns in this function looking for fcmp & fbranch
3179 instructions. Keep track of how many of each we find. */
3180 for (insn = get_insns (); insn; insn = next_insn (insn))
3182 rtx tmp;
3184 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3185 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3186 continue;
3188 tmp = PATTERN (insn);
3190 /* It must be a set. */
3191 if (GET_CODE (tmp) != SET)
3192 continue;
3194 /* If the destination is CCFP, then we've found an fcmp insn. */
3195 tmp = SET_DEST (tmp);
3196 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3198 fcmp_count++;
3199 continue;
3202 tmp = PATTERN (insn);
3203 /* If this is an fbranch instruction, bump the fbranch counter. */
3204 if (GET_CODE (tmp) == SET
3205 && SET_DEST (tmp) == pc_rtx
3206 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3207 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3208 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3209 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3211 fbranch_count++;
3212 continue;
3217 /* Find all floating point compare + branch insns. If possible,
3218 reverse the comparison & the branch to avoid add,tr insns. */
3219 for (insn = get_insns (); insn; insn = next_insn (insn))
3221 rtx tmp, next;
3223 /* Ignore anything that isn't an INSN. */
3224 if (GET_CODE (insn) != INSN)
3225 continue;
3227 tmp = PATTERN (insn);
3229 /* It must be a set. */
3230 if (GET_CODE (tmp) != SET)
3231 continue;
3233 /* The destination must be CCFP, which is register zero. */
3234 tmp = SET_DEST (tmp);
3235 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3236 continue;
3238 /* INSN should be a set of CCFP.
3240 See if the result of this insn is used in a reversed FP
3241 conditional branch. If so, reverse our condition and
3242 the branch. Doing so avoids useless add,tr insns. */
3243 next = next_insn (insn);
3244 while (next)
3246 /* Jumps, calls and labels stop our search. */
3247 if (GET_CODE (next) == JUMP_INSN
3248 || GET_CODE (next) == CALL_INSN
3249 || GET_CODE (next) == CODE_LABEL)
3250 break;
3252 /* As does another fcmp insn. */
3253 if (GET_CODE (next) == INSN
3254 && GET_CODE (PATTERN (next)) == SET
3255 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3256 && REGNO (SET_DEST (PATTERN (next))) == 0)
3257 break;
3259 next = next_insn (next);
3262 /* Is NEXT_INSN a branch? */
3263 if (next
3264 && GET_CODE (next) == JUMP_INSN)
3266 rtx pattern = PATTERN (next);
3268 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3269 and CCFP dies, then reverse our conditional and the branch
3270 to avoid the add,tr. */
3271 if (GET_CODE (pattern) == SET
3272 && SET_DEST (pattern) == pc_rtx
3273 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3274 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3275 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3276 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3277 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3278 && (fcmp_count == fbranch_count
3279 || (check_notes
3280 && find_regno_note (next, REG_DEAD, 0))))
3282 /* Reverse the branch. */
3283 tmp = XEXP (SET_SRC (pattern), 1);
3284 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3285 XEXP (SET_SRC (pattern), 2) = tmp;
3286 INSN_CODE (next) = -1;
3288 /* Reverse our condition. */
3289 tmp = PATTERN (insn);
3290 PUT_CODE (XEXP (tmp, 1),
3291 (reverse_condition_maybe_unordered
3292 (GET_CODE (XEXP (tmp, 1)))));
3298 pass = !pass;
3302 /* You may have trouble believing this, but this is the 32 bit HP-PA
3303 stack layout. Wow.
3305 Offset Contents
3307 Variable arguments (optional; any number may be allocated)
3309 SP-(4*(N+9)) arg word N
3311 SP-56 arg word 5
3312 SP-52 arg word 4
3314 Fixed arguments (must be allocated; may remain unused)
3316 SP-48 arg word 3
3317 SP-44 arg word 2
3318 SP-40 arg word 1
3319 SP-36 arg word 0
3321 Frame Marker
3323 SP-32 External Data Pointer (DP)
3324 SP-28 External sr4
3325 SP-24 External/stub RP (RP')
3326 SP-20 Current RP
3327 SP-16 Static Link
3328 SP-12 Clean up
3329 SP-8 Calling Stub RP (RP'')
3330 SP-4 Previous SP
3332 Top of Frame
3334 SP-0 Stack Pointer (points to next available address)
3338 /* This function saves registers as follows. Registers marked with ' are
3339 this function's registers (as opposed to the previous function's).
3340 If a frame_pointer isn't needed, r4 is saved as a general register;
3341 the space for the frame pointer is still allocated, though, to keep
3342 things simple.
3345 Top of Frame
3347 SP (FP') Previous FP
3348 SP + 4 Alignment filler (sigh)
3349 SP + 8 Space for locals reserved here.
3353 SP + n All call saved register used.
3357 SP + o All call saved fp registers used.
3361 SP + p (SP') points to next available address.
3365 /* Global variables set by output_function_prologue(). */
3366 /* Size of frame. Need to know this to emit return insns from
3367 leaf procedures. */
3368 static HOST_WIDE_INT actual_fsize, local_fsize;
3369 static int save_fregs;
3371 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3372 Handle case where DISP > 8k by using the add_high_const patterns.
3374 Note that in the DISP > 8k case, we will leave the high part of the
3375 address in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
3377 static void
3378 store_reg (int reg, HOST_WIDE_INT disp, int base)
3380 rtx insn, dest, src, basereg;
3382 src = gen_rtx_REG (word_mode, reg);
3383 basereg = gen_rtx_REG (Pmode, base);
3384 if (VAL_14_BITS_P (disp))
3386 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3387 insn = emit_move_insn (dest, src);
3389 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3391 rtx delta = GEN_INT (disp);
3392 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3394 emit_move_insn (tmpreg, delta);
3395 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3396 if (DO_FRAME_NOTES)
3398 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3399 gen_rtx_SET (VOIDmode, tmpreg,
3400 gen_rtx_PLUS (Pmode, basereg, delta)));
3401 RTX_FRAME_RELATED_P (insn) = 1;
3403 dest = gen_rtx_MEM (word_mode, tmpreg);
3404 insn = emit_move_insn (dest, src);
3406 else
3408 rtx delta = GEN_INT (disp);
3409 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3410 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3412 emit_move_insn (tmpreg, high);
3413 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3414 insn = emit_move_insn (dest, src);
3415 if (DO_FRAME_NOTES)
3416 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3417 gen_rtx_SET (VOIDmode,
3418 gen_rtx_MEM (word_mode,
3419 gen_rtx_PLUS (word_mode,
3420 basereg,
3421 delta)),
3422 src));
3425 if (DO_FRAME_NOTES)
3426 RTX_FRAME_RELATED_P (insn) = 1;
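/* For example, saving the return pointer with
   store_reg (2, -20, STACK_POINTER_REGNUM) emits a single
   "stw %r2,-20(%r30)", while a displacement such as 20480 takes the
   HIGH/LO_SUM path and typically assembles to "addil L'20480,%r30"
   followed by a store of the form "stw <src>,R'20480(%r1)".  */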
3429 /* Emit RTL to store REG at the memory location specified by BASE and then
3430 add MOD to BASE. MOD must be <= 8k. */
3432 static void
3433 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3435 rtx insn, basereg, srcreg, delta;
3437 gcc_assert (VAL_14_BITS_P (mod));
3439 basereg = gen_rtx_REG (Pmode, base);
3440 srcreg = gen_rtx_REG (word_mode, reg);
3441 delta = GEN_INT (mod);
3443 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3444 if (DO_FRAME_NOTES)
3446 RTX_FRAME_RELATED_P (insn) = 1;
3448 /* RTX_FRAME_RELATED_P must be set on each frame related set
3449 in a parallel with more than one element. */
3450 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3451 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3455 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3456 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3457 whether to add a frame note or not.
3459 In the DISP > 8k case, we leave the high part of the address in %r1.
3460 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3462 static void
3463 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3465 rtx insn;
3467 if (VAL_14_BITS_P (disp))
3469 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3470 plus_constant (gen_rtx_REG (Pmode, base), disp));
3472 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3474 rtx basereg = gen_rtx_REG (Pmode, base);
3475 rtx delta = GEN_INT (disp);
3476 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3478 emit_move_insn (tmpreg, delta);
3479 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3480 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3481 if (DO_FRAME_NOTES)
3482 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3483 gen_rtx_SET (VOIDmode, tmpreg,
3484 gen_rtx_PLUS (Pmode, basereg, delta)));
3486 else
3488 rtx basereg = gen_rtx_REG (Pmode, base);
3489 rtx delta = GEN_INT (disp);
3490 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3492 emit_move_insn (tmpreg,
3493 gen_rtx_PLUS (Pmode, basereg,
3494 gen_rtx_HIGH (Pmode, delta)));
3495 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3496 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3499 if (DO_FRAME_NOTES && note)
3500 RTX_FRAME_RELATED_P (insn) = 1;
3503 HOST_WIDE_INT
3504 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3506 int freg_saved = 0;
3507 int i, j;
3509 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3510 be consistent with the rounding and size calculation done here.
3511 Change them at the same time. */
3513 /* We do our own stack alignment. First, round the size of the
3514 stack locals up to a word boundary. */
3515 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3517 /* Space for previous frame pointer + filler. If any frame is
3518 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3519 waste some space here for the sake of HP compatibility. The
3520 first slot is only used when the frame pointer is needed. */
3521 if (size || frame_pointer_needed)
3522 size += STARTING_FRAME_OFFSET;
3524 /* If the current function calls __builtin_eh_return, then we need
3525 to allocate stack space for registers that will hold data for
3526 the exception handler. */
3527 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3529 unsigned int i;
3531 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3532 continue;
3533 size += i * UNITS_PER_WORD;
3536 /* Account for space used by the callee general register saves. */
3537 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3538 if (df_regs_ever_live_p (i))
3539 size += UNITS_PER_WORD;
3541 /* Account for space used by the callee floating point register saves. */
3542 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3543 if (df_regs_ever_live_p (i)
3544 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3546 freg_saved = 1;
3548 /* We always save both halves of the FP register, so always
3549 increment the frame size by 8 bytes. */
3550 size += 8;
3553 /* If any of the floating registers are saved, account for the
3554 alignment needed for the floating point register save block. */
3555 if (freg_saved)
3557 size = (size + 7) & ~7;
3558 if (fregs_live)
3559 *fregs_live = 1;
3562 /* The various ABIs include space for the outgoing parameters in the
3563 size of the current function's stack frame. We don't need to align
3564 for the outgoing arguments as their alignment is set by the final
3565 rounding for the frame as a whole. */
3566 size += crtl->outgoing_args_size;
3568 /* Allocate space for the fixed frame marker. This space must be
3569 allocated for any function that makes calls or allocates
3570 stack space. */
3571 if (!current_function_is_leaf || size)
3572 size += TARGET_64BIT ? 48 : 32;
3574 /* Finally, round to the preferred stack boundary. */
3575 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3576 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
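/* A rough worked example, assuming a 32-bit target where
   STARTING_FRAME_OFFSET is 8 and PREFERRED_STACK_BOUNDARY is 512
   bits (64 bytes): 100 bytes of locals round to 100; adding 8 for the
   frame pointer slot and filler gives 108; two callee general
   register saves give 116; 16 bytes of outgoing arguments give 132;
   the 32-byte frame marker for a non-leaf function gives 164, which
   rounds up to a final frame size of 192.  */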
3579 /* Generate the assembly code for function entry. FILE is a stdio
3580 stream to output the code to. SIZE is an int: how many units of
3581 temporary storage to allocate.
3583 Refer to the array `regs_ever_live' to determine which registers to
3584 save; `regs_ever_live[I]' is nonzero if register number I is ever
3585 used in the function. This function is responsible for knowing
3586 which registers should not be saved even if used. */
3588 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3589 of memory. If any fpu reg is used in the function, we allocate
3590 such a block here, at the bottom of the frame, just in case it's needed.
3592 If this function is a leaf procedure, then we may choose not
3593 to do a "save" insn. The decision about whether or not
3594 to do this is made in regclass.c. */
3596 static void
3597 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3599 /* The function's label and associated .PROC must never be
3600 separated and must be output *after* any profiling declarations
3601 to avoid changing spaces/subspaces within a procedure. */
3602 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3603 fputs ("\t.PROC\n", file);
3605 /* hppa_expand_prologue does the dirty work now. We just need
3606 to output the assembler directives which denote the start
3607 of a function. */
3608 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3609 if (current_function_is_leaf)
3610 fputs (",NO_CALLS", file);
3611 else
3612 fputs (",CALLS", file);
3613 if (rp_saved)
3614 fputs (",SAVE_RP", file);
3616 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3617 at the beginning of the frame and that it is used as the frame
3618 pointer for the frame. We do this because our current frame
3619 layout doesn't conform to that specified in the HP runtime
3620 documentation and we need a way to indicate to programs such as
3621 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3622 isn't used by HP compilers but is supported by the assembler.
3623 However, SAVE_SP is supposed to indicate that the previous stack
3624 pointer has been saved in the frame marker. */
3625 if (frame_pointer_needed)
3626 fputs (",SAVE_SP", file);
3628 /* Pass on information about the number of callee register saves
3629 performed in the prologue.
3631 The compiler is supposed to pass the highest register number
3632 saved, the assembler then has to adjust that number before
3633 entering it into the unwind descriptor (to account for any
3634 caller saved registers with lower register numbers than the
3635 first callee saved register). */
3636 if (gr_saved)
3637 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3639 if (fr_saved)
3640 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3642 fputs ("\n\t.ENTRY\n", file);
3644 remove_useless_addtr_insns (0);
3647 void
3648 hppa_expand_prologue (void)
3650 int merge_sp_adjust_with_store = 0;
3651 HOST_WIDE_INT size = get_frame_size ();
3652 HOST_WIDE_INT offset;
3653 int i;
3654 rtx insn, tmpreg;
3656 gr_saved = 0;
3657 fr_saved = 0;
3658 save_fregs = 0;
3660 /* Compute total size for frame pointer, filler, locals and rounding to
3661 the next word boundary. Similar code appears in compute_frame_size
3662 and must be changed in tandem with this code. */
3663 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3664 if (local_fsize || frame_pointer_needed)
3665 local_fsize += STARTING_FRAME_OFFSET;
3667 actual_fsize = compute_frame_size (size, &save_fregs);
3669 /* Compute a few things we will use often. */
3670 tmpreg = gen_rtx_REG (word_mode, 1);
3672 /* Save RP first. The calling conventions manual states RP will
3673 always be stored into the caller's frame at sp - 20 or sp - 16
3674 depending on which ABI is in use. */
3675 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3677 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3678 rp_saved = true;
3680 else
3681 rp_saved = false;
3683 /* Allocate the local frame and set up the frame pointer if needed. */
3684 if (actual_fsize != 0)
3686 if (frame_pointer_needed)
3688 /* Copy the old frame pointer temporarily into %r1. Set up the
3689 new stack pointer, then store away the saved old frame pointer
3690 into the stack at sp and at the same time update the stack
3691 pointer by actual_fsize bytes. Two versions, first
3692 handles small (<8k) frames. The second handles large (>=8k)
3693 frames. */
3694 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3695 if (DO_FRAME_NOTES)
3696 RTX_FRAME_RELATED_P (insn) = 1;
3698 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3699 if (DO_FRAME_NOTES)
3700 RTX_FRAME_RELATED_P (insn) = 1;
3702 if (VAL_14_BITS_P (actual_fsize))
3703 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3704 else
3706 /* It is incorrect to store the saved frame pointer at *sp,
3707 then increment sp (writes beyond the current stack boundary).
3709 So instead use stwm to store at *sp and post-increment the
3710 stack pointer as an atomic operation. Then increment sp to
3711 finish allocating the new frame. */
3712 HOST_WIDE_INT adjust1 = 8192 - 64;
3713 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3715 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3716 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3717 adjust2, 1);
3720 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3721 we need to store the previous stack pointer (frame pointer)
3722 into the frame marker on targets that use the HP unwind
3723 library. This allows the HP unwind library to be used to
3724 unwind GCC frames. However, we are not fully compatible
3725 with the HP library because our frame layout differs from
3726 that specified in the HP runtime specification.
3728 We don't want a frame note on this instruction as the frame
3729 marker moves during dynamic stack allocation.
3731 This instruction also serves as a blockage to prevent
3732 register spills from being scheduled before the stack
3733 pointer is raised. This is necessary as we store
3734 registers using the frame pointer as a base register,
3735 and the frame pointer is set before sp is raised. */
3736 if (TARGET_HPUX_UNWIND_LIBRARY)
3738 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3739 GEN_INT (TARGET_64BIT ? -8 : -4));
3741 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3742 frame_pointer_rtx);
3744 else
3745 emit_insn (gen_blockage ());
3747 /* no frame pointer needed. */
3748 else
3750 /* In some cases we can perform the first callee register save
3751 and allocate the stack frame at the same time. If so, just
3752 make a note of it and defer allocating the frame until saving
3753 the callee registers. */
3754 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3755 merge_sp_adjust_with_store = 1;
3756 /* Cannot optimize. Adjust the stack frame by actual_fsize
3757 bytes. */
3758 else
3759 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3760 actual_fsize, 1);
3764 /* Normal register save.
3766 Do not save the frame pointer in the frame_pointer_needed case. It
3767 was done earlier. */
3768 if (frame_pointer_needed)
3770 offset = local_fsize;
3772 /* Saving the EH return data registers in the frame is the simplest
3773 way to get the frame unwind information emitted. We put them
3774 just before the general registers. */
3775 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3777 unsigned int i, regno;
3779 for (i = 0; ; ++i)
3781 regno = EH_RETURN_DATA_REGNO (i);
3782 if (regno == INVALID_REGNUM)
3783 break;
3785 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3786 offset += UNITS_PER_WORD;
3790 for (i = 18; i >= 4; i--)
3791 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3793 store_reg (i, offset, FRAME_POINTER_REGNUM);
3794 offset += UNITS_PER_WORD;
3795 gr_saved++;
3797 /* Account for %r3 which is saved in a special place. */
3798 gr_saved++;
3800 /* No frame pointer needed. */
3801 else
3803 offset = local_fsize - actual_fsize;
3805 /* Saving the EH return data registers in the frame is the simplest
3806 way to get the frame unwind information emitted. */
3807 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3809 unsigned int i, regno;
3811 for (i = 0; ; ++i)
3813 regno = EH_RETURN_DATA_REGNO (i);
3814 if (regno == INVALID_REGNUM)
3815 break;
3817 /* If merge_sp_adjust_with_store is nonzero, then we can
3818 optimize the first save. */
3819 if (merge_sp_adjust_with_store)
3821 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3822 merge_sp_adjust_with_store = 0;
3824 else
3825 store_reg (regno, offset, STACK_POINTER_REGNUM);
3826 offset += UNITS_PER_WORD;
3830 for (i = 18; i >= 3; i--)
3831 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3833 /* If merge_sp_adjust_with_store is nonzero, then we can
3834 optimize the first GR save. */
3835 if (merge_sp_adjust_with_store)
3837 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3838 merge_sp_adjust_with_store = 0;
3840 else
3841 store_reg (i, offset, STACK_POINTER_REGNUM);
3842 offset += UNITS_PER_WORD;
3843 gr_saved++;
3846 /* If we wanted to merge the SP adjustment with a GR save, but we never
3847 did any GR saves, then just emit the adjustment here. */
3848 if (merge_sp_adjust_with_store)
3849 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3850 actual_fsize, 1);
3853 /* The hppa calling conventions say that %r19, the pic offset
3854 register, is saved at sp - 32 (in this function's frame)
3855 when generating PIC code. FIXME: What is the correct thing
3856 to do for functions which make no calls and allocate no
3857 frame? Do we need to allocate a frame, or can we just omit
3858 the save? For now we'll just omit the save.
3860 We don't want a note on this insn as the frame marker can
3861 move if there is a dynamic stack allocation. */
3862 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3864 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3866 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3870 /* Align pointer properly (doubleword boundary). */
3871 offset = (offset + 7) & ~7;
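/* E.g. offset = 48 already sits on a doubleword boundary and is
   unchanged; offset = 53 rounds up to (53 + 7) & ~7 = 56.  */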
3873 /* Floating point register store. */
3874 if (save_fregs)
3876 rtx base;
3878 /* First get the frame or stack pointer to the start of the FP register
3879 save area. */
3880 if (frame_pointer_needed)
3882 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3883 base = frame_pointer_rtx;
3885 else
3887 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3888 base = stack_pointer_rtx;
3891 /* Now actually save the FP registers. */
3892 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3894 if (df_regs_ever_live_p (i)
3895 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3897 rtx addr, insn, reg;
3898 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3899 reg = gen_rtx_REG (DFmode, i);
3900 insn = emit_move_insn (addr, reg);
3901 if (DO_FRAME_NOTES)
3903 RTX_FRAME_RELATED_P (insn) = 1;
3904 if (TARGET_64BIT)
3906 rtx mem = gen_rtx_MEM (DFmode,
3907 plus_constant (base, offset));
3908 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3909 gen_rtx_SET (VOIDmode, mem, reg));
3911 else
3913 rtx meml = gen_rtx_MEM (SFmode,
3914 plus_constant (base, offset));
3915 rtx memr = gen_rtx_MEM (SFmode,
3916 plus_constant (base, offset + 4));
3917 rtx regl = gen_rtx_REG (SFmode, i);
3918 rtx regr = gen_rtx_REG (SFmode, i + 1);
3919 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3920 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3921 rtvec vec;
3923 RTX_FRAME_RELATED_P (setl) = 1;
3924 RTX_FRAME_RELATED_P (setr) = 1;
3925 vec = gen_rtvec (2, setl, setr);
3926 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3927 gen_rtx_SEQUENCE (VOIDmode, vec));
3930 offset += GET_MODE_SIZE (DFmode);
3931 fr_saved++;
3937 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3938 Handle the case where DISP > 8k by using the add_high_const patterns. */
3940 static void
3941 load_reg (int reg, HOST_WIDE_INT disp, int base)
3943 rtx dest = gen_rtx_REG (word_mode, reg);
3944 rtx basereg = gen_rtx_REG (Pmode, base);
3945 rtx src;
3947 if (VAL_14_BITS_P (disp))
3948 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3949 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3951 rtx delta = GEN_INT (disp);
3952 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3954 emit_move_insn (tmpreg, delta);
3955 if (TARGET_DISABLE_INDEXING)
3957 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3958 src = gen_rtx_MEM (word_mode, tmpreg);
3960 else
3961 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3963 else
3965 rtx delta = GEN_INT (disp);
3966 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3967 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3969 emit_move_insn (tmpreg, high);
3970 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3973 emit_move_insn (dest, src);
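/* For reference: when DISP fits in 14 bits the load above becomes a
   plain "ldw disp(base),reg"; when it does not even fit in 32 bits
   the constant is first built in %r1 and the load indexes off it;
   otherwise an "addil L'disp,base" puts base plus the left part in
   %r1 and the right part is folded into the load as
   "ldw R'disp(%r1),reg".  (Illustrative; the exact instructions
   depend on word_mode and the assembler dialect.)  */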
3976 /* Update the total code bytes output to the text section. */
3978 static void
3979 update_total_code_bytes (unsigned int nbytes)
3981 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3982 && !IN_NAMED_SECTION_P (cfun->decl))
3984 unsigned int old_total = total_code_bytes;
3986 total_code_bytes += nbytes;
3988 /* Be prepared to handle overflows. */
3989 if (old_total > total_code_bytes)
3990 total_code_bytes = UINT_MAX;
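/* The overflow test above is a saturating add: the unsigned sum
   wraps on overflow, so OLD_TOTAL > TOTAL_CODE_BYTES detects the
   wrap.  A minimal standalone sketch of the same idiom (the name
   saturating_add is made up for illustration; nothing in this file
   uses it):  */
#if 0
static unsigned int
saturating_add (unsigned int a, unsigned int b)
{
  unsigned int sum = a + b;

  /* If the sum wrapped past UINT_MAX, clamp it there.  */
  return sum < a ? UINT_MAX : sum;
}
#endif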
3994 /* This function generates the assembly code for function exit.
3995 Args are as for output_function_prologue ().
3997 The function epilogue should not depend on the current stack
3998 pointer! It should use the frame pointer only. This is mandatory
3999 because of alloca; we also take advantage of it to omit stack
4000 adjustments before returning. */
4002 static void
4003 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4005 rtx insn = get_last_insn ();
4007 last_address = 0;
4009 /* hppa_expand_epilogue does the dirty work now. We just need
4010 to output the assembler directives which denote the end
4011 of a function.
4013 To make debuggers happy, emit a nop if the epilogue was completely
4014 eliminated due to a volatile call as the last insn in the
4015 current function. That way the return address (in %r2) will
4016 always point to a valid instruction in the current function. */
4018 /* Get the last real insn. */
4019 if (GET_CODE (insn) == NOTE)
4020 insn = prev_real_insn (insn);
4022 /* If it is a sequence, then look inside. */
4023 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4024 insn = XVECEXP (PATTERN (insn), 0, 0);
4026 /* If insn is a CALL_INSN, then it must be a call to a volatile
4027 function (otherwise there would be epilogue insns). */
4028 if (insn && GET_CODE (insn) == CALL_INSN)
4030 fputs ("\tnop\n", file);
4031 last_address += 4;
4034 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4036 if (TARGET_SOM && TARGET_GAS)
4038 /* We are done with this subspace except possibly for some additional
4039 debug information. Forget that we are in this subspace to ensure
4040 that the next function is output in its own subspace. */
4041 in_section = NULL;
4042 cfun->machine->in_nsubspa = 2;
4045 if (INSN_ADDRESSES_SET_P ())
4047 insn = get_last_nonnote_insn ();
4048 last_address += INSN_ADDRESSES (INSN_UID (insn));
4049 if (INSN_P (insn))
4050 last_address += insn_default_length (insn);
4051 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4052 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
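/* E.g. with a FUNCTION_BOUNDARY of 32 bits (4 bytes), a
   last_address of 0x1235 rounds up to 0x1238.  */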
4054 else
4055 last_address = UINT_MAX;
4057 /* Finally, update the total number of code bytes output so far. */
4058 update_total_code_bytes (last_address);
4061 void
4062 hppa_expand_epilogue (void)
4064 rtx tmpreg;
4065 HOST_WIDE_INT offset;
4066 HOST_WIDE_INT ret_off = 0;
4067 int i;
4068 int merge_sp_adjust_with_load = 0;
4070 /* We will use this often. */
4071 tmpreg = gen_rtx_REG (word_mode, 1);
4073 /* Try to restore RP early to avoid load/use interlocks when
4074 RP gets used in the return (bv) instruction. This appears to still
4075 be necessary even when we schedule the prologue and epilogue. */
4076 if (rp_saved)
4078 ret_off = TARGET_64BIT ? -16 : -20;
4079 if (frame_pointer_needed)
4081 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4082 ret_off = 0;
4084 else
4086 /* No frame pointer, and stack is smaller than 8k. */
4087 if (VAL_14_BITS_P (ret_off - actual_fsize))
4089 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4090 ret_off = 0;
4095 /* General register restores. */
4096 if (frame_pointer_needed)
4098 offset = local_fsize;
4100 /* If the current function calls __builtin_eh_return, then we need
4101 to restore the saved EH data registers. */
4102 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4104 unsigned int i, regno;
4106 for (i = 0; ; ++i)
4108 regno = EH_RETURN_DATA_REGNO (i);
4109 if (regno == INVALID_REGNUM)
4110 break;
4112 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4113 offset += UNITS_PER_WORD;
4117 for (i = 18; i >= 4; i--)
4118 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4120 load_reg (i, offset, FRAME_POINTER_REGNUM);
4121 offset += UNITS_PER_WORD;
4124 else
4126 offset = local_fsize - actual_fsize;
4128 /* If the current function calls __builtin_eh_return, then we need
4129 to restore the saved EH data registers. */
4130 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4132 unsigned int i, regno;
4134 for (i = 0; ; ++i)
4136 regno = EH_RETURN_DATA_REGNO (i);
4137 if (regno == INVALID_REGNUM)
4138 break;
4140 /* Only for the first load.
4141 merge_sp_adjust_with_load holds the register load
4142 with which we will merge the sp adjustment. */
4143 if (merge_sp_adjust_with_load == 0
4144 && local_fsize == 0
4145 && VAL_14_BITS_P (-actual_fsize))
4146 merge_sp_adjust_with_load = regno;
4147 else
4148 load_reg (regno, offset, STACK_POINTER_REGNUM);
4149 offset += UNITS_PER_WORD;
4153 for (i = 18; i >= 3; i--)
4155 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4157 /* Only for the first load.
4158 merge_sp_adjust_with_load holds the register load
4159 with which we will merge the sp adjustment. */
4160 if (merge_sp_adjust_with_load == 0
4161 && local_fsize == 0
4162 && VAL_14_BITS_P (-actual_fsize))
4163 merge_sp_adjust_with_load = i;
4164 else
4165 load_reg (i, offset, STACK_POINTER_REGNUM);
4166 offset += UNITS_PER_WORD;
4171 /* Align pointer properly (doubleword boundary). */
4172 offset = (offset + 7) & ~7;
4174 /* FP register restores. */
4175 if (save_fregs)
4177 /* Adjust the register to index off of. */
4178 if (frame_pointer_needed)
4179 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4180 else
4181 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4183 /* Actually do the restores now. */
4184 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4185 if (df_regs_ever_live_p (i)
4186 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4188 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4189 rtx dest = gen_rtx_REG (DFmode, i);
4190 emit_move_insn (dest, src);
4194 /* Emit a blockage insn here to keep these insns from being moved to
4195 an earlier spot in the epilogue, or into the main instruction stream.
4197 This is necessary as we must not cut the stack back before all the
4198 restores are finished. */
4199 emit_insn (gen_blockage ());
4201 /* Reset stack pointer (and possibly frame pointer). The stack
4202 pointer is initially set to fp + 64 to avoid a race condition. */
4203 if (frame_pointer_needed)
4205 rtx delta = GEN_INT (-64);
4207 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4208 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4210 /* If we were deferring a callee register restore, do it now. */
4211 else if (merge_sp_adjust_with_load)
4213 rtx delta = GEN_INT (-actual_fsize);
4214 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4216 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4218 else if (actual_fsize != 0)
4219 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4220 - actual_fsize, 0);
4222 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4223 frame greater than 8k), do so now. */
4224 if (ret_off != 0)
4225 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4227 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4229 rtx sa = EH_RETURN_STACKADJ_RTX;
4231 emit_insn (gen_blockage ());
4232 emit_insn (TARGET_64BIT
4233 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4234 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4238 rtx
4239 hppa_pic_save_rtx (void)
4241 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4244 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4245 #define NO_DEFERRED_PROFILE_COUNTERS 0
4246 #endif
4249 /* Vector of funcdef numbers. */
4250 static VEC(int,heap) *funcdef_nos;
4252 /* Output deferred profile counters. */
4253 static void
4254 output_deferred_profile_counters (void)
4256 unsigned int i;
4257 int align, n;
4259 if (VEC_empty (int, funcdef_nos))
4260 return;
4262 switch_to_section (data_section);
4263 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4264 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4266 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4268 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4269 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4272 VEC_free (int, heap, funcdef_nos);
4275 void
4276 hppa_profile_hook (int label_no)
4278 /* We use SImode for the address of the function in both 32 and
4279 64-bit code to avoid having to provide DImode versions of the
4280 lcla2 and load_offset_label_address insn patterns. */
4281 rtx reg = gen_reg_rtx (SImode);
4282 rtx label_rtx = gen_label_rtx ();
4283 rtx begin_label_rtx, call_insn;
4284 char begin_label_name[16];
4286 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4287 label_no);
4288 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4290 if (TARGET_64BIT)
4291 emit_move_insn (arg_pointer_rtx,
4292 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4293 GEN_INT (64)));
4295 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4297 /* The address of the function is loaded into %r25 with an instruction-
4298 relative sequence that avoids the use of relocations. The sequence
4299 is split so that the load_offset_label_address instruction can
4300 occupy the delay slot of the call to _mcount. */
4301 if (TARGET_PA_20)
4302 emit_insn (gen_lcla2 (reg, label_rtx));
4303 else
4304 emit_insn (gen_lcla1 (reg, label_rtx));
4306 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4307 reg, begin_label_rtx, label_rtx));
4309 #if !NO_DEFERRED_PROFILE_COUNTERS
4311 rtx count_label_rtx, addr, r24;
4312 char count_label_name[16];
4314 VEC_safe_push (int, heap, funcdef_nos, label_no);
4315 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4316 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4318 addr = force_reg (Pmode, count_label_rtx);
4319 r24 = gen_rtx_REG (Pmode, 24);
4320 emit_move_insn (r24, addr);
4322 call_insn =
4323 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4324 gen_rtx_SYMBOL_REF (Pmode,
4325 "_mcount")),
4326 GEN_INT (TARGET_64BIT ? 24 : 12)));
4328 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4330 #else
4332 call_insn =
4333 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4334 gen_rtx_SYMBOL_REF (Pmode,
4335 "_mcount")),
4336 GEN_INT (TARGET_64BIT ? 16 : 8)));
4338 #endif
4340 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4341 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4343 /* Indicate the _mcount call cannot throw, nor will it execute a
4344 non-local goto. */
4345 add_reg_note (call_insn, REG_EH_REGION, constm1_rtx);
4348 /* Fetch the return address for the frame COUNT steps up from
4349 the current frame, after the prologue. FRAMEADDR is the
4350 frame pointer of the COUNT frame.
4352 We want to ignore any export stub remnants here. To handle this,
4353 we examine the code at the return address, and if it is an export
4354 stub, we return a memory rtx for the stub return address stored
4355 at frame-24.
4357 The value returned is used in two different ways:
4359 1. To find a function's caller.
4361 2. To change the return address for a function.
4363 This function handles most instances of case 1; however, it will
4364 fail if there are two levels of stubs to execute on the return
4365 path. The only way I believe that can happen is if the return value
4366 needs a parameter relocation, which never happens for C code.
4368 This function handles most instances of case 2; however, it will
4369 fail if we did not originally have stub code on the return path
4370 but will need stub code on the new return path. This can happen if
4371 the caller & callee are both in the main program, but the new
4372 return location is in a shared library. */
4374 rtx
4375 return_addr_rtx (int count, rtx frameaddr)
4377 rtx label;
4378 rtx rp;
4379 rtx saved_rp;
4380 rtx ins;
4382 if (count != 0)
4383 return NULL_RTX;
4385 rp = get_hard_reg_initial_val (Pmode, 2);
4387 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4388 return rp;
4390 saved_rp = gen_reg_rtx (Pmode);
4391 emit_move_insn (saved_rp, rp);
4393 /* Get pointer to the instruction stream. We have to mask out the
4394 privilege level from the two low order bits of the return address
4395 pointer here so that ins will point to the start of the first
4396 instruction that would have been executed if we returned. */
4397 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4398 label = gen_label_rtx ();
4400 /* Check the instruction stream at the normal return address for the
4401 export stub:
4403 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4404 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4405 0x00011820 | stub+16: mtsp r1,sr0
4406 0xe0400002 | stub+20: be,n 0(sr0,rp)
4408 If it is an export stub, then our return address is really in
4409 -24[frameaddr]. */
4411 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4412 NULL_RTX, SImode, 1);
4413 emit_jump_insn (gen_bne (label));
4415 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4416 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4417 emit_jump_insn (gen_bne (label));
4419 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4420 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4421 emit_jump_insn (gen_bne (label));
4423 /* 0xe0400002 must be specified as -532676606 so that it won't be
4424 rejected as an invalid immediate operand on 64-bit hosts. */
4425 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4426 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
4428 /* If there is no export stub then just use the value saved from
4429 the return pointer register. */
4431 emit_jump_insn (gen_bne (label));
4433 /* Here we know that our return address points to an export
4434 stub. We don't want to return the address of the export stub,
4435 but rather the return address of the export stub. That return
4436 address is stored at -24[frameaddr]. */
4438 emit_move_insn (saved_rp,
4439 gen_rtx_MEM (Pmode,
4440 memory_address (Pmode,
4441 plus_constant (frameaddr,
4442 -24))));
4444 emit_label (label);
4445 return saved_rp;
4448 void
4449 emit_bcond_fp (enum rtx_code code, rtx operand0)
4451 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4452 gen_rtx_IF_THEN_ELSE (VOIDmode,
4453 gen_rtx_fmt_ee (code,
4454 VOIDmode,
4455 gen_rtx_REG (CCFPmode, 0),
4456 const0_rtx),
4457 gen_rtx_LABEL_REF (VOIDmode, operand0),
4458 pc_rtx)));
4462 rtx
4463 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4465 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4466 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4469 /* Adjust the cost of a scheduling dependency. Return the new cost of
4470 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4472 static int
4473 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4475 enum attr_type attr_type;
4477 /* Don't adjust costs for a pa8000 chip; also don't adjust any
4478 true dependencies, as they are described with bypasses now. */
4479 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4480 return cost;
4482 if (! recog_memoized (insn))
4483 return 0;
4485 attr_type = get_attr_type (insn);
4487 switch (REG_NOTE_KIND (link))
4489 case REG_DEP_ANTI:
4490 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4491 cycles later. */
4493 if (attr_type == TYPE_FPLOAD)
4495 rtx pat = PATTERN (insn);
4496 rtx dep_pat = PATTERN (dep_insn);
4497 if (GET_CODE (pat) == PARALLEL)
4499 /* This happens for the fldXs,mb patterns. */
4500 pat = XVECEXP (pat, 0, 0);
4502 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4503 /* If this happens, we have to extend this to schedule
4504 optimally. Return 0 for now. */
4505 return 0;
4507 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4509 if (! recog_memoized (dep_insn))
4510 return 0;
4511 switch (get_attr_type (dep_insn))
4513 case TYPE_FPALU:
4514 case TYPE_FPMULSGL:
4515 case TYPE_FPMULDBL:
4516 case TYPE_FPDIVSGL:
4517 case TYPE_FPDIVDBL:
4518 case TYPE_FPSQRTSGL:
4519 case TYPE_FPSQRTDBL:
4520 /* A fpload can't be issued until one cycle before a
4521 preceding arithmetic operation has finished if
4522 the target of the fpload is any of the sources
4523 (or destination) of the arithmetic operation. */
4524 return insn_default_latency (dep_insn) - 1;
4526 default:
4527 return 0;
4531 else if (attr_type == TYPE_FPALU)
4533 rtx pat = PATTERN (insn);
4534 rtx dep_pat = PATTERN (dep_insn);
4535 if (GET_CODE (pat) == PARALLEL)
4537 /* This happens for the fldXs,mb patterns. */
4538 pat = XVECEXP (pat, 0, 0);
4540 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4541 /* If this happens, we have to extend this to schedule
4542 optimally. Return 0 for now. */
4543 return 0;
4545 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4547 if (! recog_memoized (dep_insn))
4548 return 0;
4549 switch (get_attr_type (dep_insn))
4551 case TYPE_FPDIVSGL:
4552 case TYPE_FPDIVDBL:
4553 case TYPE_FPSQRTSGL:
4554 case TYPE_FPSQRTDBL:
4555 /* An ALU flop can't be issued until two cycles before a
4556 preceding divide or sqrt operation has finished if
4557 the target of the ALU flop is any of the sources
4558 (or destination) of the divide or sqrt operation. */
4559 return insn_default_latency (dep_insn) - 2;
4561 default:
4562 return 0;
4567 /* For other anti dependencies, the cost is 0. */
4568 return 0;
4570 case REG_DEP_OUTPUT:
4571 /* Output dependency; DEP_INSN writes a register that INSN writes some
4572 cycles later. */
4573 if (attr_type == TYPE_FPLOAD)
4575 rtx pat = PATTERN (insn);
4576 rtx dep_pat = PATTERN (dep_insn);
4577 if (GET_CODE (pat) == PARALLEL)
4579 /* This happens for the fldXs,mb patterns. */
4580 pat = XVECEXP (pat, 0, 0);
4582 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4583 /* If this happens, we have to extend this to schedule
4584 optimally. Return 0 for now. */
4585 return 0;
4587 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4589 if (! recog_memoized (dep_insn))
4590 return 0;
4591 switch (get_attr_type (dep_insn))
4593 case TYPE_FPALU:
4594 case TYPE_FPMULSGL:
4595 case TYPE_FPMULDBL:
4596 case TYPE_FPDIVSGL:
4597 case TYPE_FPDIVDBL:
4598 case TYPE_FPSQRTSGL:
4599 case TYPE_FPSQRTDBL:
4600 /* A fpload can't be issued until one cycle before a
4601 preceding arithmetic operation has finished if
4602 the target of the fpload is the destination of the
4603 arithmetic operation.
4605 Exception: For PA7100LC, PA7200 and PA7300, the cost
4606 is 3 cycles, unless they bundle together. We also
4607 pay the penalty if the second insn is a fpload. */
4608 return insn_default_latency (dep_insn) - 1;
4610 default:
4611 return 0;
4615 else if (attr_type == TYPE_FPALU)
4617 rtx pat = PATTERN (insn);
4618 rtx dep_pat = PATTERN (dep_insn);
4619 if (GET_CODE (pat) == PARALLEL)
4621 /* This happens for the fldXs,mb patterns. */
4622 pat = XVECEXP (pat, 0, 0);
4624 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4625 /* If this happens, we have to extend this to schedule
4626 optimally. Return 0 for now. */
4627 return 0;
4629 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4631 if (! recog_memoized (dep_insn))
4632 return 0;
4633 switch (get_attr_type (dep_insn))
4635 case TYPE_FPDIVSGL:
4636 case TYPE_FPDIVDBL:
4637 case TYPE_FPSQRTSGL:
4638 case TYPE_FPSQRTDBL:
4639 /* An ALU flop can't be issued until two cycles before a
4640 preceding divide or sqrt operation has finished if
4641 the target of the ALU flop is also the target of
4642 the divide or sqrt operation. */
4643 return insn_default_latency (dep_insn) - 2;
4645 default:
4646 return 0;
4651 /* For other output dependencies, the cost is 0. */
4652 return 0;
4654 default:
4655 gcc_unreachable ();
4659 /* Adjust scheduling priorities. We use this to try and keep addil
4660 and the next use of %r1 close together. */
4661 static int
4662 pa_adjust_priority (rtx insn, int priority)
4664 rtx set = single_set (insn);
4665 rtx src, dest;
4666 if (set)
4668 src = SET_SRC (set);
4669 dest = SET_DEST (set);
4670 if (GET_CODE (src) == LO_SUM
4671 && symbolic_operand (XEXP (src, 1), VOIDmode)
4672 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4673 priority >>= 3;
4675 else if (GET_CODE (src) == MEM
4676 && GET_CODE (XEXP (src, 0)) == LO_SUM
4677 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4678 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4679 priority >>= 1;
4681 else if (GET_CODE (dest) == MEM
4682 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4683 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4684 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4685 priority >>= 3;
4687 return priority;
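/* The LO_SUM forms demoted above are the second halves of
   addil/%r1 pairs, e.g. (illustrative)

       addil LR'sym-$global$,%r27    ; writes %r1
       stw   %r4,RR'sym-$global$(%r1)

   Lowering the priority of the %r1 user encourages the scheduler
   to keep it next to the addil that feeds it.  */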
4690 /* The 700 can only issue a single insn at a time.
4691 The 7XXX processors can issue two insns at a time.
4692 The 8000 can issue 4 insns at a time. */
4693 static int
4694 pa_issue_rate (void)
4696 switch (pa_cpu)
4698 case PROCESSOR_700: return 1;
4699 case PROCESSOR_7100: return 2;
4700 case PROCESSOR_7100LC: return 2;
4701 case PROCESSOR_7200: return 2;
4702 case PROCESSOR_7300: return 2;
4703 case PROCESSOR_8000: return 4;
4705 default:
4706 gcc_unreachable ();
4712 /* Return any length adjustment needed by INSN which already has its length
4713 computed as LENGTH. Return zero if no adjustment is necessary.
4715 For the PA: function calls, millicode calls, and backwards short
4716 conditional branches with unfilled delay slots need an adjustment by +1
4717 (to account for the NOP which will be inserted into the instruction stream).
4719 Also compute the length of an inline block move here as it is too
4720 complicated to express as a length attribute in pa.md. */
4721 int
4722 pa_adjust_insn_length (rtx insn, int length)
4724 rtx pat = PATTERN (insn);
4726 /* Jumps inside switch tables which have unfilled delay slots need
4727 adjustment. */
4728 if (GET_CODE (insn) == JUMP_INSN
4729 && GET_CODE (pat) == PARALLEL
4730 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4731 return 4;
4732 /* Millicode insn with an unfilled delay slot. */
4733 else if (GET_CODE (insn) == INSN
4734 && GET_CODE (pat) != SEQUENCE
4735 && GET_CODE (pat) != USE
4736 && GET_CODE (pat) != CLOBBER
4737 && get_attr_type (insn) == TYPE_MILLI)
4738 return 4;
4739 /* Block move pattern. */
4740 else if (GET_CODE (insn) == INSN
4741 && GET_CODE (pat) == PARALLEL
4742 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4743 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4744 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4745 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4746 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4747 return compute_movmem_length (insn) - 4;
4748 /* Block clear pattern. */
4749 else if (GET_CODE (insn) == INSN
4750 && GET_CODE (pat) == PARALLEL
4751 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4752 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4753 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4754 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4755 return compute_clrmem_length (insn) - 4;
4756 /* Conditional branch with an unfilled delay slot. */
4757 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4759 /* Adjust a short backwards conditional with an unfilled delay slot. */
4760 if (GET_CODE (pat) == SET
4761 && length == 4
4762 && ! forward_branch_p (insn))
4763 return 4;
4764 else if (GET_CODE (pat) == PARALLEL
4765 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4766 && length == 4)
4767 return 4;
4768 /* Adjust dbra insn with short backwards conditional branch with
4769 unfilled delay slot -- only for the case where the counter is in a
4770 general register. */
4771 else if (GET_CODE (pat) == PARALLEL
4772 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4773 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4774 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4775 && length == 4
4776 && ! forward_branch_p (insn))
4777 return 4;
4778 else
4779 return 0;
4781 return 0;
4784 /* Print operand X (an rtx) in assembler syntax to file FILE.
4785 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4786 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4788 void
4789 print_operand (FILE *file, rtx x, int code)
4791 switch (code)
4793 case '#':
4794 /* Output a 'nop' if there's nothing for the delay slot. */
4795 if (dbr_sequence_length () == 0)
4796 fputs ("\n\tnop", file);
4797 return;
4798 case '*':
4799 /* Output a nullification completer if there's nothing for the
4800 delay slot or nullification is requested. */
4801 if (dbr_sequence_length () == 0 ||
4802 (final_sequence &&
4803 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4804 fputs (",n", file);
4805 return;
4806 case 'R':
4807 /* Print out the second register name of a register pair.
4808 I.e., R (6) => 7. */
4809 fputs (reg_names[REGNO (x) + 1], file);
4810 return;
4811 case 'r':
4812 /* A register or zero. */
4813 if (x == const0_rtx
4814 || (x == CONST0_RTX (DFmode))
4815 || (x == CONST0_RTX (SFmode)))
4817 fputs ("%r0", file);
4818 return;
4820 else
4821 break;
4822 case 'f':
4823 /* A register or zero (floating point). */
4824 if (x == const0_rtx
4825 || (x == CONST0_RTX (DFmode))
4826 || (x == CONST0_RTX (SFmode)))
4828 fputs ("%fr0", file);
4829 return;
4831 else
4832 break;
4833 case 'A':
4835 rtx xoperands[2];
4837 xoperands[0] = XEXP (XEXP (x, 0), 0);
4838 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4839 output_global_address (file, xoperands[1], 0);
4840 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4841 return;
4844 case 'C': /* Plain (C)ondition */
4845 case 'X':
4846 switch (GET_CODE (x))
4848 case EQ:
4849 fputs ("=", file); break;
4850 case NE:
4851 fputs ("<>", file); break;
4852 case GT:
4853 fputs (">", file); break;
4854 case GE:
4855 fputs (">=", file); break;
4856 case GEU:
4857 fputs (">>=", file); break;
4858 case GTU:
4859 fputs (">>", file); break;
4860 case LT:
4861 fputs ("<", file); break;
4862 case LE:
4863 fputs ("<=", file); break;
4864 case LEU:
4865 fputs ("<<=", file); break;
4866 case LTU:
4867 fputs ("<<", file); break;
4868 default:
4869 gcc_unreachable ();
4871 return;
4872 case 'N': /* Condition, (N)egated */
4873 switch (GET_CODE (x))
4875 case EQ:
4876 fputs ("<>", file); break;
4877 case NE:
4878 fputs ("=", file); break;
4879 case GT:
4880 fputs ("<=", file); break;
4881 case GE:
4882 fputs ("<", file); break;
4883 case GEU:
4884 fputs ("<<", file); break;
4885 case GTU:
4886 fputs ("<<=", file); break;
4887 case LT:
4888 fputs (">=", file); break;
4889 case LE:
4890 fputs (">", file); break;
4891 case LEU:
4892 fputs (">>", file); break;
4893 case LTU:
4894 fputs (">>=", file); break;
4895 default:
4896 gcc_unreachable ();
4898 return;
4899 /* For floating point comparisons. Note that the output
4900 predicates are the complement of the desired mode. The
4901 conditions for GT, GE, LT, LE and LTGT cause an invalid
4902 operation exception if the result is unordered and this
4903 exception is enabled in the floating-point status register. */
4904 case 'Y':
4905 switch (GET_CODE (x))
4907 case EQ:
4908 fputs ("!=", file); break;
4909 case NE:
4910 fputs ("=", file); break;
4911 case GT:
4912 fputs ("!>", file); break;
4913 case GE:
4914 fputs ("!>=", file); break;
4915 case LT:
4916 fputs ("!<", file); break;
4917 case LE:
4918 fputs ("!<=", file); break;
4919 case LTGT:
4920 fputs ("!<>", file); break;
4921 case UNLE:
4922 fputs ("!?<=", file); break;
4923 case UNLT:
4924 fputs ("!?<", file); break;
4925 case UNGE:
4926 fputs ("!?>=", file); break;
4927 case UNGT:
4928 fputs ("!?>", file); break;
4929 case UNEQ:
4930 fputs ("!?=", file); break;
4931 case UNORDERED:
4932 fputs ("!?", file); break;
4933 case ORDERED:
4934 fputs ("?", file); break;
4935 default:
4936 gcc_unreachable ();
4938 return;
4939 case 'S': /* Condition, operands are (S)wapped. */
4940 switch (GET_CODE (x))
4942 case EQ:
4943 fputs ("=", file); break;
4944 case NE:
4945 fputs ("<>", file); break;
4946 case GT:
4947 fputs ("<", file); break;
4948 case GE:
4949 fputs ("<=", file); break;
4950 case GEU:
4951 fputs ("<<=", file); break;
4952 case GTU:
4953 fputs ("<<", file); break;
4954 case LT:
4955 fputs (">", file); break;
4956 case LE:
4957 fputs (">=", file); break;
4958 case LEU:
4959 fputs (">>=", file); break;
4960 case LTU:
4961 fputs (">>", file); break;
4962 default:
4963 gcc_unreachable ();
4965 return;
4966 case 'B': /* Condition, (B)oth swapped and negate. */
4967 switch (GET_CODE (x))
4969 case EQ:
4970 fputs ("<>", file); break;
4971 case NE:
4972 fputs ("=", file); break;
4973 case GT:
4974 fputs (">=", file); break;
4975 case GE:
4976 fputs (">", file); break;
4977 case GEU:
4978 fputs (">>", file); break;
4979 case GTU:
4980 fputs (">>=", file); break;
4981 case LT:
4982 fputs ("<=", file); break;
4983 case LE:
4984 fputs ("<", file); break;
4985 case LEU:
4986 fputs ("<<", file); break;
4987 case LTU:
4988 fputs ("<<=", file); break;
4989 default:
4990 gcc_unreachable ();
4992 return;
4993 case 'k':
4994 gcc_assert (GET_CODE (x) == CONST_INT);
4995 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4996 return;
4997 case 'Q':
4998 gcc_assert (GET_CODE (x) == CONST_INT);
4999 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5000 return;
5001 case 'L':
5002 gcc_assert (GET_CODE (x) == CONST_INT);
5003 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5004 return;
5005 case 'O':
5006 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5007 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5008 return;
5009 case 'p':
5010 gcc_assert (GET_CODE (x) == CONST_INT);
5011 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5012 return;
5013 case 'P':
5014 gcc_assert (GET_CODE (x) == CONST_INT);
5015 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5016 return;
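/* Example: for x = 5, 'Q' prints 59 (64 - 5), 'L' prints 27
   (32 - 5), 'p' prints 58 (63 - 5) and 'P' prints 26 (31 - 5) --
   the complemented shift counts and bit positions expected by the
   deposit/extract instructions.  */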
5017 case 'I':
5018 if (GET_CODE (x) == CONST_INT)
5019 fputs ("i", file);
5020 return;
5021 case 'M':
5022 case 'F':
5023 switch (GET_CODE (XEXP (x, 0)))
5025 case PRE_DEC:
5026 case PRE_INC:
5027 if (ASSEMBLER_DIALECT == 0)
5028 fputs ("s,mb", file);
5029 else
5030 fputs (",mb", file);
5031 break;
5032 case POST_DEC:
5033 case POST_INC:
5034 if (ASSEMBLER_DIALECT == 0)
5035 fputs ("s,ma", file);
5036 else
5037 fputs (",ma", file);
5038 break;
5039 case PLUS:
5040 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5041 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5043 if (ASSEMBLER_DIALECT == 0)
5044 fputs ("x", file);
5046 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5047 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5049 if (ASSEMBLER_DIALECT == 0)
5050 fputs ("x,s", file);
5051 else
5052 fputs (",s", file);
5054 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5055 fputs ("s", file);
5056 break;
5057 default:
5058 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5059 fputs ("s", file);
5060 break;
5062 return;
5063 case 'G':
5064 output_global_address (file, x, 0);
5065 return;
5066 case 'H':
5067 output_global_address (file, x, 1);
5068 return;
5069 case 0: /* Don't do anything special */
5070 break;
5071 case 'Z':
5073 unsigned op[3];
5074 compute_zdepwi_operands (INTVAL (x), op);
5075 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5076 return;
5078 case 'z':
5080 unsigned op[3];
5081 compute_zdepdi_operands (INTVAL (x), op);
5082 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5083 return;
5085 case 'c':
5086 /* We can get here from a .vtable_inherit due to our
5087 CONSTANT_ADDRESS_P rejecting perfectly good constant
5088 addresses. */
5089 break;
5090 default:
5091 gcc_unreachable ();
5093 if (GET_CODE (x) == REG)
5095 fputs (reg_names [REGNO (x)], file);
5096 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5098 fputs ("R", file);
5099 return;
5101 if (FP_REG_P (x)
5102 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5103 && (REGNO (x) & 1) == 0)
5104 fputs ("L", file);
5106 else if (GET_CODE (x) == MEM)
5108 int size = GET_MODE_SIZE (GET_MODE (x));
5109 rtx base = NULL_RTX;
5110 switch (GET_CODE (XEXP (x, 0)))
5112 case PRE_DEC:
5113 case POST_DEC:
5114 base = XEXP (XEXP (x, 0), 0);
5115 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5116 break;
5117 case PRE_INC:
5118 case POST_INC:
5119 base = XEXP (XEXP (x, 0), 0);
5120 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5121 break;
5122 case PLUS:
5123 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5124 fprintf (file, "%s(%s)",
5125 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5126 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5127 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5128 fprintf (file, "%s(%s)",
5129 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5130 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5131 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5132 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5134 /* Because the REG_POINTER flag can get lost during reload,
5135 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5136 index and base registers in the combined move patterns. */
5137 rtx base = XEXP (XEXP (x, 0), 1);
5138 rtx index = XEXP (XEXP (x, 0), 0);
5140 fprintf (file, "%s(%s)",
5141 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5143 else
5144 output_address (XEXP (x, 0));
5145 break;
5146 default:
5147 output_address (XEXP (x, 0));
5148 break;
5151 else
5152 output_addr_const (file, x);
5155 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5157 void
5158 output_global_address (FILE *file, rtx x, int round_constant)
5161 /* Imagine (high (const (plus ...))). */
5162 if (GET_CODE (x) == HIGH)
5163 x = XEXP (x, 0);
5165 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5166 output_addr_const (file, x);
5167 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5169 output_addr_const (file, x);
5170 fputs ("-$global$", file);
5172 else if (GET_CODE (x) == CONST)
5174 const char *sep = "";
5175 int offset = 0; /* assembler wants -$global$ at end */
5176 rtx base = NULL_RTX;
5178 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5180 case SYMBOL_REF:
5181 base = XEXP (XEXP (x, 0), 0);
5182 output_addr_const (file, base);
5183 break;
5184 case CONST_INT:
5185 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5186 break;
5187 default:
5188 gcc_unreachable ();
5191 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5193 case SYMBOL_REF:
5194 base = XEXP (XEXP (x, 0), 1);
5195 output_addr_const (file, base);
5196 break;
5197 case CONST_INT:
5198 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5199 break;
5200 default:
5201 gcc_unreachable ();
5204 /* How bogus. The compiler is apparently responsible for
5205 rounding the constant if it uses an LR field selector.
5207 The linker and/or assembler seem a better place since
5208 they have to do this kind of thing already.
5210 If we fail to do this, HP's optimizing linker may eliminate
5211 an addil, but not update the ldw/stw/ldo instruction that
5212 uses the result of the addil. */
5213 if (round_constant)
5214 offset = ((offset + 0x1000) & ~0x1fff);
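/* E.g. an offset of 0x2345 becomes (0x2345 + 0x1000) & ~0x1fff
   == 0x2000, matching the rounding applied by the LR field
   selector.  */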
5216 switch (GET_CODE (XEXP (x, 0)))
5218 case PLUS:
5219 if (offset < 0)
5221 offset = -offset;
5222 sep = "-";
5224 else
5225 sep = "+";
5226 break;
5228 case MINUS:
5229 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5230 sep = "-";
5231 break;
5233 default:
5234 gcc_unreachable ();
5237 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5238 fputs ("-$global$", file);
5239 if (offset)
5240 fprintf (file, "%s%d", sep, offset);
5242 else
5243 output_addr_const (file, x);
5246 /* Output boilerplate text to appear at the beginning of the file.
5247 There are several possible versions. */
5248 #define aputs(x) fputs(x, asm_out_file)
5249 static inline void
5250 pa_file_start_level (void)
5252 if (TARGET_64BIT)
5253 aputs ("\t.LEVEL 2.0w\n");
5254 else if (TARGET_PA_20)
5255 aputs ("\t.LEVEL 2.0\n");
5256 else if (TARGET_PA_11)
5257 aputs ("\t.LEVEL 1.1\n");
5258 else
5259 aputs ("\t.LEVEL 1.0\n");
5262 static inline void
5263 pa_file_start_space (int sortspace)
5265 aputs ("\t.SPACE $PRIVATE$");
5266 if (sortspace)
5267 aputs (",SORT=16");
5268 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5269 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5270 "\n\t.SPACE $TEXT$");
5271 if (sortspace)
5272 aputs (",SORT=8");
5273 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5274 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5277 static inline void
5278 pa_file_start_file (int want_version)
5280 if (write_symbols != NO_DEBUG)
5282 output_file_directive (asm_out_file, main_input_filename);
5283 if (want_version)
5284 aputs ("\t.version\t\"01.01\"\n");
5288 static inline void
5289 pa_file_start_mcount (const char *aswhat)
5291 if (profile_flag)
5292 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5295 static void
5296 pa_elf_file_start (void)
5298 pa_file_start_level ();
5299 pa_file_start_mcount ("ENTRY");
5300 pa_file_start_file (0);
5303 static void
5304 pa_som_file_start (void)
5306 pa_file_start_level ();
5307 pa_file_start_space (0);
5308 aputs ("\t.IMPORT $global$,DATA\n"
5309 "\t.IMPORT $$dyncall,MILLICODE\n");
5310 pa_file_start_mcount ("CODE");
5311 pa_file_start_file (0);
5314 static void
5315 pa_linux_file_start (void)
5317 pa_file_start_file (1);
5318 pa_file_start_level ();
5319 pa_file_start_mcount ("CODE");
5322 static void
5323 pa_hpux64_gas_file_start (void)
5325 pa_file_start_level ();
5326 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5327 if (profile_flag)
5328 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5329 #endif
5330 pa_file_start_file (1);
5333 static void
5334 pa_hpux64_hpas_file_start (void)
5336 pa_file_start_level ();
5337 pa_file_start_space (1);
5338 pa_file_start_mcount ("CODE");
5339 pa_file_start_file (0);
5341 #undef aputs
5343 /* Search the deferred plabel list for SYMBOL and return its internal
5344 label. If an entry for SYMBOL is not found, a new entry is created. */
5346 static rtx
5347 get_deferred_plabel (rtx symbol)
5349 const char *fname = XSTR (symbol, 0);
5350 size_t i;
5352 /* See if we have already put this function on the list of deferred
5353 plabels. This list is generally small, so a linear search is not
5354 too ugly. If it proves too slow, replace it with something faster. */
5355 for (i = 0; i < n_deferred_plabels; i++)
5356 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5357 break;
5359 /* If the deferred plabel list is empty, or this entry was not found
5360 on the list, create a new entry on the list. */
5361 if (deferred_plabels == NULL || i == n_deferred_plabels)
5363 tree id;
5365 if (deferred_plabels == 0)
5366 deferred_plabels = (struct deferred_plabel *)
5367 ggc_alloc (sizeof (struct deferred_plabel));
5368 else
5369 deferred_plabels = (struct deferred_plabel *)
5370 ggc_realloc (deferred_plabels,
5371 ((n_deferred_plabels + 1)
5372 * sizeof (struct deferred_plabel)));
5374 i = n_deferred_plabels++;
5375 deferred_plabels[i].internal_label = gen_label_rtx ();
5376 deferred_plabels[i].symbol = symbol;
5378 /* Gross. We have just implicitly taken the address of this
5379 function. Mark it in the same manner as assemble_name. */
5380 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5381 if (id)
5382 mark_referenced (id);
5385 return deferred_plabels[i].internal_label;
5388 static void
5389 output_deferred_plabels (void)
5391 size_t i;
5393 /* If we have some deferred plabels, then we need to switch into the
5394 data or readonly data section, and align it to a 4 byte (8 byte
5395 for TARGET_64BIT) boundary before outputting the deferred plabels. */
5396 if (n_deferred_plabels)
5398 switch_to_section (flag_pic ? data_section : readonly_data_section);
5399 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5402 /* Now output the deferred plabels. */
5403 for (i = 0; i < n_deferred_plabels; i++)
5405 targetm.asm_out.internal_label (asm_out_file, "L",
5406 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5407 assemble_integer (deferred_plabels[i].symbol,
5408 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5412 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5413 /* Initialize optabs to point to HPUX long double emulation routines. */
5414 static void
5415 pa_hpux_init_libfuncs (void)
5417 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5418 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5419 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5420 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5421 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5422 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5423 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5424 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5425 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5427 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5428 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5429 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5430 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5431 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5432 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5433 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5435 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5436 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5437 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5438 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5440 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5441 ? "__U_Qfcnvfxt_quad_to_sgl"
5442 : "_U_Qfcnvfxt_quad_to_sgl");
5443 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5444 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5445 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5447 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5448 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5449 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5450 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5452 #endif
5454 /* HP's millicode routines mean something special to the assembler.
5455 Keep track of which ones we have used. */
5457 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5458 static void import_milli (enum millicodes);
5459 static char imported[(int) end1000];
5460 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5461 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5462 #define MILLI_START 10
5464 static void
5465 import_milli (enum millicodes code)
5467 char str[sizeof (import_string)];
5469 if (!imported[(int) code])
5471 imported[(int) code] = 1;
5472 strcpy (str, import_string);
5473 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5474 output_asm_insn (str, 0);
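/* For example, the first use of the mulI millicode emits

       .IMPORT $$mulI,MILLICODE

   by splicing "mulI" over the "...." placeholder (MILLI_START is
   the offset of that placeholder in IMPORT_STRING).  */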
5478 /* The register constraints have put the operands and return value in
5479 the proper registers. */
5481 const char *
5482 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5484 import_milli (mulI);
5485 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5488 /* Emit the rtl for doing a division by a constant. */
5490 /* Do magic division millicodes exist for this value? */
5491 const int magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
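/* I.e., division by 3, 5, 6, 7, 9, 10, 12, 14 or 15 has a magic
   $$divI_<n>/$$divU_<n> millicode entry; 11, 13 and the powers of
   two do not, powers of two being handled by shift patterns
   instead.  */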
5493 /* We'll use an array to keep track of the magic millicodes and
5494 whether or not we've used them already. [n][0] is signed, [n][1] is
5495 unsigned. */
5497 static int div_milli[16][2];
5499 int
5500 emit_hpdiv_const (rtx *operands, int unsignedp)
5502 if (GET_CODE (operands[2]) == CONST_INT
5503 && INTVAL (operands[2]) > 0
5504 && INTVAL (operands[2]) < 16
5505 && magic_milli[INTVAL (operands[2])])
5507 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5509 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5510 emit
5511 (gen_rtx_PARALLEL
5512 (VOIDmode,
5513 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5514 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5515 SImode,
5516 gen_rtx_REG (SImode, 26),
5517 operands[2])),
5518 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5519 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5520 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5521 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5522 gen_rtx_CLOBBER (VOIDmode, ret))));
5523 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5524 return 1;
5526 return 0;
5529 const char *
5530 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5532 int divisor;
5534 /* If the divisor is a constant, try to use one of the special
5535 opcodes. */
5536 if (GET_CODE (operands[0]) == CONST_INT)
5538 static char buf[100];
5539 divisor = INTVAL (operands[0]);
5540 if (!div_milli[divisor][unsignedp])
5542 div_milli[divisor][unsignedp] = 1;
5543 if (unsignedp)
5544 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5545 else
5546 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5548 if (unsignedp)
5550 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5551 INTVAL (operands[0]));
5552 return output_millicode_call (insn,
5553 gen_rtx_SYMBOL_REF (SImode, buf));
5555 else
5557 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5558 INTVAL (operands[0]));
5559 return output_millicode_call (insn,
5560 gen_rtx_SYMBOL_REF (SImode, buf));
5563 /* Divisor isn't a special constant. */
5564 else
5566 if (unsignedp)
5568 import_milli (divU);
5569 return output_millicode_call (insn,
5570 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5572 else
5574 import_milli (divI);
5575 return output_millicode_call (insn,
5576 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5581 /* Output a $$rem millicode to do mod. */
5583 const char *
5584 output_mod_insn (int unsignedp, rtx insn)
5586 if (unsignedp)
5588 import_milli (remU);
5589 return output_millicode_call (insn,
5590 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5592 else
5594 import_milli (remI);
5595 return output_millicode_call (insn,
5596 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5600 void
5601 output_arg_descriptor (rtx call_insn)
5603 const char *arg_regs[4];
5604 enum machine_mode arg_mode;
5605 rtx link;
5606 int i, output_flag = 0;
5607 int regno;
5609 /* We neither need nor want argument location descriptors for the
5610 64bit runtime environment or the ELF32 environment. */
5611 if (TARGET_64BIT || TARGET_ELF32)
5612 return;
5614 for (i = 0; i < 4; i++)
5615 arg_regs[i] = 0;
5617 /* Specify explicitly that no argument relocations should take place
5618 if using the portable runtime calling conventions. */
5619 if (TARGET_PORTABLE_RUNTIME)
5621 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5622 asm_out_file);
5623 return;
5626 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5627 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5628 link; link = XEXP (link, 1))
5630 rtx use = XEXP (link, 0);
5632 if (! (GET_CODE (use) == USE
5633 && GET_CODE (XEXP (use, 0)) == REG
5634 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5635 continue;
5637 arg_mode = GET_MODE (XEXP (use, 0));
5638 regno = REGNO (XEXP (use, 0));
5639 if (regno >= 23 && regno <= 26)
5641 arg_regs[26 - regno] = "GR";
5642 if (arg_mode == DImode)
5643 arg_regs[25 - regno] = "GR";
5645 else if (regno >= 32 && regno <= 39)
5647 if (arg_mode == SFmode)
5648 arg_regs[(regno - 32) / 2] = "FR";
5649 else
5651 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5652 arg_regs[(regno - 34) / 2] = "FR";
5653 arg_regs[(regno - 34) / 2 + 1] = "FU";
5654 #else
5655 arg_regs[(regno - 34) / 2] = "FU";
5656 arg_regs[(regno - 34) / 2 + 1] = "FR";
5657 #endif
5661 fputs ("\t.CALL ", asm_out_file);
5662 for (i = 0; i < 4; i++)
5664 if (arg_regs[i])
5666 if (output_flag++)
5667 fputc (',', asm_out_file);
5668 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5671 fputc ('\n', asm_out_file);
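/* For example, a call passing two ints (in %r26 and %r25) gets

       .CALL ARGW0=GR,ARGW1=GR

   while a double in the first FP argument pair marks its two words
   FR and FU (or FU then FR when HP_FP_ARG_DESCRIPTOR_REVERSED is
   defined).  */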
5674 static enum reg_class
5675 pa_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
5676 enum machine_mode mode, secondary_reload_info *sri)
5678 int is_symbolic, regno;
5680 /* Handle the easy stuff first. */
5681 if (rclass == R1_REGS)
5682 return NO_REGS;
5684 if (REG_P (x))
5686 regno = REGNO (x);
5687 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5688 return NO_REGS;
5690 else
5691 regno = -1;
5693 /* If we have something like (mem (mem (...))), we can safely assume the
5694 inner MEM will end up in a general register after reloading, so there's
5695 no need for a secondary reload. */
5696 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5697 return NO_REGS;
5699 /* Trying to load a constant into a FP register during PIC code
5700 generation requires %r1 as a scratch register. */
5701 if (flag_pic
5702 && (mode == SImode || mode == DImode)
5703 && FP_REG_CLASS_P (rclass)
5704 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5706 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5707 : CODE_FOR_reload_indi_r1);
5708 return NO_REGS;
5711 /* Profiling showed the PA port spends about 1.3% of its compilation
5712 time in true_regnum from calls inside pa_secondary_reload_class. */
5713 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5714 regno = true_regnum (x);
5716 /* In order to allow 14-bit displacements in integer loads and stores,
5717 we need to prevent reload from generating out of range integer mode
5718 loads and stores to the floating point registers. Previously, we
5719 used to call for a secondary reload and have emit_move_sequence()
5720 fix the instruction sequence. However, reload occasionally wouldn't
5721 generate the reload and we would end up with an invalid REG+D memory
5722 address. So, now we use an intermediate general register for most
5723 memory loads and stores. */
5724 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5725 && GET_MODE_CLASS (mode) == MODE_INT
5726 && FP_REG_CLASS_P (rclass))
5728 /* Reload passes (mem:SI (reg/f:DI 30 %r30)) when it wants to check
5729 the secondary reload needed for a pseudo. It never passes a
5730 REG+D address. */
5731 if (GET_CODE (x) == MEM)
5733 x = XEXP (x, 0);
5735 /* We don't need an intermediate for indexed and LO_SUM DLT
5736 memory addresses. When INT14_OK_STRICT is true, it might
5737 appear that we could directly allow register indirect
5738 memory addresses. However, this doesn't work because we
5739 don't support SUBREGs in floating-point register copies
5740 and reload doesn't tell us when it's going to use a SUBREG. */
5741 if (IS_INDEX_ADDR_P (x)
5742 || IS_LO_SUM_DLT_ADDR_P (x))
5743 return NO_REGS;
5745 /* Otherwise, we need an intermediate general register. */
5746 return GENERAL_REGS;
5749 /* Request a secondary reload with a general scratch register
5750 for everything else. ??? Could symbolic operands be handled
5751 directly when generating non-pic PA 2.0 code? */
5752 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5753 return NO_REGS;
5756 /* We need a secondary register (GPR) for copies between the SAR
5757 and anything other than a general register. */
5758 if (rclass == SHIFT_REGS && (regno <= 0 || regno >= 32))
5760 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5761 return NO_REGS;
5764 /* A SAR<->FP register copy requires a secondary register (GPR) as
5765 well as secondary memory. */
5766 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5767 && (REGNO_REG_CLASS (regno) == SHIFT_REGS
5768 && FP_REG_CLASS_P (rclass)))
5770 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5771 return NO_REGS;
5774 /* Secondary reloads of symbolic operands require %r1 as a scratch
5775 register when we're generating PIC code and when the operand isn't
5776 readonly. */
5777 if (GET_CODE (x) == HIGH)
5778 x = XEXP (x, 0);
5780 /* Profiling has shown GCC spends about 2.6% of its compilation
5781 time in symbolic_operand from calls inside pa_secondary_reload_class.
5782 So, we use an inline copy to avoid useless work. */
5783 switch (GET_CODE (x))
5785 rtx op;
5787 case SYMBOL_REF:
5788 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5789 break;
5790 case LABEL_REF:
5791 is_symbolic = 1;
5792 break;
5793 case CONST:
5794 op = XEXP (x, 0);
5795 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5796 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5797 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5798 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5799 break;
5800 default:
5801 is_symbolic = 0;
5802 break;
5805 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5807 gcc_assert (mode == SImode || mode == DImode);
5808 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5809 : CODE_FOR_reload_indi_r1);
5812 return NO_REGS;
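/* To summarize the cases above: constants destined for FP registers
   under PIC and non-readonly symbolic operands get the %r1 scratch
   reload patterns, integer-mode accesses to FP registers go through a
   GENERAL_REGS intermediate, and SAR copies request a general scratch
   register through the reload_in/reload_out optab entries.  */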
5815 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
5816 is only marked as live on entry by df-scan when it is a fixed
5817 register. It isn't a fixed register in the 64-bit runtime,
5818 so we need to mark it here. */
5820 static void
5821 pa_extra_live_on_entry (bitmap regs)
5823 if (TARGET_64BIT)
5824 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
5827 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
5828 to prevent it from being deleted. */
5830 rtx
5831 pa_eh_return_handler_rtx (void)
5833 rtx tmp;
5835 tmp = gen_rtx_PLUS (word_mode, frame_pointer_rtx,
5836 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
5837 tmp = gen_rtx_MEM (word_mode, tmp);
5838 tmp->volatil = 1;
5839 return tmp;
5842 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5843 by invisible reference. As a GCC extension, we also pass anything
5844 with a zero or variable size by reference.
5846 The 64-bit runtime does not describe passing any types by invisible
5847 reference. The internals of GCC can't currently handle passing
5848 empty structures, and zero or variable length arrays when they are
5849 not passed entirely on the stack or by reference. Thus, as a GCC
5850 extension, we pass these types by reference. The HP compiler doesn't
5851 support these types, so hopefully there shouldn't be any compatibility
5852 issues. This may have to be revisited when HP releases a C99 compiler
5853 or updates the ABI. */
5855 static bool
5856 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5857 enum machine_mode mode, const_tree type,
5858 bool named ATTRIBUTE_UNUSED)
5860 HOST_WIDE_INT size;
5862 if (type)
5863 size = int_size_in_bytes (type);
5864 else
5865 size = GET_MODE_SIZE (mode);
5867 if (TARGET_64BIT)
5868 return size <= 0;
5869 else
5870 return size <= 0 || size > 8;
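/* For illustration: under the 32-bit runtime a 16-byte struct
   (size > 8) and a zero-sized struct (size <= 0) are both passed by
   invisible reference, while a 4-byte struct is passed by value;
   under the 64-bit runtime only the size <= 0 cases go by
   reference.  */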
5873 enum direction
5874 function_arg_padding (enum machine_mode mode, const_tree type)
5876 if (mode == BLKmode
5877 || (TARGET_64BIT
5878 && type
5879 && (AGGREGATE_TYPE_P (type)
5880 || TREE_CODE (type) == COMPLEX_TYPE
5881 || TREE_CODE (type) == VECTOR_TYPE)))
5883 /* Return none if justification is not required. */
5884 if (type
5885 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5886 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5887 return none;
5889 /* The directions set here are ignored when a BLKmode argument larger
5890 than a word is placed in a register. Different code is used for
5891 the stack and registers. This makes it difficult to have a
5892 consistent data representation for both the stack and registers.
5893 For both runtimes, the justification and padding for arguments on
5894 the stack and in registers should be identical. */
5895 if (TARGET_64BIT)
5896 /* The 64-bit runtime specifies left justification for aggregates. */
5897 return upward;
5898 else
5899 /* The 32-bit runtime architecture specifies right justification.
5900 When the argument is passed on the stack, the argument is padded
5901 with garbage on the left. The HP compiler pads with zeros. */
5902 return downward;
5905 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5906 return downward;
5907 else
5908 return none;
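/* For illustration, assuming PARM_BOUNDARY equals BITS_PER_WORD: a
   3-byte BLKmode struct is padded downward (right justified) in the
   32-bit runtime and upward in the 64-bit runtime, while an 8-byte
   aggregate is already a multiple of PARM_BOUNDARY and so needs no
   justification in either.  */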
5912 /* Do what is necessary for `va_start'. We look at the current function
5913 to determine if stdargs or varargs is used and fill in an initial
5914 va_list. A pointer to this constructor is returned. */
5916 static rtx
5917 hppa_builtin_saveregs (void)
5919 rtx offset, dest;
5920 tree fntype = TREE_TYPE (current_function_decl);
5921 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5922 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5923 != void_type_node)))
5924 ? UNITS_PER_WORD : 0);
5926 if (argadj)
5927 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
5928 else
5929 offset = crtl->args.arg_offset_rtx;
5931 if (TARGET_64BIT)
5933 int i, off;
5935 /* Adjust for varargs/stdarg differences. */
5936 if (argadj)
5937 offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
5938 else
5939 offset = crtl->args.arg_offset_rtx;
5941 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5942 from the incoming arg pointer and growing to larger addresses. */
5943 for (i = 26, off = -64; i >= 19; i--, off += 8)
5944 emit_move_insn (gen_rtx_MEM (word_mode,
5945 plus_constant (arg_pointer_rtx, off)),
5946 gen_rtx_REG (word_mode, i));
5948 /* The incoming args pointer points just beyond the flushback area;
5949 normally this is not a serious concern. However, when we are doing
5950 varargs/stdargs we want to make the arg pointer point to the start
5951 of the incoming argument area. */
5952 emit_move_insn (virtual_incoming_args_rtx,
5953 plus_constant (arg_pointer_rtx, -64));
5955 /* Now return a pointer to the first anonymous argument. */
5956 return copy_to_reg (expand_binop (Pmode, add_optab,
5957 virtual_incoming_args_rtx,
5958 offset, 0, 0, OPTAB_LIB_WIDEN));
5961 /* Store general registers on the stack. */
5962 dest = gen_rtx_MEM (BLKmode,
5963 plus_constant (crtl->args.internal_arg_pointer,
5964 -16));
5965 set_mem_alias_set (dest, get_varargs_alias_set ());
5966 set_mem_align (dest, BITS_PER_WORD);
5967 move_block_from_reg (23, dest, 4);
5969 /* move_block_from_reg will emit code to store the argument registers
5970 individually as scalar stores.
5972 However, other insns may later load from the same addresses for
5973 a structure load (passing a struct to a varargs routine).
5975 The alias code assumes that such aliasing can never happen, so we
5976 have to keep memory referencing insns from moving up beyond the
5977 last argument register store. So we emit a blockage insn here. */
5978 emit_insn (gen_blockage ());
5980 return copy_to_reg (expand_binop (Pmode, add_optab,
5981 crtl->args.internal_arg_pointer,
5982 offset, 0, 0, OPTAB_LIB_WIDEN));
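/* For illustration: in the 32-bit path above, move_block_from_reg
   stores %r23 at ap-16, %r24 at ap-12, %r25 at ap-8 and %r26 at ap-4
   (ap being the internal argument pointer), so the first argument
   register, %r26, lands nearest the argument pointer and the
   anonymous arguments can be read back as one contiguous block.  */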
5985 static void
5986 hppa_va_start (tree valist, rtx nextarg)
5988 nextarg = expand_builtin_saveregs ();
5989 std_expand_builtin_va_start (valist, nextarg);
5992 static tree
5993 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5994 gimple_seq *post_p)
5996 if (TARGET_64BIT)
5998 /* Args grow upward. We can use the generic routines. */
5999 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6001 else /* !TARGET_64BIT */
6003 tree ptr = build_pointer_type (type);
6004 tree valist_type;
6005 tree t, u;
6006 unsigned int size, ofs;
6007 bool indirect;
6009 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6010 if (indirect)
6012 type = ptr;
6013 ptr = build_pointer_type (type);
6015 size = int_size_in_bytes (type);
6016 valist_type = TREE_TYPE (valist);
6018 /* Args grow down. Not handled by generic routines. */
6020 u = fold_convert (sizetype, size_in_bytes (type));
6021 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6022 t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);
6024 /* Copied from va-pa.h, but we probably don't need to align to
6025 word size, since we generate and preserve that invariant. */
6026 u = size_int (size > 4 ? -8 : -4);
6027 t = fold_convert (sizetype, t);
6028 t = build2 (BIT_AND_EXPR, sizetype, t, u);
6029 t = fold_convert (valist_type, t);
6031 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6033 ofs = (8 - size) % 4;
6034 if (ofs != 0)
6036 u = size_int (ofs);
6037 t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
6040 t = fold_convert (ptr, t);
6041 t = build_va_arg_indirect_ref (t);
6043 if (indirect)
6044 t = build_va_arg_indirect_ref (t);
6046 return t;
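/* A minimal sketch (not part of the build) of the 32-bit pointer
   arithmetic built above, written as plain C.  SIZE is at most 8 here
   since larger arguments are passed by reference.  The aligned value
   becomes the new va_list pointer; the returned address adds the
   right-justification offset.  */
#if 0
static char *
va_arg_addr_sketch (char *valist, unsigned int size)
{
  unsigned long addr = (unsigned long) valist;

  /* Step down past the argument, then align: double-word alignment
     for arguments larger than four bytes, word alignment otherwise.  */
  addr -= size;
  addr &= size > 4 ? -8UL : -4UL;

  /* Right-justify small arguments within their slot; a 1-byte
     argument, for example, is fetched at offset 3 of its word.  */
  return (char *) (addr + (8 - size) % 4);
}
#endif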
6050 /* True if MODE is valid for the target. By "valid", we mean able to
6051 be manipulated in non-trivial ways. In particular, this means all
6052 the arithmetic is supported.
6054 Currently, TImode is not valid as the HP 64-bit runtime documentation
6055 doesn't document the alignment and calling conventions for this type.
6056 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6057 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6059 static bool
6060 pa_scalar_mode_supported_p (enum machine_mode mode)
6062 int precision = GET_MODE_PRECISION (mode);
6064 switch (GET_MODE_CLASS (mode))
6066 case MODE_PARTIAL_INT:
6067 case MODE_INT:
6068 if (precision == CHAR_TYPE_SIZE)
6069 return true;
6070 if (precision == SHORT_TYPE_SIZE)
6071 return true;
6072 if (precision == INT_TYPE_SIZE)
6073 return true;
6074 if (precision == LONG_TYPE_SIZE)
6075 return true;
6076 if (precision == LONG_LONG_TYPE_SIZE)
6077 return true;
6078 return false;
6080 case MODE_FLOAT:
6081 if (precision == FLOAT_TYPE_SIZE)
6082 return true;
6083 if (precision == DOUBLE_TYPE_SIZE)
6084 return true;
6085 if (precision == LONG_DOUBLE_TYPE_SIZE)
6086 return true;
6087 return false;
6089 case MODE_DECIMAL_FLOAT:
6090 return false;
6092 default:
6093 gcc_unreachable ();
6097 /* This routine handles all the normal conditional branch sequences we
6098 might need to generate. It handles compare immediate vs compare
6099 register, nullification of delay slots, varying length branches,
6100 negated branches, and all combinations of the above. It returns the
6101 output template appropriate for emitting the branch described by the
6102 given parameters. */
6104 const char *
6105 output_cbranch (rtx *operands, int negated, rtx insn)
6107 static char buf[100];
6108 int useskip = 0;
6109 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6110 int length = get_attr_length (insn);
6111 int xdelay;
6113 /* A conditional branch to the following instruction (e.g. the delay slot)
6114 is asking for a disaster. This can happen when not optimizing and
6115 when jump optimization fails.
6117 While it is usually safe to emit nothing, this can fail if the
6118 preceding instruction is a nullified branch with an empty delay
6119 slot and the same branch target as this branch. We could check
6120 for this but jump optimization should eliminate nop jumps. It
6121 is always safe to emit a nop. */
6122 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6123 return "nop";
6125 /* The doubleword form of the cmpib instruction doesn't have the LEU
6126 and GTU conditions while the cmpb instruction does. Since we accept
6127 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6128 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6129 operands[2] = gen_rtx_REG (DImode, 0);
6130 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6131 operands[1] = gen_rtx_REG (DImode, 0);
6133 /* If this is a long branch with its delay slot unfilled, set `nullify'
6134 as it can nullify the delay slot and save a nop. */
6135 if (length == 8 && dbr_sequence_length () == 0)
6136 nullify = 1;
6138 /* If this is a short forward conditional branch which did not get
6139 its delay slot filled, the delay slot can still be nullified. */
6140 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6141 nullify = forward_branch_p (insn);
6143 /* A forward branch over a single nullified insn can be done with a
6144 comclr instruction. This avoids a single cycle penalty due to
6145 mis-predicted branch if we fall through (branch not taken). */
6146 if (length == 4
6147 && next_real_insn (insn) != 0
6148 && get_attr_length (next_real_insn (insn)) == 4
6149 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6150 && nullify)
6151 useskip = 1;
6153 switch (length)
6155 /* All short conditional branches except backwards with an unfilled
6156 delay slot. */
6157 case 4:
6158 if (useskip)
6159 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6160 else
6161 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6162 if (GET_MODE (operands[1]) == DImode)
6163 strcat (buf, "*");
6164 if (negated)
6165 strcat (buf, "%B3");
6166 else
6167 strcat (buf, "%S3");
6168 if (useskip)
6169 strcat (buf, " %2,%r1,%%r0");
6170 else if (nullify)
6171 strcat (buf, ",n %2,%r1,%0");
6172 else
6173 strcat (buf, " %2,%r1,%0");
6174 break;
6176 /* All long conditionals. Note a short backward branch with an
6177 unfilled delay slot is treated just like a long backward branch
6178 with an unfilled delay slot. */
6179 case 8:
6180 /* Handle weird backwards branch with a filled delay slot
6181 which is nullified. */
6182 if (dbr_sequence_length () != 0
6183 && ! forward_branch_p (insn)
6184 && nullify)
6186 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6187 if (GET_MODE (operands[1]) == DImode)
6188 strcat (buf, "*");
6189 if (negated)
6190 strcat (buf, "%S3");
6191 else
6192 strcat (buf, "%B3");
6193 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6195 /* Handle short backwards branch with an unfilled delay slot.
6196 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6197 taken and untaken branches. */
6198 else if (dbr_sequence_length () == 0
6199 && ! forward_branch_p (insn)
6200 && INSN_ADDRESSES_SET_P ()
6201 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6202 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6204 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6205 if (GET_MODE (operands[1]) == DImode)
6206 strcat (buf, "*");
6207 if (negated)
6208 strcat (buf, "%B3 %2,%r1,%0%#");
6209 else
6210 strcat (buf, "%S3 %2,%r1,%0%#");
6212 else
6214 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6215 if (GET_MODE (operands[1]) == DImode)
6216 strcat (buf, "*");
6217 if (negated)
6218 strcat (buf, "%S3");
6219 else
6220 strcat (buf, "%B3");
6221 if (nullify)
6222 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6223 else
6224 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6226 break;
6228 default:
6229 /* The reversed conditional branch must branch over one additional
6230 instruction if the delay slot is filled and needs to be extracted
6231 by output_lbranch. If the delay slot is empty or this is a
6232 nullified forward branch, the instruction after the reversed
6233 condition branch must be nullified. */
6234 if (dbr_sequence_length () == 0
6235 || (nullify && forward_branch_p (insn)))
6237 nullify = 1;
6238 xdelay = 0;
6239 operands[4] = GEN_INT (length);
6241 else
6243 xdelay = 1;
6244 operands[4] = GEN_INT (length + 4);
6247 /* Create a reversed conditional branch which branches around
6248 the following insns. */
6249 if (GET_MODE (operands[1]) != DImode)
6251 if (nullify)
6253 if (negated)
6254 strcpy (buf,
6255 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6256 else
6257 strcpy (buf,
6258 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6260 else
6262 if (negated)
6263 strcpy (buf,
6264 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6265 else
6266 strcpy (buf,
6267 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6270 else
6272 if (nullify)
6274 if (negated)
6275 strcpy (buf,
6276 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6277 else
6278 strcpy (buf,
6279 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6281 else
6283 if (negated)
6284 strcpy (buf,
6285 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6286 else
6287 strcpy (buf,
6288 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6292 output_asm_insn (buf, operands);
6293 return output_lbranch (operands[0], insn, xdelay);
6295 return buf;
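/* A note on the {com%I2b,|cmp%I2b,} style templates used above and in
   the routines that follow: the braces are final.c's assembler-dialect
   selection syntax.  On PA, the spelling before the '|' is used when
   generating code for PA 1.x assemblers and the spelling after it for
   PA 2.0, so a single template covers both mnemonic sets.  */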
6298 /* This routine handles output of long unconditional branches that
6299 exceed the maximum range of a simple branch instruction. Since
6300 we don't have a register available for the branch, we save register
6301 %r1 in the frame marker, load the branch destination DEST into %r1,
6302 execute the branch, and restore %r1 in the delay slot of the branch.
6304 Since long branches may have an insn in the delay slot and the
6305 delay slot is used to restore %r1, we in general need to extract
6306 this insn and execute it before the branch. However, to facilitate
6307 use of this function by conditional branches, we also provide an
6308 option to not extract the delay insn so that it will be emitted
6309 after the long branch. So, if there is an insn in the delay slot,
6310 it is extracted if XDELAY is nonzero.
6312 The lengths of the various long-branch sequences are 20, 16 and 24
6313 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6315 const char *
6316 output_lbranch (rtx dest, rtx insn, int xdelay)
6318 rtx xoperands[2];
6320 xoperands[0] = dest;
6322 /* First, free up the delay slot. */
6323 if (xdelay && dbr_sequence_length () != 0)
6325 /* We can't handle a jump in the delay slot. */
6326 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6328 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6329 optimize, 0, NULL);
6331 /* Now delete the delay insn. */
6332 SET_INSN_DELETED (NEXT_INSN (insn));
6335 /* Output an insn to save %r1. The runtime documentation doesn't
6336 specify whether the "Clean Up" slot in the caller's frame can
6337 be clobbered by the callee. It isn't copied by HP's builtin
6338 alloca, so this suggests that it can be clobbered if necessary.
6339 The "Static Link" location is copied by HP builtin alloca, so
6340 we avoid using it. Using the cleanup slot might be a problem
6341 if we have to interoperate with languages that pass cleanup
6342 information. However, it should be possible to handle these
6343 situations with GCC's asm feature.
6345 The "Current RP" slot is reserved for the called procedure, so
6346 we try to use it when we don't have a frame of our own. It's
6347 rather unlikely that we won't have a frame when we need to emit
6348 a very long branch.
6350 Really the way to go long term is a register scavenger; go to
6351 the target of the jump and find a register which we can use
6352 as a scratch to hold the value in %r1. Then, we wouldn't have
6353 to free up the delay slot or clobber a slot that may be needed
6354 for other purposes. */
6355 if (TARGET_64BIT)
6357 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6358 /* Use the return pointer slot in the frame marker. */
6359 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6360 else
6361 /* Use the slot at -40 in the frame marker since HP builtin
6362 alloca doesn't copy it. */
6363 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6365 else
6367 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6368 /* Use the return pointer slot in the frame marker. */
6369 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6370 else
6371 /* Use the "Clean Up" slot in the frame marker. In GCC,
6372 the only other use of this location is for copying a
6373 floating point double argument from a floating-point
6374 register to two general registers. The copy is done
6375 as an "atomic" operation when outputting a call, so it
6376 won't interfere with our using the location here. */
6377 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6380 if (TARGET_PORTABLE_RUNTIME)
6382 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6383 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6384 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6386 else if (flag_pic)
6388 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6389 if (TARGET_SOM || !TARGET_GAS)
6391 xoperands[1] = gen_label_rtx ();
6392 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6393 targetm.asm_out.internal_label (asm_out_file, "L",
6394 CODE_LABEL_NUMBER (xoperands[1]));
6395 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6397 else
6399 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6400 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6402 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6404 else
6405 /* Now output a very long branch to the original target. */
6406 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6408 /* Now restore the value of %r1 in the delay slot. */
6409 if (TARGET_64BIT)
6411 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6412 return "ldd -16(%%r30),%%r1";
6413 else
6414 return "ldd -40(%%r30),%%r1";
6416 else
6418 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6419 return "ldw -20(%%r30),%%r1";
6420 else
6421 return "ldw -12(%%r30),%%r1";
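/* For illustration, the complete non-PIC sequence emitted above when
   the return pointer slot is free is (16 bytes, counting the delay
   slot restore):

	stw %r1,-20(%r30)
	ldil L'target,%r1
	be R'target(%sr4,%r1)
	ldw -20(%r30),%r1

   The portable runtime form uses ldil/ldo/bv instead of ldil/be
   (20 bytes), and the PIC form uses bl/addil/ldo/bv (24 bytes),
   matching the lengths quoted in the comment before output_lbranch.  */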
6425 /* This routine handles all the branch-on-bit conditional branch sequences we
6426 might need to generate. It handles nullification of delay slots,
6427 varying length branches, negated branches and all combinations of the
6428 above. It returns the appropriate output template to emit the branch. */
6430 const char *
6431 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6433 static char buf[100];
6434 int useskip = 0;
6435 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6436 int length = get_attr_length (insn);
6437 int xdelay;
6439 /* A conditional branch to the following instruction (e.g. the delay slot) is
6440 asking for a disaster. I do not think this can happen as this pattern
6441 is only used when optimizing; jump optimization should eliminate the
6442 jump. But be prepared just in case. */
6444 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6445 return "nop";
6447 /* If this is a long branch with its delay slot unfilled, set `nullify'
6448 as it can nullify the delay slot and save a nop. */
6449 if (length == 8 && dbr_sequence_length () == 0)
6450 nullify = 1;
6452 /* If this is a short forward conditional branch which did not get
6453 its delay slot filled, the delay slot can still be nullified. */
6454 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6455 nullify = forward_branch_p (insn);
6457 /* A forward branch over a single nullified insn can be done with an
6458 extrs instruction. This avoids a single cycle penalty due to
6459 mis-predicted branch if we fall through (branch not taken). */
6461 if (length == 4
6462 && next_real_insn (insn) != 0
6463 && get_attr_length (next_real_insn (insn)) == 4
6464 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6465 && nullify)
6466 useskip = 1;
6468 switch (length)
6471 /* All short conditional branches except backwards with an unfilled
6472 delay slot. */
6473 case 4:
6474 if (useskip)
6475 strcpy (buf, "{extrs,|extrw,s,}");
6476 else
6477 strcpy (buf, "bb,");
6478 if (useskip && GET_MODE (operands[0]) == DImode)
6479 strcpy (buf, "extrd,s,*");
6480 else if (GET_MODE (operands[0]) == DImode)
6481 strcpy (buf, "bb,*");
6482 if ((which == 0 && negated)
6483 || (which == 1 && ! negated))
6484 strcat (buf, ">=");
6485 else
6486 strcat (buf, "<");
6487 if (useskip)
6488 strcat (buf, " %0,%1,1,%%r0");
6489 else if (nullify && negated)
6490 strcat (buf, ",n %0,%1,%3");
6491 else if (nullify && ! negated)
6492 strcat (buf, ",n %0,%1,%2");
6493 else if (! nullify && negated)
6494 strcat (buf, "%0,%1,%3");
6495 else if (! nullify && ! negated)
6496 strcat (buf, " %0,%1,%2");
6497 break;
6499 /* All long conditionals. Note a short backward branch with an
6500 unfilled delay slot is treated just like a long backward branch
6501 with an unfilled delay slot. */
6502 case 8:
6503 /* Handle weird backwards branch with a filled delay slot
6504 which is nullified. */
6505 if (dbr_sequence_length () != 0
6506 && ! forward_branch_p (insn)
6507 && nullify)
6509 strcpy (buf, "bb,");
6510 if (GET_MODE (operands[0]) == DImode)
6511 strcat (buf, "*");
6512 if ((which == 0 && negated)
6513 || (which == 1 && ! negated))
6514 strcat (buf, "<");
6515 else
6516 strcat (buf, ">=");
6517 if (negated)
6518 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6519 else
6520 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6522 /* Handle short backwards branch with an unfilled delay slot.
6523 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6524 taken and untaken branches. */
6525 else if (dbr_sequence_length () == 0
6526 && ! forward_branch_p (insn)
6527 && INSN_ADDRESSES_SET_P ()
6528 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6529 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6531 strcpy (buf, "bb,");
6532 if (GET_MODE (operands[0]) == DImode)
6533 strcat (buf, "*");
6534 if ((which == 0 && negated)
6535 || (which == 1 && ! negated))
6536 strcat (buf, ">=");
6537 else
6538 strcat (buf, "<");
6539 if (negated)
6540 strcat (buf, " %0,%1,%3%#");
6541 else
6542 strcat (buf, " %0,%1,%2%#");
6544 else
6546 if (GET_MODE (operands[0]) == DImode)
6547 strcpy (buf, "extrd,s,*");
6548 else
6549 strcpy (buf, "{extrs,|extrw,s,}");
6550 if ((which == 0 && negated)
6551 || (which == 1 && ! negated))
6552 strcat (buf, "<");
6553 else
6554 strcat (buf, ">=");
6555 if (nullify && negated)
6556 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6557 else if (nullify && ! negated)
6558 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6559 else if (negated)
6560 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6561 else
6562 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6564 break;
6566 default:
6567 /* The reversed conditional branch must branch over one additional
6568 instruction if the delay slot is filled and needs to be extracted
6569 by output_lbranch. If the delay slot is empty or this is a
6570 nullified forward branch, the instruction after the reversed
6571 condition branch must be nullified. */
6572 if (dbr_sequence_length () == 0
6573 || (nullify && forward_branch_p (insn)))
6575 nullify = 1;
6576 xdelay = 0;
6577 operands[4] = GEN_INT (length);
6579 else
6581 xdelay = 1;
6582 operands[4] = GEN_INT (length + 4);
6585 if (GET_MODE (operands[0]) == DImode)
6586 strcpy (buf, "bb,*");
6587 else
6588 strcpy (buf, "bb,");
6589 if ((which == 0 && negated)
6590 || (which == 1 && !negated))
6591 strcat (buf, "<");
6592 else
6593 strcat (buf, ">=");
6594 if (nullify)
6595 strcat (buf, ",n %0,%1,.+%4");
6596 else
6597 strcat (buf, " %0,%1,.+%4");
6598 output_asm_insn (buf, operands);
6599 return output_lbranch (negated ? operands[3] : operands[2],
6600 insn, xdelay);
6602 return buf;
6605 /* This routine handles all the branch-on-variable-bit conditional branch
6606 sequences we might need to generate. It handles nullification of delay
6607 slots, varying length branches, negated branches and all combinations
6608 of the above. It returns the appropriate output template to emit the
6609 branch. */
6611 const char *
6612 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6614 static char buf[100];
6615 int useskip = 0;
6616 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6617 int length = get_attr_length (insn);
6618 int xdelay;
6620 /* A conditional branch to the following instruction (e.g. the delay slot) is
6621 asking for a disaster. I do not think this can happen as this pattern
6622 is only used when optimizing; jump optimization should eliminate the
6623 jump. But be prepared just in case. */
6625 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6626 return "nop";
6628 /* If this is a long branch with its delay slot unfilled, set `nullify'
6629 as it can nullify the delay slot and save a nop. */
6630 if (length == 8 && dbr_sequence_length () == 0)
6631 nullify = 1;
6633 /* If this is a short forward conditional branch which did not get
6634 its delay slot filled, the delay slot can still be nullified. */
6635 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6636 nullify = forward_branch_p (insn);
6638 /* A forward branch over a single nullified insn can be done with an
6639 extrs instruction. This avoids a single cycle penalty due to
6640 mis-predicted branch if we fall through (branch not taken). */
6642 if (length == 4
6643 && next_real_insn (insn) != 0
6644 && get_attr_length (next_real_insn (insn)) == 4
6645 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6646 && nullify)
6647 useskip = 1;
6649 switch (length)
6652 /* All short conditional branches except backwards with an unfilled
6653 delay slot. */
6654 case 4:
6655 if (useskip)
6656 strcpy (buf, "{vextrs,|extrw,s,}");
6657 else
6658 strcpy (buf, "{bvb,|bb,}");
6659 if (useskip && GET_MODE (operands[0]) == DImode)
6660 strcpy (buf, "extrd,s,*");
6661 else if (GET_MODE (operands[0]) == DImode)
6662 strcpy (buf, "bb,*");
6663 if ((which == 0 && negated)
6664 || (which == 1 && ! negated))
6665 strcat (buf, ">=");
6666 else
6667 strcat (buf, "<");
6668 if (useskip)
6669 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6670 else if (nullify && negated)
6671 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6672 else if (nullify && ! negated)
6673 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6674 else if (! nullify && negated)
6675 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6676 else if (! nullify && ! negated)
6677 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6678 break;
6680 /* All long conditionals. Note a short backward branch with an
6681 unfilled delay slot is treated just like a long backward branch
6682 with an unfilled delay slot. */
6683 case 8:
6684 /* Handle weird backwards branch with a filled delay slot
6685 which is nullified. */
6686 if (dbr_sequence_length () != 0
6687 && ! forward_branch_p (insn)
6688 && nullify)
6690 strcpy (buf, "{bvb,|bb,}");
6691 if (GET_MODE (operands[0]) == DImode)
6692 strcat (buf, "*");
6693 if ((which == 0 && negated)
6694 || (which == 1 && ! negated))
6695 strcat (buf, "<");
6696 else
6697 strcat (buf, ">=");
6698 if (negated)
6699 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6700 else
6701 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6703 /* Handle short backwards branch with an unfilled delay slot.
6704 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6705 taken and untaken branches. */
6706 else if (dbr_sequence_length () == 0
6707 && ! forward_branch_p (insn)
6708 && INSN_ADDRESSES_SET_P ()
6709 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6710 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6712 strcpy (buf, "{bvb,|bb,}");
6713 if (GET_MODE (operands[0]) == DImode)
6714 strcat (buf, "*");
6715 if ((which == 0 && negated)
6716 || (which == 1 && ! negated))
6717 strcat (buf, ">=");
6718 else
6719 strcat (buf, "<");
6720 if (negated)
6721 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6722 else
6723 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6725 else
6727 strcpy (buf, "{vextrs,|extrw,s,}");
6728 if (GET_MODE (operands[0]) == DImode)
6729 strcpy (buf, "extrd,s,*");
6730 if ((which == 0 && negated)
6731 || (which == 1 && ! negated))
6732 strcat (buf, "<");
6733 else
6734 strcat (buf, ">=");
6735 if (nullify && negated)
6736 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6737 else if (nullify && ! negated)
6738 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6739 else if (negated)
6740 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6741 else
6742 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6744 break;
6746 default:
6747 /* The reversed conditional branch must branch over one additional
6748 instruction if the delay slot is filled and needs to be extracted
6749 by output_lbranch. If the delay slot is empty or this is a
6750 nullified forward branch, the instruction after the reversed
6751 condition branch must be nullified. */
6752 if (dbr_sequence_length () == 0
6753 || (nullify && forward_branch_p (insn)))
6755 nullify = 1;
6756 xdelay = 0;
6757 operands[4] = GEN_INT (length);
6759 else
6761 xdelay = 1;
6762 operands[4] = GEN_INT (length + 4);
6765 if (GET_MODE (operands[0]) == DImode)
6766 strcpy (buf, "bb,*");
6767 else
6768 strcpy (buf, "{bvb,|bb,}");
6769 if ((which == 0 && negated)
6770 || (which == 1 && !negated))
6771 strcat (buf, "<");
6772 else
6773 strcat (buf, ">=");
6774 if (nullify)
6775 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6776 else
6777 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6778 output_asm_insn (buf, operands);
6779 return output_lbranch (negated ? operands[3] : operands[2],
6780 insn, xdelay);
6782 return buf;
6785 /* Return the output template for emitting a dbra type insn.
6787 Note it may perform some output operations on its own before
6788 returning the final output string. */
6789 const char *
6790 output_dbra (rtx *operands, rtx insn, int which_alternative)
6792 int length = get_attr_length (insn);
6794 /* A conditional branch to the following instruction (e.g. the delay slot) is
6795 asking for a disaster. Be prepared! */
6797 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6799 if (which_alternative == 0)
6800 return "ldo %1(%0),%0";
6801 else if (which_alternative == 1)
6803 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6804 output_asm_insn ("ldw -16(%%r30),%4", operands);
6805 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6806 return "{fldws|fldw} -16(%%r30),%0";
6808 else
6810 output_asm_insn ("ldw %0,%4", operands);
6811 return "ldo %1(%4),%4\n\tstw %4,%0";
6815 if (which_alternative == 0)
6817 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6818 int xdelay;
6820 /* If this is a long branch with its delay slot unfilled, set `nullify'
6821 as it can nullify the delay slot and save a nop. */
6822 if (length == 8 && dbr_sequence_length () == 0)
6823 nullify = 1;
6825 /* If this is a short forward conditional branch which did not get
6826 its delay slot filled, the delay slot can still be nullified. */
6827 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6828 nullify = forward_branch_p (insn);
6830 switch (length)
6832 case 4:
6833 if (nullify)
6834 return "addib,%C2,n %1,%0,%3";
6835 else
6836 return "addib,%C2 %1,%0,%3";
6838 case 8:
6839 /* Handle weird backwards branch with a filled delay slot
6840 which is nullified. */
6841 if (dbr_sequence_length () != 0
6842 && ! forward_branch_p (insn)
6843 && nullify)
6844 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6845 /* Handle short backwards branch with an unfilled delay slot.
6846 Using an addb;nop rather than addi;bl saves 1 cycle for both
6847 taken and untaken branches. */
6848 else if (dbr_sequence_length () == 0
6849 && ! forward_branch_p (insn)
6850 && INSN_ADDRESSES_SET_P ()
6851 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6852 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6853 return "addib,%C2 %1,%0,%3%#";
6855 /* Handle normal cases. */
6856 if (nullify)
6857 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6858 else
6859 return "addi,%N2 %1,%0,%0\n\tb %3";
6861 default:
6862 /* The reversed conditional branch must branch over one additional
6863 instruction if the delay slot is filled and needs to be extracted
6864 by output_lbranch. If the delay slot is empty or this is a
6865 nullified forward branch, the instruction after the reversed
6866 condition branch must be nullified. */
6867 if (dbr_sequence_length () == 0
6868 || (nullify && forward_branch_p (insn)))
6870 nullify = 1;
6871 xdelay = 0;
6872 operands[4] = GEN_INT (length);
6874 else
6876 xdelay = 1;
6877 operands[4] = GEN_INT (length + 4);
6880 if (nullify)
6881 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6882 else
6883 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6885 return output_lbranch (operands[3], insn, xdelay);
6889 /* Deal with gross reload from FP register case. */
6890 else if (which_alternative == 1)
6892 /* Move loop counter from FP register to MEM then into a GR,
6893 increment the GR, store the GR into MEM, and finally reload
6894 the FP register from MEM from within the branch's delay slot. */
6895 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6896 operands);
6897 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6898 if (length == 24)
6899 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6900 else if (length == 28)
6901 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6902 else
6904 operands[5] = GEN_INT (length - 16);
6905 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6906 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6907 return output_lbranch (operands[3], insn, 0);
6910 /* Deal with gross reload from memory case. */
6911 else
6913 /* Reload loop counter from memory, the store back to memory
6914 happens in the branch's delay slot. */
6915 output_asm_insn ("ldw %0,%4", operands);
6916 if (length == 12)
6917 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6918 else if (length == 16)
6919 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6920 else
6922 operands[5] = GEN_INT (length - 4);
6923 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6924 return output_lbranch (operands[3], insn, 0);
6929 /* Return the output template for emitting a movb type insn.
6931 Note it may perform some output operations on its own before
6932 returning the final output string. */
6933 const char *
6934 output_movb (rtx *operands, rtx insn, int which_alternative,
6935 int reverse_comparison)
6937 int length = get_attr_length (insn);
6939 /* A conditional branch to the following instruction (e.g. the delay slot) is
6940 asking for a disaster. Be prepared! */
6942 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6944 if (which_alternative == 0)
6945 return "copy %1,%0";
6946 else if (which_alternative == 1)
6948 output_asm_insn ("stw %1,-16(%%r30)", operands);
6949 return "{fldws|fldw} -16(%%r30),%0";
6951 else if (which_alternative == 2)
6952 return "stw %1,%0";
6953 else
6954 return "mtsar %r1";
6957 /* Support the second variant. */
6958 if (reverse_comparison)
6959 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6961 if (which_alternative == 0)
6963 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6964 int xdelay;
6966 /* If this is a long branch with its delay slot unfilled, set `nullify'
6967 as it can nullify the delay slot and save a nop. */
6968 if (length == 8 && dbr_sequence_length () == 0)
6969 nullify = 1;
6971 /* If this is a short forward conditional branch which did not get
6972 its delay slot filled, the delay slot can still be nullified. */
6973 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6974 nullify = forward_branch_p (insn);
6976 switch (length)
6978 case 4:
6979 if (nullify)
6980 return "movb,%C2,n %1,%0,%3";
6981 else
6982 return "movb,%C2 %1,%0,%3";
6984 case 8:
6985 /* Handle weird backwards branch with a filled delay slot
6986 which is nullified. */
6987 if (dbr_sequence_length () != 0
6988 && ! forward_branch_p (insn)
6989 && nullify)
6990 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6992 /* Handle short backwards branch with an unfilled delay slot.
6993 Using a movb;nop rather than or;bl saves 1 cycle for both
6994 taken and untaken branches. */
6995 else if (dbr_sequence_length () == 0
6996 && ! forward_branch_p (insn)
6997 && INSN_ADDRESSES_SET_P ()
6998 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6999 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7000 return "movb,%C2 %1,%0,%3%#";
7001 /* Handle normal cases. */
7002 if (nullify)
7003 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7004 else
7005 return "or,%N2 %1,%%r0,%0\n\tb %3";
7007 default:
7008 /* The reversed conditional branch must branch over one additional
7009 instruction if the delay slot is filled and needs to be extracted
7010 by output_lbranch. If the delay slot is empty or this is a
7011 nullified forward branch, the instruction after the reversed
7012 condition branch must be nullified. */
7013 if (dbr_sequence_length () == 0
7014 || (nullify && forward_branch_p (insn)))
7016 nullify = 1;
7017 xdelay = 0;
7018 operands[4] = GEN_INT (length);
7020 else
7022 xdelay = 1;
7023 operands[4] = GEN_INT (length + 4);
7026 if (nullify)
7027 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7028 else
7029 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7031 return output_lbranch (operands[3], insn, xdelay);
7034 /* Deal with gross reload for FP destination register case. */
7035 else if (which_alternative == 1)
7037 /* Move source register to MEM, perform the branch test, then
7038 finally load the FP register from MEM from within the branch's
7039 delay slot. */
7040 output_asm_insn ("stw %1,-16(%%r30)", operands);
7041 if (length == 12)
7042 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7043 else if (length == 16)
7044 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7045 else
7047 operands[4] = GEN_INT (length - 4);
7048 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7049 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7050 return output_lbranch (operands[3], insn, 0);
7053 /* Deal with gross reload from memory case. */
7054 else if (which_alternative == 2)
7056 /* Reload loop counter from memory, the store back to memory
7057 happens in the branch's delay slot. */
7058 if (length == 8)
7059 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7060 else if (length == 12)
7061 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7062 else
7064 operands[4] = GEN_INT (length);
7065 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7066 operands);
7067 return output_lbranch (operands[3], insn, 0);
7070 /* Handle SAR as a destination. */
7071 else
7073 if (length == 8)
7074 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7075 else if (length == 12)
7076 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7077 else
7079 operands[4] = GEN_INT (length);
7080 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7081 operands);
7082 return output_lbranch (operands[3], insn, 0);
7087 /* Copy any FP arguments in INSN into integer registers. */
7088 static void
7089 copy_fp_args (rtx insn)
7091 rtx link;
7092 rtx xoperands[2];
7094 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7096 int arg_mode, regno;
7097 rtx use = XEXP (link, 0);
7099 if (! (GET_CODE (use) == USE
7100 && GET_CODE (XEXP (use, 0)) == REG
7101 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7102 continue;
7104 arg_mode = GET_MODE (XEXP (use, 0));
7105 regno = REGNO (XEXP (use, 0));
7107 /* Is it a floating point register? */
7108 if (regno >= 32 && regno <= 39)
7110 /* Copy the FP register into an integer register via memory. */
7111 if (arg_mode == SFmode)
7113 xoperands[0] = XEXP (use, 0);
7114 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7115 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7116 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7118 else
7120 xoperands[0] = XEXP (use, 0);
7121 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7122 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7123 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7124 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7130 /* Compute length of the FP argument copy sequence for INSN. */
7131 static int
7132 length_fp_args (rtx insn)
7134 int length = 0;
7135 rtx link;
7137 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7139 int arg_mode, regno;
7140 rtx use = XEXP (link, 0);
7142 if (! (GET_CODE (use) == USE
7143 && GET_CODE (XEXP (use, 0)) == REG
7144 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7145 continue;
7147 arg_mode = GET_MODE (XEXP (use, 0));
7148 regno = REGNO (XEXP (use, 0));
7150 /* Is it a floating point register? */
7151 if (regno >= 32 && regno <= 39)
7153 if (arg_mode == SFmode)
7154 length += 8;
7155 else
7156 length += 12;
7160 return length;
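/* The 8- and 12-byte counts above match copy_fp_args exactly: two
   4-byte insns ({fstws|fstw} + ldw) for SFmode, and three
   ({fstds|fstd} + two ldws) for DFmode.  */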
7163 /* Return the attribute length for the millicode call instruction INSN.
7164 The length must match the code generated by output_millicode_call.
7165 We include the delay slot in the returned length as it is better to
7166 overestimate the length than to underestimate it. */
7168 int
7169 attr_length_millicode_call (rtx insn)
7171 unsigned long distance = -1;
7172 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7174 if (INSN_ADDRESSES_SET_P ())
7176 distance = (total + insn_current_reference_address (insn));
7177 if (distance < total)
7178 distance = -1;
7181 if (TARGET_64BIT)
7183 if (!TARGET_LONG_CALLS && distance < 7600000)
7184 return 8;
7186 return 20;
7188 else if (TARGET_PORTABLE_RUNTIME)
7189 return 24;
7190 else
7192 if (!TARGET_LONG_CALLS && distance < 240000)
7193 return 8;
7195 if (TARGET_LONG_ABS_CALL && !flag_pic)
7196 return 12;
7198 return 24;
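/* These values track the sequences in output_millicode_call below:
   8 bytes for the short {bl|b,l} and its delay slot, 12 for the
   non-PIC ldil/ble absolute sequence plus a nop, 20 for the 64-bit
   pc-relative sequence, and 24 for the portable runtime and PIC
   sequences.  */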
7202 /* INSN is a function call. It may have an unconditional jump
7203 in its delay slot.
7205 CALL_DEST is the routine we are calling. */
7207 const char *
7208 output_millicode_call (rtx insn, rtx call_dest)
7210 int attr_length = get_attr_length (insn);
7211 int seq_length = dbr_sequence_length ();
7212 int distance;
7213 rtx seq_insn;
7214 rtx xoperands[3];
7216 xoperands[0] = call_dest;
7217 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7219 /* Handle the common case where we are sure that the branch will
7220 reach the beginning of the $CODE$ subspace. The within-reach
7221 form of the $$sh_func_adrs call has a length of 28. Because
7222 it has an attribute type of multi, it never has a nonzero
7223 sequence length. The length of the $$sh_func_adrs call is the same
7224 as that of certain out-of-reach PIC calls to other routines. */
7225 if (!TARGET_LONG_CALLS
7226 && ((seq_length == 0
7227 && (attr_length == 12
7228 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7229 || (seq_length != 0 && attr_length == 8)))
7231 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7233 else
7235 if (TARGET_64BIT)
7237 /* It might seem that one insn could be saved by accessing
7238 the millicode function using the linkage table. However,
7239 this doesn't work in shared libraries and other dynamically
7240 loaded objects. Using a pc-relative sequence also avoids
7241 problems related to the implicit use of the gp register. */
7242 output_asm_insn ("b,l .+8,%%r1", xoperands);
7244 if (TARGET_GAS)
7246 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7247 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7249 else
7251 xoperands[1] = gen_label_rtx ();
7252 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7253 targetm.asm_out.internal_label (asm_out_file, "L",
7254 CODE_LABEL_NUMBER (xoperands[1]));
7255 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7258 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7260 else if (TARGET_PORTABLE_RUNTIME)
7262 /* Pure portable runtime doesn't allow be/ble; we also don't
7263 have PIC support in the assembler/linker, so this sequence
7264 is needed. */
7266 /* Get the address of our target into %r1. */
7267 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7268 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7270 /* Get our return address into %r31. */
7271 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7272 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7274 /* Jump to our target address in %r1. */
7275 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7277 else if (!flag_pic)
7279 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7280 if (TARGET_PA_20)
7281 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7282 else
7283 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7285 else
7287 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7288 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7290 if (TARGET_SOM || !TARGET_GAS)
7292 /* The HP assembler can generate relocations for the
7293 difference of two symbols. GAS can do this for a
7294 millicode symbol but not an arbitrary external
7295 symbol when generating SOM output. */
7296 xoperands[1] = gen_label_rtx ();
7297 targetm.asm_out.internal_label (asm_out_file, "L",
7298 CODE_LABEL_NUMBER (xoperands[1]));
7299 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7300 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7302 else
7304 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7305 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7306 xoperands);
7309 /* Jump to our target address in %r1. */
7310 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7314 if (seq_length == 0)
7315 output_asm_insn ("nop", xoperands);
7317 /* We are done if there isn't a jump in the delay slot. */
7318 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7319 return "";
7321 /* This call has an unconditional jump in its delay slot. */
7322 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7324 /* See if the return address can be adjusted. Use the containing
7325 sequence insn's address. */
7326 if (INSN_ADDRESSES_SET_P ())
7328 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7329 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7330 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7332 if (VAL_14_BITS_P (distance))
7334 xoperands[1] = gen_label_rtx ();
7335 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7336 targetm.asm_out.internal_label (asm_out_file, "L",
7337 CODE_LABEL_NUMBER (xoperands[1]));
7339 else
7340 /* ??? This branch may not reach its target. */
7341 output_asm_insn ("nop\n\tb,n %0", xoperands);
7343 else
7344 /* ??? This branch may not reach its target. */
7345 output_asm_insn ("nop\n\tb,n %0", xoperands);
7347 /* Delete the jump. */
7348 SET_INSN_DELETED (NEXT_INSN (insn));
7350 return "";
7353 /* Return the attribute length of the call instruction INSN. The SIBCALL
7354 flag indicates whether INSN is a regular call or a sibling call. The
7355 length returned must be at least as long as the code actually generated by
7356 output_call. Since branch shortening is done before delay branch
7357 sequencing, there is no way to determine whether or not the delay
7358 slot will be filled during branch shortening. Even when the delay
7359 slot is filled, we may have to add a nop if the delay slot contains
7360 a branch that can't reach its target. Thus, we always have to include
7361 the delay slot in the length estimate. This used to be done in
7362 pa_adjust_insn_length but we do it here now as some sequences always
7363 fill the delay slot and we can save four bytes in the estimate for
7364 these sequences. */
7366 int
7367 attr_length_call (rtx insn, int sibcall)
7369 int local_call;
7370 rtx call, call_dest;
7371 tree call_decl;
7372 int length = 0;
7373 rtx pat = PATTERN (insn);
7374 unsigned long distance = -1;
7376 gcc_assert (GET_CODE (insn) == CALL_INSN);
7378 if (INSN_ADDRESSES_SET_P ())
7380 unsigned long total;
7382 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7383 distance = (total + insn_current_reference_address (insn));
7384 if (distance < total)
7385 distance = -1;
7388 gcc_assert (GET_CODE (pat) == PARALLEL);
7390 /* Get the call rtx. */
7391 call = XVECEXP (pat, 0, 0);
7392 if (GET_CODE (call) == SET)
7393 call = SET_SRC (call);
7395 gcc_assert (GET_CODE (call) == CALL);
7397 /* Determine if this is a local call. */
7398 call_dest = XEXP (XEXP (call, 0), 0);
7399 call_decl = SYMBOL_REF_DECL (call_dest);
7400 local_call = call_decl && targetm.binds_local_p (call_decl);
7402 /* pc-relative branch. */
7403 if (!TARGET_LONG_CALLS
7404 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7405 || distance < 240000))
7406 length += 8;
7408 /* 64-bit plabel sequence. */
7409 else if (TARGET_64BIT && !local_call)
7410 length += sibcall ? 28 : 24;
7412 /* non-pic long absolute branch sequence. */
7413 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7414 length += 12;
7416 /* long pc-relative branch sequence. */
7417 else if (TARGET_LONG_PIC_SDIFF_CALL
7418 || (TARGET_GAS && !TARGET_SOM
7419 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7421 length += 20;
7423 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && flag_pic)
7424 length += 8;
7427 /* 32-bit plabel sequence. */
7428 else
7430 length += 32;
7432 if (TARGET_SOM)
7433 length += length_fp_args (insn);
7435 if (flag_pic)
7436 length += 4;
7438 if (!TARGET_PA_20)
7440 if (!sibcall)
7441 length += 8;
7443 if (!TARGET_NO_SPACE_REGS && flag_pic)
7444 length += 8;
7448 return length;
7451 /* INSN is a function call. It may have an unconditional jump
7452 in its delay slot.
7454 CALL_DEST is the routine we are calling. */
7456 const char *
7457 output_call (rtx insn, rtx call_dest, int sibcall)
7459 int delay_insn_deleted = 0;
7460 int delay_slot_filled = 0;
7461 int seq_length = dbr_sequence_length ();
7462 tree call_decl = SYMBOL_REF_DECL (call_dest);
7463 int local_call = call_decl && targetm.binds_local_p (call_decl);
7464 rtx xoperands[2];
7466 xoperands[0] = call_dest;
7468 /* Handle the common case where we're sure that the branch will reach
7469 the beginning of the "$CODE$" subspace. This is the beginning of
7470 the current function if we are in a named section. */
7471 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7473 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7474 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7476 else
7478 if (TARGET_64BIT && !local_call)
7480 /* ??? As far as I can tell, the HP linker doesn't support the
7481 long pc-relative sequence described in the 64-bit runtime
7482 architecture. So, we use a slightly longer indirect call. */
7483 xoperands[0] = get_deferred_plabel (call_dest);
7484 xoperands[1] = gen_label_rtx ();
7486 /* If this isn't a sibcall, we put the load of %r27 into the
7487 delay slot. We can't do this in a sibcall as we don't
7488 have a second call-clobbered scratch register available. */
7489 if (seq_length != 0
7490 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7491 && !sibcall)
7493 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7494 optimize, 0, NULL);
7496 /* Now delete the delay insn. */
7497 SET_INSN_DELETED (NEXT_INSN (insn));
7498 delay_insn_deleted = 1;
7501 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7502 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7503 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7505 if (sibcall)
7507 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7508 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7509 output_asm_insn ("bve (%%r1)", xoperands);
7511 else
7513 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7514 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7515 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7516 delay_slot_filled = 1;
7519 else
7521 int indirect_call = 0;
7523 /* Emit a long call. There are several different sequences
7524 of increasing length and complexity. In most cases,
7525 they don't allow an instruction in the delay slot. */
7526 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7527 && !TARGET_LONG_PIC_SDIFF_CALL
7528 && !(TARGET_GAS && !TARGET_SOM
7529 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7530 && !TARGET_64BIT)
7531 indirect_call = 1;
7533 if (seq_length != 0
7534 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7535 && !sibcall
7536 && (!TARGET_PA_20
7537 || indirect_call
7538 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7540 /* A non-jump insn in the delay slot. By definition we can
7541 emit this insn before the call (and in fact before argument
7542 relocating). */
7543 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7544 NULL);
7546 /* Now delete the delay insn. */
7547 SET_INSN_DELETED (NEXT_INSN (insn));
7548 delay_insn_deleted = 1;
7551 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7553 /* This is the best sequence for making long calls in
7554 non-pic code. Unfortunately, GNU ld doesn't provide
7555 the stub needed for external calls, and GAS's support
7556 for this with the SOM linker is buggy. It is safe
7557 to use this for local calls. */
7558 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7559 if (sibcall)
7560 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7561 else
7563 if (TARGET_PA_20)
7564 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7565 xoperands);
7566 else
7567 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7569 output_asm_insn ("copy %%r31,%%r2", xoperands);
7570 delay_slot_filled = 1;
7573 else
7575 if (TARGET_LONG_PIC_SDIFF_CALL)
7577 /* The HP assembler and linker can handle relocations
7578 for the difference of two symbols. The HP assembler
7579 recognizes the sequence as a pc-relative call and
7580 the linker provides stubs when needed. */
7581 xoperands[1] = gen_label_rtx ();
7582 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7583 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7584 targetm.asm_out.internal_label (asm_out_file, "L",
7585 CODE_LABEL_NUMBER (xoperands[1]));
7586 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7588 else if (TARGET_GAS && !TARGET_SOM
7589 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7591 /* GAS currently can't generate the relocations that
7592 are needed for the SOM linker under HP-UX using this
7593 sequence. The GNU linker doesn't generate the stubs
7594 that are needed for external calls on TARGET_ELF32
7595 with this sequence. For now, we have to use a
7596 longer plabel sequence when using GAS. */
7597 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7598 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7599 xoperands);
7600 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7601 xoperands);
7603 else
7605 /* Emit a long plabel-based call sequence. This is
7606 essentially an inline implementation of $$dyncall.
7607 We don't actually try to call $$dyncall as this is
7608 as difficult as calling the function itself. */
7609 xoperands[0] = get_deferred_plabel (call_dest);
7610 xoperands[1] = gen_label_rtx ();
7612 /* Since the call is indirect, FP arguments in registers
7613 need to be copied to the general registers. Then, the
7614 argument relocation stub will copy them back. */
7615 if (TARGET_SOM)
7616 copy_fp_args (insn);
7618 if (flag_pic)
7620 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7621 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7622 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7624 else
7626 output_asm_insn ("addil LR'%0-$global$,%%r27",
7627 xoperands);
7628 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7629 xoperands);
7632 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7633 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7634 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7635 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7637 if (!sibcall && !TARGET_PA_20)
7639 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7640 if (TARGET_NO_SPACE_REGS)
7641 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7642 else
7643 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7647 if (TARGET_PA_20)
7649 if (sibcall)
7650 output_asm_insn ("bve (%%r1)", xoperands);
7651 else
7653 if (indirect_call)
7655 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7656 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7657 delay_slot_filled = 1;
7659 else
7660 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7663 else
7665 if (!TARGET_NO_SPACE_REGS && flag_pic)
7666 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7667 xoperands);
7669 if (sibcall)
7671 if (TARGET_NO_SPACE_REGS || !flag_pic)
7672 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7673 else
7674 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7676 else
7678 if (TARGET_NO_SPACE_REGS || !flag_pic)
7679 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7680 else
7681 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7683 if (indirect_call)
7684 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7685 else
7686 output_asm_insn ("copy %%r31,%%r2", xoperands);
7687 delay_slot_filled = 1;
7694 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7695 output_asm_insn ("nop", xoperands);
7697 /* We are done if there isn't a jump in the delay slot. */
7698 if (seq_length == 0
7699 || delay_insn_deleted
7700 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7701 return "";
7703 /* A sibcall should never have a branch in the delay slot. */
7704 gcc_assert (!sibcall);
7706 /* This call has an unconditional jump in its delay slot. */
7707 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7709 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7711 /* See if the return address can be adjusted. Use the containing
7712 sequence insn's address. */
7713 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7714 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7715 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7717 if (VAL_14_BITS_P (distance))
7719 xoperands[1] = gen_label_rtx ();
7720 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7721 targetm.asm_out.internal_label (asm_out_file, "L",
7722 CODE_LABEL_NUMBER (xoperands[1]));
7724 else
7725 output_asm_insn ("nop\n\tb,n %0", xoperands);
7727 else
7728 output_asm_insn ("b,n %0", xoperands);
7730 /* Delete the jump. */
7731 SET_INSN_DELETED (NEXT_INSN (insn));
7733 return "";
7736 /* Return the attribute length of the indirect call instruction INSN.
7737 The length must match the code generated by output_indirect_call.
7738 The returned length includes the delay slot. Currently, the delay
7739 slot of an indirect call sequence is not exposed and it is used by
7740 the sequence itself. */
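/* As a rough guide, the lengths returned below match the sequences
   emitted by output_indirect_call: 8 for the direct branch to
   $$dyncall, 12 for the ldil/ble millicode sequence, 20 for the
   portable runtime sequence, and 24 for the long PIC sequence.  */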
7743 attr_length_indirect_call (rtx insn)
7745 unsigned long distance = -1;
7746 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7748 if (INSN_ADDRESSES_SET_P ())
7750 distance = (total + insn_current_reference_address (insn));
7751 if (distance < total)
7752 distance = -1;
7755 if (TARGET_64BIT)
7756 return 12;
7758 if (TARGET_FAST_INDIRECT_CALLS
7759 || (!TARGET_PORTABLE_RUNTIME
7760 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7761 || distance < 240000)))
7762 return 8;
7764 if (flag_pic)
7765 return 24;
7767 if (TARGET_PORTABLE_RUNTIME)
7768 return 20;
7770 /* Out of reach, can use ble. */
7771 return 12;
7774 const char *
7775 output_indirect_call (rtx insn, rtx call_dest)
7777 rtx xoperands[1];
7779 if (TARGET_64BIT)
7781 xoperands[0] = call_dest;
7782 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7783 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7784 return "";
7787 /* First the special case for kernels, level 0 systems, etc. */
7788 if (TARGET_FAST_INDIRECT_CALLS)
7789 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7791 /* Now the normal case -- we can reach $$dyncall directly or
7792 we're sure that we can get there via a long-branch stub.
7794 No need to check target flags as the length uniquely identifies
7795 the remaining cases. */
7796 if (attr_length_indirect_call (insn) == 8)
7798 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7799 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7800 variant of the B,L instruction can't be used on the SOM target. */
7801 if (TARGET_PA_20 && !TARGET_SOM)
7802 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7803 else
7804 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7807 /* Long millicode call, but we are not generating PIC or portable runtime
7808 code. */
7809 if (attr_length_indirect_call (insn) == 12)
7810 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7812 /* Long millicode call for portable runtime. */
7813 if (attr_length_indirect_call (insn) == 20)
7814 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7816 /* We need a long PIC call to $$dyncall. */
7817 xoperands[0] = NULL_RTX;
7818 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7819 if (TARGET_SOM || !TARGET_GAS)
7821 xoperands[0] = gen_label_rtx ();
7822 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7823 targetm.asm_out.internal_label (asm_out_file, "L",
7824 CODE_LABEL_NUMBER (xoperands[0]));
7825 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7827 else
7829 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7830 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7831 xoperands);
7833 output_asm_insn ("blr %%r0,%%r2", xoperands);
7834 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7835 return "";
7838 /* Return the total length of the save and restore instructions needed for
7839 the data linkage table pointer (i.e., the PIC register) across the call
7840 instruction INSN. No-return calls do not require a save and restore.
7841 In addition, we may be able to avoid the save and restore for calls
7842 within the same translation unit. */
7845 attr_length_save_restore_dltp (rtx insn)
7847 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7848 return 0;
7850 return 8;
7853 /* In HP-UX 8.0's shared library scheme, special relocations are needed
7854 for function labels if they might be passed to a function
7855 in a shared library (because shared libraries don't live in code
7856 space), and special magic is needed to construct their address. */
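/* For example, a function symbol "foo" is rewritten as "@foo"; the
   prefix is removed again by pa_strip_name_encoding below.  */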
7858 void
7859 hppa_encode_label (rtx sym)
7861 const char *str = XSTR (sym, 0);
7862 int len = strlen (str) + 1;
7863 char *newstr, *p;
7865 p = newstr = XALLOCAVEC (char, len + 1);
7866 *p++ = '@';
7867 strcpy (p, str);
7869 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7872 static void
7873 pa_encode_section_info (tree decl, rtx rtl, int first)
7875 int old_referenced = 0;
7877 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
7878 old_referenced
7879 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
7881 default_encode_section_info (decl, rtl, first);
7883 if (first && TEXT_SPACE_P (decl))
7885 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7886 if (TREE_CODE (decl) == FUNCTION_DECL)
7887 hppa_encode_label (XEXP (rtl, 0));
7889 else if (old_referenced)
7890 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
7893 /* This is sort of the inverse of pa_encode_section_info. */
7895 static const char *
7896 pa_strip_name_encoding (const char *str)
7898 str += (*str == '@');
7899 str += (*str == '*');
7900 return str;
7904 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7906 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7909 /* Returns 1 if OP is a function label involved in a simple addition
7910 with a constant. Used to keep certain patterns from matching
7911 during instruction combination. */
7913 is_function_label_plus_const (rtx op)
7915 /* Strip off any CONST. */
7916 if (GET_CODE (op) == CONST)
7917 op = XEXP (op, 0);
7919 return (GET_CODE (op) == PLUS
7920 && function_label_operand (XEXP (op, 0), Pmode)
7921 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7924 /* Output assembly code for a thunk to FUNCTION. */
7926 static void
7927 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7928 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7929 tree function)
7931 static unsigned int current_thunk_number;
7932 int val_14 = VAL_14_BITS_P (delta);
7933 unsigned int old_last_address = last_address, nbytes = 0;
7934 char label[16];
7935 rtx xoperands[4];
7937 xoperands[0] = XEXP (DECL_RTL (function), 0);
7938 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7939 xoperands[2] = GEN_INT (delta);
7941 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7942 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7944 /* Output the thunk. We know that the function is in the same
7945 translation unit (i.e., the same space) as the thunk, and that
7946 thunks are output after their method. Thus, we don't need an
7947 external branch to reach the function. With SOM and GAS,
7948 functions and thunks are effectively in different sections.
7949 Thus, we can always use an IA-relative branch and the linker
7950 will add a long branch stub if necessary.
7952 However, we have to be careful when generating PIC code on the
7953 SOM port to ensure that the sequence does not transfer to an
7954 import stub for the target function as this could clobber the
7955 return value saved at SP-24. This would also apply to the
7956 32-bit linux port if the multi-space model is implemented. */
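/* As a sketch, in the simplest case below the thunk body is just

	b function
	ldo delta(%r26),%r26

   with the adjustment of the this pointer in %r26 done in the delay
   slot ("function" and "delta" stand for operands 0 and 2).  */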
7957 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7958 && !(flag_pic && TREE_PUBLIC (function))
7959 && (TARGET_GAS || last_address < 262132))
7960 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7961 && ((targetm.have_named_sections
7962 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7963 /* The GNU 64-bit linker has rather poor stub management.
7964 So, we use a long branch from thunks that aren't in
7965 the same section as the target function. */
7966 && ((!TARGET_64BIT
7967 && (DECL_SECTION_NAME (thunk_fndecl)
7968 != DECL_SECTION_NAME (function)))
7969 || ((DECL_SECTION_NAME (thunk_fndecl)
7970 == DECL_SECTION_NAME (function))
7971 && last_address < 262132)))
7972 || (targetm.have_named_sections
7973 && DECL_SECTION_NAME (thunk_fndecl) == NULL
7974 && DECL_SECTION_NAME (function) == NULL
7975 && last_address < 262132)
7976 || (!targetm.have_named_sections && last_address < 262132))))
7978 if (!val_14)
7979 output_asm_insn ("addil L'%2,%%r26", xoperands);
7981 output_asm_insn ("b %0", xoperands);
7983 if (val_14)
7985 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7986 nbytes += 8;
7988 else
7990 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7991 nbytes += 12;
7994 else if (TARGET_64BIT)
7996 /* We only have one call-clobbered scratch register, so we can't
7997 make use of the delay slot if delta doesn't fit in 14 bits. */
7998 if (!val_14)
8000 output_asm_insn ("addil L'%2,%%r26", xoperands);
8001 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8004 output_asm_insn ("b,l .+8,%%r1", xoperands);
8006 if (TARGET_GAS)
8008 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8009 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8011 else
8013 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8014 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8017 if (val_14)
8019 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8020 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8021 nbytes += 20;
8023 else
8025 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8026 nbytes += 24;
8029 else if (TARGET_PORTABLE_RUNTIME)
8031 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8032 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8034 if (!val_14)
8035 output_asm_insn ("addil L'%2,%%r26", xoperands);
8037 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8039 if (val_14)
8041 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8042 nbytes += 16;
8044 else
8046 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8047 nbytes += 20;
8050 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8052 /* The function is accessible from outside this module. The only
8053 way to avoid an import stub between the thunk and function is to
8054 call the function directly with an indirect sequence similar to
8055 that used by $$dyncall. This is possible because $$dyncall acts
8056 as the import stub in an indirect call. */
8057 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8058 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8059 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8060 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8061 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8062 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8063 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8064 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8065 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8067 if (!val_14)
8069 output_asm_insn ("addil L'%2,%%r26", xoperands);
8070 nbytes += 4;
8073 if (TARGET_PA_20)
8075 output_asm_insn ("bve (%%r22)", xoperands);
8076 nbytes += 36;
8078 else if (TARGET_NO_SPACE_REGS)
8080 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8081 nbytes += 36;
8083 else
8085 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8086 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8087 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8088 nbytes += 44;
8091 if (val_14)
8092 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8093 else
8094 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8096 else if (flag_pic)
8098 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8100 if (TARGET_SOM || !TARGET_GAS)
8102 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8103 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8105 else
8107 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8108 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8111 if (!val_14)
8112 output_asm_insn ("addil L'%2,%%r26", xoperands);
8114 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8116 if (val_14)
8118 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8119 nbytes += 20;
8121 else
8123 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8124 nbytes += 24;
8127 else
8129 if (!val_14)
8130 output_asm_insn ("addil L'%2,%%r26", xoperands);
8132 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8133 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8135 if (val_14)
8137 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8138 nbytes += 12;
8140 else
8142 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8143 nbytes += 16;
8147 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8149 if (TARGET_SOM && TARGET_GAS)
8151 /* We're done with this subspace except possibly for some additional
8152 debug information. Forget that we are in this subspace to ensure
8153 that the next function is output in its own subspace. */
8154 in_section = NULL;
8155 cfun->machine->in_nsubspa = 2;
8158 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8160 switch_to_section (data_section);
8161 output_asm_insn (".align 4", xoperands);
8162 ASM_OUTPUT_LABEL (file, label);
8163 output_asm_insn (".word P'%0", xoperands);
8166 current_thunk_number++;
8167 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8168 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8169 last_address += nbytes;
8170 if (old_last_address > last_address)
8171 last_address = UINT_MAX;
8172 update_total_code_bytes (nbytes);
8175 /* Only direct calls to static functions are allowed to be sibling (tail)
8176 call optimized.
8178 This restriction is necessary because some linker-generated stubs will
8179 store return pointers into rp' in some cases, which might clobber a
8180 live value already in rp'.
8182 In a sibcall the current function and the target function share stack
8183 space. Thus if the path to the current function and the path to the
8184 target function save a value in rp', they save the value into the
8185 same stack slot, which has undesirable consequences.
8187 Because of the deferred binding nature of shared libraries any function
8188 with external scope could be in a different load module and thus require
8189 rp' to be saved when calling that function. So sibcall optimizations
8190 can only be safe for static functions.
8192 Note that GCC never needs return value relocations, so we don't have to
8193 worry about static calls with return value relocations (which require
8194 saving rp').
8196 It is safe to perform a sibcall optimization when the target function
8197 will never return. */
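/* In short: the tests below reject sibcalls for the portable runtime
   and for TARGET_64BIT, accept any direct call for TARGET_ELF32, and
   otherwise accept only direct calls to functions that are not
   TREE_PUBLIC.  */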
8198 static bool
8199 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8201 if (TARGET_PORTABLE_RUNTIME)
8202 return false;
8204 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8205 single subspace mode and the call is not indirect. As far as I know,
8206 there is no operating system support for the multiple subspace mode.
8207 It might be possible to support indirect calls if we didn't use
8208 $$dyncall (see the indirect sequence generated in output_call). */
8209 if (TARGET_ELF32)
8210 return (decl != NULL_TREE);
8212 /* Sibcalls are not ok because the arg pointer register is not a fixed
8213 register. This prevents the sibcall optimization from occurring. In
8214 addition, there are problems with stub placement using GNU ld. This
8215 is because a normal sibcall branch uses a 17-bit relocation while
8216 a regular call branch uses a 22-bit relocation. As a result, more
8217 care needs to be taken in the placement of long-branch stubs. */
8218 if (TARGET_64BIT)
8219 return false;
8221 /* Sibcalls are only ok within a translation unit. */
8222 return (decl && !TREE_PUBLIC (decl));
8225 /* ??? Addition is not commutative on the PA due to the weird implicit
8226 space register selection rules for memory addresses. Therefore, we
8227 don't consider a + b == b + a, as this might be inside a MEM. */
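/* For example, within a MEM the space register is selected from the
   base register of (plus (reg A) (reg B)), so swapping the operands
   could change the space the address refers to.  */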
8228 static bool
8229 pa_commutative_p (const_rtx x, int outer_code)
8231 return (COMMUTATIVE_P (x)
8232 && (TARGET_NO_SPACE_REGS
8233 || (outer_code != UNKNOWN && outer_code != MEM)
8234 || GET_CODE (x) != PLUS));
8237 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8238 use in fmpyadd instructions. */
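/* The operand layout assumed below: operands[0-2] are the output and
   the two inputs of the multiply, and operands[3-5] are the output
   and the two inputs of the add.  */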
8240 fmpyaddoperands (rtx *operands)
8242 enum machine_mode mode = GET_MODE (operands[0]);
8244 /* Must be a floating point mode. */
8245 if (mode != SFmode && mode != DFmode)
8246 return 0;
8248 /* All modes must be the same. */
8249 if (! (mode == GET_MODE (operands[1])
8250 && mode == GET_MODE (operands[2])
8251 && mode == GET_MODE (operands[3])
8252 && mode == GET_MODE (operands[4])
8253 && mode == GET_MODE (operands[5])))
8254 return 0;
8256 /* All operands must be registers. */
8257 if (! (GET_CODE (operands[1]) == REG
8258 && GET_CODE (operands[2]) == REG
8259 && GET_CODE (operands[3]) == REG
8260 && GET_CODE (operands[4]) == REG
8261 && GET_CODE (operands[5]) == REG))
8262 return 0;
8264 /* Only 2 real operands to the addition. One of the input operands must
8265 be the same as the output operand. */
8266 if (! rtx_equal_p (operands[3], operands[4])
8267 && ! rtx_equal_p (operands[3], operands[5]))
8268 return 0;
8270 /* Inout operand of add cannot conflict with any operands from multiply. */
8271 if (rtx_equal_p (operands[3], operands[0])
8272 || rtx_equal_p (operands[3], operands[1])
8273 || rtx_equal_p (operands[3], operands[2]))
8274 return 0;
8276 /* The multiply cannot feed into the addition operands. */
8277 if (rtx_equal_p (operands[4], operands[0])
8278 || rtx_equal_p (operands[5], operands[0]))
8279 return 0;
8281 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8282 if (mode == SFmode
8283 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8284 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8285 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8286 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8287 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8288 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8289 return 0;
8291 /* Passed. Operands are suitable for fmpyadd. */
8292 return 1;
8295 #if !defined(USE_COLLECT2)
8296 static void
8297 pa_asm_out_constructor (rtx symbol, int priority)
8299 if (!function_label_operand (symbol, VOIDmode))
8300 hppa_encode_label (symbol);
8302 #ifdef CTORS_SECTION_ASM_OP
8303 default_ctor_section_asm_out_constructor (symbol, priority);
8304 #else
8305 # ifdef TARGET_ASM_NAMED_SECTION
8306 default_named_section_asm_out_constructor (symbol, priority);
8307 # else
8308 default_stabs_asm_out_constructor (symbol, priority);
8309 # endif
8310 #endif
8313 static void
8314 pa_asm_out_destructor (rtx symbol, int priority)
8316 if (!function_label_operand (symbol, VOIDmode))
8317 hppa_encode_label (symbol);
8319 #ifdef DTORS_SECTION_ASM_OP
8320 default_dtor_section_asm_out_destructor (symbol, priority);
8321 #else
8322 # ifdef TARGET_ASM_NAMED_SECTION
8323 default_named_section_asm_out_destructor (symbol, priority);
8324 # else
8325 default_stabs_asm_out_destructor (symbol, priority);
8326 # endif
8327 #endif
8329 #endif
8331 /* This function places uninitialized global data in the bss section.
8332 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8333 function on the SOM port to prevent uninitialized global data from
8334 being placed in the data section. */
8336 void
8337 pa_asm_output_aligned_bss (FILE *stream,
8338 const char *name,
8339 unsigned HOST_WIDE_INT size,
8340 unsigned int align)
8342 switch_to_section (bss_section);
8343 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8345 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8346 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8347 #endif
8349 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8350 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8351 #endif
8353 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8354 ASM_OUTPUT_LABEL (stream, name);
8355 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8358 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8359 that doesn't allow the alignment of global common storage to be directly
8360 specified. The SOM linker aligns common storage based on the rounded
8361 value of the NUM_BYTES parameter in the .comm directive. It's not
8362 possible to use the .align directive as it doesn't affect the alignment
8363 of the label associated with a .comm directive. */
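/* For example, a request for 2 bytes of common storage with 8-byte
   alignment emits ".comm 8" below, so the linker rounds both the
   storage and its alignment up to 8 bytes.  */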
8365 void
8366 pa_asm_output_aligned_common (FILE *stream,
8367 const char *name,
8368 unsigned HOST_WIDE_INT size,
8369 unsigned int align)
8371 unsigned int max_common_align;
8373 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8374 if (align > max_common_align)
8376 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8377 "for global common data. Using %u",
8378 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8379 align = max_common_align;
8382 switch_to_section (bss_section);
8384 assemble_name (stream, name);
8385 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8386 MAX (size, align / BITS_PER_UNIT));
8389 /* We can't use .comm for local common storage as the SOM linker effectively
8390 treats the symbol as universal and uses the same storage for local symbols
8391 with the same name in different object files. The .block directive
8392 reserves an uninitialized block of storage. However, it's not common
8393 storage. Fortunately, GCC never requests common storage with the same
8394 name in any given translation unit. */
8396 void
8397 pa_asm_output_aligned_local (FILE *stream,
8398 const char *name,
8399 unsigned HOST_WIDE_INT size,
8400 unsigned int align)
8402 switch_to_section (bss_section);
8403 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8405 #ifdef LOCAL_ASM_OP
8406 fprintf (stream, "%s", LOCAL_ASM_OP);
8407 assemble_name (stream, name);
8408 fprintf (stream, "\n");
8409 #endif
8411 ASM_OUTPUT_LABEL (stream, name);
8412 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8415 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8416 use in fmpysub instructions. */
8418 fmpysuboperands (rtx *operands)
8420 enum machine_mode mode = GET_MODE (operands[0]);
8422 /* Must be a floating point mode. */
8423 if (mode != SFmode && mode != DFmode)
8424 return 0;
8426 /* All modes must be the same. */
8427 if (! (mode == GET_MODE (operands[1])
8428 && mode == GET_MODE (operands[2])
8429 && mode == GET_MODE (operands[3])
8430 && mode == GET_MODE (operands[4])
8431 && mode == GET_MODE (operands[5])))
8432 return 0;
8434 /* All operands must be registers. */
8435 if (! (GET_CODE (operands[1]) == REG
8436 && GET_CODE (operands[2]) == REG
8437 && GET_CODE (operands[3]) == REG
8438 && GET_CODE (operands[4]) == REG
8439 && GET_CODE (operands[5]) == REG))
8440 return 0;
8442 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8443 operation, so operands[4] must be the same as operands[3]. */
8444 if (! rtx_equal_p (operands[3], operands[4]))
8445 return 0;
8447 /* The multiply cannot feed into the subtraction. */
8448 if (rtx_equal_p (operands[5], operands[0]))
8449 return 0;
8451 /* Inout operand of sub cannot conflict with any operands from multiply. */
8452 if (rtx_equal_p (operands[3], operands[0])
8453 || rtx_equal_p (operands[3], operands[1])
8454 || rtx_equal_p (operands[3], operands[2]))
8455 return 0;
8457 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8458 if (mode == SFmode
8459 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8460 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8461 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8462 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8463 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8464 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8465 return 0;
8467 /* Passed. Operands are suitable for fmpysub. */
8468 return 1;
8471 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8472 constants for shadd instructions. */
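/* These are the scale factors of the shNadd instructions (sh1add,
   sh2add and sh3add shift the index left by 1, 2 or 3 bits, i.e.,
   multiply it by 2, 4 or 8), used for scaled-index addresses such
   as base + 4*i.  */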
8474 shadd_constant_p (int val)
8476 if (val == 2 || val == 4 || val == 8)
8477 return 1;
8478 else
8479 return 0;
8482 /* Return 1 if OP is valid as a base or index register in a
8483 REG+REG address. */
8486 borx_reg_operand (rtx op, enum machine_mode mode)
8488 if (GET_CODE (op) != REG)
8489 return 0;
8491 /* We must reject virtual registers as the only expressions that
8492 can be instantiated are REG and REG+CONST. */
8493 if (op == virtual_incoming_args_rtx
8494 || op == virtual_stack_vars_rtx
8495 || op == virtual_stack_dynamic_rtx
8496 || op == virtual_outgoing_args_rtx
8497 || op == virtual_cfa_rtx)
8498 return 0;
8500 /* While it's always safe to index off the frame pointer, it's not
8501 profitable to do so when the frame pointer is being eliminated. */
8502 if (!reload_completed
8503 && flag_omit_frame_pointer
8504 && !cfun->calls_alloca
8505 && op == frame_pointer_rtx)
8506 return 0;
8508 return register_operand (op, mode);
8511 /* Return 1 if this operand is anything other than a hard register. */
8514 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8516 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8519 /* Return 1 if INSN branches forward. Should be using insn_addresses
8520 to avoid walking through all the insns... */
8521 static int
8522 forward_branch_p (rtx insn)
8524 rtx label = JUMP_LABEL (insn);
8526 while (insn)
8528 if (insn == label)
8529 break;
8530 else
8531 insn = NEXT_INSN (insn);
8534 return (insn == label);
8537 /* Return 1 if OP is an equality comparison, else return 0. */
8539 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8541 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8544 /* Return 1 if INSN is in the delay slot of a call instruction. */
8546 jump_in_call_delay (rtx insn)
8549 if (GET_CODE (insn) != JUMP_INSN)
8550 return 0;
8552 if (PREV_INSN (insn)
8553 && PREV_INSN (PREV_INSN (insn))
8554 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8556 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8558 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8559 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8562 else
8563 return 0;
8566 /* Output an unconditional move and branch insn. */
8568 const char *
8569 output_parallel_movb (rtx *operands, rtx insn)
8571 int length = get_attr_length (insn);
8573 /* These are the cases in which we win. */
8574 if (length == 4)
8575 return "mov%I1b,tr %1,%0,%2";
8577 /* None of the following cases win, but they don't lose either. */
8578 if (length == 8)
8580 if (dbr_sequence_length () == 0)
8582 /* Nothing in the delay slot, fake it by putting the combined
8583 insn (the copy or add) in the delay slot of a bl. */
8584 if (GET_CODE (operands[1]) == CONST_INT)
8585 return "b %2\n\tldi %1,%0";
8586 else
8587 return "b %2\n\tcopy %1,%0";
8589 else
8591 /* Something in the delay slot, but we've got a long branch. */
8592 if (GET_CODE (operands[1]) == CONST_INT)
8593 return "ldi %1,%0\n\tb %2";
8594 else
8595 return "copy %1,%0\n\tb %2";
8599 if (GET_CODE (operands[1]) == CONST_INT)
8600 output_asm_insn ("ldi %1,%0", operands);
8601 else
8602 output_asm_insn ("copy %1,%0", operands);
8603 return output_lbranch (operands[2], insn, 1);
8606 /* Output an unconditional add and branch insn. */
8608 const char *
8609 output_parallel_addb (rtx *operands, rtx insn)
8611 int length = get_attr_length (insn);
8613 /* To make life easy we want operand0 to be the shared input/output
8614 operand and operand1 to be the readonly operand. */
8615 if (operands[0] == operands[1])
8616 operands[1] = operands[2];
8618 /* These are the cases in which we win. */
8619 if (length == 4)
8620 return "add%I1b,tr %1,%0,%3";
8622 /* None of the following cases win, but they don't lose either. */
8623 if (length == 8)
8625 if (dbr_sequence_length () == 0)
8626 /* Nothing in the delay slot, fake it by putting the combined
8627 insn (the copy or add) in the delay slot of a bl. */
8628 return "b %3\n\tadd%I1 %1,%0,%0";
8629 else
8630 /* Something in the delay slot, but we've got a long branch. */
8631 return "add%I1 %1,%0,%0\n\tb %3";
8634 output_asm_insn ("add%I1 %1,%0,%0", operands);
8635 return output_lbranch (operands[3], insn, 1);
8638 /* Return nonzero if INSN (a jump insn) immediately follows a call
8639 to a named function. This is used to avoid filling the delay slot
8640 of the jump since it can usually be eliminated by modifying RP in
8641 the delay slot of the call. */
8644 following_call (rtx insn)
8646 if (! TARGET_JUMP_IN_DELAY)
8647 return 0;
8649 /* Find the previous real insn, skipping NOTEs. */
8650 insn = PREV_INSN (insn);
8651 while (insn && GET_CODE (insn) == NOTE)
8652 insn = PREV_INSN (insn);
8654 /* Check for CALL_INSNs and millicode calls. */
8655 if (insn
8656 && ((GET_CODE (insn) == CALL_INSN
8657 && get_attr_type (insn) != TYPE_DYNCALL)
8658 || (GET_CODE (insn) == INSN
8659 && GET_CODE (PATTERN (insn)) != SEQUENCE
8660 && GET_CODE (PATTERN (insn)) != USE
8661 && GET_CODE (PATTERN (insn)) != CLOBBER
8662 && get_attr_type (insn) == TYPE_MILLI)))
8663 return 1;
8665 return 0;
8668 /* We use this hook to perform a PA specific optimization which is difficult
8669 to do in earlier passes.
8671 We want the delay slots of branches within jump tables to be filled.
8672 None of the compiler passes at the moment even has the notion that a
8673 PA jump table doesn't contain addresses, but instead contains actual
8674 instructions!
8676 Because we actually jump into the table, the addresses of each entry
8677 must stay constant in relation to the beginning of the table (which
8678 itself must stay constant relative to the instruction to jump into
8679 it). I don't believe we can guarantee earlier passes of the compiler
8680 will adhere to those rules.
8682 So, late in the compilation process we find all the jump tables, and
8683 expand them into real code -- e.g. each entry in the jump table vector
8684 will get an appropriate label followed by a jump to the final target.
8686 Reorg and the final jump pass can then optimize these branches and
8687 fill their delay slots. We end up with smaller, more efficient code.
8689 The jump instructions within the table are special; we must be able
8690 to identify them during assembly output (if the jumps don't get filled
8691 we need to emit a nop rather than nullifying the delay slot). We
8692 identify jumps in switch tables by using insns with the attribute
8693 type TYPE_BTABLE_BRANCH.
8695 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8696 insns. This serves two purposes, first it prevents jump.c from
8697 noticing that the last N entries in the table jump to the instruction
8698 immediately after the table and deleting the jumps. Second, those
8699 insns mark where we should emit .begin_brtab and .end_brtab directives
8700 when using GAS (allows for better link time optimizations). */
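/* As a rough sketch (label numbers illustrative), a two-entry table

	.word L$10
	.word L$11

   is rewritten by the loop below into

	begin_brtab
   L$100:	b L$10
		nop
   L$101:	b L$11
		nop
	end_brtab

   with a barrier after each jump, so reorg can later fill the delay
   slots.  */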
8702 static void
8703 pa_reorg (void)
8705 rtx insn;
8707 remove_useless_addtr_insns (1);
8709 if (pa_cpu < PROCESSOR_8000)
8710 pa_combine_instructions ();
8713 /* This is fairly cheap, so always run it if optimizing. */
8714 if (optimize > 0 && !TARGET_BIG_SWITCH)
8716 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8717 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8719 rtx pattern, tmp, location, label;
8720 unsigned int length, i;
8722 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8723 if (GET_CODE (insn) != JUMP_INSN
8724 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8725 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8726 continue;
8728 /* Emit marker for the beginning of the branch table. */
8729 emit_insn_before (gen_begin_brtab (), insn);
8731 pattern = PATTERN (insn);
8732 location = PREV_INSN (insn);
8733 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8735 for (i = 0; i < length; i++)
8737 /* Emit a label before each jump to keep jump.c from
8738 removing this code. */
8739 tmp = gen_label_rtx ();
8740 LABEL_NUSES (tmp) = 1;
8741 emit_label_after (tmp, location);
8742 location = NEXT_INSN (location);
8744 if (GET_CODE (pattern) == ADDR_VEC)
8745 label = XEXP (XVECEXP (pattern, 0, i), 0);
8746 else
8747 label = XEXP (XVECEXP (pattern, 1, i), 0);
8749 tmp = gen_short_jump (label);
8751 /* Emit the jump itself. */
8752 tmp = emit_jump_insn_after (tmp, location);
8753 JUMP_LABEL (tmp) = label;
8754 LABEL_NUSES (label)++;
8755 location = NEXT_INSN (location);
8757 /* Emit a BARRIER after the jump. */
8758 emit_barrier_after (location);
8759 location = NEXT_INSN (location);
8762 /* Emit marker for the end of the branch table. */
8763 emit_insn_before (gen_end_brtab (), location);
8764 location = NEXT_INSN (location);
8765 emit_barrier_after (location);
8767 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8768 delete_insn (insn);
8771 else
8773 /* Still need brtab marker insns. FIXME: the presence of these
8774 markers disables output of the branch table to readonly memory,
8775 and any alignment directives that might be needed. Possibly,
8776 the begin_brtab insn should be output before the label for the
8777 table. This doesn't matter at the moment since the tables are
8778 always output in the text section. */
8779 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8781 /* Find an ADDR_VEC insn. */
8782 if (GET_CODE (insn) != JUMP_INSN
8783 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8784 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8785 continue;
8787 /* Now generate markers for the beginning and end of the
8788 branch table. */
8789 emit_insn_before (gen_begin_brtab (), insn);
8790 emit_insn_after (gen_end_brtab (), insn);
8795 /* The PA has a number of odd instructions which can perform multiple
8796 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8797 it may be profitable to combine two instructions into one instruction
8798 with two outputs. It's not profitable on PA2.0 machines because the
8799 two outputs would take two slots in the reorder buffers.
8801 This routine finds instructions which can be combined and combines
8802 them. We only support some of the potential combinations, and we
8803 only try common ways to find suitable instructions.
8805 * addb can add two registers or a register and a small integer
8806 and jump to a nearby (+-8k) location. Normally the jump to the
8807 nearby location is conditional on the result of the add, but by
8808 using the "true" condition we can make the jump unconditional.
8809 Thus addb can perform two independent operations in one insn.
8811 * movb is similar to addb in that it can perform a reg->reg
8812 or small immediate->reg copy and jump to a nearby (+-8k) location.
8814 * fmpyadd and fmpysub can perform a FP multiply and either an
8815 FP add or FP sub if the operands of the multiply and add/sub are
8816 independent (there are other minor restrictions). Note both
8817 the fmpy and fadd/fsub can in theory move to better spots according
8818 to data dependencies, but for now we require that the fmpy stay at a
8819 fixed location.
8821 * Many of the memory operations can perform pre & post updates
8822 of index registers. GCC's pre/post increment/decrement addressing
8823 is far too simple to take advantage of all the possibilities. This
8824 pass may not be suitable since those insns may not be independent.
8826 * comclr can compare two integer registers or a register and a small
8827 integer, nullify the following instruction, and zero some other register. This
8828 is more difficult to use as it's harder to find an insn which
8829 will generate a comclr than finding something like an unconditional
8830 branch. (conditional moves & long branches create comclr insns).
8832 * Most arithmetic operations can conditionally skip the next
8833 instruction. They can be viewed as "perform this operation
8834 and conditionally jump to this nearby location" (where nearby
8835 is an insn away). These are difficult to use due to the
8836 branch length restrictions. */
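/* A sketch of the movb case (register numbers illustrative): a
   register copy floating near an unconditional backward branch

	copy %r4,%r26
	...
	b,n L$5

   can be collapsed into the single insn

	movb,tr %r4,%r26,L$5

   using the "true" condition to make the branch unconditional.  */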
8838 static void
8839 pa_combine_instructions (void)
8841 rtx anchor, new_rtx;
8843 /* This can get expensive since the basic algorithm is on the
8844 order of O(n^2) (or worse). Only do it for -O2 or higher
8845 levels of optimization. */
8846 if (optimize < 2)
8847 return;
8849 /* Walk down the list of insns looking for "anchor" insns which
8850 may be combined with "floating" insns. As the name implies,
8851 "anchor" instructions don't move, while "floating" insns may
8852 move around. */
8853 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8854 new_rtx = make_insn_raw (new_rtx);
8856 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8858 enum attr_pa_combine_type anchor_attr;
8859 enum attr_pa_combine_type floater_attr;
8861 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8862 Also ignore any special USE insns. */
8863 if ((GET_CODE (anchor) != INSN
8864 && GET_CODE (anchor) != JUMP_INSN
8865 && GET_CODE (anchor) != CALL_INSN)
8866 || GET_CODE (PATTERN (anchor)) == USE
8867 || GET_CODE (PATTERN (anchor)) == CLOBBER
8868 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8869 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8870 continue;
8872 anchor_attr = get_attr_pa_combine_type (anchor);
8873 /* See if anchor is an insn suitable for combination. */
8874 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8875 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8876 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8877 && ! forward_branch_p (anchor)))
8879 rtx floater;
8881 for (floater = PREV_INSN (anchor);
8882 floater;
8883 floater = PREV_INSN (floater))
8885 if (GET_CODE (floater) == NOTE
8886 || (GET_CODE (floater) == INSN
8887 && (GET_CODE (PATTERN (floater)) == USE
8888 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8889 continue;
8891 /* Anything except a regular INSN will stop our search. */
8892 if (GET_CODE (floater) != INSN
8893 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8894 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8896 floater = NULL_RTX;
8897 break;
8900 /* See if FLOATER is suitable for combination with the
8901 anchor. */
8902 floater_attr = get_attr_pa_combine_type (floater);
8903 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8904 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8905 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8906 && floater_attr == PA_COMBINE_TYPE_FMPY))
8908 /* If ANCHOR and FLOATER can be combined, then we're
8909 done with this pass. */
8910 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8911 SET_DEST (PATTERN (floater)),
8912 XEXP (SET_SRC (PATTERN (floater)), 0),
8913 XEXP (SET_SRC (PATTERN (floater)), 1)))
8914 break;
8917 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8918 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8920 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8922 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8923 SET_DEST (PATTERN (floater)),
8924 XEXP (SET_SRC (PATTERN (floater)), 0),
8925 XEXP (SET_SRC (PATTERN (floater)), 1)))
8926 break;
8928 else
8930 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8931 SET_DEST (PATTERN (floater)),
8932 SET_SRC (PATTERN (floater)),
8933 SET_SRC (PATTERN (floater))))
8934 break;
8939 /* If we didn't find anything on the backwards scan, try forwards. */
8940 if (!floater
8941 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8942 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8944 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8946 if (GET_CODE (floater) == NOTE
8947 || (GET_CODE (floater) == INSN
8948 && (GET_CODE (PATTERN (floater)) == USE
8949 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8951 continue;
8953 /* Anything except a regular INSN will stop our search. */
8954 if (GET_CODE (floater) != INSN
8955 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8956 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8958 floater = NULL_RTX;
8959 break;
8962 /* See if FLOATER is suitable for combination with the
8963 anchor. */
8964 floater_attr = get_attr_pa_combine_type (floater);
8965 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8966 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8967 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8968 && floater_attr == PA_COMBINE_TYPE_FMPY))
8970 /* If ANCHOR and FLOATER can be combined, then we're
8971 done with this pass. */
8972 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
8973 SET_DEST (PATTERN (floater)),
8974 XEXP (SET_SRC (PATTERN (floater)),
8976 XEXP (SET_SRC (PATTERN (floater)),
8977 1)))
8978 break;
8983 /* FLOATER will be nonzero if we found a suitable floating
8984 insn for combination with ANCHOR. */
8985 if (floater
8986 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8987 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8989 /* Emit the new instruction and delete the old anchor. */
8990 emit_insn_before (gen_rtx_PARALLEL
8991 (VOIDmode,
8992 gen_rtvec (2, PATTERN (anchor),
8993 PATTERN (floater))),
8994 anchor);
8996 SET_INSN_DELETED (anchor);
8998 /* Emit a special USE insn for FLOATER, then delete
8999 the floating insn. */
9000 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9001 delete_insn (floater);
9003 continue;
9005 else if (floater
9006 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9008 rtx temp;
9009 /* Emit the new jump instruction and delete the old anchor. */
9010 temp
9011 = emit_jump_insn_before (gen_rtx_PARALLEL
9012 (VOIDmode,
9013 gen_rtvec (2, PATTERN (anchor),
9014 PATTERN (floater))),
9015 anchor);
9017 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9018 SET_INSN_DELETED (anchor);
9020 /* Emit a special USE insn for FLOATER, then delete
9021 the floating insn. */
9022 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9023 delete_insn (floater);
9024 continue;
9030 static int
9031 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9032 rtx src1, rtx src2)
9034 int insn_code_number;
9035 rtx start, end;
9037 /* Create a PARALLEL with the patterns of ANCHOR and
9038 FLOATER, try to recognize it, then test constraints
9039 for the resulting pattern.
9041 If the pattern doesn't match or the constraints
9042 aren't met, keep searching for a suitable floater
9043 insn. */
9044 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9045 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9046 INSN_CODE (new_rtx) = -1;
9047 insn_code_number = recog_memoized (new_rtx);
9048 if (insn_code_number < 0
9049 || (extract_insn (new_rtx), ! constrain_operands (1)))
9050 return 0;
9052 if (reversed)
9054 start = anchor;
9055 end = floater;
9057 else
9059 start = floater;
9060 end = anchor;
9063 /* There are up to three operands to consider: one
9064 output and two inputs.
9066 The output must not be used between FLOATER and ANCHOR
9067 exclusive. The inputs must not be set between
9068 FLOATER and ANCHOR exclusive. */
9070 if (reg_used_between_p (dest, start, end))
9071 return 0;
9073 if (reg_set_between_p (src1, start, end))
9074 return 0;
9076 if (reg_set_between_p (src2, start, end))
9077 return 0;
9079 /* If we get here, then everything is good. */
9080 return 1;
9083 /* Return nonzero if references for INSN are delayed.
9085 Millicode insns are actually function calls with some special
9086 constraints on arguments and register usage.
9088 Millicode calls always expect their arguments in the integer argument
9089 registers, and always return their result in %r29 (ret1). They
9090 are expected to clobber their arguments, %r1, %r29, and the return
9091 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9093 This function tells reorg that the references to the arguments of a
9094 millicode call do not appear to happen until after the millicode call.
9095 This allows reorg to put insns which set the argument registers into the
9096 delay slot of the millicode call -- thus they act more like traditional
9097 CALL_INSNs.
9099 Note we cannot consider side effects of the insn to be delayed because
9100 the branch and link insn will clobber the return pointer. If we happened
9101 to use the return pointer in the delay slot of the call, then we lose.
9103 get_attr_type will try to recognize the given insn, so make sure to
9104 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9105 in particular. */
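/* For example, this lets reorg move an argument setup insn such as
   "ldi 5,%r25" into the delay slot of a millicode call like
   "bl $$divI,%r31" ($$divI illustrative; see the register conventions
   above).  */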
9107 insn_refs_are_delayed (rtx insn)
9109 return ((GET_CODE (insn) == INSN
9110 && GET_CODE (PATTERN (insn)) != SEQUENCE
9111 && GET_CODE (PATTERN (insn)) != USE
9112 && GET_CODE (PATTERN (insn)) != CLOBBER
9113 && get_attr_type (insn) == TYPE_MILLI));
9116 /* On the HP-PA the value is found in register(s) 28(-29), unless
9117 the mode is SF or DF. Then the value is returned in fr4 (32).
9119 This must perform the same promotions as PROMOTE_MODE, else
9120 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
9122 Small structures must be returned in a PARALLEL on PA64 in order
9123 to match the HP Compiler ABI. */
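/* For instance, on PA64 a 16-byte struct is returned below as a
   PARALLEL of two DImode pieces in GRs 28 and 29 at offsets 0 and 8,
   while on the 32-bit target a 4-byte struct comes back directly
   in register 28.  */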
9126 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
9128 enum machine_mode valmode;
9130 if (AGGREGATE_TYPE_P (valtype)
9131 || TREE_CODE (valtype) == COMPLEX_TYPE
9132 || TREE_CODE (valtype) == VECTOR_TYPE)
9134 if (TARGET_64BIT)
9136 /* Aggregates with a size less than or equal to 128 bits are
9137 returned in GR 28(-29). They are left justified. The pad
9138 bits are undefined. Larger aggregates are returned in
9139 memory. */
9140 rtx loc[2];
9141 int i, offset = 0;
9142 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9144 for (i = 0; i < ub; i++)
9146 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9147 gen_rtx_REG (DImode, 28 + i),
9148 GEN_INT (offset));
9149 offset += 8;
9152 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9154 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9156 /* Aggregates 5 to 8 bytes in size are returned in general
9157 registers r28-r29 in the same manner as other non
9158 floating-point objects. The data is right-justified and
9159 zero-extended to 64 bits. This is opposite to the normal
9160 justification used on big endian targets and requires
9161 special treatment. */
9162 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9163 gen_rtx_REG (DImode, 28), const0_rtx);
9164 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9168 if ((INTEGRAL_TYPE_P (valtype)
9169 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9170 || POINTER_TYPE_P (valtype))
9171 valmode = word_mode;
9172 else
9173 valmode = TYPE_MODE (valtype);
9175 if (TREE_CODE (valtype) == REAL_TYPE
9176 && !AGGREGATE_TYPE_P (valtype)
9177 && TYPE_MODE (valtype) != TFmode
9178 && !TARGET_SOFT_FLOAT)
9179 return gen_rtx_REG (valmode, 32);
9181 return gen_rtx_REG (valmode, 28);
9184 /* Return the location of a parameter that is passed in a register or NULL
9185 if the parameter has any component that is passed in memory.
9187 This is new code and will be pushed into the net sources after
9188 further testing.
9190 ??? We might want to restructure this so that it looks more like other
9191 ports. */
9193 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9194 int named ATTRIBUTE_UNUSED)
9196 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9197 int alignment = 0;
9198 int arg_size;
9199 int fpr_reg_base;
9200 int gpr_reg_base;
9201 rtx retval;
9203 if (mode == VOIDmode)
9204 return NULL_RTX;
9206 arg_size = FUNCTION_ARG_SIZE (mode, type);
9208 /* If this arg would be passed partially or totally on the stack, then
9209 this routine should return zero. pa_arg_partial_bytes will
9210 handle arguments which are split between regs and stack slots if
9211 the ABI mandates split arguments. */
9212 if (!TARGET_64BIT)
9214 /* The 32-bit ABI does not split arguments. */
9215 if (cum->words + arg_size > max_arg_words)
9216 return NULL_RTX;
9218 else
9220 if (arg_size > 1)
9221 alignment = cum->words & 1;
9222 if (cum->words + alignment >= max_arg_words)
9223 return NULL_RTX;
9226 /* The 32bit ABIs and the 64bit ABIs are rather different,
9227 particularly in their handling of FP registers. We might
9228 be able to cleverly share code between them, but I'm not
9229 going to bother in the hope that splitting them up results
9230 in code that is more easily understood. */
9232 if (TARGET_64BIT)
9234 /* Advance the base registers to their current locations.
9236 Remember, gprs grow towards smaller register numbers while
9237 fprs grow towards higher register numbers. Also remember that
9238 although FP regs are 32-bit addressable, we pretend that
9239 the registers are 64-bits wide. */
9240 gpr_reg_base = 26 - cum->words;
9241 fpr_reg_base = 32 + cum->words;
9243 /* Arguments wider than one word and small aggregates need special
9244 treatment. */
9245 if (arg_size > 1
9246 || mode == BLKmode
9247 || (type && (AGGREGATE_TYPE_P (type)
9248 || TREE_CODE (type) == COMPLEX_TYPE
9249 || TREE_CODE (type) == VECTOR_TYPE)))
9251 /* Double-extended precision (80-bit), quad-precision (128-bit)
9252 and aggregates including complex numbers are aligned on
9253 128-bit boundaries. The first eight 64-bit argument slots
9254 are associated one-to-one, with general registers r26
9255 through r19, and also with floating-point registers fr4
9256 through fr11. Arguments larger than one word are always
9257 passed in general registers.
9259 Using a PARALLEL with a word mode register results in left
9260 justified data on a big-endian target. */
9262 rtx loc[8];
9263 int i, offset = 0, ub = arg_size;
9265 /* Align the base register. */
9266 gpr_reg_base -= alignment;
9268 ub = MIN (ub, max_arg_words - cum->words - alignment);
9269 for (i = 0; i < ub; i++)
9271 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9272 gen_rtx_REG (DImode, gpr_reg_base),
9273 GEN_INT (offset));
9274 gpr_reg_base -= 1;
9275 offset += 8;
9278 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9281 else
9283 /* If the argument is larger than a word, then we know precisely
9284 which registers we must use. */
9285 if (arg_size > 1)
9287 if (cum->words)
9289 gpr_reg_base = 23;
9290 fpr_reg_base = 38;
9292 else
9294 gpr_reg_base = 25;
9295 fpr_reg_base = 34;
9298 /* Structures 5 to 8 bytes in size are passed in the general
9299 registers in the same manner as other non floating-point
9300 objects. The data is right-justified and zero-extended
9301 to 64 bits. This is opposite to the normal justification
9302 used on big endian targets and requires special treatment.
9303 We now define BLOCK_REG_PADDING to pad these objects.
9304 Aggregates, complex and vector types are passed in the same
9305 manner as structures. */
9306 if (mode == BLKmode
9307 || (type && (AGGREGATE_TYPE_P (type)
9308 || TREE_CODE (type) == COMPLEX_TYPE
9309 || TREE_CODE (type) == VECTOR_TYPE)))
9311 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9312 gen_rtx_REG (DImode, gpr_reg_base),
9313 const0_rtx);
9314 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9317 else
9319 /* We have a single word (32 bits). A simple computation
9320 will get us the register #s we need. */
9321 gpr_reg_base = 26 - cum->words;
9322 fpr_reg_base = 32 + 2 * cum->words;
9326 /* Determine if the argument needs to be passed in both general and
9327 floating point registers. */
9328 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9329 /* If we are doing soft-float with portable runtime, then there
9330 is no need to worry about FP regs. */
9331 && !TARGET_SOFT_FLOAT
9332 /* The parameter must be some kind of scalar float, else we just
9333 pass it in integer registers. */
9334 && GET_MODE_CLASS (mode) == MODE_FLOAT
9335 /* The target function must not have a prototype. */
9336 && cum->nargs_prototype <= 0
9337 /* libcalls do not need to pass items in both FP and general
9338 registers. */
9339 && type != NULL_TREE
9340 /* All this hair applies to "outgoing" args only. This includes
9341 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9342 && !cum->incoming)
9343 /* Also pass outgoing floating arguments in both registers in indirect
9344 calls with the 32-bit ABI and the HP assembler since there is no
9345 way to specify argument locations in static functions. */
9346 || (!TARGET_64BIT
9347 && !TARGET_GAS
9348 && !cum->incoming
9349 && cum->indirect
9350 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9351 {
9352 retval
9353 = gen_rtx_PARALLEL
9354 (mode,
9355 gen_rtvec (2,
9356 gen_rtx_EXPR_LIST (VOIDmode,
9357 gen_rtx_REG (mode, fpr_reg_base),
9358 const0_rtx),
9359 gen_rtx_EXPR_LIST (VOIDmode,
9360 gen_rtx_REG (mode, gpr_reg_base),
9361 const0_rtx)));
9362 }
9363 else
9364 {
9365 /* See if we should pass this parameter in a general register. */
9366 if (TARGET_SOFT_FLOAT
9367 /* Indirect calls in the normal 32-bit ABI require all arguments
9368 to be passed in general registers. */
9369 || (!TARGET_PORTABLE_RUNTIME
9370 && !TARGET_64BIT
9371 && !TARGET_ELF32
9372 && cum->indirect)
9373 /* If the parameter is not a scalar floating-point parameter,
9374 then it belongs in GPRs. */
9375 || GET_MODE_CLASS (mode) != MODE_FLOAT
9376 /* Structure with single SFmode field belongs in GPR. */
9377 || (type && AGGREGATE_TYPE_P (type)))
9378 retval = gen_rtx_REG (mode, gpr_reg_base);
9379 else
9380 retval = gen_rtx_REG (mode, fpr_reg_base);
9381 }
9382 return retval;
9383 }
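/* Annotation (usage sketch, not in the original source): the
   pass-in-both path above fires for unprototyped callees, e.g.

       extern void f ();   -- no prototype, so nargs_prototype <= 0
       f (1.5);            -- 1.5 is passed in both FP and general regs

   letting the callee fetch the argument from whichever register file
   its unseen definition expects. */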
9386 /* If this arg would be passed totally in registers or totally on the stack,
9387 then this routine should return zero. */
9389 static int
9390 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9391 tree type, bool named ATTRIBUTE_UNUSED)
9392 {
9393 unsigned int max_arg_words = 8;
9394 unsigned int offset = 0;
9396 if (!TARGET_64BIT)
9397 return 0;
9399 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9400 offset = 1;
9402 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9403 /* Arg fits fully into registers. */
9404 return 0;
9405 else if (cum->words + offset >= max_arg_words)
9406 /* Arg fully on the stack. */
9407 return 0;
9408 else
9409 /* Arg is split. */
9410 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9411 }
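/* Annotation (worked example, not in the original source): on the
   64-bit target with six argument words used (cum->words == 6), a
   32-byte aggregate (FUNCTION_ARG_SIZE == 4) gets offset == 0, fits
   neither fully in registers (6 + 4 > 8) nor fully on the stack
   (6 < 8), so the function reports (8 - 6 - 0) * 8 == 16 bytes passed
   in registers; the remaining 16 bytes go on the stack. */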
9414 /* A get_unnamed_section callback for switching to the text section.
9416 This function is only used with SOM. Because we don't support
9417 named subspaces, we can only create a new subspace or switch back
9418 to the default text subspace. */
9420 static void
9421 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9422 {
9423 gcc_assert (TARGET_SOM);
9424 if (TARGET_GAS)
9425 {
9426 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9427 {
9428 /* We only want to emit a .nsubspa directive once at the
9429 start of the function. */
9430 cfun->machine->in_nsubspa = 1;
9432 /* Create a new subspace for the text. This provides
9433 better stub placement and one-only functions. */
9434 if (cfun->decl
9435 && DECL_ONE_ONLY (cfun->decl)
9436 && !DECL_WEAK (cfun->decl))
9437 {
9438 output_section_asm_op ("\t.SPACE $TEXT$\n"
9439 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9440 "ACCESS=44,SORT=24,COMDAT");
9441 return;
9442 }
9443 }
9444 else
9445 {
9446 /* There isn't a current function or the body of the current
9447 function has been completed. So, we are changing to the
9448 text section to output debugging information. Thus, we
9449 need to forget that we are in the text section so that
9450 varasm.c will call us when text_section is selected again. */
9451 gcc_assert (!cfun || !cfun->machine
9452 || cfun->machine->in_nsubspa == 2);
9453 in_section = NULL;
9454 }
9455 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9456 return;
9457 }
9458 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9459 }
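/* Annotation (output sketch, not in the original source): with GAS,
   the first text-section switch inside a one-only, non-weak function
   emits

       .SPACE $TEXT$
       .NSUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=24,COMDAT

   other switches under GAS emit the plain .NSUBSPA $CODE$ form, and
   the HP assembler always gets .SUBSPA $CODE$. */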
9461 /* A get_unnamed_section callback for switching to comdat data
9462 sections. This function is only used with SOM. */
9464 static void
9465 som_output_comdat_data_section_asm_op (const void *data)
9466 {
9467 in_section = NULL;
9468 output_section_asm_op (data);
9469 }
9471 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
9473 static void
9474 pa_som_asm_init_sections (void)
9475 {
9476 text_section
9477 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9479 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9480 is not being generated. */
9481 som_readonly_data_section
9482 = get_unnamed_section (0, output_section_asm_op,
9483 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9485 /* When secondary definitions are not supported, SOM makes readonly
9486 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9487 the comdat flag. */
9488 som_one_only_readonly_data_section
9489 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9490 "\t.SPACE $TEXT$\n"
9491 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9492 "ACCESS=0x2c,SORT=16,COMDAT");
9495 /* When secondary definitions are not supported, SOM makes data one-only
9496 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9497 som_one_only_data_section
9498 = get_unnamed_section (SECTION_WRITE,
9499 som_output_comdat_data_section_asm_op,
9500 "\t.SPACE $PRIVATE$\n"
9501 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9502 "ACCESS=31,SORT=24,COMDAT");
9504 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9505 which reference data within the $TEXT$ space (for example constant
9506 strings in the $LIT$ subspace).
9508 The assemblers (GAS and HP as) both have problems handling the
9509 difference of two symbols, which is the other correct way to
9510 reference constant data during PIC code generation.
9512 So, there's no way to reference constant data which is in the
9513 $TEXT$ space during PIC generation. Instead, place all constant
9514 data into the $PRIVATE$ subspace (this reduces sharing, but it
9515 works correctly). */
9516 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9518 /* We must not have a reference to an external symbol defined in a
9519 shared library in a readonly section, else the SOM linker will
9520 complain.
9522 So, we force exception information into the data section. */
9523 exception_section = data_section;
9524 }
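/* Annotation (output sketch, not in the original source): selecting
   som_one_only_readonly_data_section therefore emits

       .SPACE $TEXT$
       .NSUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16,COMDAT

   and, because the comdat callback clears in_section, each selection
   re-emits its directive instead of being treated as cached. */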
9526 /* On hpux10, the linker will give an error if we have a reference
9527 in the read-only data section to a symbol defined in a shared
9528 library. Therefore, expressions that might require a reloc
9529 cannot be placed in the read-only data section.
9531 static section *
9532 pa_select_section (tree exp, int reloc,
9533 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9534 {
9535 if (TREE_CODE (exp) == VAR_DECL
9536 && TREE_READONLY (exp)
9537 && !TREE_THIS_VOLATILE (exp)
9538 && DECL_INITIAL (exp)
9539 && (DECL_INITIAL (exp) == error_mark_node
9540 || TREE_CONSTANT (DECL_INITIAL (exp)))
9541 && !reloc)
9542 {
9543 if (TARGET_SOM
9544 && DECL_ONE_ONLY (exp)
9545 && !DECL_WEAK (exp))
9546 return som_one_only_readonly_data_section;
9547 else
9548 return readonly_data_section;
9549 }
9550 else if (CONSTANT_CLASS_P (exp) && !reloc)
9551 return readonly_data_section;
9552 else if (TARGET_SOM
9553 && TREE_CODE (exp) == VAR_DECL
9554 && DECL_ONE_ONLY (exp)
9555 && !DECL_WEAK (exp))
9556 return som_one_only_data_section;
9557 else
9558 return data_section;
9559 }
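/* Annotation (examples, not in the original source): a non-PIC
   "static const char msg[] = ..." with no relocations selects
   readonly_data_section (which pa_som_asm_init_sections redirects to
   the data section under PIC); a one-only, non-weak variable on SOM
   selects the matching comdat section; everything else falls through
   to data_section. */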
9561 static void
9562 pa_globalize_label (FILE *stream, const char *name)
9563 {
9564 /* We only handle DATA objects here; functions are globalized in
9565 ASM_DECLARE_FUNCTION_NAME. */
9566 if (! FUNCTION_NAME_P (name))
9567 {
9568 fputs ("\t.EXPORT ", stream);
9569 assemble_name (stream, name);
9570 fputs (",DATA\n", stream);
9571 }
9572 }
9574 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9576 static rtx
9577 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9578 int incoming ATTRIBUTE_UNUSED)
9579 {
9580 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9581 }
9583 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9585 bool
9586 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9587 {
9588 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9589 PA64 ABI says that objects larger than 128 bits are returned in memory.
9590 Note that int_size_in_bytes can return -1 if the size of the object is
9591 variable or larger than the maximum value that can be expressed as
9592 a HOST_WIDE_INT. It can also return zero for an empty type. The
9593 simplest way to handle variable and empty types is to pass them in
9594 memory. This avoids problems in defining the boundaries of argument
9595 slots, allocating registers, etc. */
9596 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9597 || int_size_in_bytes (type) <= 0);
9598 }
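/* Annotation (examples, not in the original source): a 12-byte struct
   is returned in memory on the 32-bit target (12 > 8) but in registers
   on PA64 (12 <= 16); an empty struct (size 0) and a variably sized
   object (size -1) both satisfy the <= 0 test and are returned in
   memory on either target. */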
9600 /* Structure to hold declaration and name of external symbols that are
9601 emitted by GCC. We generate a vector of these symbols and output them
9602 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9603 This avoids putting out names that are never really used. */
9605 typedef struct extern_symbol GTY(())
9606 {
9607 tree decl;
9608 const char *name;
9609 } extern_symbol;
9611 /* Define gc'd vector type for extern_symbol. */
9612 DEF_VEC_O(extern_symbol);
9613 DEF_VEC_ALLOC_O(extern_symbol,gc);
9615 /* Vector of extern_symbol objects. */
9616 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9618 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9619 /* Mark DECL (name NAME) as an external reference (assembler output
9620 file FILE). This saves the names to output at the end of the file
9621 if actually referenced. */
9623 void
9624 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9625 {
9626 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9628 gcc_assert (file == asm_out_file);
9629 p->decl = decl;
9630 p->name = name;
9631 }
9633 /* Output text required at the end of an assembler file.
9634 This includes deferred plabels and .import directives for
9635 all external symbols that were actually referenced. */
9637 static void
9638 pa_hpux_file_end (void)
9639 {
9640 unsigned int i;
9641 extern_symbol *p;
9643 if (!NO_DEFERRED_PROFILE_COUNTERS)
9644 output_deferred_profile_counters ();
9646 output_deferred_plabels ();
9648 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9649 {
9650 tree decl = p->decl;
9652 if (!TREE_ASM_WRITTEN (decl)
9653 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9654 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9655 }
9657 VEC_free (extern_symbol, gc, extern_symbols);
9658 }
9659 #endif
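/* Annotation (behavior sketch, not in the original source): an
   "extern int foo;" that is declared but never referenced produces no
   directive, while a genuine reference sets SYMBOL_REF_REFERENCED_P
   and the end of the file gains the corresponding
   ASM_OUTPUT_EXTERNAL_REAL output (typically an .IMPORT directive on
   this target). */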
9661 /* Return true if a change from mode FROM to mode TO for a register
9662 in register class RCLASS is invalid. */
9664 bool
9665 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9666 enum reg_class rclass)
9667 {
9668 if (from == to)
9669 return false;
9671 /* Reject changes to/from complex and vector modes. */
9672 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9673 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9674 return true;
9676 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9677 return false;
9679 /* There is no way to load QImode or HImode values directly from
9680 memory. SImode loads to the FP registers are not zero extended.
9681 On the 64-bit target, this conflicts with the definition of
9682 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9683 with different sizes in the floating-point registers. */
9684 if (MAYBE_FP_REG_CLASS_P (rclass))
9685 return true;
9687 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9688 in specific sets of registers. Thus, we cannot allow changing
9689 to a larger mode when it's larger than a word. */
9690 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9691 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9692 return true;
9694 return false;
9695 }
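/* Annotation (examples, not in the original source): SFmode <-> SImode
   (equal sizes) is allowed for any class. On the 64-bit target,
   SImode <-> DImode is rejected only for the FP register classes; on
   the 32-bit target, widening to DImode or TImode is rejected for
   every class because the new mode is wider than a word. */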
9697 /* Returns TRUE if it is a good idea to tie two pseudo registers
9698 when one has mode MODE1 and one has mode MODE2.
9699 If HARD_REGNO_MODE_OK could produce different values for MODE1
9700 and MODE2 for any hard reg, then this must be FALSE for correct output.
9702 We should return FALSE for QImode and HImode because these modes
9703 are not ok in the floating-point registers. However, this prevents
9704 tying these modes to SImode and DImode in the general registers.
9705 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9706 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9707 in the floating-point registers. */
9709 bool
9710 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9711 {
9712 /* Don't tie modes in different classes. */
9713 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9714 return false;
9716 return true;
9717 }
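/* Annotation (examples, not in the original source): SImode and DImode
   are both MODE_INT and so may be tied; SImode and SFmode fall in
   different mode classes (MODE_INT vs. MODE_FLOAT) and may not. */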
9719 #include "gt-pa.h"