/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "obstack.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
static int hppa_use_dfa_pipeline_interface (void);

#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE hppa_use_dfa_pipeline_interface

static int
hppa_use_dfa_pipeline_interface (void)
{
  return 1;
}
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
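/* Note the check below only requires that the producer and the store
   move the same number of bytes; e.g. an SImode result feeding an
   SFmode store (both 4 bytes) qualifies, while a DImode result
   feeding an SFmode store does not.  */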
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static int shadd_constant_p (int);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movstr_length (rtx);
static int compute_clrstr_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, int, int);
static void store_reg_modify (int, int, int);
static void load_reg (int, int, int);
static void set_reg_plus_d (int, int, int, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static struct deferred_plabel *get_plabel (const char *)
     ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu;

/* String to hold which cpu we are scheduling for.  */
const char *pa_cpu_string;

/* Which architecture we are generating code for.  */
enum architecture_type pa_arch;

/* String to hold which architecture we are generating code for.  */
const char *pa_arch_string;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  const char *name;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END output_deferred_plabels

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

struct gcc_target targetm = TARGET_INITIALIZER;
void
override_options (void)
{
  if (pa_cpu_string == NULL)
    pa_cpu_string = TARGET_SCHED_DEFAULT;

  if (! strcmp (pa_cpu_string, "8000"))
    {
      pa_cpu_string = "8000";
      pa_cpu = PROCESSOR_8000;
    }
  else if (! strcmp (pa_cpu_string, "7100"))
    {
      pa_cpu_string = "7100";
      pa_cpu = PROCESSOR_7100;
    }
  else if (! strcmp (pa_cpu_string, "700"))
    {
      pa_cpu_string = "700";
      pa_cpu = PROCESSOR_700;
    }
  else if (! strcmp (pa_cpu_string, "7100LC"))
    {
      pa_cpu_string = "7100LC";
      pa_cpu = PROCESSOR_7100LC;
    }
  else if (! strcmp (pa_cpu_string, "7200"))
    {
      pa_cpu_string = "7200";
      pa_cpu = PROCESSOR_7200;
    }
  else if (! strcmp (pa_cpu_string, "7300"))
    {
      pa_cpu_string = "7300";
      pa_cpu = PROCESSOR_7300;
    }
  else
    {
      warning ("unknown -mschedule= option (%s).\nValid options are 700, 7100, 7100LC, 7200, 7300, and 8000\n", pa_cpu_string);
    }

  /* Set the instruction set architecture.  */
  if (pa_arch_string && ! strcmp (pa_arch_string, "1.0"))
    {
      pa_arch_string = "1.0";
      pa_arch = ARCHITECTURE_10;
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
    }
  else if (pa_arch_string && ! strcmp (pa_arch_string, "1.1"))
    {
      pa_arch_string = "1.1";
      pa_arch = ARCHITECTURE_11;
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
    }
  else if (pa_arch_string && ! strcmp (pa_arch_string, "2.0"))
    {
      pa_arch_string = "2.0";
      pa_arch = ARCHITECTURE_20;
      target_flags |= MASK_PA_11 | MASK_PA_20;
    }
  else if (pa_arch_string)
    {
      warning ("unknown -march= option (%s).\nValid options are 1.0, 1.1, and 2.0\n", pa_arch_string);
    }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning ("PIC code generation is not supported in the portable runtime model\n");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning ("PIC code generation is not compatible with fast indirect calls\n");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning ("-g is only supported when using GAS on this processor,");
      warning ("-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
#endif
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return nonzero only if OP is a register of mode MODE,
   or CONST0_RTX.  */
int
reg_or_0_operand (rtx op, enum machine_mode mode)
{
  return (op == CONST0_RTX (mode) || register_operand (op, mode));
}

/* Return nonzero if OP is suitable for use in a call to a named
   function.

   For 2.5 try to eliminate either call_operand_address or
   function_label_operand; they perform very similar functions.  */
int
call_operand_address (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_MODE (op) == word_mode
	  && CONSTANT_P (op) && ! TARGET_PORTABLE_RUNTIME);
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

int
symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (op, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Return truth value of statement that OP is a symbolic memory
   operand of mode MODE.  */

int
symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) != MEM)
    return 0;
  op = XEXP (op, 0);
  return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
	  || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
}

/* Return 1 if the operand is either a register, zero, or a memory operand
   that is not symbolic.  */

int
reg_or_0_or_nonsymb_mem_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (op == CONST0_RTX (mode))
    return 1;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM)
    return 0;

  /* Until problems with management of the REG_POINTER flag are resolved,
     we need to delay creating move insns with unscaled indexed addresses
     until CSE is not expected.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (XEXP (op, 0)) == PLUS
      && REG_P (XEXP (XEXP (op, 0), 0))
      && REG_P (XEXP (XEXP (op, 0), 1)))
    return 0;

  return (!symbolic_memory_operand (op, mode)
	  && memory_address_p (mode, XEXP (op, 0)));
}

/* Return 1 if the operand is a register operand or a non-symbolic memory
   operand after reload.  This predicate is used for branch patterns that
   internally handle register reloading.  We need to accept non-symbolic
   memory operands after reload to ensure that the pattern is still valid
   if reload didn't find a hard register for the operand.  */

int
reg_before_reload_operand (rtx op, enum machine_mode mode)
{
  /* Don't accept a SUBREG since it will need a reload.  */
  if (GET_CODE (op) == SUBREG)
    return 0;

  if (register_operand (op, mode))
    return 1;

  if (reload_completed
      && memory_operand (op, mode)
      && !symbolic_memory_operand (op, mode))
    return 1;

  return 0;
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
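/* Roughly, per the constraint letters defined in pa.h: 'J' is a
   signed 14-bit immediate (ldo), 'N' is a constant that ldil can
   load with its left-shifted 21-bit immediate, and 'K' is anything
   a single zdepi can deposit.  */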
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
	  || CONST_OK_FOR_LETTER_P (intval, 'N')
	  || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
/* Return 1 iff OP is an indexed memory operand.  */
int
indexed_memory_operand (rtx op, enum machine_mode mode)
{
  if (GET_MODE (op) != mode)
    return 0;

  /* Before reload, a (SUBREG (MEM...)) forces reloading into a register.  */
  if (reload_completed && GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM || symbolic_memory_operand (op, mode))
    return 0;

  op = XEXP (op, 0);

  return (memory_address_p (mode, op) && IS_INDEX_ADDR_P (op));
}

/* Accept anything that can be used as a destination operand for a
   move instruction.  We don't accept indexed memory operands since
   they are supported only for floating point stores.  */
int
move_dest_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_MODE (op) != mode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM || symbolic_memory_operand (op, mode))
    return 0;

  op = XEXP (op, 0);

  return (memory_address_p (mode, op)
	  && !IS_INDEX_ADDR_P (op)
	  && !IS_LO_SUM_DLT_ADDR_P (op));
}

/* Accept anything that can be used as a source operand for a move
   instruction.  */
int
move_src_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONSTANT_P_RTX)
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return cint_ok_for_move (INTVAL (op));

  if (GET_MODE (op) != mode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM)
    return 0;

  /* Until problems with management of the REG_POINTER flag are resolved,
     we need to delay creating move insns with unscaled indexed addresses
     until CSE is not expected.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (XEXP (op, 0)) == PLUS
      && REG_P (XEXP (XEXP (op, 0), 0))
      && REG_P (XEXP (XEXP (op, 0), 1)))
    return 0;

  return memory_address_p (mode, XEXP (op, 0));
}

/* Accept REG and any CONST_INT that can be moved in one instruction into a
   general register.  */
int
reg_or_cint_move_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  return (GET_CODE (op) == CONST_INT && cint_ok_for_move (INTVAL (op)));
}

int
pic_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (!flag_pic)
    return 0;

  switch (GET_CODE (op))
    {
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return (GET_CODE (XEXP (op, 0)) == LABEL_REF
	      && GET_CODE (XEXP (op, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}

/* Return truth value of whether OP can be used as an operand in a
   three operand arithmetic insn that accepts registers of mode MODE
   or 14-bit signed integers.  */
int
arith_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && INT_14_BITS (op)));
}

/* Return truth value of whether OP can be used as an operand in a
   three operand arithmetic insn that accepts registers of mode MODE
   or 11-bit signed integers.  */
int
arith11_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && INT_11_BITS (op)));
}

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* A constant integer suitable for use in a PRE_MODIFY memory
   reference.  */
int
pre_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
	  && INTVAL (op) >= -0x2000 && INTVAL (op) < 0x10);
}

/* A constant integer suitable for use in a POST_MODIFY memory
   reference.  */
int
post_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
	  && INTVAL (op) < 0x2000 && INTVAL (op) >= -0x10);
}
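/* Accept a register of mode MODE, or a CONST_DOUBLE of the same mode
   whose low word is a 14-bit signed value and whose high word agrees
   in sign with bit 13 of the low word; in effect, a double-word
   constant that looks like a sign-extended 14-bit immediate.  */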
int
arith_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_DOUBLE
	      && GET_MODE (op) == mode
	      && VAL_14_BITS_P (CONST_DOUBLE_LOW (op))
	      && ((CONST_DOUBLE_HIGH (op) >= 0)
		  == ((CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in three-address insns, or
   is an integer register.  */

int
ireg_or_int5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ((GET_CODE (op) == CONST_INT && INT_5_BITS (op))
	  || (GET_CODE (op) == REG && REGNO (op) > 0 && REGNO (op) < 32));
}

/* Return nonzero if OP is an integer register, else return zero.  */
int
ireg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == REG && REGNO (op) > 0 && REGNO (op) < 32);
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in three-address insns.  */

int
int5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_5_BITS (op));
}

int
uint5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_U5_BITS (op));
}

int
int11_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_11_BITS (op));
}
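/* Accept a 32-bit unsigned constant.  On hosts where HOST_WIDE_INT is
   wider than 32 bits, every such value is a CONST_INT in [0, 2^32).
   On a 32-bit host a CONST_INT already has at most 32 significant
   bits, and wider values arrive as a CONST_DOUBLE, accepted only
   when the high word is zero.  */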
int
uint32_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if HOST_BITS_PER_WIDE_INT > 32
  /* All allowed constants will fit a CONST_INT.  */
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) >= 0 && INTVAL (op) < (HOST_WIDE_INT) 1 << 32));
#else
  return (GET_CODE (op) == CONST_INT
	  || (GET_CODE (op) == CONST_DOUBLE
	      && CONST_DOUBLE_HIGH (op) == 0));
#endif
}

int
arith5_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || int5_operand (op, mode);
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
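/* For example, x = 0x110 (1 0001 0000) passes: lsb_mask = 0x10 and
   t = ((0x11 + 0x10) & ~0xf) = 0x20, a power of two, so zdepi can
   deposit the sign-extended 5-bit field 10001 at bit 4.  For
   x = 0x21 (10 0001), t = 3, not a power of two; six significant
   bits cannot come from a single sign-extended 5-bit immediate, so
   the test fails as it should.  */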
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1  */
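/* For example, mask = 0xfffff00f (1...10...01...1): ~mask = 0xff0,
   and adding its low set bit (0x10) gives 0x1000, a power of two,
   so the run of zeros is contiguous and the mask is accepted.  For
   mask = 0xff00ff00, ~mask = 0x00ff00ff has two runs of ones; the
   sum 0x00ff0100 is not a power of two and the mask is rejected.  */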
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi or extru can be used to compute (reg & OP).  */
int
and_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && and_mask_p (INTVAL (op))));
}

/* True iff depi can be used to compute (reg | MASK).  */
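/* That is, MASK must be a single contiguous block of 1 bits, e.g.
   0x00ff0000, so that depi can deposit all-ones into that field;
   a mask with two runs, such as 0x00ff00ff, is rejected.  */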
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | OP).  */
int
ior_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && ior_mask_p (INTVAL (op)));
}

int
lhs_lshift_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || lhs_lshift_cint_operand (op, mode);
}

/* True iff OP is a CONST_INT of the forms 0...0xxxx or 0...01...1xxxx.
   Such values can be the left hand side x in (x << r), using the zvdepi
   instruction.  */
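/* The check shifts off the low four bits and requires the rest to be
   of the form 0...01...1.  For example, 0x3f7 >> 4 = 0x3f = 111111,
   and 0x3f & 0x40 == 0, so 0x3f7 is accepted; 0x13f >> 4 = 0x13 =
   10011, and 0x13 & 0x14 != 0, so 0x13f is rejected.  */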
int
lhs_lshift_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT x;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  x = INTVAL (op) >> 4;
  return (x & (x + 1)) == 0;
}

int
arith32_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || GET_CODE (op) == CONST_INT;
}

int
pc_or_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == PC || GET_CODE (op) == LABEL_REF);
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      if (reg == 0)
	abort ();

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
		      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
				    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
	= gen_rtx_MEM (Pmode,
		       gen_rtx_LO_SUM (Pmode, tmp_reg,
				       gen_rtx_UNSPEC (Pmode,
						       gen_rtvec (1, orig),
						       0)));

      current_function_uses_pic_offset_table = 1;
      MEM_NOTRAP_P (pic_ref) = 1;
      RTX_UNCHANGING_P (pic_ref) = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      if (reg == 0)
	abort ();

      if (GET_CODE (XEXP (orig, 0)) == PLUS)
	{
	  base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
	  orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
					 base == reg ? 0 : reg);
	}
      else
	abort ();

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine can not
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
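/* Worked example: for a word access at X + 0x5432 the mask is 0x3fff;
   0x5432 & 0x3fff = 0x1432, which is below the 0x2000 midpoint, so we
   round down to Y = 0x4000.  We then compute Z = X + 0x4000 and
   address memory (Z + 0x1432), leaving a residual displacement that
   fits in 14 bits.  */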
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == 'o'
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }
  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */
  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */

static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
    abort ();

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      if (no_new_pseudos)
	abort ();

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }
  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 bits, including
     (subreg (mem (addr))) cases.  */
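  /* For example, an FP load from (mem (plus (reg) (const_int 4096)))
     needs this treatment: 4096 does not fit in the short FP
     displacement, so the address is computed into SCRATCH_REG first
     (since 4096 does fit in 14 bits, the move expander can do that
     with a single ldo) and the load is emitted as (mem SCRATCH_REG).  */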
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode,
					 XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.  Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1)))))
	   && scratch_reg)
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, gen_rtx_MEM (GET_MODE (operand0),
						    scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  if (GET_CODE (const_part) != CONST_INT)
		    abort ();

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

1893 if (flag_pic)
1895 rtx temp;
1897 if (reload_in_progress || reload_completed)
1899 temp = scratch_reg ? scratch_reg : operand0;
1900 /* TEMP will hold an address and maybe the actual
1901 data. We want it in WORD_MODE regardless of what mode it
1902 was originally given to us. */
1903 temp = force_mode (word_mode, temp);
1905 else
1906 temp = gen_reg_rtx (Pmode);
1908 /* (const (plus (symbol) (const_int))) must be forced to
1909 memory during/after reload if the const_int will not fit
1910 in 14 bits. */
1911 if (GET_CODE (operand1) == CONST
1912 && GET_CODE (XEXP (operand1, 0)) == PLUS
1913 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
1914 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
1915 && (reload_completed || reload_in_progress)
1916 && flag_pic)
1918 operands[1] = force_const_mem (mode, operand1);
1919 operands[1] = legitimize_pic_address (XEXP (operands[1], 0),
1920 mode, temp);
1921 emit_move_sequence (operands, mode, temp);
1923 else
1925 operands[1] = legitimize_pic_address (operand1, mode, temp);
1926 if (REG_P (operand0) && REG_P (operands[1]))
1927 copy_reg_pointer (operand0, operands[1]);
1928 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
1931 /* On the HPPA, references to data space are supposed to use dp,
1932 register 27, but showing it in the RTL inhibits various cse
1933 and loop optimizations. */
1934 else
1936 rtx temp, set;
1938 if (reload_in_progress || reload_completed)
1940 temp = scratch_reg ? scratch_reg : operand0;
1941 /* TEMP will hold an address and maybe the actual
1942 data. We want it in WORD_MODE regardless of what mode it
1943 was originally given to us. */
1944 temp = force_mode (word_mode, temp);
1946 else
1947 temp = gen_reg_rtx (mode);
1949 /* Loading a SYMBOL_REF into a register makes that register
1950 safe to be used as the base in an indexed address.
1952 Don't mark hard registers though. That loses. */
1953 if (GET_CODE (operand0) == REG
1954 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1955 mark_reg_pointer (operand0, BITS_PER_UNIT);
1956 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
1957 mark_reg_pointer (temp, BITS_PER_UNIT);
1959 if (ishighonly)
1960 set = gen_rtx_SET (mode, operand0, temp);
1961 else
1962 set = gen_rtx_SET (VOIDmode,
1963 operand0,
1964 gen_rtx_LO_SUM (mode, temp, operand1));
1966 emit_insn (gen_rtx_SET (VOIDmode,
1967 temp,
1968 gen_rtx_HIGH (mode, operand1)));
1969 emit_insn (set);
1972 return 1;
1974 else if (GET_CODE (operand1) != CONST_INT
1975 || ! cint_ok_for_move (INTVAL (operand1)))
1977 rtx extend = NULL_RTX;
1978 rtx temp;
1980 if (TARGET_64BIT && GET_CODE (operand1) == CONST_INT
1981 && HOST_BITS_PER_WIDE_INT > 32
1982 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1984 HOST_WIDE_INT val = INTVAL (operand1);
1985 HOST_WIDE_INT nval;
1987 /* Extract the low order 32 bits of the value and sign extend.
1988 If the new value is the same as the original value, we can
1989 use the original value as-is. If the new value is
1990 different, we use it and insert the most-significant 32 bits
1991 of the original value into the final result. */
1992 nval = ((val & (((HOST_WIDE_INT) 2 << 31) - 1))
1993 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
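/* Worked example (illustrative): for val = 0x123456789 the low
   32 bits are 0x23456789 with bit 31 clear, so nval = 0x23456789;
   val != nval, EXTEND becomes 0x1 and is deposited into the upper
   32 bits below. A value such as 0xffffffff80000001 sign-extends
   to itself and is used as-is. */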
1994 if (val != nval)
1996 #if HOST_BITS_PER_WIDE_INT > 32
1997 extend = GEN_INT (val >> 32);
1998 #endif
1999 operand1 = GEN_INT (nval);
2003 if (reload_in_progress || reload_completed)
2004 temp = operand0;
2005 else
2006 temp = gen_reg_rtx (mode);
2008 /* We don't directly split DImode constants on 32-bit targets
2009 because PLUS uses an 11-bit immediate and the insn sequence
2010 generated is not as efficient as the one using HIGH/LO_SUM. */
2011 if (GET_CODE (operand1) == CONST_INT
2012 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2014 /* Directly break constant into high and low parts. This
2015 provides better optimization opportunities because various
2016 passes recognize constants split with PLUS but not LO_SUM.
2017 We use a 14-bit signed low part except when the addition
2018 of 0x4000 to the high part might change the sign of the
2019 high part. */
2020 HOST_WIDE_INT value = INTVAL (operand1);
2021 HOST_WIDE_INT low = value & 0x3fff;
2022 HOST_WIDE_INT high = value & ~ 0x3fff;
2024 if (low >= 0x2000)
2026 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2027 high += 0x2000;
2028 else
2029 high += 0x4000;
2032 low = value - high;
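/* Worked example (illustrative): value = 0x12347000 gives
   low = 0x3000 >= 0x2000, so high is bumped to 0x12348000 and
   low is recomputed as -0x1000; the constant is then built as
   (plus 0x12348000 -0x1000), each half reachable by a single
   insn. */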
2034 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2035 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2037 else
2039 emit_insn (gen_rtx_SET (VOIDmode, temp,
2040 gen_rtx_HIGH (mode, operand1)));
2041 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2044 emit_move_insn (operands[0], operands[1]);
2046 if (extend != NULL_RTX)
2047 emit_insn (gen_insv (operands[0], GEN_INT (32), const0_rtx,
2048 extend));
2050 return 1;
2053 /* Now have insn-emit do whatever it normally does. */
2054 return 0;
2057 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2058 it will need a link/runtime reloc). */
2060 int
2061 reloc_needed (tree exp)
2063 int reloc = 0;
2065 switch (TREE_CODE (exp))
2067 case ADDR_EXPR:
2068 return 1;
2070 case PLUS_EXPR:
2071 case MINUS_EXPR:
2072 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2073 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2074 break;
2076 case NOP_EXPR:
2077 case CONVERT_EXPR:
2078 case NON_LVALUE_EXPR:
2079 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2080 break;
2082 case CONSTRUCTOR:
2084 register tree link;
2085 for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
2086 if (TREE_VALUE (link) != 0)
2087 reloc |= reloc_needed (TREE_VALUE (link));
2089 break;
2091 case ERROR_MARK:
2092 break;
2094 default:
2095 break;
2097 return reloc;
2100 /* Does operand (which is a symbolic_operand) live in text space?
2101 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2102 will be true. */
2104 int
2105 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2107 if (GET_CODE (operand) == CONST)
2108 operand = XEXP (XEXP (operand, 0), 0);
2109 if (flag_pic)
2111 if (GET_CODE (operand) == SYMBOL_REF)
2112 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2114 else
2116 if (GET_CODE (operand) == SYMBOL_REF)
2117 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2119 return 1;
2123 /* Return the best assembler insn template
2124 for moving operands[1] into operands[0] as a fullword. */
2125 const char *
2126 singlemove_string (rtx *operands)
2128 HOST_WIDE_INT intval;
2130 if (GET_CODE (operands[0]) == MEM)
2131 return "stw %r1,%0";
2132 if (GET_CODE (operands[1]) == MEM)
2133 return "ldw %1,%0";
2134 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2136 long i;
2137 REAL_VALUE_TYPE d;
2139 if (GET_MODE (operands[1]) != SFmode)
2140 abort ();
2142 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2143 bit pattern. */
2144 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2145 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2147 operands[1] = GEN_INT (i);
2148 /* Fall through to CONST_INT case. */
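/* Illustrative picks for the cases below: 42 fits in 14 bits and
   uses ldi; 0x12345800 has its low 11 bits clear and needs only
   ldil; 0x3fff0 (one contiguous bit field) can be synthesized
   with zdepi; anything else takes the two-insn ldil/ldo pair. */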
2150 if (GET_CODE (operands[1]) == CONST_INT)
2152 intval = INTVAL (operands[1]);
2154 if (VAL_14_BITS_P (intval))
2155 return "ldi %1,%0";
2156 else if ((intval & 0x7ff) == 0)
2157 return "ldil L'%1,%0";
2158 else if (zdepi_cint_p (intval))
2159 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2160 else
2161 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2163 return "copy %1,%0";
2167 /* Compute position (in OP[1]) and width (in OP[2])
2168 useful for copying IMM to a register using the zdepi
2169 instructions. Store the immediate value to insert in OP[0]. */
2170 static void
2171 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2173 int lsb, len;
2175 /* Find the least significant set bit in IMM. */
2176 for (lsb = 0; lsb < 32; lsb++)
2178 if ((imm & 1) != 0)
2179 break;
2180 imm >>= 1;
2183 /* Choose variants based on the *sign* of the 5-bit field. */
2184 if ((imm & 0x10) == 0)
2185 len = (lsb <= 28) ? 4 : 32 - lsb;
2186 else
2188 /* Find the width of the bitstring in IMM. */
2189 for (len = 5; len < 32; len++)
2191 if ((imm & (1 << len)) == 0)
2192 break;
2195 /* Sign extend IMM as a 5-bit value. */
2196 imm = (imm & 0xf) - 0x10;
2199 op[0] = imm;
2200 op[1] = 31 - lsb;
2201 op[2] = len;
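/* Worked example (illustrative): imm = 0x00070000 gives lsb = 16
   and a positive low field, so op[0] = 7, op[1] = 31 - 16 = 15,
   op[2] = 4, i.e. "zdepi 7,15,4". */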
2204 /* Compute position (in OP[1]) and width (in OP[2])
2205 useful for copying IMM to a register using the depdi,z
2206 instructions. Store the immediate value to insert in OP[0]. */
2207 void
2208 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2210 HOST_WIDE_INT lsb, len;
2212 /* Find the least significant set bit in IMM. */
2213 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2215 if ((imm & 1) != 0)
2216 break;
2217 imm >>= 1;
2220 /* Choose variants based on the *sign* of the 5-bit field. */
2221 if ((imm & 0x10) == 0)
2222 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2223 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2224 else
2226 /* Find the width of the bitstring in IMM. */
2227 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2229 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2230 break;
2233 /* Sign extend IMM as a 5-bit value. */
2234 imm = (imm & 0xf) - 0x10;
2237 op[0] = imm;
2238 op[1] = 63 - lsb;
2239 op[2] = len;
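/* Worked example (illustrative): imm = 0xff0 gives lsb = 4; the
   shifted field is 0xff, so len = 8 and imm sign-extends to -1:
   op[0] = -1, op[1] = 63 - 4 = 59, op[2] = 8, i.e.
   "depdi -1,59,8". */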
2242 /* Output assembler code to perform a doubleword move insn
2243 with operands OPERANDS. */
2245 const char *
2246 output_move_double (rtx *operands)
2248 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2249 rtx latehalf[2];
2250 rtx addreg0 = 0, addreg1 = 0;
2252 /* First classify both operands. */
2254 if (REG_P (operands[0]))
2255 optype0 = REGOP;
2256 else if (offsettable_memref_p (operands[0]))
2257 optype0 = OFFSOP;
2258 else if (GET_CODE (operands[0]) == MEM)
2259 optype0 = MEMOP;
2260 else
2261 optype0 = RNDOP;
2263 if (REG_P (operands[1]))
2264 optype1 = REGOP;
2265 else if (CONSTANT_P (operands[1]))
2266 optype1 = CNSTOP;
2267 else if (offsettable_memref_p (operands[1]))
2268 optype1 = OFFSOP;
2269 else if (GET_CODE (operands[1]) == MEM)
2270 optype1 = MEMOP;
2271 else
2272 optype1 = RNDOP;
2274 /* Check for the cases that the operand constraints are not
2275 supposed to allow to happen. Abort if we get one,
2276 because generating code for these cases is painful. */
2278 if (optype0 != REGOP && optype1 != REGOP)
2279 abort ();
2281 /* Handle auto decrementing and incrementing loads and stores
2282 specifically, since the structure of the function doesn't work
2283 for them without major modification. Do it better when we teach
2284 this port about the general inc/dec addressing of the PA.
2285 (This was written by tege. Chide him if it doesn't work.) */
2287 if (optype0 == MEMOP)
2289 /* We have to output the address syntax ourselves, since print_operand
2290 doesn't deal with the addresses we want to use. Fix this later. */
2292 rtx addr = XEXP (operands[0], 0);
2293 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2295 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2297 operands[0] = XEXP (addr, 0);
2298 if (GET_CODE (operands[1]) != REG || GET_CODE (operands[0]) != REG)
2299 abort ();
2301 if (!reg_overlap_mentioned_p (high_reg, addr))
2303 /* No overlap between high target register and address
2304 register. (We do this in a non-obvious way to
2305 save a register file writeback) */
2306 if (GET_CODE (addr) == POST_INC)
2307 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2308 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2310 else
2311 abort ();
2313 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2315 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2317 operands[0] = XEXP (addr, 0);
2318 if (GET_CODE (operands[1]) != REG || GET_CODE (operands[0]) != REG)
2319 abort ();
2321 if (!reg_overlap_mentioned_p (high_reg, addr))
2323 /* No overlap between high target register and address
2324 register. (We do this in a non-obvious way to
2325 save a register file writeback) */
2326 if (GET_CODE (addr) == PRE_INC)
2327 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2328 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2330 else
2331 abort ();
2334 if (optype1 == MEMOP)
2336 /* We have to output the address syntax ourselves, since print_operand
2337 doesn't deal with the addresses we want to use. Fix this later. */
2339 rtx addr = XEXP (operands[1], 0);
2340 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2342 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2344 operands[1] = XEXP (addr, 0);
2345 if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG)
2346 abort ();
2348 if (!reg_overlap_mentioned_p (high_reg, addr))
2350 /* No overlap between high target register and address
2351 register. (We do this in a non-obvious way to
2352 save a register file writeback) */
2353 if (GET_CODE (addr) == POST_INC)
2354 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2355 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2357 else
2359 /* This is an undefined situation. We should load into the
2360 address register *and* update that register. Probably
2361 we don't need to handle this at all. */
2362 if (GET_CODE (addr) == POST_INC)
2363 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2364 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2367 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2369 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2371 operands[1] = XEXP (addr, 0);
2372 if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG)
2373 abort ();
2375 if (!reg_overlap_mentioned_p (high_reg, addr))
2377 /* No overlap between high target register and address
2378 register. (We do this in a non-obvious way to
2379 save a register file writeback) */
2380 if (GET_CODE (addr) == PRE_INC)
2381 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2382 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2384 else
2386 /* This is an undefined situation. We should load into the
2387 address register *and* update that register. Probably
2388 we don't need to handle this at all. */
2389 if (GET_CODE (addr) == PRE_INC)
2390 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2391 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2394 else if (GET_CODE (addr) == PLUS
2395 && GET_CODE (XEXP (addr, 0)) == MULT)
2397 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2399 if (!reg_overlap_mentioned_p (high_reg, addr))
2401 rtx xoperands[4];
2403 xoperands[0] = high_reg;
2404 xoperands[1] = XEXP (addr, 1);
2405 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2406 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2407 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2408 xoperands);
2409 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2411 else
2413 rtx xoperands[4];
2415 xoperands[0] = high_reg;
2416 xoperands[1] = XEXP (addr, 1);
2417 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2418 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2419 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2420 xoperands);
2421 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2426 /* If an operand is an unoffsettable memory ref, find a register
2427 we can increment temporarily to make it refer to the second word. */
2429 if (optype0 == MEMOP)
2430 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2432 if (optype1 == MEMOP)
2433 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2435 /* Ok, we can do one word at a time.
2436 Normally we do the low-numbered word first.
2438 In either case, set up in LATEHALF the operands to use
2439 for the high-numbered word and in some cases alter the
2440 operands in OPERANDS to be suitable for the low-numbered word. */
2442 if (optype0 == REGOP)
2443 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2444 else if (optype0 == OFFSOP)
2445 latehalf[0] = adjust_address (operands[0], SImode, 4);
2446 else
2447 latehalf[0] = operands[0];
2449 if (optype1 == REGOP)
2450 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2451 else if (optype1 == OFFSOP)
2452 latehalf[1] = adjust_address (operands[1], SImode, 4);
2453 else if (optype1 == CNSTOP)
2454 split_double (operands[1], &operands[1], &latehalf[1]);
2455 else
2456 latehalf[1] = operands[1];
2458 /* If the first move would clobber the source of the second one,
2459 do them in the other order.
2461 This can happen in two cases:
2463 mem -> register where the first half of the destination register
2464 is the same register used in the memory's address. Reload
2465 can create such insns.
2467 mem in this case will be either register indirect or register
2468 indirect plus a valid offset.
2470 register -> register move where REGNO(dst) == REGNO(src) + 1
2471 someone (Tim/Tege?) claimed this can happen for parameter loads.
2473 Handle mem -> register case first. */
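/* Illustrative clobber case: (set (reg:DI 4)
   (mem:DI (plus (reg:SI 4) (const_int 8)))) -- the address uses
   %r4, the first half of the destination, so the second word is
   moved before the first. */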
2474 if (optype0 == REGOP
2475 && (optype1 == MEMOP || optype1 == OFFSOP)
2476 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2477 operands[1], 0))
2479 /* Do the late half first. */
2480 if (addreg1)
2481 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2482 output_asm_insn (singlemove_string (latehalf), latehalf);
2484 /* Then clobber. */
2485 if (addreg1)
2486 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2487 return singlemove_string (operands);
2490 /* Now handle register -> register case. */
2491 if (optype0 == REGOP && optype1 == REGOP
2492 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2494 output_asm_insn (singlemove_string (latehalf), latehalf);
2495 return singlemove_string (operands);
2498 /* Normal case: do the two words, low-numbered first. */
2500 output_asm_insn (singlemove_string (operands), operands);
2502 /* Make any unoffsettable addresses point at high-numbered word. */
2503 if (addreg0)
2504 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2505 if (addreg1)
2506 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2508 /* Do that word. */
2509 output_asm_insn (singlemove_string (latehalf), latehalf);
2511 /* Undo the adds we just did. */
2512 if (addreg0)
2513 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2514 if (addreg1)
2515 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2517 return "";
2520 const char *
2521 output_fp_move_double (rtx *operands)
2523 if (FP_REG_P (operands[0]))
2525 if (FP_REG_P (operands[1])
2526 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2527 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2528 else
2529 output_asm_insn ("fldd%F1 %1,%0", operands);
2531 else if (FP_REG_P (operands[1]))
2533 output_asm_insn ("fstd%F0 %1,%0", operands);
2535 else if (operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2537 if (GET_CODE (operands[0]) == REG)
2539 rtx xoperands[2];
2540 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2541 xoperands[0] = operands[0];
2542 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2544 /* This is a pain. You have to be prepared to deal with an
2545 arbitrary address here including pre/post increment/decrement.
2547 So avoid this in the MD. */
2548 else
2549 abort ();
2551 else abort ();
2552 return "";
2555 /* Return a REG that occurs in ADDR with coefficient 1.
2556 ADDR can be effectively incremented by incrementing REG. */
2558 static rtx
2559 find_addr_reg (rtx addr)
2561 while (GET_CODE (addr) == PLUS)
2563 if (GET_CODE (XEXP (addr, 0)) == REG)
2564 addr = XEXP (addr, 0);
2565 else if (GET_CODE (XEXP (addr, 1)) == REG)
2566 addr = XEXP (addr, 1);
2567 else if (CONSTANT_P (XEXP (addr, 0)))
2568 addr = XEXP (addr, 1);
2569 else if (CONSTANT_P (XEXP (addr, 1)))
2570 addr = XEXP (addr, 0);
2571 else
2572 abort ();
2574 if (GET_CODE (addr) == REG)
2575 return addr;
2576 abort ();
2579 /* Emit code to perform a block move.
2581 OPERANDS[0] is the destination pointer as a REG, clobbered.
2582 OPERANDS[1] is the source pointer as a REG, clobbered.
2583 OPERANDS[2] is a register for temporary storage.
2584 OPERANDS[3] is a register for temporary storage.
2585 OPERANDS[4] is the size as a CONST_INT
2586 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2587 OPERANDS[6] is another temporary register. */
2589 const char *
2590 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2592 int align = INTVAL (operands[5]);
2593 unsigned long n_bytes = INTVAL (operands[4]);
2595 /* We can't move more than a word at a time because the PA
2596 has no integer move insns longer than a word. (Could use fp mem ops?) */
2597 if (align > (TARGET_64BIT ? 8 : 4))
2598 align = (TARGET_64BIT ? 8 : 4);
2600 /* Note that we know each loop below will execute at least twice
2601 (else we would have open-coded the copy). */
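/* Illustrative trace (align == 4, n_bytes == 17): the counter is
   preloaded with 9; each loop iteration copies 8 bytes (the final
   store sits in the addib delay slot), so the loop runs twice
   (counter 9 -> 1 -> -7); the residual code then loads one word
   and stores the last byte with stby,e. */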
2602 switch (align)
2604 case 8:
2605 /* Pre-adjust the loop counter. */
2606 operands[4] = GEN_INT (n_bytes - 16);
2607 output_asm_insn ("ldi %4,%2", operands);
2609 /* Copying loop. */
2610 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2611 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2612 output_asm_insn ("std,ma %3,8(%0)", operands);
2613 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2614 output_asm_insn ("std,ma %6,8(%0)", operands);
2616 /* Handle the residual. There could be up to 7 bytes of
2617 residual to copy! */
2618 if (n_bytes % 16 != 0)
2620 operands[4] = GEN_INT (n_bytes % 8);
2621 if (n_bytes % 16 >= 8)
2622 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2623 if (n_bytes % 8 != 0)
2624 output_asm_insn ("ldd 0(%1),%6", operands);
2625 if (n_bytes % 16 >= 8)
2626 output_asm_insn ("std,ma %3,8(%0)", operands);
2627 if (n_bytes % 8 != 0)
2628 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2630 return "";
2632 case 4:
2633 /* Pre-adjust the loop counter. */
2634 operands[4] = GEN_INT (n_bytes - 8);
2635 output_asm_insn ("ldi %4,%2", operands);
2637 /* Copying loop. */
2638 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2639 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2640 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2641 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2642 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2644 /* Handle the residual. There could be up to 7 bytes of
2645 residual to copy! */
2646 if (n_bytes % 8 != 0)
2648 operands[4] = GEN_INT (n_bytes % 4);
2649 if (n_bytes % 8 >= 4)
2650 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2651 if (n_bytes % 4 != 0)
2652 output_asm_insn ("ldw 0(%1),%6", operands);
2653 if (n_bytes % 8 >= 4)
2654 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2655 if (n_bytes % 4 != 0)
2656 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2658 return "";
2660 case 2:
2661 /* Pre-adjust the loop counter. */
2662 operands[4] = GEN_INT (n_bytes - 4);
2663 output_asm_insn ("ldi %4,%2", operands);
2665 /* Copying loop. */
2666 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2667 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2668 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2669 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2670 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2672 /* Handle the residual. */
2673 if (n_bytes % 4 != 0)
2675 if (n_bytes % 4 >= 2)
2676 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2677 if (n_bytes % 2 != 0)
2678 output_asm_insn ("ldb 0(%1),%6", operands);
2679 if (n_bytes % 4 >= 2)
2680 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2681 if (n_bytes % 2 != 0)
2682 output_asm_insn ("stb %6,0(%0)", operands);
2684 return "";
2686 case 1:
2687 /* Pre-adjust the loop counter. */
2688 operands[4] = GEN_INT (n_bytes - 2);
2689 output_asm_insn ("ldi %4,%2", operands);
2691 /* Copying loop. */
2692 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2693 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2694 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2695 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2696 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2698 /* Handle the residual. */
2699 if (n_bytes % 2 != 0)
2701 output_asm_insn ("ldb 0(%1),%3", operands);
2702 output_asm_insn ("stb %3,0(%0)", operands);
2704 return "";
2706 default:
2707 abort ();
2711 /* Count the number of insns necessary to handle this block move.
2713 Basic structure is the same as output_block_move, except that we
2714 count insns rather than emit them. */
2716 static int
2717 compute_movstr_length (rtx insn)
2719 rtx pat = PATTERN (insn);
2720 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2721 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2722 unsigned int n_insns = 0;
2724 /* We can't move more than a word at a time because the PA
2725 has no integer move insns longer than a word. (Could use fp mem ops?) */
2726 if (align > (TARGET_64BIT ? 8 : 4))
2727 align = (TARGET_64BIT ? 8 : 4);
2729 /* The basic copying loop. */
2730 n_insns = 6;
2732 /* Residuals. */
2733 if (n_bytes % (2 * align) != 0)
2735 if ((n_bytes % (2 * align)) >= align)
2736 n_insns += 2;
2738 if ((n_bytes % align) != 0)
2739 n_insns += 2;
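/* Illustrative count (align == 4, n_bytes == 17): the residual is
   17 % 8 = 1, which is < 4 but nonzero, so only the trailing
   load/store pair is added: n_insns = 6 + 2 = 8, or 32 bytes. */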
2742 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2743 return n_insns * 4;
2746 /* Emit code to perform a block clear.
2748 OPERANDS[0] is the destination pointer as a REG, clobbered.
2749 OPERANDS[1] is a register for temporary storage.
2750 OPERANDS[2] is the size as a CONST_INT
2751 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2753 const char *
2754 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2756 int align = INTVAL (operands[3]);
2757 unsigned long n_bytes = INTVAL (operands[2]);
2759 /* We can't clear more than a word at a time because the PA
2760 has no integer move insns longer than a word. */
2761 if (align > (TARGET_64BIT ? 8 : 4))
2762 align = (TARGET_64BIT ? 8 : 4);
2764 /* Note that we know each loop below will execute at least twice
2765 (else we would have open-coded the copy). */
2766 switch (align)
2768 case 8:
2769 /* Pre-adjust the loop counter. */
2770 operands[2] = GEN_INT (n_bytes - 16);
2771 output_asm_insn ("ldi %2,%1", operands);
2773 /* Loop. */
2774 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2775 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2776 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2778 /* Handle the residual. There could be up to 7 bytes of
2779 residual to copy! */
2780 if (n_bytes % 16 != 0)
2782 operands[2] = GEN_INT (n_bytes % 8);
2783 if (n_bytes % 16 >= 8)
2784 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2785 if (n_bytes % 8 != 0)
2786 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2788 return "";
2790 case 4:
2791 /* Pre-adjust the loop counter. */
2792 operands[2] = GEN_INT (n_bytes - 8);
2793 output_asm_insn ("ldi %2,%1", operands);
2795 /* Loop. */
2796 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2797 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2798 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2800 /* Handle the residual. There could be up to 7 bytes of
2801 residual to copy! */
2802 if (n_bytes % 8 != 0)
2804 operands[2] = GEN_INT (n_bytes % 4);
2805 if (n_bytes % 8 >= 4)
2806 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2807 if (n_bytes % 4 != 0)
2808 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2810 return "";
2812 case 2:
2813 /* Pre-adjust the loop counter. */
2814 operands[2] = GEN_INT (n_bytes - 4);
2815 output_asm_insn ("ldi %2,%1", operands);
2817 /* Loop. */
2818 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2819 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2820 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2822 /* Handle the residual. */
2823 if (n_bytes % 4 != 0)
2825 if (n_bytes % 4 >= 2)
2826 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2827 if (n_bytes % 2 != 0)
2828 output_asm_insn ("stb %%r0,0(%0)", operands);
2830 return "";
2832 case 1:
2833 /* Pre-adjust the loop counter. */
2834 operands[2] = GEN_INT (n_bytes - 2);
2835 output_asm_insn ("ldi %2,%1", operands);
2837 /* Loop. */
2838 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2839 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2840 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2842 /* Handle the residual. */
2843 if (n_bytes % 2 != 0)
2844 output_asm_insn ("stb %%r0,0(%0)", operands);
2846 return "";
2848 default:
2849 abort ();
2853 /* Count the number of insns necessary to handle this block clear.
2855 Basic structure is the same as output_block_clear, except that we
2856 count insns rather than emit them. */
2858 static int
2859 compute_clrstr_length (rtx insn)
2861 rtx pat = PATTERN (insn);
2862 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2863 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2864 unsigned int n_insns = 0;
2866 /* We can't clear more than a word at a time because the PA
2867 has no integer move insns longer than a word. */
2868 if (align > (TARGET_64BIT ? 8 : 4))
2869 align = (TARGET_64BIT ? 8 : 4);
2871 /* The basic loop. */
2872 n_insns = 4;
2874 /* Residuals. */
2875 if (n_bytes % (2 * align) != 0)
2877 if ((n_bytes % (2 * align)) >= align)
2878 n_insns++;
2880 if ((n_bytes % align) != 0)
2881 n_insns++;
2884 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2885 return n_insns * 4;
2889 const char *
2890 output_and (rtx *operands)
2892 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2894 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2895 int ls0, ls1, ms0, p, len;
2897 for (ls0 = 0; ls0 < 32; ls0++)
2898 if ((mask & (1 << ls0)) == 0)
2899 break;
2901 for (ls1 = ls0; ls1 < 32; ls1++)
2902 if ((mask & (1 << ls1)) != 0)
2903 break;
2905 for (ms0 = ls1; ms0 < 32; ms0++)
2906 if ((mask & (1 << ms0)) == 0)
2907 break;
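/* The mask must now be all ones except for a single contiguous
   field of zeros; e.g. (illustrative) 0x7f yields
   "extru %1,31,7,%0" and 0xffff00ff yields "depi 0,23,8,%0". */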
2909 if (ms0 != 32)
2910 abort ();
2912 if (ls1 == 32)
2914 len = ls0;
2916 if (len == 0)
2917 abort ();
2919 operands[2] = GEN_INT (len);
2920 return "{extru|extrw,u} %1,31,%2,%0";
2922 else
2924 /* We could use this `depi' for the case above as well, but `depi'
2925 requires one more register file access than an `extru'. */
2927 p = 31 - ls0;
2928 len = ls1 - ls0;
2930 operands[2] = GEN_INT (p);
2931 operands[3] = GEN_INT (len);
2932 return "{depi|depwi} 0,%2,%3,%0";
2935 else
2936 return "and %1,%2,%0";
2939 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2940 storing the result in operands[0]. */
2941 const char *
2942 output_64bit_and (rtx *operands)
2944 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2946 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2947 int ls0, ls1, ms0, p, len;
2949 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2950 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2951 break;
2953 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2954 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2955 break;
2957 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2958 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2959 break;
2961 if (ms0 != HOST_BITS_PER_WIDE_INT)
2962 abort ();
2964 if (ls1 == HOST_BITS_PER_WIDE_INT)
2966 len = ls0;
2968 if (len == 0)
2969 abort ();
2971 operands[2] = GEN_INT (len);
2972 return "extrd,u %1,63,%2,%0";
2974 else
2976 /* We could use this `depdi' for the case above as well, but `depdi'
2977 requires one more register file access than an `extrd,u'. */
2979 p = 63 - ls0;
2980 len = ls1 - ls0;
2982 operands[2] = GEN_INT (p);
2983 operands[3] = GEN_INT (len);
2984 return "depdi 0,%2,%3,%0";
2987 else
2988 return "and %1,%2,%0";
2991 const char *
2992 output_ior (rtx *operands)
2994 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2995 int bs0, bs1, p, len;
2997 if (INTVAL (operands[2]) == 0)
2998 return "copy %1,%0";
3000 for (bs0 = 0; bs0 < 32; bs0++)
3001 if ((mask & (1 << bs0)) != 0)
3002 break;
3004 for (bs1 = bs0; bs1 < 32; bs1++)
3005 if ((mask & (1 << bs1)) == 0)
3006 break;
3008 if (bs1 != 32 && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
3009 abort ();
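/* The mask must be a single contiguous run of ones; e.g.
   (illustrative) 0xff0 gives bs0 = 4, bs1 = 12, hence
   "depi -1,27,8,%0". */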
3011 p = 31 - bs0;
3012 len = bs1 - bs0;
3014 operands[2] = GEN_INT (p);
3015 operands[3] = GEN_INT (len);
3016 return "{depi|depwi} -1,%2,%3,%0";
3019 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3020 storing the result in operands[0]. */
3021 const char *
3022 output_64bit_ior (rtx *operands)
3024 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3025 int bs0, bs1, p, len;
3027 if (INTVAL (operands[2]) == 0)
3028 return "copy %1,%0";
3030 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3031 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3032 break;
3034 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3035 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3036 break;
3038 if (bs1 != HOST_BITS_PER_WIDE_INT
3039 && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
3040 abort ();
3042 p = 63 - bs0;
3043 len = bs1 - bs0;
3045 operands[2] = GEN_INT (p);
3046 operands[3] = GEN_INT (len);
3047 return "depdi -1,%2,%3,%0";
3050 /* Target hook for assembling integer objects. This code handles
3051 aligned SI and DI integers specially, since function references must
3052 be preceded by P%. */
3054 static bool
3055 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3057 if (size == UNITS_PER_WORD && aligned_p
3058 && function_label_operand (x, VOIDmode))
3060 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3061 output_addr_const (asm_out_file, x);
3062 fputc ('\n', asm_out_file);
3063 return true;
3065 return default_assemble_integer (x, size, aligned_p);
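/* For example, a word-aligned reference to a function "foo" is
   emitted as "\t.word\tP%foo" ("\t.dword\tP%foo" when
   UNITS_PER_WORD is 8), so the linker resolves it to a plabel
   rather than a raw code address. */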
3068 /* Output an ascii string. */
3069 void
3070 output_ascii (FILE *file, const char *p, int size)
3072 int i;
3073 int chars_output;
3074 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3076 /* The HP assembler can only take strings of 256 characters at one
3077 time. This is a limitation on input line length, *not* the
3078 length of the string. Sigh. Even worse, it seems that the
3079 restriction is in number of input characters (see \xnn &
3080 \whatever). So we have to do this very carefully. */
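/* E.g. the input bytes 'a', '"', '\n' come out as
   .STRING "a\"\x0a" -- quotes and backslashes get a leading
   backslash, unprintable bytes become \xNN. */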
3082 fputs ("\t.STRING \"", file);
3084 chars_output = 0;
3085 for (i = 0; i < size; i += 4)
3087 int co = 0;
3088 int io = 0;
3089 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3091 register unsigned int c = (unsigned char) p[i + io];
3093 if (c == '\"' || c == '\\')
3094 partial_output[co++] = '\\';
3095 if (c >= ' ' && c < 0177)
3096 partial_output[co++] = c;
3097 else
3099 unsigned int hexd;
3100 partial_output[co++] = '\\';
3101 partial_output[co++] = 'x';
3102 hexd = c / 16 - 0 + '0';
3103 if (hexd > '9')
3104 hexd -= '9' - 'a' + 1;
3105 partial_output[co++] = hexd;
3106 hexd = c % 16 - 0 + '0';
3107 if (hexd > '9')
3108 hexd -= '9' - 'a' + 1;
3109 partial_output[co++] = hexd;
3112 if (chars_output + co > 243)
3114 fputs ("\"\n\t.STRING \"", file);
3115 chars_output = 0;
3117 fwrite (partial_output, 1, (size_t) co, file);
3118 chars_output += co;
3119 co = 0;
3121 fputs ("\"\n", file);
3124 /* Try to rewrite floating point comparisons & branches to avoid
3125 useless add,tr insns.
3127 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3128 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3129 first attempt to remove useless add,tr insns. It is zero
3130 for the second pass as reorg sometimes leaves bogus REG_DEAD
3131 notes lying around.
3133 When CHECK_NOTES is zero we can only eliminate add,tr insns
3134 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3135 instructions. */
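/* Roughly (illustrative): a branch on the uncommon sense of an
   fcmp comes out as an ftest followed by an "add,tr" acting as an
   unconditional skip. Reversing both the fcmp condition and the
   branch lets the ftest control the branch directly, so the
   add,tr disappears. */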
3136 static void
3137 remove_useless_addtr_insns (int check_notes)
3139 rtx insn;
3140 static int pass = 0;
3142 /* This is fairly cheap, so always run it when optimizing. */
3143 if (optimize > 0)
3145 int fcmp_count = 0;
3146 int fbranch_count = 0;
3148 /* Walk all the insns in this function looking for fcmp & fbranch
3149 instructions. Keep track of how many of each we find. */
3150 for (insn = get_insns (); insn; insn = next_insn (insn))
3152 rtx tmp;
3154 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3155 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3156 continue;
3158 tmp = PATTERN (insn);
3160 /* It must be a set. */
3161 if (GET_CODE (tmp) != SET)
3162 continue;
3164 /* If the destination is CCFP, then we've found an fcmp insn. */
3165 tmp = SET_DEST (tmp);
3166 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3168 fcmp_count++;
3169 continue;
3172 tmp = PATTERN (insn);
3173 /* If this is an fbranch instruction, bump the fbranch counter. */
3174 if (GET_CODE (tmp) == SET
3175 && SET_DEST (tmp) == pc_rtx
3176 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3177 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3178 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3179 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3181 fbranch_count++;
3182 continue;
3187 /* Find all floating point compare + branch insns. If possible,
3188 reverse the comparison & the branch to avoid add,tr insns. */
3189 for (insn = get_insns (); insn; insn = next_insn (insn))
3191 rtx tmp, next;
3193 /* Ignore anything that isn't an INSN. */
3194 if (GET_CODE (insn) != INSN)
3195 continue;
3197 tmp = PATTERN (insn);
3199 /* It must be a set. */
3200 if (GET_CODE (tmp) != SET)
3201 continue;
3203 /* The destination must be CCFP, which is register zero. */
3204 tmp = SET_DEST (tmp);
3205 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3206 continue;
3208 /* INSN should be a set of CCFP.
3210 See if the result of this insn is used in a reversed FP
3211 conditional branch. If so, reverse our condition and
3212 the branch. Doing so avoids useless add,tr insns. */
3213 next = next_insn (insn);
3214 while (next)
3216 /* Jumps, calls and labels stop our search. */
3217 if (GET_CODE (next) == JUMP_INSN
3218 || GET_CODE (next) == CALL_INSN
3219 || GET_CODE (next) == CODE_LABEL)
3220 break;
3222 /* As does another fcmp insn. */
3223 if (GET_CODE (next) == INSN
3224 && GET_CODE (PATTERN (next)) == SET
3225 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3226 && REGNO (SET_DEST (PATTERN (next))) == 0)
3227 break;
3229 next = next_insn (next);
3232 /* Is NEXT_INSN a branch? */
3233 if (next
3234 && GET_CODE (next) == JUMP_INSN)
3236 rtx pattern = PATTERN (next);
3238 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3239 and CCFP dies, then reverse our conditional and the branch
3240 to avoid the add,tr. */
3241 if (GET_CODE (pattern) == SET
3242 && SET_DEST (pattern) == pc_rtx
3243 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3244 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3245 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3246 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3247 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3248 && (fcmp_count == fbranch_count
3249 || (check_notes
3250 && find_regno_note (next, REG_DEAD, 0))))
3252 /* Reverse the branch. */
3253 tmp = XEXP (SET_SRC (pattern), 1);
3254 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3255 XEXP (SET_SRC (pattern), 2) = tmp;
3256 INSN_CODE (next) = -1;
3258 /* Reverse our condition. */
3259 tmp = PATTERN (insn);
3260 PUT_CODE (XEXP (tmp, 1),
3261 (reverse_condition_maybe_unordered
3262 (GET_CODE (XEXP (tmp, 1)))));
3268 pass = !pass;
3272 /* You may have trouble believing this, but this is the 32 bit HP-PA
3273 stack layout. Wow.
3275 Offset Contents
3277 Variable arguments (optional; any number may be allocated)
3279 SP-(4*(N+9)) arg word N
3281 SP-56 arg word 5
3282 SP-52 arg word 4
3284 Fixed arguments (must be allocated; may remain unused)
3286 SP-48 arg word 3
3287 SP-44 arg word 2
3288 SP-40 arg word 1
3289 SP-36 arg word 0
3291 Frame Marker
3293 SP-32 External Data Pointer (DP)
3294 SP-28 External sr4
3295 SP-24 External/stub RP (RP')
3296 SP-20 Current RP
3297 SP-16 Static Link
3298 SP-12 Clean up
3299 SP-8 Calling Stub RP (RP'')
3300 SP-4 Previous SP
3302 Top of Frame
3304 SP-0 Stack Pointer (points to next available address)
3308 /* This function saves registers as follows. Registers marked with ' are
3309 this function's registers (as opposed to the previous function's).
3310 If a frame_pointer isn't needed, r4 is saved as a general register;
3311 the space for the frame pointer is still allocated, though, to keep
3312 things simple.
3315 Top of Frame
3317 SP (FP') Previous FP
3318 SP + 4 Alignment filler (sigh)
3319 SP + 8 Space for locals reserved here.
3323 SP + n All call saved registers used.
3327 SP + o All call saved fp registers used.
3331 SP + p (SP') points to next available address.
3335 /* Global variables set by output_function_prologue(). */
3336 /* Size of frame. Need to know this to emit return insns from
3337 leaf procedures. */
3338 static int actual_fsize;
3339 static int local_fsize, save_fregs;
3341 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3342 Handle case where DISP > 8k by using the add_high_const patterns.
3344 Note in DISP > 8k case, we will leave the high part of the address
3345 in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
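/* Sketch of the DISP > 8k case (illustrative, disp = 0x5000):
   %r1 = BASE + L'0x5000 via the add_high_const pattern, then the
   store goes through the LO_SUM address R'0x5000(%r1). */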
3347 static void
3348 store_reg (int reg, int disp, int base)
3350 rtx insn, dest, src, basereg;
3352 src = gen_rtx_REG (word_mode, reg);
3353 basereg = gen_rtx_REG (Pmode, base);
3354 if (VAL_14_BITS_P (disp))
3356 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3357 insn = emit_move_insn (dest, src);
3359 else
3361 rtx delta = GEN_INT (disp);
3362 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3363 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3364 emit_move_insn (tmpreg, high);
3365 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3366 insn = emit_move_insn (dest, src);
3367 if (DO_FRAME_NOTES)
3369 REG_NOTES (insn)
3370 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3371 gen_rtx_SET (VOIDmode,
3372 gen_rtx_MEM (word_mode,
3373 gen_rtx_PLUS (word_mode, basereg,
3374 delta)),
3375 src),
3376 REG_NOTES (insn));
3380 if (DO_FRAME_NOTES)
3381 RTX_FRAME_RELATED_P (insn) = 1;
3384 /* Emit RTL to store REG at the memory location specified by BASE and then
3385 add MOD to BASE. MOD must be <= 8k. */
3387 static void
3388 store_reg_modify (int base, int reg, int mod)
3390 rtx insn, basereg, srcreg, delta;
3392 if (! VAL_14_BITS_P (mod))
3393 abort ();
3395 basereg = gen_rtx_REG (Pmode, base);
3396 srcreg = gen_rtx_REG (word_mode, reg);
3397 delta = GEN_INT (mod);
3399 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3400 if (DO_FRAME_NOTES)
3402 RTX_FRAME_RELATED_P (insn) = 1;
3404 /* RTX_FRAME_RELATED_P must be set on each frame related set
3405 in a parallel with more than one element. Don't set
3406 RTX_FRAME_RELATED_P in the first set if reg is temporary
3407 register 1. The effect of this operation is recorded in
3408 the initial copy. */
3409 if (reg != 1)
3411 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3412 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3414 else
3416 /* The first element of a PARALLEL is always processed if it is
3417 a SET. Thus, we need an expression list for this case. */
3418 REG_NOTES (insn)
3419 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3420 gen_rtx_SET (VOIDmode, basereg,
3421 gen_rtx_PLUS (word_mode, basereg, delta)),
3422 REG_NOTES (insn));
3427 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3428 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3429 whether to add a frame note or not.
3431 In the DISP > 8k case, we leave the high part of the address in %r1.
3432 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3434 static void
3435 set_reg_plus_d (int reg, int base, int disp, int note)
3437 rtx insn;
3439 if (VAL_14_BITS_P (disp))
3441 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3442 plus_constant (gen_rtx_REG (Pmode, base), disp));
3444 else
3446 rtx basereg = gen_rtx_REG (Pmode, base);
3447 rtx delta = GEN_INT (disp);
3449 emit_move_insn (gen_rtx_REG (Pmode, 1),
3450 gen_rtx_PLUS (Pmode, basereg,
3451 gen_rtx_HIGH (Pmode, delta)));
3452 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3453 gen_rtx_LO_SUM (Pmode, gen_rtx_REG (Pmode, 1),
3454 delta));
3457 if (DO_FRAME_NOTES && note)
3458 RTX_FRAME_RELATED_P (insn) = 1;
3461 int
3462 compute_frame_size (int size, int *fregs_live)
3464 int freg_saved = 0;
3465 int i, j;
3467 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3468 be consistent with the rounding and size calculation done here.
3469 Change them at the same time. */
3471 /* We do our own stack alignment. First, round the size of the
3472 stack locals up to a word boundary. */
3473 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3475 /* Space for previous frame pointer + filler. If any frame is
3476 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3477 waste some space here for the sake of HP compatibility. The
3478 first slot is only used when the frame pointer is needed. */
3479 if (size || frame_pointer_needed)
3480 size += STARTING_FRAME_OFFSET;
3482 /* If the current function calls __builtin_eh_return, then we need
3483 to allocate stack space for registers that will hold data for
3484 the exception handler. */
3485 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3487 unsigned int i;
3489 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3490 continue;
3491 size += i * UNITS_PER_WORD;
3494 /* Account for space used by the callee general register saves. */
3495 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3496 if (regs_ever_live[i])
3497 size += UNITS_PER_WORD;
3499 /* Account for space used by the callee floating point register saves. */
3500 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3501 if (regs_ever_live[i]
3502 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3504 freg_saved = 1;
3506 /* We always save both halves of the FP register, so always
3507 increment the frame size by 8 bytes. */
3508 size += 8;
3511 /* If any of the floating registers are saved, account for the
3512 alignment needed for the floating point register save block. */
3513 if (freg_saved)
3515 size = (size + 7) & ~7;
3516 if (fregs_live)
3517 *fregs_live = 1;
3520 /* The various ABIs include space for the outgoing parameters in the
3521 size of the current function's stack frame. We don't need to align
3522 for the outgoing arguments as their alignment is set by the final
3523 rounding for the frame as a whole. */
3524 size += current_function_outgoing_args_size;
3526 /* Allocate space for the fixed frame marker. This space must be
3527 allocated for any function that makes calls or allocates
3528 stack space. */
3529 if (!current_function_is_leaf || size)
3530 size += TARGET_64BIT ? 48 : 32;
3532 /* Finally, round to the preferred stack boundary. */
3533 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3534 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
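/* Worked example (illustrative, 32-bit): a leaf function with 40
   bytes of locals and no register saves gets 40 + 8 (frame
   pointer slot + filler) + 32 (fixed frame marker, since the size
   is nonzero) = 80, rounded up to the 64-byte preferred stack
   boundary = 128. */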
3537 /* Generate the assembly code for function entry. FILE is a stdio
3538 stream to output the code to. SIZE is an int: how many units of
3539 temporary storage to allocate.
3541 Refer to the array `regs_ever_live' to determine which registers to
3542 save; `regs_ever_live[I]' is nonzero if register number I is ever
3543 used in the function. This function is responsible for knowing
3544 which registers should not be saved even if used. */
3546 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3547 of memory. If any fpu reg is used in the function, we allocate
3548 such a block here, at the bottom of the frame, just in case it's needed.
3550 If this function is a leaf procedure, then we may choose not
3551 to do a "save" insn. The decision about whether or not
3552 to do this is made in regclass.c. */
3554 static void
3555 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3557 /* The function's label and associated .PROC must never be
3558 separated and must be output *after* any profiling declarations
3559 to avoid changing spaces/subspaces within a procedure. */
3560 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3561 fputs ("\t.PROC\n", file);
3563 /* hppa_expand_prologue does the dirty work now. We just need
3564 to output the assembler directives which denote the start
3565 of a function. */
3566 fprintf (file, "\t.CALLINFO FRAME=%d", actual_fsize);
3567 if (regs_ever_live[2])
3568 fputs (",CALLS,SAVE_RP", file);
3569 else
3570 fputs (",NO_CALLS", file);
3572 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3573 at the beginning of the frame and that it is used as the frame
3574 pointer for the frame. We do this because our current frame
3575 layout doesn't conform to that specified in the HP runtime
3576 documentation and we need a way to indicate to programs such as
3577 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3578 isn't used by HP compilers but is supported by the assembler.
3579 However, SAVE_SP is supposed to indicate that the previous stack
3580 pointer has been saved in the frame marker. */
3581 if (frame_pointer_needed)
3582 fputs (",SAVE_SP", file);
3584 /* Pass on information about the number of callee register saves
3585 performed in the prologue.
3587 The compiler is supposed to pass the highest register number
3588 saved, the assembler then has to adjust that number before
3589 entering it into the unwind descriptor (to account for any
3590 caller saved registers with lower register numbers than the
3591 first callee saved register). */
3592 if (gr_saved)
3593 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3595 if (fr_saved)
3596 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3598 fputs ("\n\t.ENTRY\n", file);
3600 remove_useless_addtr_insns (0);
3603 void
3604 hppa_expand_prologue (void)
3606 int merge_sp_adjust_with_store = 0;
3607 int size = get_frame_size ();
3608 int i, offset;
3609 rtx insn, tmpreg;
3611 gr_saved = 0;
3612 fr_saved = 0;
3613 save_fregs = 0;
3615 /* Compute total size for frame pointer, filler, locals and rounding to
3616 the next word boundary. Similar code appears in compute_frame_size
3617 and must be changed in tandem with this code. */
3618 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3619 if (local_fsize || frame_pointer_needed)
3620 local_fsize += STARTING_FRAME_OFFSET;
3622 actual_fsize = compute_frame_size (size, &save_fregs);
3624 /* Compute a few things we will use often. */
3625 tmpreg = gen_rtx_REG (word_mode, 1);
3627 /* Save RP first. The calling conventions manual states RP will
3628 always be stored into the caller's frame at sp - 20 or sp - 16
3629 depending on which ABI is in use. */
3630 if (regs_ever_live[2] || current_function_calls_eh_return)
3631 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3633 /* Allocate the local frame and set up the frame pointer if needed. */
3634 if (actual_fsize != 0)
3636 if (frame_pointer_needed)
3638 /* Copy the old frame pointer temporarily into %r1. Set up the
3639 new stack pointer, then store away the saved old frame pointer
3640 into the stack at sp and at the same time update the stack
3641 pointer by actual_fsize bytes. Two versions, first
3642 handles small (<8k) frames. The second handles large (>=8k)
3643 frames. */
3644 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3645 if (DO_FRAME_NOTES)
3647 /* We need to record the frame pointer save here since the
3648 new frame pointer is set in the following insn. */
3649 RTX_FRAME_RELATED_P (insn) = 1;
3650 REG_NOTES (insn)
3651 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3652 gen_rtx_SET (VOIDmode,
3653 gen_rtx_MEM (word_mode, stack_pointer_rtx),
3654 frame_pointer_rtx),
3655 REG_NOTES (insn));
3658 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3659 if (DO_FRAME_NOTES)
3660 RTX_FRAME_RELATED_P (insn) = 1;
3662 if (VAL_14_BITS_P (actual_fsize))
3663 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3664 else
3666 /* It is incorrect to store the saved frame pointer at *sp,
3667 then increment sp (writes beyond the current stack boundary).
3669 So instead use stwm to store at *sp and post-increment the
3670 stack pointer as an atomic operation. Then increment sp to
3671 finish allocating the new frame. */
3672 int adjust1 = 8192 - 64;
3673 int adjust2 = actual_fsize - adjust1;
3675 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3676 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3677 adjust2, 1);
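/* Illustrative: for actual_fsize = 20000, the store-with-modify
   saves the old frame pointer and atomically allocates
   adjust1 = 8128 bytes; the remaining adjust2 = 11872 bytes are
   added afterwards. (8192 - 64 keeps the displacement within 14
   bits while preserving 64-byte stack alignment.) */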
3680 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3681 we need to store the previous stack pointer (frame pointer)
3682 into the frame marker on targets that use the HP unwind
3683 library. This allows the HP unwind library to be used to
3684 unwind GCC frames. However, we are not fully compatible
3685 with the HP library because our frame layout differs from
3686 that specified in the HP runtime specification.
3688 We don't want a frame note on this instruction as the frame
3689 marker moves during dynamic stack allocation.
3691 This instruction also serves as a blockage to prevent
3692 register spills from being scheduled before the stack
3693 pointer is raised. This is necessary as we store
3694 registers using the frame pointer as a base register,
3695 and the frame pointer is set before sp is raised. */
3696 if (TARGET_HPUX_UNWIND_LIBRARY)
3698 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3699 GEN_INT (TARGET_64BIT ? -8 : -4));
3701 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3702 frame_pointer_rtx);
3704 else
3705 emit_insn (gen_blockage ());
3707 /* No frame pointer needed. */
3708 else
3710 /* In some cases we can perform the first callee register save
3711 and allocate the stack frame at the same time. If so, just
3712 make a note of it and defer allocating the frame until saving
3713 the callee registers. */
3714 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3715 merge_sp_adjust_with_store = 1;
3716 /* Can not optimize. Adjust the stack frame by actual_fsize
3717 bytes. */
3718 else
3719 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3720 actual_fsize, 1);
3724 /* Normal register save.
3726 Do not save the frame pointer in the frame_pointer_needed case. It
3727 was done earlier. */
3728 if (frame_pointer_needed)
3730 offset = local_fsize;
3732 /* Saving the EH return data registers in the frame is the simplest
3733 way to get the frame unwind information emitted. We put them
3734 just before the general registers. */
3735 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3737 unsigned int i, regno;
3739 for (i = 0; ; ++i)
3741 regno = EH_RETURN_DATA_REGNO (i);
3742 if (regno == INVALID_REGNUM)
3743 break;
3745 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3746 offset += UNITS_PER_WORD;
3750 for (i = 18; i >= 4; i--)
3751 if (regs_ever_live[i] && ! call_used_regs[i])
3753 store_reg (i, offset, FRAME_POINTER_REGNUM);
3754 offset += UNITS_PER_WORD;
3755 gr_saved++;
3757 /* Account for %r3 which is saved in a special place. */
3758 gr_saved++;
3760 /* No frame pointer needed. */
3761 else
3763 offset = local_fsize - actual_fsize;
3765 /* Saving the EH return data registers in the frame is the simplest
3766 way to get the frame unwind information emitted. */
3767 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3769 unsigned int i, regno;
3771 for (i = 0; ; ++i)
3773 regno = EH_RETURN_DATA_REGNO (i);
3774 if (regno == INVALID_REGNUM)
3775 break;
3777 /* If merge_sp_adjust_with_store is nonzero, then we can
3778 optimize the first save. */
3779 if (merge_sp_adjust_with_store)
3781 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3782 merge_sp_adjust_with_store = 0;
3784 else
3785 store_reg (regno, offset, STACK_POINTER_REGNUM);
3786 offset += UNITS_PER_WORD;
3790 for (i = 18; i >= 3; i--)
3791 if (regs_ever_live[i] && ! call_used_regs[i])
3793 /* If merge_sp_adjust_with_store is nonzero, then we can
3794 optimize the first GR save. */
3795 if (merge_sp_adjust_with_store)
3797 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3798 merge_sp_adjust_with_store = 0;
3800 else
3801 store_reg (i, offset, STACK_POINTER_REGNUM);
3802 offset += UNITS_PER_WORD;
3803 gr_saved++;
3806 /* If we wanted to merge the SP adjustment with a GR save, but we never
3807 did any GR saves, then just emit the adjustment here. */
3808 if (merge_sp_adjust_with_store)
3809 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3810 actual_fsize, 1);
3813 /* The hppa calling conventions say that %r19, the pic offset
3814 register, is saved at sp - 32 (in this function's frame)
3815 when generating PIC code. FIXME: What is the correct thing
3816 to do for functions which make no calls and allocate no
3817 frame? Do we need to allocate a frame, or can we just omit
3818 the save? For now we'll just omit the save.
3820 We don't want a note on this insn as the frame marker can
3821 move if there is a dynamic stack allocation. */
3822 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3824 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3826 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3830 /* Align pointer properly (doubleword boundary). */
3831 offset = (offset + 7) & ~7;
3833 /* Floating point register store. */
3834 if (save_fregs)
3836 rtx base;
3838 /* First get the frame or stack pointer to the start of the FP register
3839 save area. */
3840 if (frame_pointer_needed)
3842 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3843 base = frame_pointer_rtx;
3845 else
3847 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3848 base = stack_pointer_rtx;
3851 /* Now actually save the FP registers. */
3852 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3854 if (regs_ever_live[i]
3855 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3857 rtx addr, insn, reg;
3858 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3859 reg = gen_rtx_REG (DFmode, i);
3860 insn = emit_move_insn (addr, reg);
3861 if (DO_FRAME_NOTES)
3863 RTX_FRAME_RELATED_P (insn) = 1;
3864 if (TARGET_64BIT)
3866 rtx mem = gen_rtx_MEM (DFmode,
3867 plus_constant (base, offset));
3868 REG_NOTES (insn)
3869 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3870 gen_rtx_SET (VOIDmode, mem, reg),
3871 REG_NOTES (insn));
3873 else
3875 rtx meml = gen_rtx_MEM (SFmode,
3876 plus_constant (base, offset));
3877 rtx memr = gen_rtx_MEM (SFmode,
3878 plus_constant (base, offset + 4));
3879 rtx regl = gen_rtx_REG (SFmode, i);
3880 rtx regr = gen_rtx_REG (SFmode, i + 1);
3881 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3882 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3883 rtvec vec;
3885 RTX_FRAME_RELATED_P (setl) = 1;
3886 RTX_FRAME_RELATED_P (setr) = 1;
3887 vec = gen_rtvec (2, setl, setr);
3888 REG_NOTES (insn)
3889 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3890 gen_rtx_SEQUENCE (VOIDmode, vec),
3891 REG_NOTES (insn));
3894 offset += GET_MODE_SIZE (DFmode);
3895 fr_saved++;
3901 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3902 Handle case where DISP > 8k by using the add_high_const patterns. */
3904 static void
3905 load_reg (int reg, int disp, int base)
3907 rtx src, dest, basereg;
3909 dest = gen_rtx_REG (word_mode, reg);
3910 basereg = gen_rtx_REG (Pmode, base);
3911 if (VAL_14_BITS_P (disp))
3913 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3914 emit_move_insn (dest, src);
3916 else
3918 rtx delta = GEN_INT (disp);
3919 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3920 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3921 emit_move_insn (tmpreg, high);
3922 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3923 emit_move_insn (dest, src);
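/* A minimal sketch of the expansion (assumed PA assembly, shown for
   illustration only): with a displacement of 0x11000, which does not
   fit in 14 bits, the code above emits roughly

	addil L'0x11000,%base	; %r1 = %base + left portion of disp
	ldw R'0x11000(%r1),%reg	; load using the right portion

   while a 14-bit displacement needs only the single ldw.  */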
3927 /* Update the total code bytes output to the text section. */
3929 static void
3930 update_total_code_bytes (int nbytes)
3932 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3933 && !IN_NAMED_SECTION_P (cfun->decl))
3935 if (INSN_ADDRESSES_SET_P ())
3937 unsigned long old_total = total_code_bytes;
3939 total_code_bytes += nbytes;
3941 /* Be prepared to handle overflows. */
3942 if (old_total > total_code_bytes)
3943 total_code_bytes = -1;
3945 else
3946 total_code_bytes = -1;
3950 /* This function generates the assembly code for function exit.
3951 Args are as for output_function_prologue ().
3953 The function epilogue should not depend on the current stack
3954 pointer! It should use the frame pointer only. This is mandatory
3955 because of alloca; we also take advantage of it to omit stack
3956 adjustments before returning. */
3958 static void
3959 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3961 rtx insn = get_last_insn ();
3963 last_address = 0;
3965 /* hppa_expand_epilogue does the dirty work now. We just need
3966 to output the assembler directives which denote the end
3967 of a function.
3969 To make debuggers happy, emit a nop if the epilogue was completely
3970 eliminated due to a volatile call as the last insn in the
3971 current function. That way the return address (in %r2) will
3972 always point to a valid instruction in the current function. */
3974 /* Get the last real insn. */
3975 if (GET_CODE (insn) == NOTE)
3976 insn = prev_real_insn (insn);
3978 /* If it is a sequence, then look inside. */
3979 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
3980 insn = XVECEXP (PATTERN (insn), 0, 0);
3982 /* If insn is a CALL_INSN, then it must be a call to a volatile
3983 function (otherwise there would be epilogue insns). */
3984 if (insn && GET_CODE (insn) == CALL_INSN)
3986 fputs ("\tnop\n", file);
3987 last_address += 4;
3990 fputs ("\t.EXIT\n\t.PROCEND\n", file);
3992 if (INSN_ADDRESSES_SET_P ())
3994 insn = get_last_nonnote_insn ();
3995 last_address += INSN_ADDRESSES (INSN_UID (insn));
3996 if (INSN_P (insn))
3997 last_address += insn_default_length (insn);
3998 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
3999 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4002 /* Finally, update the total number of code bytes output so far. */
4003 update_total_code_bytes (last_address);
4006 void
4007 hppa_expand_epilogue (void)
4009 rtx tmpreg;
4010 int offset, i;
4011 int merge_sp_adjust_with_load = 0;
4012 int ret_off = 0;
4014 /* We will use this often. */
4015 tmpreg = gen_rtx_REG (word_mode, 1);
4017 /* Try to restore RP early to avoid load/use interlocks when
4018 RP gets used in the return (bv) instruction. This appears to still
4019 be necessary even when we schedule the prologue and epilogue. */
4020 if (regs_ever_live [2] || current_function_calls_eh_return)
4022 ret_off = TARGET_64BIT ? -16 : -20;
4023 if (frame_pointer_needed)
4025 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4026 ret_off = 0;
4028 else
4030 /* No frame pointer, and stack is smaller than 8k. */
4031 if (VAL_14_BITS_P (ret_off - actual_fsize))
4033 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4034 ret_off = 0;
4039 /* General register restores. */
4040 if (frame_pointer_needed)
4042 offset = local_fsize;
4044 /* If the current function calls __builtin_eh_return, then we need
4045 to restore the saved EH data registers. */
4046 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4048 unsigned int i, regno;
4050 for (i = 0; ; ++i)
4052 regno = EH_RETURN_DATA_REGNO (i);
4053 if (regno == INVALID_REGNUM)
4054 break;
4056 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4057 offset += UNITS_PER_WORD;
4061 for (i = 18; i >= 4; i--)
4062 if (regs_ever_live[i] && ! call_used_regs[i])
4064 load_reg (i, offset, FRAME_POINTER_REGNUM);
4065 offset += UNITS_PER_WORD;
4068 else
4070 offset = local_fsize - actual_fsize;
4072 /* If the current function calls __builtin_eh_return, then we need
4073 to restore the saved EH data registers. */
4074 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4076 unsigned int i, regno;
4078 for (i = 0; ; ++i)
4080 regno = EH_RETURN_DATA_REGNO (i);
4081 if (regno == INVALID_REGNUM)
4082 break;
4084 /* Only for the first load.
4085 merge_sp_adjust_with_load holds the register load
4086 with which we will merge the sp adjustment. */
4087 if (merge_sp_adjust_with_load == 0
4088 && local_fsize == 0
4089 && VAL_14_BITS_P (-actual_fsize))
4090 merge_sp_adjust_with_load = regno;
4091 else
4092 load_reg (regno, offset, STACK_POINTER_REGNUM);
4093 offset += UNITS_PER_WORD;
4097 for (i = 18; i >= 3; i--)
4099 if (regs_ever_live[i] && ! call_used_regs[i])
4101 /* Only for the first load.
4102 merge_sp_adjust_with_load holds the register load
4103 with which we will merge the sp adjustment. */
4104 if (merge_sp_adjust_with_load == 0
4105 && local_fsize == 0
4106 && VAL_14_BITS_P (-actual_fsize))
4107 merge_sp_adjust_with_load = i;
4108 else
4109 load_reg (i, offset, STACK_POINTER_REGNUM);
4110 offset += UNITS_PER_WORD;
4115 /* Align pointer properly (doubleword boundary). */
4116 offset = (offset + 7) & ~7;
4118 /* FP register restores. */
4119 if (save_fregs)
4121 /* Adjust the register to index off of. */
4122 if (frame_pointer_needed)
4123 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4124 else
4125 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4127 /* Actually do the restores now. */
4128 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4129 if (regs_ever_live[i]
4130 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4132 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4133 rtx dest = gen_rtx_REG (DFmode, i);
4134 emit_move_insn (dest, src);
4138 /* Emit a blockage insn here to keep these insns from being moved to
4139 an earlier spot in the epilogue, or into the main instruction stream.
4141 This is necessary as we must not cut the stack back before all the
4142 restores are finished. */
4143 emit_insn (gen_blockage ());
4145 /* Reset stack pointer (and possibly frame pointer). The stack
4146 pointer is initially set to fp + 64 to avoid a race condition. */
4147 if (frame_pointer_needed)
4149 rtx delta = GEN_INT (-64);
4151 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4152 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4154 /* If we were deferring a callee register restore, do it now. */
4155 else if (merge_sp_adjust_with_load)
4157 rtx delta = GEN_INT (-actual_fsize);
4158 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4160 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4162 else if (actual_fsize != 0)
4163 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4164 - actual_fsize, 0);
4166 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4167 frame greater than 8k), do so now. */
4168 if (ret_off != 0)
4169 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4171 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4173 rtx sa = EH_RETURN_STACKADJ_RTX;
4175 emit_insn (gen_blockage ());
4176 emit_insn (TARGET_64BIT
4177 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4178 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
rtx
hppa_pic_save_rtx (void)
4185 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4188 void
4189 hppa_profile_hook (int label_no)
4191 /* We use SImode for the address of the function in both 32 and
4192 64-bit code to avoid having to provide DImode versions of the
4193 lcla2 and load_offset_label_address insn patterns. */
4194 rtx reg = gen_reg_rtx (SImode);
4195 rtx label_rtx = gen_label_rtx ();
4196 rtx begin_label_rtx, call_insn;
4197 char begin_label_name[16];
4199 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4200 label_no);
4201 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4203 if (TARGET_64BIT)
4204 emit_move_insn (arg_pointer_rtx,
4205 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4206 GEN_INT (64)));
4208 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
/* The address of the function is loaded into %r25 with an instruction-
4211 relative sequence that avoids the use of relocations. The sequence
4212 is split so that the load_offset_label_address instruction can
4213 occupy the delay slot of the call to _mcount. */
4214 if (TARGET_PA_20)
4215 emit_insn (gen_lcla2 (reg, label_rtx));
4216 else
4217 emit_insn (gen_lcla1 (reg, label_rtx));
4219 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4220 reg, begin_label_rtx, label_rtx));
4222 #ifndef NO_PROFILE_COUNTERS
4224 rtx count_label_rtx, addr, r24;
4225 char count_label_name[16];
4227 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4228 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4230 addr = force_reg (Pmode, count_label_rtx);
4231 r24 = gen_rtx_REG (Pmode, 24);
4232 emit_move_insn (r24, addr);
4234 call_insn =
4235 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4236 gen_rtx_SYMBOL_REF (Pmode,
4237 "_mcount")),
4238 GEN_INT (TARGET_64BIT ? 24 : 12)));
4240 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4242 #else
4244 call_insn =
4245 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4246 gen_rtx_SYMBOL_REF (Pmode,
4247 "_mcount")),
4248 GEN_INT (TARGET_64BIT ? 16 : 8)));
4250 #endif
4252 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4253 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4255 /* Indicate the _mcount call cannot throw, nor will it execute a
4256 non-local goto. */
4257 REG_NOTES (call_insn)
4258 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4261 /* Fetch the return address for the frame COUNT steps up from
4262 the current frame, after the prologue. FRAMEADDR is the
4263 frame pointer of the COUNT frame.
4265 We want to ignore any export stub remnants here. To handle this,
4266 we examine the code at the return address, and if it is an export
4267 stub, we return a memory rtx for the stub return address stored
4268 at frame-24.
4270 The value returned is used in two different ways:
4272 1. To find a function's caller.
4274 2. To change the return address for a function.
4276 This function handles most instances of case 1; however, it will
4277 fail if there are two levels of stubs to execute on the return
4278 path. The only way I believe that can happen is if the return value
4279 needs a parameter relocation, which never happens for C code.
4281 This function handles most instances of case 2; however, it will
4282 fail if we did not originally have stub code on the return path
4283 but will need stub code on the new return path. This can happen if
4284 the caller & callee are both in the main program, but the new
4285 return location is in a shared library. */
rtx
return_addr_rtx (int count, rtx frameaddr)
4290 rtx label;
4291 rtx rp;
4292 rtx saved_rp;
4293 rtx ins;
4295 if (count != 0)
4296 return NULL_RTX;
4298 rp = get_hard_reg_initial_val (Pmode, 2);
4300 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4301 return rp;
4303 saved_rp = gen_reg_rtx (Pmode);
4304 emit_move_insn (saved_rp, rp);
4306 /* Get pointer to the instruction stream. We have to mask out the
4307 privilege level from the two low order bits of the return address
4308 pointer here so that ins will point to the start of the first
4309 instruction that would have been executed if we returned. */
4310 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4311 label = gen_label_rtx ();
4313 /* Check the instruction stream at the normal return address for the
4314 export stub:
4316 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4317 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4318 0x00011820 | stub+16: mtsp r1,sr0
4319 0xe0400002 | stub+20: be,n 0(sr0,rp)
If it is an export stub, then our return address is really in
4322 -24[frameaddr]. */
4324 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4325 NULL_RTX, SImode, 1);
4326 emit_jump_insn (gen_bne (label));
4328 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4329 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4330 emit_jump_insn (gen_bne (label));
4332 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4333 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4334 emit_jump_insn (gen_bne (label));
4336 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4337 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4339 /* If there is no export stub then just use the value saved from
4340 the return pointer register. */
4342 emit_jump_insn (gen_bne (label));
4344 /* Here we know that our return address points to an export
4345 stub. We don't want to return the address of the export stub,
4346 but rather the return address of the export stub. That return
4347 address is stored at -24[frameaddr]. */
4349 emit_move_insn (saved_rp,
4350 gen_rtx_MEM (Pmode,
4351 memory_address (Pmode,
4352 plus_constant (frameaddr,
4353 -24))));
4355 emit_label (label);
4356 return saved_rp;
4359 /* This is only valid once reload has completed because it depends on
4360 knowing exactly how much (if any) frame there is and...
4362 It's only valid if there is no frame marker to de-allocate and...
4364 It's only valid if %r2 hasn't been saved into the caller's frame
4365 (we're not profiling and %r2 isn't live anywhere). */
int
hppa_can_use_return_insn_p (void)
4369 return (reload_completed
4370 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4371 && ! regs_ever_live[2]
4372 && ! frame_pointer_needed);
4375 void
4376 emit_bcond_fp (enum rtx_code code, rtx operand0)
4378 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4379 gen_rtx_IF_THEN_ELSE (VOIDmode,
4380 gen_rtx_fmt_ee (code,
4381 VOIDmode,
4382 gen_rtx_REG (CCFPmode, 0),
4383 const0_rtx),
4384 gen_rtx_LABEL_REF (VOIDmode, operand0),
4385 pc_rtx)));
rtx
gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4392 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4393 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4396 /* Adjust the cost of a scheduling dependency. Return the new cost of
4397 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4399 static int
4400 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4402 enum attr_type attr_type;
4404 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4405 true dependencies as they are described with bypasses now. */
4406 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4407 return cost;
4409 if (! recog_memoized (insn))
4410 return 0;
4412 attr_type = get_attr_type (insn);
4414 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
4416 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4417 cycles later. */
4419 if (attr_type == TYPE_FPLOAD)
4421 rtx pat = PATTERN (insn);
4422 rtx dep_pat = PATTERN (dep_insn);
4423 if (GET_CODE (pat) == PARALLEL)
4425 /* This happens for the fldXs,mb patterns. */
4426 pat = XVECEXP (pat, 0, 0);
4428 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4429 /* If this happens, we have to extend this to schedule
4430 optimally. Return 0 for now. */
4431 return 0;
4433 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4435 if (! recog_memoized (dep_insn))
4436 return 0;
4437 switch (get_attr_type (dep_insn))
4439 case TYPE_FPALU:
4440 case TYPE_FPMULSGL:
4441 case TYPE_FPMULDBL:
4442 case TYPE_FPDIVSGL:
4443 case TYPE_FPDIVDBL:
4444 case TYPE_FPSQRTSGL:
4445 case TYPE_FPSQRTDBL:
4446 /* A fpload can't be issued until one cycle before a
4447 preceding arithmetic operation has finished if
4448 the target of the fpload is any of the sources
4449 (or destination) of the arithmetic operation. */
4450 return insn_default_latency (dep_insn) - 1;
4452 default:
4453 return 0;
4457 else if (attr_type == TYPE_FPALU)
4459 rtx pat = PATTERN (insn);
4460 rtx dep_pat = PATTERN (dep_insn);
4461 if (GET_CODE (pat) == PARALLEL)
4463 /* This happens for the fldXs,mb patterns. */
4464 pat = XVECEXP (pat, 0, 0);
4466 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4467 /* If this happens, we have to extend this to schedule
4468 optimally. Return 0 for now. */
4469 return 0;
4471 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4473 if (! recog_memoized (dep_insn))
4474 return 0;
4475 switch (get_attr_type (dep_insn))
4477 case TYPE_FPDIVSGL:
4478 case TYPE_FPDIVDBL:
4479 case TYPE_FPSQRTSGL:
4480 case TYPE_FPSQRTDBL:
4481 /* An ALU flop can't be issued until two cycles before a
4482 preceding divide or sqrt operation has finished if
4483 the target of the ALU flop is any of the sources
4484 (or destination) of the divide or sqrt operation. */
4485 return insn_default_latency (dep_insn) - 2;
4487 default:
4488 return 0;
4493 /* For other anti dependencies, the cost is 0. */
4494 return 0;
4496 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4498 /* Output dependency; DEP_INSN writes a register that INSN writes some
4499 cycles later. */
4500 if (attr_type == TYPE_FPLOAD)
4502 rtx pat = PATTERN (insn);
4503 rtx dep_pat = PATTERN (dep_insn);
4504 if (GET_CODE (pat) == PARALLEL)
4506 /* This happens for the fldXs,mb patterns. */
4507 pat = XVECEXP (pat, 0, 0);
4509 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4510 /* If this happens, we have to extend this to schedule
4511 optimally. Return 0 for now. */
4512 return 0;
4514 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4516 if (! recog_memoized (dep_insn))
4517 return 0;
4518 switch (get_attr_type (dep_insn))
4520 case TYPE_FPALU:
4521 case TYPE_FPMULSGL:
4522 case TYPE_FPMULDBL:
4523 case TYPE_FPDIVSGL:
4524 case TYPE_FPDIVDBL:
4525 case TYPE_FPSQRTSGL:
4526 case TYPE_FPSQRTDBL:
4527 /* A fpload can't be issued until one cycle before a
4528 preceding arithmetic operation has finished if
4529 the target of the fpload is the destination of the
4530 arithmetic operation.
4532 Exception: For PA7100LC, PA7200 and PA7300, the cost
4533 is 3 cycles, unless they bundle together. We also
4534 pay the penalty if the second insn is a fpload. */
4535 return insn_default_latency (dep_insn) - 1;
4537 default:
4538 return 0;
4542 else if (attr_type == TYPE_FPALU)
4544 rtx pat = PATTERN (insn);
4545 rtx dep_pat = PATTERN (dep_insn);
4546 if (GET_CODE (pat) == PARALLEL)
4548 /* This happens for the fldXs,mb patterns. */
4549 pat = XVECEXP (pat, 0, 0);
4551 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4552 /* If this happens, we have to extend this to schedule
4553 optimally. Return 0 for now. */
4554 return 0;
4556 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4558 if (! recog_memoized (dep_insn))
4559 return 0;
4560 switch (get_attr_type (dep_insn))
4562 case TYPE_FPDIVSGL:
4563 case TYPE_FPDIVDBL:
4564 case TYPE_FPSQRTSGL:
4565 case TYPE_FPSQRTDBL:
4566 /* An ALU flop can't be issued until two cycles before a
4567 preceding divide or sqrt operation has finished if
4568 the target of the ALU flop is also the target of
4569 the divide or sqrt operation. */
4570 return insn_default_latency (dep_insn) - 2;
4572 default:
4573 return 0;
4578 /* For other output dependencies, the cost is 0. */
4579 return 0;
4581 else
4582 abort ();
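/* Illustrative example: on a pre-PA8000 schedule, if an fpmul writing
   %fr4 is followed by an fpload that also writes %fr4 (an output
   dependency), the cost computed above is the fpmul's default latency
   minus one, rather than the usual zero for output dependencies.  */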
4585 /* Adjust scheduling priorities. We use this to try and keep addil
4586 and the next use of %r1 close together. */
4587 static int
4588 pa_adjust_priority (rtx insn, int priority)
4590 rtx set = single_set (insn);
4591 rtx src, dest;
4592 if (set)
4594 src = SET_SRC (set);
4595 dest = SET_DEST (set);
4596 if (GET_CODE (src) == LO_SUM
4597 && symbolic_operand (XEXP (src, 1), VOIDmode)
4598 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4599 priority >>= 3;
4601 else if (GET_CODE (src) == MEM
4602 && GET_CODE (XEXP (src, 0)) == LO_SUM
4603 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4604 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4605 priority >>= 1;
4607 else if (GET_CODE (dest) == MEM
4608 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4609 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4610 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4611 priority >>= 3;
4613 return priority;
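/* Illustrative example: a load such as

     (set (reg) (mem (lo_sum (reg %r1) (symbol_ref "x"))))

   has its priority divided by 2, the intent being to schedule it close
   to the addil that computed %r1.  */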
4616 /* The 700 can only issue a single insn at a time.
4617 The 7XXX processors can issue two insns at a time.
4618 The 8000 can issue 4 insns at a time. */
4619 static int
4620 pa_issue_rate (void)
4622 switch (pa_cpu)
4624 case PROCESSOR_700: return 1;
4625 case PROCESSOR_7100: return 2;
4626 case PROCESSOR_7100LC: return 2;
4627 case PROCESSOR_7200: return 2;
4628 case PROCESSOR_7300: return 2;
4629 case PROCESSOR_8000: return 4;
4631 default:
4632 abort ();
4638 /* Return any length adjustment needed by INSN which already has its length
4639 computed as LENGTH. Return zero if no adjustment is necessary.
4641 For the PA: function calls, millicode calls, and backwards short
4642 conditional branches with unfilled delay slots need an adjustment by +1
4643 (to account for the NOP which will be inserted into the instruction stream).
4645 Also compute the length of an inline block move here as it is too
4646 complicated to express as a length attribute in pa.md. */
int
pa_adjust_insn_length (rtx insn, int length)
4650 rtx pat = PATTERN (insn);
4652 /* Jumps inside switch tables which have unfilled delay slots need
4653 adjustment. */
4654 if (GET_CODE (insn) == JUMP_INSN
4655 && GET_CODE (pat) == PARALLEL
4656 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4657 return 4;
4658 /* Millicode insn with an unfilled delay slot. */
4659 else if (GET_CODE (insn) == INSN
4660 && GET_CODE (pat) != SEQUENCE
4661 && GET_CODE (pat) != USE
4662 && GET_CODE (pat) != CLOBBER
4663 && get_attr_type (insn) == TYPE_MILLI)
4664 return 4;
4665 /* Block move pattern. */
4666 else if (GET_CODE (insn) == INSN
4667 && GET_CODE (pat) == PARALLEL
4668 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4669 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4670 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4671 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4672 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4673 return compute_movstr_length (insn) - 4;
4674 /* Block clear pattern. */
4675 else if (GET_CODE (insn) == INSN
4676 && GET_CODE (pat) == PARALLEL
4677 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4678 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4679 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4680 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4681 return compute_clrstr_length (insn) - 4;
4682 /* Conditional branch with an unfilled delay slot. */
4683 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4685 /* Adjust a short backwards conditional with an unfilled delay slot. */
4686 if (GET_CODE (pat) == SET
4687 && length == 4
4688 && ! forward_branch_p (insn))
4689 return 4;
4690 else if (GET_CODE (pat) == PARALLEL
4691 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4692 && length == 4)
4693 return 4;
4694 /* Adjust dbra insn with short backwards conditional branch with
4695 unfilled delay slot -- only for case where counter is in a
general register.  */
4697 else if (GET_CODE (pat) == PARALLEL
4698 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4699 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4700 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4701 && length == 4
4702 && ! forward_branch_p (insn))
4703 return 4;
4704 else
4705 return 0;
4707 return 0;
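/* Note that the adjustments above are in bytes: each returned 4
   accounts for the one-instruction (4 byte) nop the assembler will
   insert in the unfilled delay slot.  */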
4710 /* Print operand X (an rtx) in assembler syntax to file FILE.
4711 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4712 For `%' followed by punctuation, CODE is the punctuation and X is null. */
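/* Hypothetical examples: in an output template, "%C3" would print "="
   for an EQ comparison in operand 3, "%N3" would print the negated
   "<>", and "%#" appends "\n\tnop" when the insn's delay slot is
   unfilled.  */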
4714 void
4715 print_operand (FILE *file, rtx x, int code)
4717 switch (code)
4719 case '#':
4720 /* Output a 'nop' if there's nothing for the delay slot. */
4721 if (dbr_sequence_length () == 0)
4722 fputs ("\n\tnop", file);
4723 return;
4724 case '*':
/* Output a nullification completer if there's nothing for the
   delay slot or nullification is requested.  */
4727 if (dbr_sequence_length () == 0 ||
4728 (final_sequence &&
4729 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4730 fputs (",n", file);
4731 return;
4732 case 'R':
4733 /* Print out the second register name of a register pair.
4734 I.e., R (6) => 7. */
4735 fputs (reg_names[REGNO (x) + 1], file);
4736 return;
4737 case 'r':
4738 /* A register or zero. */
4739 if (x == const0_rtx
4740 || (x == CONST0_RTX (DFmode))
4741 || (x == CONST0_RTX (SFmode)))
4743 fputs ("%r0", file);
4744 return;
4746 else
4747 break;
4748 case 'f':
4749 /* A register or zero (floating point). */
4750 if (x == const0_rtx
4751 || (x == CONST0_RTX (DFmode))
4752 || (x == CONST0_RTX (SFmode)))
4754 fputs ("%fr0", file);
4755 return;
4757 else
4758 break;
4759 case 'A':
4761 rtx xoperands[2];
4763 xoperands[0] = XEXP (XEXP (x, 0), 0);
4764 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4765 output_global_address (file, xoperands[1], 0);
4766 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4767 return;
4770 case 'C': /* Plain (C)ondition */
4771 case 'X':
4772 switch (GET_CODE (x))
4774 case EQ:
4775 fputs ("=", file); break;
4776 case NE:
4777 fputs ("<>", file); break;
4778 case GT:
4779 fputs (">", file); break;
4780 case GE:
4781 fputs (">=", file); break;
4782 case GEU:
4783 fputs (">>=", file); break;
4784 case GTU:
4785 fputs (">>", file); break;
4786 case LT:
4787 fputs ("<", file); break;
4788 case LE:
4789 fputs ("<=", file); break;
4790 case LEU:
4791 fputs ("<<=", file); break;
4792 case LTU:
4793 fputs ("<<", file); break;
4794 default:
4795 abort ();
4797 return;
4798 case 'N': /* Condition, (N)egated */
4799 switch (GET_CODE (x))
4801 case EQ:
4802 fputs ("<>", file); break;
4803 case NE:
4804 fputs ("=", file); break;
4805 case GT:
4806 fputs ("<=", file); break;
4807 case GE:
4808 fputs ("<", file); break;
4809 case GEU:
4810 fputs ("<<", file); break;
4811 case GTU:
4812 fputs ("<<=", file); break;
4813 case LT:
4814 fputs (">=", file); break;
4815 case LE:
4816 fputs (">", file); break;
4817 case LEU:
4818 fputs (">>", file); break;
4819 case LTU:
4820 fputs (">>=", file); break;
4821 default:
4822 abort ();
4824 return;
4825 /* For floating point comparisons. Note that the output
4826 predicates are the complement of the desired mode. */
4827 case 'Y':
4828 switch (GET_CODE (x))
4830 case EQ:
4831 fputs ("!=", file); break;
4832 case NE:
4833 fputs ("=", file); break;
4834 case GT:
4835 fputs ("!>", file); break;
4836 case GE:
4837 fputs ("!>=", file); break;
4838 case LT:
4839 fputs ("!<", file); break;
4840 case LE:
4841 fputs ("!<=", file); break;
4842 case LTGT:
4843 fputs ("!<>", file); break;
4844 case UNLE:
4845 fputs (">", file); break;
4846 case UNLT:
4847 fputs (">=", file); break;
4848 case UNGE:
4849 fputs ("<", file); break;
4850 case UNGT:
4851 fputs ("<=", file); break;
4852 case UNEQ:
4853 fputs ("<>", file); break;
4854 case UNORDERED:
4855 fputs ("<=>", file); break;
4856 case ORDERED:
4857 fputs ("!<=>", file); break;
4858 default:
4859 abort ();
4861 return;
4862 case 'S': /* Condition, operands are (S)wapped. */
4863 switch (GET_CODE (x))
4865 case EQ:
4866 fputs ("=", file); break;
4867 case NE:
4868 fputs ("<>", file); break;
4869 case GT:
4870 fputs ("<", file); break;
4871 case GE:
4872 fputs ("<=", file); break;
4873 case GEU:
4874 fputs ("<<=", file); break;
4875 case GTU:
4876 fputs ("<<", file); break;
4877 case LT:
4878 fputs (">", file); break;
4879 case LE:
4880 fputs (">=", file); break;
4881 case LEU:
4882 fputs (">>=", file); break;
4883 case LTU:
4884 fputs (">>", file); break;
4885 default:
4886 abort ();
4888 return;
case 'B': /* Condition, (B)oth swapped and negated.  */
4890 switch (GET_CODE (x))
4892 case EQ:
4893 fputs ("<>", file); break;
4894 case NE:
4895 fputs ("=", file); break;
4896 case GT:
4897 fputs (">=", file); break;
4898 case GE:
4899 fputs (">", file); break;
4900 case GEU:
4901 fputs (">>", file); break;
4902 case GTU:
4903 fputs (">>=", file); break;
4904 case LT:
4905 fputs ("<=", file); break;
4906 case LE:
4907 fputs ("<", file); break;
4908 case LEU:
4909 fputs ("<<", file); break;
4910 case LTU:
4911 fputs ("<<=", file); break;
4912 default:
4913 abort ();
4915 return;
4916 case 'k':
4917 if (GET_CODE (x) == CONST_INT)
4919 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4920 return;
4922 abort ();
4923 case 'Q':
4924 if (GET_CODE (x) == CONST_INT)
4926 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4927 return;
4929 abort ();
4930 case 'L':
4931 if (GET_CODE (x) == CONST_INT)
4933 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4934 return;
4936 abort ();
4937 case 'O':
4938 if (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0)
4940 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4941 return;
4943 abort ();
4944 case 'p':
4945 if (GET_CODE (x) == CONST_INT)
4947 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4948 return;
4950 abort ();
4951 case 'P':
4952 if (GET_CODE (x) == CONST_INT)
4954 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4955 return;
4957 abort ();
4958 case 'I':
4959 if (GET_CODE (x) == CONST_INT)
4960 fputs ("i", file);
4961 return;
4962 case 'M':
4963 case 'F':
4964 switch (GET_CODE (XEXP (x, 0)))
4966 case PRE_DEC:
4967 case PRE_INC:
4968 if (ASSEMBLER_DIALECT == 0)
4969 fputs ("s,mb", file);
4970 else
4971 fputs (",mb", file);
4972 break;
4973 case POST_DEC:
4974 case POST_INC:
4975 if (ASSEMBLER_DIALECT == 0)
4976 fputs ("s,ma", file);
4977 else
4978 fputs (",ma", file);
4979 break;
4980 case PLUS:
4981 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4982 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4984 if (ASSEMBLER_DIALECT == 0)
4985 fputs ("x", file);
4987 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4988 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4990 if (ASSEMBLER_DIALECT == 0)
4991 fputs ("x,s", file);
4992 else
4993 fputs (",s", file);
4995 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4996 fputs ("s", file);
4997 break;
4998 default:
4999 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5000 fputs ("s", file);
5001 break;
5003 return;
5004 case 'G':
5005 output_global_address (file, x, 0);
5006 return;
5007 case 'H':
5008 output_global_address (file, x, 1);
5009 return;
5010 case 0: /* Don't do anything special */
5011 break;
5012 case 'Z':
5014 unsigned op[3];
5015 compute_zdepwi_operands (INTVAL (x), op);
5016 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5017 return;
5019 case 'z':
5021 unsigned op[3];
5022 compute_zdepdi_operands (INTVAL (x), op);
5023 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5024 return;
5026 case 'c':
5027 /* We can get here from a .vtable_inherit due to our
5028 CONSTANT_ADDRESS_P rejecting perfectly good constant
5029 addresses. */
5030 break;
5031 default:
5032 abort ();
5034 if (GET_CODE (x) == REG)
5036 fputs (reg_names [REGNO (x)], file);
5037 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5039 fputs ("R", file);
5040 return;
5042 if (FP_REG_P (x)
5043 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5044 && (REGNO (x) & 1) == 0)
5045 fputs ("L", file);
5047 else if (GET_CODE (x) == MEM)
5049 int size = GET_MODE_SIZE (GET_MODE (x));
5050 rtx base = NULL_RTX;
5051 switch (GET_CODE (XEXP (x, 0)))
5053 case PRE_DEC:
5054 case POST_DEC:
5055 base = XEXP (XEXP (x, 0), 0);
5056 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5057 break;
5058 case PRE_INC:
5059 case POST_INC:
5060 base = XEXP (XEXP (x, 0), 0);
5061 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5062 break;
5063 case PLUS:
5064 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5065 fprintf (file, "%s(%s)",
5066 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5067 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5068 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5069 fprintf (file, "%s(%s)",
5070 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5071 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5072 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5073 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5075 /* Because the REG_POINTER flag can get lost during reload,
5076 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5077 index and base registers in the combined move patterns. */
5078 rtx base = XEXP (XEXP (x, 0), 1);
5079 rtx index = XEXP (XEXP (x, 0), 0);
5081 fprintf (file, "%s(%s)",
5082 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5084 else
5085 output_address (XEXP (x, 0));
5086 break;
5087 default:
5088 output_address (XEXP (x, 0));
5089 break;
5092 else
5093 output_addr_const (file, x);
/* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */
5098 void
5099 output_global_address (FILE *file, rtx x, int round_constant)
5102 /* Imagine (high (const (plus ...))). */
5103 if (GET_CODE (x) == HIGH)
5104 x = XEXP (x, 0);
5106 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5107 assemble_name (file, XSTR (x, 0));
5108 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5110 assemble_name (file, XSTR (x, 0));
5111 fputs ("-$global$", file);
5113 else if (GET_CODE (x) == CONST)
5115 const char *sep = "";
5116 int offset = 0; /* assembler wants -$global$ at end */
5117 rtx base = NULL_RTX;
5119 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
5121 base = XEXP (XEXP (x, 0), 0);
5122 output_addr_const (file, base);
5124 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == CONST_INT)
5125 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5126 else abort ();
5128 if (GET_CODE (XEXP (XEXP (x, 0), 1)) == SYMBOL_REF)
5130 base = XEXP (XEXP (x, 0), 1);
5131 output_addr_const (file, base);
5133 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
5134 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5135 else abort ();
5137 /* How bogus. The compiler is apparently responsible for
5138 rounding the constant if it uses an LR field selector.
5140 The linker and/or assembler seem a better place since
5141 they have to do this kind of thing already.
5143 If we fail to do this, HP's optimizing linker may eliminate
5144 an addil, but not update the ldw/stw/ldo instruction that
5145 uses the result of the addil. */
5146 if (round_constant)
5147 offset = ((offset + 0x1000) & ~0x1fff);
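/* Worked example: an offset of 0x2345 becomes
   (0x2345 + 0x1000) & ~0x1fff = 0x2000, i.e. offsets are rounded to
   the nearest multiple of 0x2000 (8k).  */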
5149 if (GET_CODE (XEXP (x, 0)) == PLUS)
5151 if (offset < 0)
5153 offset = -offset;
5154 sep = "-";
5156 else
5157 sep = "+";
5159 else if (GET_CODE (XEXP (x, 0)) == MINUS
5160 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
5161 sep = "-";
5162 else abort ();
5164 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5165 fputs ("-$global$", file);
5166 if (offset)
5167 fprintf (file, "%s%d", sep, offset);
5169 else
5170 output_addr_const (file, x);
5173 /* Output boilerplate text to appear at the beginning of the file.
5174 There are several possible versions. */
5175 #define aputs(x) fputs(x, asm_out_file)
5176 static inline void
5177 pa_file_start_level (void)
5179 if (TARGET_64BIT)
5180 aputs ("\t.LEVEL 2.0w\n");
5181 else if (TARGET_PA_20)
5182 aputs ("\t.LEVEL 2.0\n");
5183 else if (TARGET_PA_11)
5184 aputs ("\t.LEVEL 1.1\n");
5185 else
5186 aputs ("\t.LEVEL 1.0\n");
5189 static inline void
5190 pa_file_start_space (int sortspace)
5192 aputs ("\t.SPACE $PRIVATE$");
5193 if (sortspace)
5194 aputs (",SORT=16");
5195 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5196 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5197 "\n\t.SPACE $TEXT$");
5198 if (sortspace)
5199 aputs (",SORT=8");
5200 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5201 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5204 static inline void
5205 pa_file_start_file (int want_version)
5207 if (write_symbols != NO_DEBUG)
5209 output_file_directive (asm_out_file, main_input_filename);
5210 if (want_version)
5211 aputs ("\t.version\t\"01.01\"\n");
5215 static inline void
5216 pa_file_start_mcount (const char *aswhat)
5218 if (profile_flag)
5219 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5222 static void
5223 pa_elf_file_start (void)
5225 pa_file_start_level ();
5226 pa_file_start_mcount ("ENTRY");
5227 pa_file_start_file (0);
5230 static void
5231 pa_som_file_start (void)
5233 pa_file_start_level ();
5234 pa_file_start_space (0);
5235 aputs ("\t.IMPORT $global$,DATA\n"
5236 "\t.IMPORT $$dyncall,MILLICODE\n");
5237 pa_file_start_mcount ("CODE");
5238 pa_file_start_file (0);
5241 static void
5242 pa_linux_file_start (void)
5244 pa_file_start_file (1);
5245 pa_file_start_level ();
5246 pa_file_start_mcount ("CODE");
5249 static void
5250 pa_hpux64_gas_file_start (void)
5252 pa_file_start_level ();
5253 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5254 if (profile_flag)
5255 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5256 #endif
5257 pa_file_start_file (1);
5260 static void
5261 pa_hpux64_hpas_file_start (void)
5263 pa_file_start_level ();
5264 pa_file_start_space (1);
5265 pa_file_start_mcount ("CODE");
5266 pa_file_start_file (0);
5268 #undef aputs
5270 static struct deferred_plabel *
5271 get_plabel (const char *fname)
5273 size_t i;
5275 /* See if we have already put this function on the list of deferred
plabels.  This list is generally small, so a linear search is not
too ugly.  If it proves too slow, replace it with something faster.  */
5278 for (i = 0; i < n_deferred_plabels; i++)
5279 if (strcmp (fname, deferred_plabels[i].name) == 0)
5280 break;
5282 /* If the deferred plabel list is empty, or this entry was not found
5283 on the list, create a new entry on the list. */
5284 if (deferred_plabels == NULL || i == n_deferred_plabels)
5286 const char *real_name;
5288 if (deferred_plabels == 0)
5289 deferred_plabels = (struct deferred_plabel *)
5290 ggc_alloc (sizeof (struct deferred_plabel));
5291 else
5292 deferred_plabels = (struct deferred_plabel *)
5293 ggc_realloc (deferred_plabels,
5294 ((n_deferred_plabels + 1)
5295 * sizeof (struct deferred_plabel)));
5297 i = n_deferred_plabels++;
5298 deferred_plabels[i].internal_label = gen_label_rtx ();
5299 deferred_plabels[i].name = ggc_strdup (fname);
/* Gross.  We have just implicitly taken the address of this function;
   mark it as such.  */
5303 real_name = (*targetm.strip_name_encoding) (fname);
5304 TREE_SYMBOL_REFERENCED (get_identifier (real_name)) = 1;
5307 return &deferred_plabels[i];
5310 static void
5311 output_deferred_plabels (void)
5313 size_t i;
5314 /* If we have deferred plabels, then we need to switch into the data
section and align it to a word boundary (4 bytes, or 8 for the 64-bit
runtime) before we output the deferred plabels.  */
5317 if (n_deferred_plabels)
5319 data_section ();
5320 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5323 /* Now output the deferred plabels. */
5324 for (i = 0; i < n_deferred_plabels; i++)
5326 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5327 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5328 assemble_integer (gen_rtx_SYMBOL_REF (Pmode, deferred_plabels[i].name),
5329 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
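/* Conceptually, each deferred plabel becomes a local label followed by
   a pointer-sized word holding the function's address, roughly (with a
   hypothetical label number and GAS-style directive):

	L$1234:
		.word	foo
 */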
5333 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5334 /* Initialize optabs to point to HPUX long double emulation routines. */
5335 static void
5336 pa_hpux_init_libfuncs (void)
5338 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5339 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5340 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5341 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5342 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5343 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5344 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5345 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5346 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5348 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5349 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5350 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5351 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5352 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5353 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5355 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5356 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5357 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5358 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5360 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5361 ? "__U_Qfcnvfxt_quad_to_sgl"
5362 : "_U_Qfcnvfxt_quad_to_sgl");
5363 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5364 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5365 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5367 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5368 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5370 #endif
5372 /* HP's millicode routines mean something special to the assembler.
5373 Keep track of which ones we have used. */
5375 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5376 static void import_milli (enum millicodes);
5377 static char imported[(int) end1000];
5378 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5379 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5380 #define MILLI_START 10
5382 static void
5383 import_milli (enum millicodes code)
5385 char str[sizeof (import_string)];
5387 if (!imported[(int) code])
5389 imported[(int) code] = 1;
5390 strcpy (str, import_string);
5391 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5392 output_asm_insn (str, 0);
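/* For example, the first use of $$mulI calls import_milli (mulI), which
   copies "mulI" over the dots and emits ".IMPORT $$mulI,MILLICODE";
   later uses are silent since imported[mulI] is already set.  */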
5396 /* The register constraints have put the operands and return value in
5397 the proper registers. */
5399 const char *
5400 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5402 import_milli (mulI);
5403 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5406 /* Emit the rtl for doing a division by a constant. */
5408 /* Do magic division millicodes exist for this value? */
5409 static const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,
5410 1, 1};
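/* For instance, magic_milli[10] is 1, so divisions by 10 can call the
   dedicated $$divI_10/$$divU_10 millicode routines; the powers of two
   (2, 4, 8) are 0, presumably because those divisions are synthesized
   inline with shifts.  */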
5412 /* We'll use an array to keep track of the magic millicodes and
5413 whether or not we've used them already. [n][0] is signed, [n][1] is
5414 unsigned. */
5416 static int div_milli[16][2];
int
div_operand (rtx op, enum machine_mode mode)
5421 return (mode == SImode
5422 && ((GET_CODE (op) == REG && REGNO (op) == 25)
5423 || (GET_CODE (op) == CONST_INT && INTVAL (op) > 0
5424 && INTVAL (op) < 16 && magic_milli[INTVAL (op)])));
int
emit_hpdiv_const (rtx *operands, int unsignedp)
5430 if (GET_CODE (operands[2]) == CONST_INT
5431 && INTVAL (operands[2]) > 0
5432 && INTVAL (operands[2]) < 16
5433 && magic_milli[INTVAL (operands[2])])
5435 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5437 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5438 emit
5439 (gen_rtx
5440 (PARALLEL, VOIDmode,
5441 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5442 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5443 SImode,
5444 gen_rtx_REG (SImode, 26),
5445 operands[2])),
5446 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5447 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5448 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5449 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5450 gen_rtx_CLOBBER (VOIDmode, ret))));
5451 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5452 return 1;
5454 return 0;
5457 const char *
5458 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5460 int divisor;
5462 /* If the divisor is a constant, try to use one of the special
opcodes.  */
5464 if (GET_CODE (operands[0]) == CONST_INT)
5466 static char buf[100];
5467 divisor = INTVAL (operands[0]);
5468 if (!div_milli[divisor][unsignedp])
5470 div_milli[divisor][unsignedp] = 1;
5471 if (unsignedp)
5472 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5473 else
5474 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5476 if (unsignedp)
5478 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5479 INTVAL (operands[0]));
5480 return output_millicode_call (insn,
5481 gen_rtx_SYMBOL_REF (SImode, buf));
5483 else
5485 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5486 INTVAL (operands[0]));
5487 return output_millicode_call (insn,
5488 gen_rtx_SYMBOL_REF (SImode, buf));
5491 /* Divisor isn't a special constant. */
5492 else
5494 if (unsignedp)
5496 import_milli (divU);
5497 return output_millicode_call (insn,
5498 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5500 else
5502 import_milli (divI);
5503 return output_millicode_call (insn,
5504 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5509 /* Output a $$rem millicode to do mod. */
5511 const char *
5512 output_mod_insn (int unsignedp, rtx insn)
5514 if (unsignedp)
5516 import_milli (remU);
5517 return output_millicode_call (insn,
5518 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5520 else
5522 import_milli (remI);
5523 return output_millicode_call (insn,
5524 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5528 void
5529 output_arg_descriptor (rtx call_insn)
5531 const char *arg_regs[4];
5532 enum machine_mode arg_mode;
5533 rtx link;
5534 int i, output_flag = 0;
5535 int regno;
5537 /* We neither need nor want argument location descriptors for the
5538 64bit runtime environment or the ELF32 environment. */
5539 if (TARGET_64BIT || TARGET_ELF32)
5540 return;
5542 for (i = 0; i < 4; i++)
5543 arg_regs[i] = 0;
5545 /* Specify explicitly that no argument relocations should take place
5546 if using the portable runtime calling conventions. */
5547 if (TARGET_PORTABLE_RUNTIME)
5549 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5550 asm_out_file);
5551 return;
5554 if (GET_CODE (call_insn) != CALL_INSN)
5555 abort ();
5556 for (link = CALL_INSN_FUNCTION_USAGE (call_insn); link; link = XEXP (link, 1))
5558 rtx use = XEXP (link, 0);
5560 if (! (GET_CODE (use) == USE
5561 && GET_CODE (XEXP (use, 0)) == REG
5562 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5563 continue;
5565 arg_mode = GET_MODE (XEXP (use, 0));
5566 regno = REGNO (XEXP (use, 0));
5567 if (regno >= 23 && regno <= 26)
5569 arg_regs[26 - regno] = "GR";
5570 if (arg_mode == DImode)
5571 arg_regs[25 - regno] = "GR";
5573 else if (regno >= 32 && regno <= 39)
5575 if (arg_mode == SFmode)
5576 arg_regs[(regno - 32) / 2] = "FR";
5577 else
5579 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5580 arg_regs[(regno - 34) / 2] = "FR";
5581 arg_regs[(regno - 34) / 2 + 1] = "FU";
5582 #else
5583 arg_regs[(regno - 34) / 2] = "FU";
5584 arg_regs[(regno - 34) / 2 + 1] = "FR";
5585 #endif
5589 fputs ("\t.CALL ", asm_out_file);
5590 for (i = 0; i < 4; i++)
5592 if (arg_regs[i])
5594 if (output_flag++)
5595 fputc (',', asm_out_file);
5596 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5599 fputc ('\n', asm_out_file);
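/* Illustrative example: a call passing a single int in %r26 emits

	.CALL ARGW0=GR

   A DImode argument marks two consecutive words as GR, and a double
   marks a word pair as FR and FU (in an order that depends on
   HP_FP_ARG_DESCRIPTOR_REVERSED).  */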
5602 /* Return the class of any secondary reload register that is needed to
5603 move IN into a register in class CLASS using mode MODE.
Profiling has shown that this routine and its descendants account for
a significant amount of compile time (~7%), so it has been
5607 optimized to reduce redundant computations and eliminate useless
5608 function calls.
5610 It might be worthwhile to try and make this a leaf function too. */
5612 enum reg_class
5613 secondary_reload_class (enum reg_class class, enum machine_mode mode, rtx in)
5615 int regno, is_symbolic;
5617 /* Trying to load a constant into a FP register during PIC code
5618 generation will require %r1 as a scratch register. */
5619 if (flag_pic
5620 && GET_MODE_CLASS (mode) == MODE_INT
5621 && FP_REG_CLASS_P (class)
5622 && (GET_CODE (in) == CONST_INT || GET_CODE (in) == CONST_DOUBLE))
5623 return R1_REGS;
5625 /* Profiling showed the PA port spends about 1.3% of its compilation
5626 time in true_regnum from calls inside secondary_reload_class. */
5628 if (GET_CODE (in) == REG)
5630 regno = REGNO (in);
5631 if (regno >= FIRST_PSEUDO_REGISTER)
5632 regno = true_regnum (in);
5634 else if (GET_CODE (in) == SUBREG)
5635 regno = true_regnum (in);
5636 else
5637 regno = -1;
/* If we have something like (mem (mem (...))), we can safely assume the
5640 inner MEM will end up in a general register after reloading, so there's
5641 no need for a secondary reload. */
5642 if (GET_CODE (in) == MEM
5643 && GET_CODE (XEXP (in, 0)) == MEM)
5644 return NO_REGS;
5646 /* Handle out of range displacement for integer mode loads/stores of
5647 FP registers. */
5648 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5649 && GET_MODE_CLASS (mode) == MODE_INT
5650 && FP_REG_CLASS_P (class))
5651 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5652 return GENERAL_REGS;
5654 /* A SAR<->FP register copy requires a secondary register (GPR) as
5655 well as secondary memory. */
5656 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5657 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5658 || (class == SHIFT_REGS && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5659 return GENERAL_REGS;
5661 if (GET_CODE (in) == HIGH)
5662 in = XEXP (in, 0);
/* Profiling has shown that GCC spends about 2.6% of its compilation
5665 time in symbolic_operand from calls inside secondary_reload_class.
5667 We use an inline copy and only compute its return value once to avoid
5668 useless work. */
5669 switch (GET_CODE (in))
5671 rtx tmp;
5673 case SYMBOL_REF:
5674 case LABEL_REF:
5675 is_symbolic = 1;
5676 break;
5677 case CONST:
5678 tmp = XEXP (in, 0);
5679 is_symbolic = ((GET_CODE (XEXP (tmp, 0)) == SYMBOL_REF
5680 || GET_CODE (XEXP (tmp, 0)) == LABEL_REF)
5681 && GET_CODE (XEXP (tmp, 1)) == CONST_INT);
5682 break;
5684 default:
5685 is_symbolic = 0;
5686 break;
5689 if (!flag_pic
5690 && is_symbolic
5691 && read_only_operand (in, VOIDmode))
5692 return NO_REGS;
5694 if (class != R1_REGS && is_symbolic)
5695 return R1_REGS;
5697 return NO_REGS;
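/* Illustrative example: reloading a writable symbolic address such as
   (symbol_ref "x") into GENERAL_REGS returns R1_REGS above, because
   the addil used to form the address can only write %r1; reloading
   into R1_REGS itself needs no secondary class.  */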
5700 enum direction
5701 function_arg_padding (enum machine_mode mode, tree type)
5703 if (mode == BLKmode
5704 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5706 /* Return none if justification is not required. */
5707 if (type
5708 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5709 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5710 return none;
5712 /* The directions set here are ignored when a BLKmode argument larger
5713 than a word is placed in a register. Different code is used for
5714 the stack and registers. This makes it difficult to have a
5715 consistent data representation for both the stack and registers.
5716 For both runtimes, the justification and padding for arguments on
5717 the stack and in registers should be identical. */
5718 if (TARGET_64BIT)
5719 /* The 64-bit runtime specifies left justification for aggregates. */
5720 return upward;
5721 else
5722 /* The 32-bit runtime architecture specifies right justification.
5723 When the argument is passed on the stack, the argument is padded
5724 with garbage on the left. The HP compiler pads with zeros. */
5725 return downward;
5728 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5729 return downward;
5730 else
5731 return none;
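/* Illustrative example: on the 32-bit runtime a 3 byte struct is
   BLKmode and its size is not a multiple of PARM_BOUNDARY, so it is
   padded downward, i.e. right justified within its word.  */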
5735 /* Do what is necessary for `va_start'. We look at the current function
5736 to determine if stdargs or varargs is used and fill in an initial
5737 va_list. A pointer to this constructor is returned. */
5739 struct rtx_def *
5740 hppa_builtin_saveregs (void)
5742 rtx offset, dest;
5743 tree fntype = TREE_TYPE (current_function_decl);
5744 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5745 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5746 != void_type_node)))
5747 ? UNITS_PER_WORD : 0);
5749 if (argadj)
5750 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5751 else
5752 offset = current_function_arg_offset_rtx;
5754 if (TARGET_64BIT)
5756 int i, off;
5758 /* Adjust for varargs/stdarg differences. */
5759 if (argadj)
5760 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5761 else
5762 offset = current_function_arg_offset_rtx;
5764 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5765 from the incoming arg pointer and growing to larger addresses. */
5766 for (i = 26, off = -64; i >= 19; i--, off += 8)
5767 emit_move_insn (gen_rtx_MEM (word_mode,
5768 plus_constant (arg_pointer_rtx, off)),
5769 gen_rtx_REG (word_mode, i));
5771 /* The incoming args pointer points just beyond the flushback area;
5772 normally this is not a serious concern. However, when we are doing
5773 varargs/stdargs we want to make the arg pointer point to the start
5774 of the incoming argument area. */
5775 emit_move_insn (virtual_incoming_args_rtx,
5776 plus_constant (arg_pointer_rtx, -64));
5778 /* Now return a pointer to the first anonymous argument. */
5779 return copy_to_reg (expand_binop (Pmode, add_optab,
5780 virtual_incoming_args_rtx,
5781 offset, 0, 0, OPTAB_LIB_WIDEN));
5784 /* Store general registers on the stack. */
5785 dest = gen_rtx_MEM (BLKmode,
5786 plus_constant (current_function_internal_arg_pointer,
5787 -16));
5788 set_mem_alias_set (dest, get_varargs_alias_set ());
5789 set_mem_align (dest, BITS_PER_WORD);
5790 move_block_from_reg (23, dest, 4);
5792 /* move_block_from_reg will emit code to store the argument registers
5793 individually as scalar stores.
5795 However, other insns may later load from the same addresses for
5796 a structure load (passing a struct to a varargs routine).
5798 The alias code assumes that such aliasing can never happen, so we
5799 have to keep memory referencing insns from moving up beyond the
5800 last argument register store. So we emit a blockage insn here. */
5801 emit_insn (gen_blockage ());
5803 return copy_to_reg (expand_binop (Pmode, add_optab,
5804 current_function_internal_arg_pointer,
5805 offset, 0, 0, OPTAB_LIB_WIDEN));
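/* As a sketch of the source-level code that reaches the sequence
   above, a stdarg routine such as this hypothetical example (assuming
   <stdarg.h>) forces the anonymous argument registers to be flushed
   to the stack so that va_arg can walk them as ordinary memory; it is
   the va_start below that expands through hppa_builtin_saveregs:

       int
       sum (int n, ...)
       {
         va_list ap;
         int i, total = 0;

         va_start (ap, n);
         for (i = 0; i < n; i++)
           total += va_arg (ap, int);
         va_end (ap);
         return total;
       }  */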
5808 void
5809 hppa_va_start (tree valist, rtx nextarg)
5811 nextarg = expand_builtin_saveregs ();
5812 std_expand_builtin_va_start (valist, nextarg);
5815 rtx
5816 hppa_va_arg (tree valist, tree type)
5818 HOST_WIDE_INT size = int_size_in_bytes (type);
5819 HOST_WIDE_INT ofs;
5820 tree t, ptr, pptr;
5822 if (TARGET_64BIT)
5824 /* Every argument in PA64 is supposed to be passed by value
5825 (including large structs). However, as a GCC extension, we
5826 pass zero and variable sized arguments by reference. Empty
5827 structures are a GCC extension not supported by the HP
5828 compilers. Thus, passing them by reference isn't likely
5829 to conflict with the ABI. For variable sized arguments,
5830 GCC doesn't have the infrastructure to allocate these to
5831 registers. */
5833 /* Arguments with a size greater than 8 must be aligned 0 MOD 16. */
5835 if (size > UNITS_PER_WORD)
5837 t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
5838 build_int_2 (2 * UNITS_PER_WORD - 1, 0));
5839 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
5840 build_int_2 (-2 * UNITS_PER_WORD, -1));
5841 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
5842 TREE_SIDE_EFFECTS (t) = 1;
5843 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5846 if (size > 0)
5847 return std_expand_builtin_va_arg (valist, type);
5848 else
5850 ptr = build_pointer_type (type);
5852 /* Args grow upward. */
5853 t = build (POSTINCREMENT_EXPR, TREE_TYPE (valist), valist,
5854 build_int_2 (POINTER_SIZE / BITS_PER_UNIT, 0));
5855 TREE_SIDE_EFFECTS (t) = 1;
5857 pptr = build_pointer_type (ptr);
5858 t = build1 (NOP_EXPR, pptr, t);
5859 TREE_SIDE_EFFECTS (t) = 1;
5861 t = build1 (INDIRECT_REF, ptr, t);
5862 TREE_SIDE_EFFECTS (t) = 1;
5865 else /* !TARGET_64BIT */
5867 ptr = build_pointer_type (type);
5869 /* "Large" and variable sized types are passed by reference. */
5870 if (size > 8 || size <= 0)
5872 /* Args grow downward. */
5873 t = build (PREDECREMENT_EXPR, TREE_TYPE (valist), valist,
5874 build_int_2 (POINTER_SIZE / BITS_PER_UNIT, 0));
5875 TREE_SIDE_EFFECTS (t) = 1;
5877 pptr = build_pointer_type (ptr);
5878 t = build1 (NOP_EXPR, pptr, t);
5879 TREE_SIDE_EFFECTS (t) = 1;
5881 t = build1 (INDIRECT_REF, ptr, t);
5882 TREE_SIDE_EFFECTS (t) = 1;
5884 else
5886 t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
5887 build_int_2 (-size, -1));
5889 /* Copied from va-pa.h, but we probably don't need to align to
5890 word size, since we generate and preserve that invariant. */
5891 t = build (BIT_AND_EXPR, TREE_TYPE (valist), t,
5892 build_int_2 ((size > 4 ? -8 : -4), -1));
5894 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
5895 TREE_SIDE_EFFECTS (t) = 1;
5897 ofs = (8 - size) % 4;
5898 if (ofs)
5900 t = build (PLUS_EXPR, TREE_TYPE (valist), t,
5901 build_int_2 (ofs, 0));
5902 TREE_SIDE_EFFECTS (t) = 1;
5905 t = build1 (NOP_EXPR, ptr, t);
5906 TREE_SIDE_EFFECTS (t) = 1;
5910 /* Calculate! */
5911 return expand_expr (t, NULL_RTX, VOIDmode, EXPAND_NORMAL);
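/* Worked example of the 32-bit arithmetic above (illustrative
   addresses): fetching a 2-byte argument when valist is 0x7b010:

       t      = 0x7b010 - 2     = 0x7b00e   (step down by the size)
       t     &= -4              = 0x7b00c   (word align; size <= 4)
       valist = t
       ofs    = (8 - 2) % 4     = 2
       arg    = t + ofs         = 0x7b00e

   so the halfword is read from the high-address end of its word slot,
   matching the right justification used for small arguments.  */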
5916 /* This routine handles all the normal conditional branch sequences we
5917 might need to generate. It handles compare immediate vs compare
5918 register, nullification of delay slots, varying length branches,
5919 negated branches, and all combinations of the above. It returns the
5920 output appropriate to emit the branch corresponding to all given
5921 parameters. */
5923 const char *
5924 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
5926 static char buf[100];
5927 int useskip = 0;
5928 rtx xoperands[5];
5930 /* A conditional branch to the following instruction (e.g., the delay slot)
5931 is asking for a disaster. This can happen when not optimizing and
5932 when jump optimization fails.
5934 While it is usually safe to emit nothing, this can fail if the
5935 preceding instruction is a nullified branch with an empty delay
5936 slot and the same branch target as this branch. We could check
5937 for this but jump optimization should eliminate nop jumps. It
5938 is always safe to emit a nop. */
5939 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5940 return "nop";
5942 /* The doubleword form of the cmpib instruction doesn't have the LEU
5943 and GTU conditions while the cmpb instruction does. Since we accept
5944 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5945 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5946 operands[2] = gen_rtx_REG (DImode, 0);
5948 /* If this is a long branch with its delay slot unfilled, set `nullify'
5949 as it can nullify the delay slot and save a nop. */
5950 if (length == 8 && dbr_sequence_length () == 0)
5951 nullify = 1;
5953 /* If this is a short forward conditional branch which did not get
5954 its delay slot filled, the delay slot can still be nullified. */
5955 if (! nullify && length == 4 && dbr_sequence_length () == 0)
5956 nullify = forward_branch_p (insn);
5958 /* A forward branch over a single nullified insn can be done with a
5959 comclr instruction. This avoids a single cycle penalty due to
5960 mis-predicted branch if we fall through (branch not taken). */
5961 if (length == 4
5962 && next_real_insn (insn) != 0
5963 && get_attr_length (next_real_insn (insn)) == 4
5964 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
5965 && nullify)
5966 useskip = 1;
5968 switch (length)
5970 /* All short conditional branches except backwards with an unfilled
5971 delay slot. */
5972 case 4:
5973 if (useskip)
5974 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
5975 else
5976 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5977 if (GET_MODE (operands[1]) == DImode)
5978 strcat (buf, "*");
5979 if (negated)
5980 strcat (buf, "%B3");
5981 else
5982 strcat (buf, "%S3");
5983 if (useskip)
5984 strcat (buf, " %2,%r1,%%r0");
5985 else if (nullify)
5986 strcat (buf, ",n %2,%r1,%0");
5987 else
5988 strcat (buf, " %2,%r1,%0");
5989 break;
5991 /* All long conditionals. Note a short backward branch with an
5992 unfilled delay slot is treated just like a long backward branch
5993 with an unfilled delay slot. */
5994 case 8:
5995 /* Handle weird backwards branch with a filled delay slot
5996 which is nullified. */
5997 if (dbr_sequence_length () != 0
5998 && ! forward_branch_p (insn)
5999 && nullify)
6001 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6002 if (GET_MODE (operands[1]) == DImode)
6003 strcat (buf, "*");
6004 if (negated)
6005 strcat (buf, "%S3");
6006 else
6007 strcat (buf, "%B3");
6008 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6010 /* Handle short backwards branch with an unfilled delay slot.
6011 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6012 taken and untaken branches. */
6013 else if (dbr_sequence_length () == 0
6014 && ! forward_branch_p (insn)
6015 && INSN_ADDRESSES_SET_P ()
6016 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6017 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6019 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6020 if (GET_MODE (operands[1]) == DImode)
6021 strcat (buf, "*");
6022 if (negated)
6023 strcat (buf, "%B3 %2,%r1,%0%#");
6024 else
6025 strcat (buf, "%S3 %2,%r1,%0%#");
6027 else
6029 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6030 if (GET_MODE (operands[1]) == DImode)
6031 strcat (buf, "*");
6032 if (negated)
6033 strcat (buf, "%S3");
6034 else
6035 strcat (buf, "%B3");
6036 if (nullify)
6037 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6038 else
6039 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6041 break;
6043 case 20:
6044 case 28:
6045 xoperands[0] = operands[0];
6046 xoperands[1] = operands[1];
6047 xoperands[2] = operands[2];
6048 xoperands[3] = operands[3];
6050 /* The reversed conditional branch must branch over one additional
6051 instruction if the delay slot is filled. If the delay slot
6052 is empty, the instruction after the reversed condition branch
6053 must be nullified. */
6054 nullify = dbr_sequence_length () == 0;
6055 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
6057 /* Create a reversed conditional branch which branches around
6058 the following insns. */
6059 if (GET_MODE (operands[1]) != DImode)
6061 if (nullify)
6063 if (negated)
6064 strcpy (buf,
6065 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6066 else
6067 strcpy (buf,
6068 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6070 else
6072 if (negated)
6073 strcpy (buf,
6074 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6075 else
6076 strcpy (buf,
6077 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6080 else
6082 if (nullify)
6084 if (negated)
6085 strcpy (buf,
6086 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6087 else
6088 strcpy (buf,
6089 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6091 else
6093 if (negated)
6094 strcpy (buf,
6095 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6096 else
6097 strcpy (buf,
6098 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6102 output_asm_insn (buf, xoperands);
6103 return output_lbranch (operands[0], insn);
6105 default:
6106 abort ();
6108 return buf;
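/* For reference, with illustrative operands the length 4 template
   above comes out as a single nullifying compare and branch:

       cmpb,=,n %r25,%r26,L$0042

   and the generic length 8 form pairs a compare-and-clear, which
   skips the following branch when the branch condition fails, with an
   unconditional branch:

       cmpclr,<> %r25,%r26,%r0
       b,n L$0042  */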
6111 /* This routine handles long unconditional branches that exceed the
6112 maximum range of a simple branch instruction. */
6114 const char *
6115 output_lbranch (rtx dest, rtx insn)
6117 rtx xoperands[2];
6119 xoperands[0] = dest;
6121 /* First, free up the delay slot. */
6122 if (dbr_sequence_length () != 0)
6124 /* We can't handle a jump in the delay slot. */
6125 if (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN)
6126 abort ();
6128 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6129 optimize, 0, 0);
6131 /* Now delete the delay insn. */
6132 PUT_CODE (NEXT_INSN (insn), NOTE);
6133 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6134 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6137 /* Output an insn to save %r1. The runtime documentation doesn't
6138 specify whether the "Clean Up" slot in the caller's frame can
6139 be clobbered by the callee. It isn't copied by HP's builtin
6140 alloca, so this suggests that it can be clobbered if necessary.
6141 The "Static Link" location is copied by HP builtin alloca, so
6142 we avoid using it. Using the cleanup slot might be a problem
6143 if we have to interoperate with languages that pass cleanup
6144 information. However, it should be possible to handle these
6145 situations with GCC's asm feature.
6147 The "Current RP" slot is reserved for the called procedure, so
6148 we try to use it when we don't have a frame of our own. It's
6149 rather unlikely that we won't have a frame when we need to emit
6150 a very long branch.
6152 Really, the way to go long term is a register scavenger; go to
6153 the target of the jump and find a register which we can use
6154 as a scratch to hold the value in %r1. Then, we wouldn't have
6155 to free up the delay slot or clobber a slot that may be needed
6156 for other purposes. */
6157 if (TARGET_64BIT)
6159 if (actual_fsize == 0 && !regs_ever_live[2])
6160 /* Use the return pointer slot in the frame marker. */
6161 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6162 else
6163 /* Use the slot at -40 in the frame marker since HP builtin
6164 alloca doesn't copy it. */
6165 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6167 else
6169 if (actual_fsize == 0 && !regs_ever_live[2])
6170 /* Use the return pointer slot in the frame marker. */
6171 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6172 else
6173 /* Use the "Clean Up" slot in the frame marker. In GCC,
6174 the only other use of this location is for copying a
6175 floating point double argument from a floating-point
6176 register to two general registers. The copy is done
6177 as an "atomic" operation when outputting a call, so it
6178 won't interfere with our using the location here. */
6179 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6182 if (TARGET_PORTABLE_RUNTIME)
6184 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6185 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6186 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6188 else if (flag_pic)
6190 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6191 if (TARGET_SOM || !TARGET_GAS)
6193 xoperands[1] = gen_label_rtx ();
6194 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6195 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6196 CODE_LABEL_NUMBER (xoperands[1]));
6197 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6199 else
6201 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6202 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6204 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6206 else
6207 /* Now output a very long branch to the original target. */
6208 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6210 /* Now restore the value of %r1 in the delay slot. */
6211 if (TARGET_64BIT)
6213 if (actual_fsize == 0 && !regs_ever_live[2])
6214 return "ldd -16(%%r30),%%r1";
6215 else
6216 return "ldd -40(%%r30),%%r1";
6218 else
6220 if (actual_fsize == 0 && !regs_ever_live[2])
6221 return "ldw -20(%%r30),%%r1";
6222 else
6223 return "ldw -12(%%r30),%%r1";
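/* Putting the pieces above together, the simplest 32-bit non-PIC long
   branch from a function with a frame looks like this (illustrative):

       stw %r1,-12(%r30)            save %r1 in the "Clean Up" slot
       ldil L'L$0099,%r1
       be R'L$0099(%sr4,%r1)        very long branch via %r1
       ldw -12(%r30),%r1            restore %r1 in the delay slot  */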
6227 /* This routine handles all the branch-on-bit conditional branch sequences we
6228 might need to generate. It handles nullification of delay slots,
6229 varying length branches, negated branches and all combinations of the
6230 above. It returns the appropriate output template to emit the branch. */
6232 const char *
6233 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6234 int negated, rtx insn, int which)
6236 static char buf[100];
6237 int useskip = 0;
6239 /* A conditional branch to the following instruction (e.g., the delay slot) is
6240 asking for a disaster. I do not think this can happen as this pattern
6241 is only used when optimizing; jump optimization should eliminate the
6242 jump. But be prepared just in case. */
6244 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6245 return "nop";
6247 /* If this is a long branch with its delay slot unfilled, set `nullify'
6248 as it can nullify the delay slot and save a nop. */
6249 if (length == 8 && dbr_sequence_length () == 0)
6250 nullify = 1;
6252 /* If this is a short forward conditional branch which did not get
6253 its delay slot filled, the delay slot can still be nullified. */
6254 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6255 nullify = forward_branch_p (insn);
6257 /* A forward branch over a single nullified insn can be done with an
6258 extrs instruction. This avoids a single cycle penalty due to
6259 mis-predicted branch if we fall through (branch not taken). */
6261 if (length == 4
6262 && next_real_insn (insn) != 0
6263 && get_attr_length (next_real_insn (insn)) == 4
6264 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6265 && nullify)
6266 useskip = 1;
6268 switch (length)
6271 /* All short conditional branches except backwards with an unfilled
6272 delay slot. */
6273 case 4:
6274 if (useskip)
6275 strcpy (buf, "{extrs,|extrw,s,}");
6276 else
6277 strcpy (buf, "bb,");
6278 if (useskip && GET_MODE (operands[0]) == DImode)
6279 strcpy (buf, "extrd,s,*");
6280 else if (GET_MODE (operands[0]) == DImode)
6281 strcpy (buf, "bb,*");
6282 if ((which == 0 && negated)
6283 || (which == 1 && ! negated))
6284 strcat (buf, ">=");
6285 else
6286 strcat (buf, "<");
6287 if (useskip)
6288 strcat (buf, " %0,%1,1,%%r0");
6289 else if (nullify && negated)
6290 strcat (buf, ",n %0,%1,%3");
6291 else if (nullify && ! negated)
6292 strcat (buf, ",n %0,%1,%2");
6293 else if (! nullify && negated)
6294 strcat (buf, "%0,%1,%3");
6295 else if (! nullify && ! negated)
6296 strcat (buf, " %0,%1,%2");
6297 break;
6299 /* All long conditionals. Note a short backward branch with an
6300 unfilled delay slot is treated just like a long backward branch
6301 with an unfilled delay slot. */
6302 case 8:
6303 /* Handle weird backwards branch with a filled delay slot
6304 which is nullified. */
6305 if (dbr_sequence_length () != 0
6306 && ! forward_branch_p (insn)
6307 && nullify)
6309 strcpy (buf, "bb,");
6310 if (GET_MODE (operands[0]) == DImode)
6311 strcat (buf, "*");
6312 if ((which == 0 && negated)
6313 || (which == 1 && ! negated))
6314 strcat (buf, "<");
6315 else
6316 strcat (buf, ">=");
6317 if (negated)
6318 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6319 else
6320 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6322 /* Handle short backwards branch with an unfilled delay slot.
6323 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6324 taken and untaken branches. */
6325 else if (dbr_sequence_length () == 0
6326 && ! forward_branch_p (insn)
6327 && INSN_ADDRESSES_SET_P ()
6328 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6329 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6331 strcpy (buf, "bb,");
6332 if (GET_MODE (operands[0]) == DImode)
6333 strcat (buf, "*");
6334 if ((which == 0 && negated)
6335 || (which == 1 && ! negated))
6336 strcat (buf, ">=");
6337 else
6338 strcat (buf, "<");
6339 if (negated)
6340 strcat (buf, " %0,%1,%3%#");
6341 else
6342 strcat (buf, " %0,%1,%2%#");
6344 else
6346 strcpy (buf, "{extrs,|extrw,s,}");
6347 if (GET_MODE (operands[0]) == DImode)
6348 strcpy (buf, "extrd,s,*");
6349 if ((which == 0 && negated)
6350 || (which == 1 && ! negated))
6351 strcat (buf, "<");
6352 else
6353 strcat (buf, ">=");
6354 if (nullify && negated)
6355 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6356 else if (nullify && ! negated)
6357 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6358 else if (negated)
6359 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6360 else
6361 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6363 break;
6365 default:
6366 abort ();
6368 return buf;
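/* A short branch-on-bit built from the templates above might be
   emitted as (illustrative operands; PA numbers bits from the most
   significant end):

       bb,< %r4,5,L$0021            branch if bit 5 of %r4 is 1
       bb,>= %r4,5,L$0021           branch if bit 5 of %r4 is 0  */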
6371 /* This routine handles all the branch-on-variable-bit conditional branch
6372 sequences we might need to generate. It handles nullification of delay
6373 slots, varying length branches, negated branches and all combinations
6374 of the above. It returns the appropriate output template to emit the
6375 branch. */
6377 const char *
6378 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6379 int negated, rtx insn, int which)
6381 static char buf[100];
6382 int useskip = 0;
6384 /* A conditional branch to the following instruction (e.g., the delay slot) is
6385 asking for a disaster. I do not think this can happen as this pattern
6386 is only used when optimizing; jump optimization should eliminate the
6387 jump. But be prepared just in case. */
6389 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6390 return "nop";
6392 /* If this is a long branch with its delay slot unfilled, set `nullify'
6393 as it can nullify the delay slot and save a nop. */
6394 if (length == 8 && dbr_sequence_length () == 0)
6395 nullify = 1;
6397 /* If this is a short forward conditional branch which did not get
6398 its delay slot filled, the delay slot can still be nullified. */
6399 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6400 nullify = forward_branch_p (insn);
6402 /* A forward branch over a single nullified insn can be done with an
6403 extrs instruction. This avoids a single cycle penalty due to
6404 mis-predicted branch if we fall through (branch not taken). */
6406 if (length == 4
6407 && next_real_insn (insn) != 0
6408 && get_attr_length (next_real_insn (insn)) == 4
6409 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6410 && nullify)
6411 useskip = 1;
6413 switch (length)
6416 /* All short conditional branches except backwards with an unfilled
6417 delay slot. */
6418 case 4:
6419 if (useskip)
6420 strcpy (buf, "{vextrs,|extrw,s,}");
6421 else
6422 strcpy (buf, "{bvb,|bb,}");
6423 if (useskip && GET_MODE (operands[0]) == DImode)
6424 strcpy (buf, "extrd,s,*");
6425 else if (GET_MODE (operands[0]) == DImode)
6426 strcpy (buf, "bb,*");
6427 if ((which == 0 && negated)
6428 || (which == 1 && ! negated))
6429 strcat (buf, ">=");
6430 else
6431 strcat (buf, "<");
6432 if (useskip)
6433 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6434 else if (nullify && negated)
6435 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6436 else if (nullify && ! negated)
6437 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6438 else if (! nullify && negated)
6439 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6440 else if (! nullify && ! negated)
6441 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6442 break;
6444 /* All long conditionals. Note a short backward branch with an
6445 unfilled delay slot is treated just like a long backward branch
6446 with an unfilled delay slot. */
6447 case 8:
6448 /* Handle weird backwards branch with a filled delay slot
6449 which is nullified. */
6450 if (dbr_sequence_length () != 0
6451 && ! forward_branch_p (insn)
6452 && nullify)
6454 strcpy (buf, "{bvb,|bb,}");
6455 if (GET_MODE (operands[0]) == DImode)
6456 strcat (buf, "*");
6457 if ((which == 0 && negated)
6458 || (which == 1 && ! negated))
6459 strcat (buf, "<");
6460 else
6461 strcat (buf, ">=");
6462 if (negated)
6463 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6464 else
6465 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6467 /* Handle short backwards branch with an unfilled delay slot.
6468 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6469 taken and untaken branches. */
6470 else if (dbr_sequence_length () == 0
6471 && ! forward_branch_p (insn)
6472 && INSN_ADDRESSES_SET_P ()
6473 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6474 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6476 strcpy (buf, "{bvb,|bb,}");
6477 if (GET_MODE (operands[0]) == DImode)
6478 strcat (buf, "*");
6479 if ((which == 0 && negated)
6480 || (which == 1 && ! negated))
6481 strcat (buf, ">=");
6482 else
6483 strcat (buf, "<");
6484 if (negated)
6485 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6486 else
6487 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6489 else
6491 strcpy (buf, "{vextrs,|extrw,s,}");
6492 if (GET_MODE (operands[0]) == DImode)
6493 strcpy (buf, "extrd,s,*");
6494 if ((which == 0 && negated)
6495 || (which == 1 && ! negated))
6496 strcat (buf, "<");
6497 else
6498 strcat (buf, ">=");
6499 if (nullify && negated)
6500 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6501 else if (nullify && ! negated)
6502 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6503 else if (negated)
6504 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6505 else
6506 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6508 break;
6510 default:
6511 abort ();
6513 return buf;
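/* The variable-bit form tests the bit position held in %sar.  The
   {bvb,|bb,} alternation above selects the PA 1.x and PA 2.0
   spellings of the same test (illustrative operands):

       bvb,< %r7,L$0033             PA 1.x: branch if the %sar bit is 1
       bb,< %r7,%sar,L$0033         PA 2.0 spelling of the same test  */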
6516 /* Return the output template for emitting a dbra type insn.
6518 Note it may perform some output operations on its own before
6519 returning the final output string. */
6520 const char *
6521 output_dbra (rtx *operands, rtx insn, int which_alternative)
6524 /* A conditional branch to the following instruction (e.g., the delay slot) is
6525 asking for a disaster. Be prepared! */
6527 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6529 if (which_alternative == 0)
6530 return "ldo %1(%0),%0";
6531 else if (which_alternative == 1)
6533 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6534 output_asm_insn ("ldw -16(%%r30),%4", operands);
6535 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6536 return "{fldws|fldw} -16(%%r30),%0";
6538 else
6540 output_asm_insn ("ldw %0,%4", operands);
6541 return "ldo %1(%4),%4\n\tstw %4,%0";
6545 if (which_alternative == 0)
6547 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6548 int length = get_attr_length (insn);
6550 /* If this is a long branch with its delay slot unfilled, set `nullify'
6551 as it can nullify the delay slot and save a nop. */
6552 if (length == 8 && dbr_sequence_length () == 0)
6553 nullify = 1;
6555 /* If this is a short forward conditional branch which did not get
6556 its delay slot filled, the delay slot can still be nullified. */
6557 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6558 nullify = forward_branch_p (insn);
6560 /* Handle short versions first. */
6561 if (length == 4 && nullify)
6562 return "addib,%C2,n %1,%0,%3";
6563 else if (length == 4 && ! nullify)
6564 return "addib,%C2 %1,%0,%3";
6565 else if (length == 8)
6567 /* Handle weird backwards branch with a filled delay slot
6568 which is nullified. */
6569 if (dbr_sequence_length () != 0
6570 && ! forward_branch_p (insn)
6571 && nullify)
6572 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6573 /* Handle short backwards branch with an unfilled delay slot.
6574 Using an addb;nop rather than addi;bl saves 1 cycle for both
6575 taken and untaken branches. */
6576 else if (dbr_sequence_length () == 0
6577 && ! forward_branch_p (insn)
6578 && INSN_ADDRESSES_SET_P ()
6579 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6580 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6581 return "addib,%C2 %1,%0,%3%#";
6583 /* Handle normal cases. */
6584 if (nullify)
6585 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6586 else
6587 return "addi,%N2 %1,%0,%0\n\tb %3";
6589 else
6590 abort ();
6592 /* Deal with gross reload from FP register case. */
6593 else if (which_alternative == 1)
6595 /* Move loop counter from FP register to MEM then into a GR,
6596 increment the GR, store the GR into MEM, and finally reload
6597 the FP register from MEM from within the branch's delay slot. */
6598 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6599 operands);
6600 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6601 if (get_attr_length (insn) == 24)
6602 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6603 else
6604 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6606 /* Deal with gross reload from memory case. */
6607 else
6609 /* Reload loop counter from memory, the store back to memory
6610 happens in the branch's delay slot. */
6611 output_asm_insn ("ldw %0,%4", operands);
6612 if (get_attr_length (insn) == 12)
6613 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6614 else
6615 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
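/* In the common case, alternative 0 above reduces to one
   decrement-and-branch insn (illustrative operands):

       addib,> -1,%r3,L$0007        %r3 += -1; branch while result > 0  */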
6619 /* Return the output template for emitting a movb type insn.
6621 Note it may perform some output operations on its own before
6622 returning the final output string. */
6623 const char *
6624 output_movb (rtx *operands, rtx insn, int which_alternative,
6625 int reverse_comparison)
6628 /* A conditional branch to the following instruction (e.g., the delay slot) is
6629 asking for a disaster. Be prepared! */
6631 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6633 if (which_alternative == 0)
6634 return "copy %1,%0";
6635 else if (which_alternative == 1)
6637 output_asm_insn ("stw %1,-16(%%r30)", operands);
6638 return "{fldws|fldw} -16(%%r30),%0";
6640 else if (which_alternative == 2)
6641 return "stw %1,%0";
6642 else
6643 return "mtsar %r1";
6646 /* Support the second variant. */
6647 if (reverse_comparison)
6648 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6650 if (which_alternative == 0)
6652 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6653 int length = get_attr_length (insn);
6655 /* If this is a long branch with its delay slot unfilled, set `nullify'
6656 as it can nullify the delay slot and save a nop. */
6657 if (length == 8 && dbr_sequence_length () == 0)
6658 nullify = 1;
6660 /* If this is a short forward conditional branch which did not get
6661 its delay slot filled, the delay slot can still be nullified. */
6662 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6663 nullify = forward_branch_p (insn);
6665 /* Handle short versions first. */
6666 if (length == 4 && nullify)
6667 return "movb,%C2,n %1,%0,%3";
6668 else if (length == 4 && ! nullify)
6669 return "movb,%C2 %1,%0,%3";
6670 else if (length == 8)
6672 /* Handle weird backwards branch with a filled delay slot
6673 which is nullified. */
6674 if (dbr_sequence_length () != 0
6675 && ! forward_branch_p (insn)
6676 && nullify)
6677 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6679 /* Handle short backwards branch with an unfilled delay slot.
6680 Using a movb;nop rather than or;bl saves 1 cycle for both
6681 taken and untaken branches. */
6682 else if (dbr_sequence_length () == 0
6683 && ! forward_branch_p (insn)
6684 && INSN_ADDRESSES_SET_P ()
6685 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6686 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6687 return "movb,%C2 %1,%0,%3%#";
6688 /* Handle normal cases. */
6689 if (nullify)
6690 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6691 else
6692 return "or,%N2 %1,%%r0,%0\n\tb %3";
6694 else
6695 abort ();
6697 /* Deal with gross reload from FP register case. */
6698 else if (which_alternative == 1)
6700 /* Move loop counter from FP register to MEM then into a GR,
6701 increment the GR, store the GR into MEM, and finally reload
6702 the FP register from MEM from within the branch's delay slot. */
6703 output_asm_insn ("stw %1,-16(%%r30)", operands);
6704 if (get_attr_length (insn) == 12)
6705 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6706 else
6707 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6709 /* Deal with gross reload from memory case. */
6710 else if (which_alternative == 2)
6712 /* Reload loop counter from memory, the store back to memory
6713 happens in the branch's delay slot. */
6714 if (get_attr_length (insn) == 8)
6715 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6716 else
6717 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6719 /* Handle SAR as a destination. */
6720 else
6722 if (get_attr_length (insn) == 8)
6723 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6724 else
6725 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tbl %3\n\tmtsar %r1";
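/* Alternative 0 above likewise emits a single move-and-branch in the
   common case (illustrative operands):

       movb,= %r5,%r3,L$0015        %r3 = %r5; branch if the value is 0  */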
6729 /* Copy any FP arguments in INSN into integer registers. */
6730 static void
6731 copy_fp_args (rtx insn)
6733 rtx link;
6734 rtx xoperands[2];
6736 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6738 int arg_mode, regno;
6739 rtx use = XEXP (link, 0);
6741 if (! (GET_CODE (use) == USE
6742 && GET_CODE (XEXP (use, 0)) == REG
6743 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6744 continue;
6746 arg_mode = GET_MODE (XEXP (use, 0));
6747 regno = REGNO (XEXP (use, 0));
6749 /* Is it a floating point register? */
6750 if (regno >= 32 && regno <= 39)
6752 /* Copy the FP register into an integer register via memory. */
6753 if (arg_mode == SFmode)
6755 xoperands[0] = XEXP (use, 0);
6756 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6757 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6758 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6760 else
6762 xoperands[0] = XEXP (use, 0);
6763 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6764 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6765 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6766 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
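/* For a DFmode argument the loop above emits a store/reload pair
   through the stack, e.g. (illustrative register pairing):

       fstd %fr7,-16(%sr0,%r30)
       ldw -12(%sr0,%r30),%r24      low word into the second GR
       ldw -16(%sr0,%r30),%r23      high word into the first GR  */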
6772 /* Compute length of the FP argument copy sequence for INSN. */
6773 static int
6774 length_fp_args (rtx insn)
6776 int length = 0;
6777 rtx link;
6779 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6781 int arg_mode, regno;
6782 rtx use = XEXP (link, 0);
6784 if (! (GET_CODE (use) == USE
6785 && GET_CODE (XEXP (use, 0)) == REG
6786 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6787 continue;
6789 arg_mode = GET_MODE (XEXP (use, 0));
6790 regno = REGNO (XEXP (use, 0));
6792 /* Is it a floating point register? */
6793 if (regno >= 32 && regno <= 39)
6795 if (arg_mode == SFmode)
6796 length += 8;
6797 else
6798 length += 12;
6802 return length;
6805 /* Return the attribute length for the millicode call instruction INSN.
6806 The length must match the code generated by output_millicode_call.
6807 We include the delay slot in the returned length as it is better to
6808 overestimate the length than to underestimate it. */
6810 int
6811 attr_length_millicode_call (rtx insn)
6813 unsigned long distance = -1;
6814 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
6816 if (INSN_ADDRESSES_SET_P ())
6818 distance = (total + insn_current_reference_address (insn));
6819 if (distance < total)
6820 distance = -1;
6823 if (TARGET_64BIT)
6825 if (!TARGET_LONG_CALLS && distance < 7600000)
6826 return 8;
6828 return 20;
6830 else if (TARGET_PORTABLE_RUNTIME)
6831 return 24;
6832 else
6834 if (!TARGET_LONG_CALLS && distance < 240000)
6835 return 8;
6837 if (TARGET_LONG_ABS_CALL && !flag_pic)
6838 return 12;
6840 return 24;
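/* Worked example of the accounting above (illustrative): a 32-bit,
   non-PIC, out-of-range millicode call with TARGET_LONG_ABS_CALL is
   counted as 12 bytes, matching a two insn sequence plus delay slot:

       ldil L'$$mulI,%r1
       ble R'$$mulI(%sr4,%r1)
       nop                          or a filled delay slot  */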
6844 /* INSN is a function call. It may have an unconditional jump
6845 in its delay slot.
6847 CALL_DEST is the routine we are calling. */
6849 const char *
6850 output_millicode_call (rtx insn, rtx call_dest)
6852 int attr_length = get_attr_length (insn);
6853 int seq_length = dbr_sequence_length ();
6854 int distance;
6855 rtx seq_insn;
6856 rtx xoperands[3];
6858 xoperands[0] = call_dest;
6859 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
6861 /* Handle the common case where we are sure that the branch will
6862 reach the beginning of the $CODE$ subspace. The within-reach
6863 form of the $$sh_func_adrs call has a length of 28. Because
6864 it has an attribute type of multi, it never has a nonzero
6865 sequence length. The length of the $$sh_func_adrs is the same
6866 as certain out of reach PIC calls to other routines. */
6867 if (!TARGET_LONG_CALLS
6868 && ((seq_length == 0
6869 && (attr_length == 12
6870 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
6871 || (seq_length != 0 && attr_length == 8)))
6873 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
6875 else
6877 if (TARGET_64BIT)
6879 /* It might seem that one insn could be saved by accessing
6880 the millicode function using the linkage table. However,
6881 this doesn't work in shared libraries and other dynamically
6882 loaded objects. Using a pc-relative sequence also avoids
6883 problems related to the implicit use of the gp register. */
6884 output_asm_insn ("b,l .+8,%%r1", xoperands);
6886 if (TARGET_GAS)
6888 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
6889 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6891 else
6893 xoperands[1] = gen_label_rtx ();
6894 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6895 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6896 CODE_LABEL_NUMBER (xoperands[1]));
6897 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6900 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
6902 else if (TARGET_PORTABLE_RUNTIME)
6904 /* Pure portable runtime doesn't allow be/ble; we also don't
6905 have PIC support in the assembler/linker, so this sequence
6906 is needed. */
6908 /* Get the address of our target into %r1. */
6909 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6910 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6912 /* Get our return address into %r31. */
6913 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
6914 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
6916 /* Jump to our target address in %r1. */
6917 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6919 else if (!flag_pic)
6921 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6922 if (TARGET_PA_20)
6923 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
6924 else
6925 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
6927 else
6929 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6930 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
6932 if (TARGET_SOM || !TARGET_GAS)
6934 /* The HP assembler can generate relocations for the
6935 difference of two symbols. GAS can do this for a
6936 millicode symbol but not an arbitrary external
6937 symbol when generating SOM output. */
6938 xoperands[1] = gen_label_rtx ();
6939 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6940 CODE_LABEL_NUMBER (xoperands[1]));
6941 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6942 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6944 else
6946 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
6947 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
6948 xoperands);
6951 /* Jump to our target address in %r1. */
6952 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6956 if (seq_length == 0)
6957 output_asm_insn ("nop", xoperands);
6959 /* We are done if there isn't a jump in the delay slot. */
6960 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
6961 return "";
6963 /* This call has an unconditional jump in its delay slot. */
6964 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
6966 /* See if the return address can be adjusted. Use the containing
6967 sequence insn's address. */
6968 if (INSN_ADDRESSES_SET_P ())
6970 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
6971 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
6972 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
6974 if (VAL_14_BITS_P (distance))
6976 xoperands[1] = gen_label_rtx ();
6977 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
6978 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6979 CODE_LABEL_NUMBER (xoperands[1]));
6981 else
6982 /* ??? This branch may not reach its target. */
6983 output_asm_insn ("nop\n\tb,n %0", xoperands);
6985 else
6986 /* ??? This branch may not reach its target. */
6987 output_asm_insn ("nop\n\tb,n %0", xoperands);
6989 /* Delete the jump. */
6990 PUT_CODE (NEXT_INSN (insn), NOTE);
6991 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6992 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6994 return "";
6997 /* Return the attribute length of the call instruction INSN. The SIBCALL
6998 flag indicates whether INSN is a regular call or a sibling call. The
6999 length returned must be longer than the code actually generated by
7000 output_call. Since branch shortening is done before delay branch
7001 sequencing, there is no way to determine whether or not the delay
7002 slot will be filled during branch shortening. Even when the delay
7003 slot is filled, we may have to add a nop if the delay slot contains
7004 a branch that can't reach its target. Thus, we always have to include
7005 the delay slot in the length estimate. This used to be done in
7006 pa_adjust_insn_length but we do it here now as some sequences always
7007 fill the delay slot and we can save four bytes in the estimate for
7008 these sequences. */
7010 int
7011 attr_length_call (rtx insn, int sibcall)
7013 int local_call;
7014 rtx call_dest;
7015 tree call_decl;
7016 int length = 0;
7017 rtx pat = PATTERN (insn);
7018 unsigned long distance = -1;
7020 if (INSN_ADDRESSES_SET_P ())
7022 unsigned long total;
7024 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7025 distance = (total + insn_current_reference_address (insn));
7026 if (distance < total)
7027 distance = -1;
7030 /* Determine if this is a local call. */
7031 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7032 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7033 else
7034 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7036 call_decl = SYMBOL_REF_DECL (call_dest);
7037 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7039 /* pc-relative branch. */
7040 if (!TARGET_LONG_CALLS
7041 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7042 || distance < 240000))
7043 length += 8;
7045 /* 64-bit plabel sequence. */
7046 else if (TARGET_64BIT && !local_call)
7047 length += sibcall ? 28 : 24;
7049 /* non-pic long absolute branch sequence. */
7050 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7051 length += 12;
7053 /* long pc-relative branch sequence. */
7054 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7055 || (TARGET_64BIT && !TARGET_GAS)
7056 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7058 length += 20;
7060 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7061 length += 8;
7064 /* 32-bit plabel sequence. */
7065 else
7067 length += 32;
7069 if (TARGET_SOM)
7070 length += length_fp_args (insn);
7072 if (flag_pic)
7073 length += 4;
7075 if (!TARGET_PA_20)
7077 if (!sibcall)
7078 length += 8;
7080 if (!TARGET_NO_SPACE_REGS)
7081 length += 8;
7085 return length;
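/* Worked example of the length accounting above (illustrative): a
   32-bit PIC plabel call on a pre-2.0 target with space registers
   enabled totals 32 (base) + 4 (PIC) + 8 (!sibcall) + 8 (space
   registers) = 52 bytes, plus the FP argument copy sequence when
   generating SOM output.  */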
7088 /* INSN is a function call. It may have an unconditional jump
7089 in its delay slot.
7091 CALL_DEST is the routine we are calling. */
7093 const char *
7094 output_call (rtx insn, rtx call_dest, int sibcall)
7096 int delay_insn_deleted = 0;
7097 int delay_slot_filled = 0;
7098 int seq_length = dbr_sequence_length ();
7099 tree call_decl = SYMBOL_REF_DECL (call_dest);
7100 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7101 rtx xoperands[2];
7103 xoperands[0] = call_dest;
7105 /* Handle the common case where we're sure that the branch will reach
7106 the beginning of the "$CODE$" subspace. This is the beginning of
7107 the current function if we are in a named section. */
7108 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7110 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7111 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7113 else
7115 if (TARGET_64BIT && !local_call)
7117 /* ??? As far as I can tell, the HP linker doesn't support the
7118 long pc-relative sequence described in the 64-bit runtime
7119 architecture. So, we use a slightly longer indirect call. */
7120 struct deferred_plabel *p = get_plabel (XSTR (call_dest, 0));
7122 xoperands[0] = p->internal_label;
7123 xoperands[1] = gen_label_rtx ();
7125 /* If this isn't a sibcall, we put the load of %r27 into the
7126 delay slot. We can't do this in a sibcall as we don't
7127 have a second call-clobbered scratch register available. */
7128 if (seq_length != 0
7129 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7130 && !sibcall)
7132 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7133 optimize, 0, 0);
7135 /* Now delete the delay insn. */
7136 PUT_CODE (NEXT_INSN (insn), NOTE);
7137 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7138 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7139 delay_insn_deleted = 1;
7142 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7143 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7144 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7146 if (sibcall)
7148 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7149 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7150 output_asm_insn ("bve (%%r1)", xoperands);
7152 else
7154 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7155 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7156 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7157 delay_slot_filled = 1;
7160 else
7162 int indirect_call = 0;
7164 /* Emit a long call. There are several different sequences
7165 of increasing length and complexity. In most cases,
7166 they don't allow an instruction in the delay slot. */
7167 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7168 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7169 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7170 && !TARGET_64BIT)
7171 indirect_call = 1;
7173 if (seq_length != 0
7174 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7175 && !sibcall
7176 && (!TARGET_PA_20 || indirect_call))
7178 /* A non-jump insn in the delay slot. By definition we can
7179 emit this insn before the call (and in fact before argument
7180 relocating). */
7181 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0, 0);
7183 /* Now delete the delay insn. */
7184 PUT_CODE (NEXT_INSN (insn), NOTE);
7185 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7186 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7187 delay_insn_deleted = 1;
7190 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7192 /* This is the best sequence for making long calls in
7193 non-pic code. Unfortunately, GNU ld doesn't provide
7194 the stub needed for external calls, and GAS's support
7195 for this with the SOM linker is buggy. It is safe
7196 to use this for local calls. */
7197 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7198 if (sibcall)
7199 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7200 else
7202 if (TARGET_PA_20)
7203 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7204 xoperands);
7205 else
7206 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7208 output_asm_insn ("copy %%r31,%%r2", xoperands);
7209 delay_slot_filled = 1;
7212 else
7214 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7215 || (TARGET_64BIT && !TARGET_GAS))
7217 /* The HP assembler and linker can handle relocations
7218 for the difference of two symbols. GAS and the HP
7219 linker can't do this when one of the symbols is
7220 external. */
7221 xoperands[1] = gen_label_rtx ();
7222 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7223 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7224 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7225 CODE_LABEL_NUMBER (xoperands[1]));
7226 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7228 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7230 /* GAS currently can't generate the relocations that
7231 are needed for the SOM linker under HP-UX using this
7232 sequence. The GNU linker doesn't generate the stubs
7233 that are needed for external calls on TARGET_ELF32
7234 with this sequence. For now, we have to use a
7235 longer plabel sequence when using GAS. */
7236 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7237 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7238 xoperands);
7239 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7240 xoperands);
7242 else
7244 /* Emit a long plabel-based call sequence. This is
7245 essentially an inline implementation of $$dyncall.
7246 We don't actually try to call $$dyncall as this is
7247 as difficult as calling the function itself. */
7248 struct deferred_plabel *p = get_plabel (XSTR (call_dest, 0));
7250 xoperands[0] = p->internal_label;
7251 xoperands[1] = gen_label_rtx ();
7253 /* Since the call is indirect, FP arguments in registers
7254 need to be copied to the general registers. Then, the
7255 argument relocation stub will copy them back. */
7256 if (TARGET_SOM)
7257 copy_fp_args (insn);
7259 if (flag_pic)
7261 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7262 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7263 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7265 else
7267 output_asm_insn ("addil LR'%0-$global$,%%r27",
7268 xoperands);
7269 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7270 xoperands);
7273 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7274 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7275 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7276 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7278 if (!sibcall && !TARGET_PA_20)
7280 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7281 if (TARGET_NO_SPACE_REGS)
7282 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7283 else
7284 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7288 if (TARGET_PA_20)
7290 if (sibcall)
7291 output_asm_insn ("bve (%%r1)", xoperands);
7292 else
7294 if (indirect_call)
7296 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7297 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7298 delay_slot_filled = 1;
7300 else
7301 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7304 else
7306 if (!TARGET_NO_SPACE_REGS)
7307 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7308 xoperands);
7310 if (sibcall)
7312 if (TARGET_NO_SPACE_REGS)
7313 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7314 else
7315 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7317 else
7319 if (TARGET_NO_SPACE_REGS)
7320 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7321 else
7322 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7324 if (indirect_call)
7325 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7326 else
7327 output_asm_insn ("copy %%r31,%%r2", xoperands);
7328 delay_slot_filled = 1;
7335 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7336 output_asm_insn ("nop", xoperands);
7338 /* We are done if there isn't a jump in the delay slot. */
7339 if (seq_length == 0
7340 || delay_insn_deleted
7341 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7342 return "";
7344 /* A sibcall should never have a branch in the delay slot. */
7345 if (sibcall)
7346 abort ();
7348 /* This call has an unconditional jump in its delay slot. */
7349 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7351 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7353 /* See if the return address can be adjusted. Use the containing
7354 sequence insn's address. */
7355 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7356 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7357 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7359 if (VAL_14_BITS_P (distance))
7361 xoperands[1] = gen_label_rtx ();
7362 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7363 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7364 CODE_LABEL_NUMBER (xoperands[1]));
7366 else
7367 output_asm_insn ("nop\n\tb,n %0", xoperands);
7369 else
7370 output_asm_insn ("b,n %0", xoperands);
7372 /* Delete the jump. */
7373 PUT_CODE (NEXT_INSN (insn), NOTE);
7374 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7375 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7377 return "";
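/* For reference, the TARGET_LONG_ABS_CALL path above emits a non-PIC
   long call like the following, with the return pointer copy filling
   the delay slot ("foo" is a hypothetical callee):

       ldil L'foo,%r1
       ble R'foo(%sr4,%r1)
       copy %r31,%r2  */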
7380 /* Return the attribute length of the indirect call instruction INSN.
7381 The length must match the code generated by output_indirect_call.
7382 The returned length includes the delay slot. Currently, the delay
7383 slot of an indirect call sequence is not exposed and it is used by
7384 the sequence itself. */
7386 int
7387 attr_length_indirect_call (rtx insn)
7389 unsigned long distance = -1;
7390 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7392 if (INSN_ADDRESSES_SET_P ())
7394 distance = (total + insn_current_reference_address (insn));
7395 if (distance < total)
7396 distance = -1;
7399 if (TARGET_64BIT)
7400 return 12;
7402 if (TARGET_FAST_INDIRECT_CALLS
7403 || (!TARGET_PORTABLE_RUNTIME
7404 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7405 return 8;
7407 if (flag_pic)
7408 return 24;
7410 if (TARGET_PORTABLE_RUNTIME)
7411 return 20;
7413 /* Out of reach, can use ble. */
7414 return 12;
7417 const char *
7418 output_indirect_call (rtx insn, rtx call_dest)
7420 rtx xoperands[1];
7422 if (TARGET_64BIT)
7424 xoperands[0] = call_dest;
7425 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7426 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7427 return "";
7430 /* First the special case for kernels, level 0 systems, etc. */
7431 if (TARGET_FAST_INDIRECT_CALLS)
7432 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7434 /* Now the normal case -- we can reach $$dyncall directly or
7435 we're sure that we can get there via a long-branch stub.
7437 No need to check target flags as the length uniquely identifies
7438 the remaining cases. */
7439 if (attr_length_indirect_call (insn) == 8)
7440 return ".CALL\tARGW0=GR\n\t{bl|b,l} $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7442 /* Long millicode call, but we are not generating PIC or portable runtime
7443 code. */
7444 if (attr_length_indirect_call (insn) == 12)
7445 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7447 /* Long millicode call for portable runtime. */
7448 if (attr_length_indirect_call (insn) == 20)
7449 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7451 /* We need a long PIC call to $$dyncall. */
7452 xoperands[0] = NULL_RTX;
7453 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7454 if (TARGET_SOM || !TARGET_GAS)
7456 xoperands[0] = gen_label_rtx ();
7457 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7458 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7459 CODE_LABEL_NUMBER (xoperands[0]));
7460 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7462 else
7464 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7465 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7466 xoperands);
7468 output_asm_insn ("blr %%r0,%%r2", xoperands);
7469 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7470 return "";
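/* The in-range case above is the usual two insn $$dyncall sequence,
   with the callee address already loaded into %r22 by the call
   expander and the return pointer copy in the delay slot:

       .CALL ARGW0=GR
       bl $$dyncall,%r31
       copy %r31,%r2  */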
7473 /* Return the total length of the save and restore instructions needed for
7474 the data linkage table pointer (i.e., the PIC register) across the call
7475 instruction INSN. No-return calls do not require a save and restore.
7476 In addition, we may be able to avoid the save and restore for calls
7477 within the same translation unit. */
7479 int
7480 attr_length_save_restore_dltp (rtx insn)
7482 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7483 return 0;
7485 return 8;
7488 /* In HPUX 8.0's shared library scheme, special relocations are needed
7489 for function labels if they might be passed to a function
7490 in a shared library (because shared libraries don't live in code
7491 space), and special magic is needed to construct their address. */
7493 void
7494 hppa_encode_label (rtx sym)
7496 const char *str = XSTR (sym, 0);
7497 int len = strlen (str) + 1;
7498 char *newstr, *p;
7500 p = newstr = alloca (len + 1);
7501 *p++ = '@';
7502 strcpy (p, str);
7504 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
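/* For example, this rewrites the symbol name "foo" as "@foo"; the
   '@' marks a function label living in code space and is removed
   again by pa_strip_name_encoding below.  */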
7507 static void
7508 pa_encode_section_info (tree decl, rtx rtl, int first)
7510 if (first && TEXT_SPACE_P (decl))
7512 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7513 if (TREE_CODE (decl) == FUNCTION_DECL)
7514 hppa_encode_label (XEXP (rtl, 0));
7518 /* This is sort of inverse to pa_encode_section_info. */
7520 static const char *
7521 pa_strip_name_encoding (const char *str)
7523 str += (*str == '@');
7524 str += (*str == '*');
7525 return str;
7528 int
7529 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7531 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7534 /* Returns 1 if OP is a function label involved in a simple addition
7535 with a constant. Used to keep certain patterns from matching
7536 during instruction combination. */
7537 int
7538 is_function_label_plus_const (rtx op)
7540 /* Strip off any CONST. */
7541 if (GET_CODE (op) == CONST)
7542 op = XEXP (op, 0);
7544 return (GET_CODE (op) == PLUS
7545 && function_label_operand (XEXP (op, 0), Pmode)
7546 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7549 /* Output assembly code for a thunk to FUNCTION. */
7551 static void
7552 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7553 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7554 tree function)
7556 const char *fname = XSTR (XEXP (DECL_RTL (function), 0), 0);
7557 const char *tname = XSTR (XEXP (DECL_RTL (thunk_fndecl), 0), 0);
7558 int val_14 = VAL_14_BITS_P (delta);
7559 int nbytes = 0;
7560 static unsigned int current_thunk_number;
7561 char label[16];
7563 ASM_OUTPUT_LABEL (file, tname);
7564 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7566 fname = (*targetm.strip_name_encoding) (fname);
7567 tname = (*targetm.strip_name_encoding) (tname);
7569 /* Output the thunk. We know that the function is in the same
7570 translation unit (i.e., the same space) as the thunk, and that
7571 thunks are output after their method. Thus, we don't need an
7572 external branch to reach the function. With SOM and GAS,
7573 functions and thunks are effectively in different sections.
7574 Thus, we can always use an IA-relative branch and the linker
7575 will add a long branch stub if necessary.
7577 However, we have to be careful when generating PIC code on the
7578 SOM port to ensure that the sequence does not transfer to an
7579 import stub for the target function as this could clobber the
7580 return value saved at SP-24. This would also apply to the
7581 32-bit linux port if the multi-space model is implemented. */
7582 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7583 && !(flag_pic && TREE_PUBLIC (function))
7584 && (TARGET_GAS || last_address < 262132))
7585 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7586 && ((targetm.have_named_sections
7587 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7588 /* The GNU 64-bit linker has rather poor stub management.
7589 So, we use a long branch from thunks that aren't in
7590 the same section as the target function. */
7591 && ((!TARGET_64BIT
7592 && (DECL_SECTION_NAME (thunk_fndecl)
7593 != DECL_SECTION_NAME (function)))
7594 || ((DECL_SECTION_NAME (thunk_fndecl)
7595 == DECL_SECTION_NAME (function))
7596 && last_address < 262132)))
7597 || (!targetm.have_named_sections && last_address < 262132))))
7599 if (val_14)
7601 fprintf (file, "\tb %s\n\tldo " HOST_WIDE_INT_PRINT_DEC
7602 "(%%r26),%%r26\n", fname, delta);
7603 nbytes += 8;
7605 else
7607 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7608 ",%%r26\n", delta);
7609 fprintf (file, "\tb %s\n\tldo R'" HOST_WIDE_INT_PRINT_DEC
7610 "(%%r1),%%r26\n", fname, delta);
7611 nbytes += 12;
7614 else if (TARGET_64BIT)
7616 /* We only have one call-clobbered scratch register, so we can't
7617 make use of the delay slot if delta doesn't fit in 14 bits. */
7618 if (!val_14)
7619 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7620 ",%%r26\n\tldo R'" HOST_WIDE_INT_PRINT_DEC
7621 "(%%r1),%%r26\n", delta, delta);
7623 fprintf (file, "\tb,l .+8,%%r1\n");
7625 if (TARGET_GAS)
7627 fprintf (file, "\taddil L'%s-$PIC_pcrel$0+4,%%r1\n", fname);
7628 fprintf (file, "\tldo R'%s-$PIC_pcrel$0+8(%%r1),%%r1\n", fname);
7630 else
7632 int off = val_14 ? 8 : 16;
7633 fprintf (file, "\taddil L'%s-%s-%d,%%r1\n", fname, tname, off);
7634 fprintf (file, "\tldo R'%s-%s-%d(%%r1),%%r1\n", fname, tname, off);
7637 if (val_14)
7639 fprintf (file, "\tbv %%r0(%%r1)\n\tldo ");
7640 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7641 nbytes += 20;
7643 else
7645 fprintf (file, "\tbv,n %%r0(%%r1)\n");
7646 nbytes += 24;
7649 else if (TARGET_PORTABLE_RUNTIME)
7651 fprintf (file, "\tldil L'%s,%%r1\n", fname);
7652 fprintf (file, "\tldo R'%s(%%r1),%%r22\n", fname);
7654 if (val_14)
7656 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7657 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7658 nbytes += 16;
7660 else
7662 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7663 ",%%r26\n", delta);
7664 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7665 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7666 nbytes += 20;
7669 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7671 /* The function is accessible from outside this module. The only
7672 way to avoid an import stub between the thunk and function is to
7673 call the function directly with an indirect sequence similar to
7674 that used by $$dyncall. This is possible because $$dyncall acts
7675 as the import stub in an indirect call. */
7676 const char *lab;
7678 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7679 lab = (*targetm.strip_name_encoding) (label);
7681 fprintf (file, "\taddil LT'%s,%%r19\n", lab);
7682 fprintf (file, "\tldw RT'%s(%%r1),%%r22\n", lab);
7683 fprintf (file, "\tldw 0(%%sr0,%%r22),%%r22\n");
7684 fprintf (file, "\tbb,>=,n %%r22,30,.+16\n");
7685 fprintf (file, "\tdepi 0,31,2,%%r22\n");
7686 fprintf (file, "\tldw 4(%%sr0,%%r22),%%r19\n");
7687 fprintf (file, "\tldw 0(%%sr0,%%r22),%%r22\n");
7688 if (!val_14)
7690 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7691 ",%%r26\n", delta);
7692 nbytes += 4;
7694 if (TARGET_PA_20)
7696 fprintf (file, "\tbve (%%r22)\n\tldo ");
7697 nbytes += 36;
7699 else
7701 if (TARGET_NO_SPACE_REGS)
7703 fprintf (file, "\tbe 0(%%sr4,%%r22)\n\tldo ");
7704 nbytes += 36;
7706 else
7708 fprintf (file, "\tldsid (%%sr0,%%r22),%%r21\n");
7709 fprintf (file, "\tmtsp %%r21,%%sr0\n");
7710 fprintf (file, "\tbe 0(%%sr0,%%r22)\n\tldo ");
7711 nbytes += 44;
7715 if (val_14)
7716 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7717 else
7718 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7720 else if (flag_pic)
7722 if (TARGET_PA_20)
7723 fprintf (file, "\tb,l .+8,%%r1\n");
7724 else
7725 fprintf (file, "\tbl .+8,%%r1\n");
7727 if (TARGET_SOM || !TARGET_GAS)
7729 fprintf (file, "\taddil L'%s-%s-8,%%r1\n", fname, tname);
7730 fprintf (file, "\tldo R'%s-%s-8(%%r1),%%r22\n", fname, tname);
7732 else
7734 fprintf (file, "\taddil L'%s-$PIC_pcrel$0+4,%%r1\n", fname);
7735 fprintf (file, "\tldo R'%s-$PIC_pcrel$0+8(%%r1),%%r22\n", fname);
7738 if (val_14)
7740 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7741 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7742 nbytes += 20;
7744 else
7746 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7747 ",%%r26\n", delta);
7748 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7749 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7750 nbytes += 24;
7753 else
7755 if (!val_14)
7756 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC ",%%r26\n", delta);
7758 fprintf (file, "\tldil L'%s,%%r22\n", fname);
7759 fprintf (file, "\tbe R'%s(%%sr4,%%r22)\n\tldo ", fname);
7761 if (val_14)
7763 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7764 nbytes += 12;
7766 else
7768 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7769 nbytes += 16;
7773 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7775 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7777 data_section ();
7778 fprintf (file, "\t.align 4\n");
7779 ASM_OUTPUT_LABEL (file, label);
7780 fprintf (file, "\t.word P'%s\n", fname);
7781 function_section (thunk_fndecl);
7784 current_thunk_number++;
7785 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
7786 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
7787 last_address += nbytes;
7788 update_total_code_bytes (nbytes);
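/* As an illustrative sketch of the simplest case above (function name
   and delta hypothetical): when the first arm is taken and DELTA fits
   in 14 bits, the entire thunk body is just

	b foo
	ldo 8(%r26),%r26

   a direct branch, with the adjustment of the "this" pointer in %r26
   done from the delay slot.  */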
7791 /* Only direct calls to static functions are allowed to be sibling (tail)
7792 call optimized.
7794 This restriction is necessary because some linker-generated stubs will
7795 store return pointers into rp' in some cases which might clobber a
7796 live value already in rp'.
7798 In a sibcall the current function and the target function share stack
7799 space. Thus if the path to the current function and the path to the
7800 target function save a value in rp', they save the value into the
7801 same stack slot, which has undesirable consequences.
7803 Because of the deferred binding nature of shared libraries, any function
7804 with external scope could be in a different load module and thus require
7805 rp' to be saved when calling that function. So sibcall optimizations
7806 can only be safe for static functions.
7808 Note that GCC never needs return value relocations, so we don't have to
7809 worry about static calls with return value relocations (which require
7810 saving rp').
7812 It is safe to perform a sibcall optimization when the target function
7813 will never return. */
7814 static bool
7815 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7817 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
7818 single subspace mode and the call is not indirect. As far as I know,
7819 there is no operating system support for the multiple subspace mode.
7820 It might be possible to support indirect calls if we didn't use
7821 $$dyncall (see the indirect sequence generated in output_call). */
7822 if (TARGET_ELF32)
7823 return (decl != NULL_TREE);
7825 /* Sibcalls are not ok because the arg pointer register is not a fixed
7826 register. This prevents the sibcall optimization from occurring. In
7827 addition, there are problems with stub placement using GNU ld. This
7828 is because a normal sibcall branch uses a 17-bit relocation while
7829 a regular call branch uses a 22-bit relocation. As a result, more
7830 care needs to be taken in the placement of long-branch stubs. */
7831 if (TARGET_64BIT)
7832 return false;
7834 return (decl
7835 && !TARGET_PORTABLE_RUNTIME
7836 && !TREE_PUBLIC (decl));
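/* As an illustrative sketch of the rule above, in

     static int g (int);
     int f (int x) { return g (x); }

   the call to g may become a sibcall (a plain branch) on the 32-bit
   port because g is static and thus bound within this load module; if
   g were extern, the call would have to remain a normal call so that
   any rp' handling in stubs stays correct.  */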
7839 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7840 use in fmpyadd instructions. */
7842 fmpyaddoperands (rtx *operands)
7844 enum machine_mode mode = GET_MODE (operands[0]);
7846 /* Must be a floating point mode. */
7847 if (mode != SFmode && mode != DFmode)
7848 return 0;
7850 /* All modes must be the same. */
7851 if (! (mode == GET_MODE (operands[1])
7852 && mode == GET_MODE (operands[2])
7853 && mode == GET_MODE (operands[3])
7854 && mode == GET_MODE (operands[4])
7855 && mode == GET_MODE (operands[5])))
7856 return 0;
7858 /* All operands must be registers. */
7859 if (! (GET_CODE (operands[1]) == REG
7860 && GET_CODE (operands[2]) == REG
7861 && GET_CODE (operands[3]) == REG
7862 && GET_CODE (operands[4]) == REG
7863 && GET_CODE (operands[5]) == REG))
7864 return 0;
7866 /* Only 2 real operands to the addition. One of the input operands must
7867 be the same as the output operand. */
7868 if (! rtx_equal_p (operands[3], operands[4])
7869 && ! rtx_equal_p (operands[3], operands[5]))
7870 return 0;
7872 /* Inout operand of add can not conflict with any operands from multiply. */
7873 if (rtx_equal_p (operands[3], operands[0])
7874 || rtx_equal_p (operands[3], operands[1])
7875 || rtx_equal_p (operands[3], operands[2]))
7876 return 0;
7878 /* The multiply can not feed into the addition operands. */
7879 if (rtx_equal_p (operands[4], operands[0])
7880 || rtx_equal_p (operands[5], operands[0]))
7881 return 0;
7883 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
7884 if (mode == SFmode
7885 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7886 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7887 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7888 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7889 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
7890 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
7891 return 0;
7893 /* Passed. Operands are suitable for fmpyadd. */
7894 return 1;
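/* Illustrative operand roles for the checks above (registers
   hypothetical): operands[0..2] are the multiply's destination and two
   sources, operands[3..5] the add's destination and two sources, e.g.

     fr6 = fr4 * fr5      operands[0]=fr6, [1]=fr4, [2]=fr5
     fr8 = fr8 + fr7      operands[3]=fr8, [4]=fr8, [5]=fr7

   The add reuses its destination fr8 as a source, and neither fr8 nor
   fr7 overlaps the multiply's registers, so the test returns 1.  */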
7897 #if !defined(USE_COLLECT2)
7898 static void
7899 pa_asm_out_constructor (rtx symbol, int priority)
7901 if (!function_label_operand (symbol, VOIDmode))
7902 hppa_encode_label (symbol);
7904 #ifdef CTORS_SECTION_ASM_OP
7905 default_ctor_section_asm_out_constructor (symbol, priority);
7906 #else
7907 # ifdef TARGET_ASM_NAMED_SECTION
7908 default_named_section_asm_out_constructor (symbol, priority);
7909 # else
7910 default_stabs_asm_out_constructor (symbol, priority);
7911 # endif
7912 #endif
7915 static void
7916 pa_asm_out_destructor (rtx symbol, int priority)
7918 if (!function_label_operand (symbol, VOIDmode))
7919 hppa_encode_label (symbol);
7921 #ifdef DTORS_SECTION_ASM_OP
7922 default_dtor_section_asm_out_destructor (symbol, priority);
7923 #else
7924 # ifdef TARGET_ASM_NAMED_SECTION
7925 default_named_section_asm_out_destructor (symbol, priority);
7926 # else
7927 default_stabs_asm_out_destructor (symbol, priority);
7928 # endif
7929 #endif
7931 #endif
7933 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7934 use in fmpysub instructions. */
7936 fmpysuboperands (rtx *operands)
7938 enum machine_mode mode = GET_MODE (operands[0]);
7940 /* Must be a floating point mode. */
7941 if (mode != SFmode && mode != DFmode)
7942 return 0;
7944 /* All modes must be the same. */
7945 if (! (mode == GET_MODE (operands[1])
7946 && mode == GET_MODE (operands[2])
7947 && mode == GET_MODE (operands[3])
7948 && mode == GET_MODE (operands[4])
7949 && mode == GET_MODE (operands[5])))
7950 return 0;
7952 /* All operands must be registers. */
7953 if (! (GET_CODE (operands[1]) == REG
7954 && GET_CODE (operands[2]) == REG
7955 && GET_CODE (operands[3]) == REG
7956 && GET_CODE (operands[4]) == REG
7957 && GET_CODE (operands[5]) == REG))
7958 return 0;
7960 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
7961 operation, so operands[4] must be the same as operands[3]. */
7962 if (! rtx_equal_p (operands[3], operands[4]))
7963 return 0;
7965 /* The multiply can not feed into the subtraction. */
7966 if (rtx_equal_p (operands[5], operands[0]))
7967 return 0;
7969 /* Inout operand of sub can not conflict with any operands from multiply. */
7970 if (rtx_equal_p (operands[3], operands[0])
7971 || rtx_equal_p (operands[3], operands[1])
7972 || rtx_equal_p (operands[3], operands[2]))
7973 return 0;
7975 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
7976 if (mode == SFmode
7977 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7978 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7979 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7980 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7981 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
7982 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
7983 return 0;
7985 /* Passed. Operands are suitable for fmpysub. */
7986 return 1;
7990 plus_xor_ior_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7992 return (GET_CODE (op) == PLUS || GET_CODE (op) == XOR
7993 || GET_CODE (op) == IOR);
7996 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
7997 constants for shadd instructions. */
7998 static int
7999 shadd_constant_p (int val)
8001 if (val == 2 || val == 4 || val == 8)
8002 return 1;
8003 else
8004 return 0;
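/* These are the scalings accepted by the sh1add, sh2add and sh3add
   shift-and-add instructions, which compute (x << 1) + y, (x << 2) + y
   and (x << 3) + y; e.g. x * 5 is a single sh2add of x with itself.  */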
8007 /* Return 1 if OP is a CONST_INT with the value 2, 4, or 8. These are
8008 the valid constants for shadd instructions. */
8010 shadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8012 return (GET_CODE (op) == CONST_INT && shadd_constant_p (INTVAL (op)));
8015 /* Return 1 if OP is valid as a base or index register in a
8016 REG+REG address. */
8019 borx_reg_operand (rtx op, enum machine_mode mode)
8021 if (GET_CODE (op) != REG)
8022 return 0;
8024 /* We must reject virtual registers as the only expressions that
8025 can be instantiated are REG and REG+CONST. */
8026 if (op == virtual_incoming_args_rtx
8027 || op == virtual_stack_vars_rtx
8028 || op == virtual_stack_dynamic_rtx
8029 || op == virtual_outgoing_args_rtx
8030 || op == virtual_cfa_rtx)
8031 return 0;
8033 /* While it's always safe to index off the frame pointer, it's not
8034 profitable to do so when the frame pointer is being eliminated. */
8035 if (!reload_completed
8036 && flag_omit_frame_pointer
8037 && !current_function_calls_alloca
8038 && op == frame_pointer_rtx)
8039 return 0;
8041 return register_operand (op, mode);
8044 /* Return 1 if this operand is anything other than a hard register. */
8047 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8049 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8052 /* Return 1 if INSN branches forward. Should be using insn_addresses
8053 to avoid walking through all the insns... */
8054 static int
8055 forward_branch_p (rtx insn)
8057 rtx label = JUMP_LABEL (insn);
8059 while (insn)
8061 if (insn == label)
8062 break;
8063 else
8064 insn = NEXT_INSN (insn);
8067 return (insn == label);
8070 /* Return 1 if OP is an equality comparison, else return 0. */
8072 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8074 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8077 /* Return 1 if OP is an operator suitable for use in a movb instruction. */
8079 movb_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8081 return (GET_CODE (op) == EQ || GET_CODE (op) == NE
8082 || GET_CODE (op) == LT || GET_CODE (op) == GE);
8085 /* Return 1 if INSN is in the delay slot of a call instruction. */
8087 jump_in_call_delay (rtx insn)
8090 if (GET_CODE (insn) != JUMP_INSN)
8091 return 0;
8093 if (PREV_INSN (insn)
8094 && PREV_INSN (PREV_INSN (insn))
8095 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8097 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8099 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8100 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8103 else
8104 return 0;
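/* In rtl terms (a sketch), a filled call is wrapped as

     (sequence [(call_insn ...) (jump_insn ...)])

   and INSN sits in a call delay slot exactly when it is element 1 of
   such a SEQUENCE.  */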
8107 /* Output an unconditional move and branch insn. */
8109 const char *
8110 output_parallel_movb (rtx *operands, int length)
8112 /* These are the cases in which we win. */
8113 if (length == 4)
8114 return "mov%I1b,tr %1,%0,%2";
8116 /* None of these cases wins, but they don't lose either. */
8117 if (dbr_sequence_length () == 0)
8119 /* Nothing in the delay slot, fake it by putting the combined
8120 insn (the copy or add) in the delay slot of a bl. */
8121 if (GET_CODE (operands[1]) == CONST_INT)
8122 return "b %2\n\tldi %1,%0";
8123 else
8124 return "b %2\n\tcopy %1,%0";
8126 else
8128 /* Something in the delay slot, but we've got a long branch. */
8129 if (GET_CODE (operands[1]) == CONST_INT)
8130 return "ldi %1,%0\n\tb %2";
8131 else
8132 return "copy %1,%0\n\tb %2";
8136 /* Output an unconditional add and branch insn. */
8138 const char *
8139 output_parallel_addb (rtx *operands, int length)
8141 /* To make life easy we want operand0 to be the shared input/output
8142 operand and operand1 to be the readonly operand. */
8143 if (operands[0] == operands[1])
8144 operands[1] = operands[2];
8146 /* These are the cases in which we win. */
8147 if (length == 4)
8148 return "add%I1b,tr %1,%0,%3";
8150 /* None of these cases wins, but they don't lose either. */
8151 if (dbr_sequence_length () == 0)
8153 /* Nothing in the delay slot, fake it by putting the combined
8154 insn (the copy or add) in the delay slot of a bl. */
8155 return "b %3\n\tadd%I1 %1,%0,%0";
8157 else
8159 /* Something in the delay slot, but we've got a long branch. */
8160 return "add%I1 %1,%0,%0\n\tb %3";
8164 /* Return nonzero if INSN (a jump insn) immediately follows a call
8165 to a named function. This is used to avoid filling the delay slot
8166 of the jump since it can usually be eliminated by modifying RP in
8167 the delay slot of the call. */
8170 following_call (rtx insn)
8172 if (! TARGET_JUMP_IN_DELAY)
8173 return 0;
8175 /* Find the previous real insn, skipping NOTEs. */
8176 insn = PREV_INSN (insn);
8177 while (insn && GET_CODE (insn) == NOTE)
8178 insn = PREV_INSN (insn);
8180 /* Check for CALL_INSNs and millicode calls. */
8181 if (insn
8182 && ((GET_CODE (insn) == CALL_INSN
8183 && get_attr_type (insn) != TYPE_DYNCALL)
8184 || (GET_CODE (insn) == INSN
8185 && GET_CODE (PATTERN (insn)) != SEQUENCE
8186 && GET_CODE (PATTERN (insn)) != USE
8187 && GET_CODE (PATTERN (insn)) != CLOBBER
8188 && get_attr_type (insn) == TYPE_MILLI)))
8189 return 1;
8191 return 0;
8194 /* We use this hook to perform a PA-specific optimization which is difficult
8195 to do in earlier passes.
8197 We want the delay slots of branches within jump tables to be filled.
8198 None of the compiler passes at the moment even has the notion that a
8199 PA jump table doesn't contain addresses, but instead contains actual
8200 instructions!
8202 Because we actually jump into the table, the addresses of each entry
8203 must stay constant in relation to the beginning of the table (which
8204 itself must stay constant relative to the instruction to jump into
8205 it). I don't believe we can guarantee earlier passes of the compiler
8206 will adhere to those rules.
8208 So, late in the compilation process we find all the jump tables, and
8209 expand them into real code -- e.g., each entry in the jump table vector
8210 will get an appropriate label followed by a jump to the final target.
8212 Reorg and the final jump pass can then optimize these branches and
8213 fill their delay slots. We end up with smaller, more efficient code.
8215 The jump instructions within the table are special; we must be able
8216 to identify them during assembly output (if the jumps don't get filled
8217 we need to emit a nop rather than nullifying the delay slot). We
8218 identify jumps in switch tables by using insns with the attribute
8219 type TYPE_BTABLE_BRANCH.
8221 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8222 insns. This serves two purposes, first it prevents jump.c from
8223 noticing that the last N entries in the table jump to the instruction
8224 immediately after the table and deleting the jumps. Second, those
8225 insns mark where we should emit .begin_brtab and .end_brtab directives
8226 when using GAS (allows for better link time optimizations). */
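/* Schematically (an illustrative sketch, labels hypothetical), a
   three-entry table becomes

     begin_brtab
     L$e0:  b L$t0		; one label plus short jump per entry
     L$e1:  b L$t1
     L$e2:  b L$t2
     end_brtab

   so every entry keeps a fixed size and offset from the start of the
   table, while reorg stays free to fill each branch's delay slot.  */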
8228 static void
8229 pa_reorg (void)
8231 rtx insn;
8233 remove_useless_addtr_insns (1);
8235 if (pa_cpu < PROCESSOR_8000)
8236 pa_combine_instructions ();
8239 /* This is fairly cheap, so always run it if optimizing. */
8240 if (optimize > 0 && !TARGET_BIG_SWITCH)
8242 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8243 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8245 rtx pattern, tmp, location, label;
8246 unsigned int length, i;
8248 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8249 if (GET_CODE (insn) != JUMP_INSN
8250 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8251 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8252 continue;
8254 /* Emit marker for the beginning of the branch table. */
8255 emit_insn_before (gen_begin_brtab (), insn);
8257 pattern = PATTERN (insn);
8258 location = PREV_INSN (insn);
8259 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8261 for (i = 0; i < length; i++)
8263 /* Emit a label before each jump to keep jump.c from
8264 removing this code. */
8265 tmp = gen_label_rtx ();
8266 LABEL_NUSES (tmp) = 1;
8267 emit_label_after (tmp, location);
8268 location = NEXT_INSN (location);
8270 if (GET_CODE (pattern) == ADDR_VEC)
8271 label = XEXP (XVECEXP (pattern, 0, i), 0);
8272 else
8273 label = XEXP (XVECEXP (pattern, 1, i), 0);
8275 tmp = gen_short_jump (label);
8277 /* Emit the jump itself. */
8278 tmp = emit_jump_insn_after (tmp, location);
8279 JUMP_LABEL (tmp) = label;
8280 LABEL_NUSES (label)++;
8281 location = NEXT_INSN (location);
8283 /* Emit a BARRIER after the jump. */
8284 emit_barrier_after (location);
8285 location = NEXT_INSN (location);
8288 /* Emit marker for the end of the branch table. */
8289 emit_insn_before (gen_end_brtab (), location);
8290 location = NEXT_INSN (location);
8291 emit_barrier_after (location);
8293 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8294 delete_insn (insn);
8297 else
8299 /* Still need brtab marker insns. FIXME: the presence of these
8300 markers disables output of the branch table to readonly memory,
8301 and any alignment directives that might be needed. Possibly,
8302 the begin_brtab insn should be output before the label for the
8303 table. This doesn't matter at the moment since the tables are
8304 always output in the text section. */
8305 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8307 /* Find an ADDR_VEC insn. */
8308 if (GET_CODE (insn) != JUMP_INSN
8309 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8310 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8311 continue;
8313 /* Now generate markers for the beginning and end of the
8314 branch table. */
8315 emit_insn_before (gen_begin_brtab (), insn);
8316 emit_insn_after (gen_end_brtab (), insn);
8321 /* The PA has a number of odd instructions which can perform multiple
8322 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8323 it may be profitable to combine two instructions into one instruction
8324 with two outputs. It's not profitable on PA2.0 machines because the
8325 two outputs would take two slots in the reorder buffers.
8327 This routine finds instructions which can be combined and combines
8328 them. We only support some of the potential combinations, and we
8329 only try common ways to find suitable instructions.
8331 * addb can add two registers or a register and a small integer
8332 and jump to a nearby (+-8k) location. Normally the jump to the
8333 nearby location is conditional on the result of the add, but by
8334 using the "true" condition we can make the jump unconditional.
8335 Thus addb can perform two independent operations in one insn.
8337 * movb is similar to addb in that it can perform a reg->reg
8338 or small immediate->reg copy and jump to a nearby (+-8k) location.
8340 * fmpyadd and fmpysub can perform a FP multiply and either an
8341 FP add or FP sub if the operands of the multiply and add/sub are
8342 independent (there are other minor restrictions). Note both
8343 the fmpy and fadd/fsub can in theory move to better spots according
8344 to data dependencies, but for now we require the fmpy stay at a
8345 fixed location.
8347 * Many of the memory operations can perform pre & post updates
8348 of index registers. GCC's pre/post increment/decrement addressing
8349 is far too simple to take advantage of all the possibilities. This
8350 pass may not be suitable since those insns may not be independent.
8352 * comclr can compare two ints or an int and a register, nullify
8353 the following instruction and zero some other register. This
8354 is more difficult to use as it's harder to find an insn which
8355 will generate a comclr than finding something like an unconditional
8356 branch. (conditional moves & long branches create comclr insns).
8358 * Most arithmetic operations can conditionally skip the next
8359 instruction. They can be viewed as "perform this operation
8360 and conditionally jump to this nearby location" (where nearby
8361 is an insn away). These are difficult to use due to the
8362 branch length restrictions. */
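/* Concretely (an illustrative sketch, registers hypothetical), an fmpy
   anchor combined with an fadd floater becomes one two-output insn
   whose pattern is simply a PARALLEL of the two originals:

     (parallel [(set (reg:SF fr6) (mult:SF (reg:SF fr4) (reg:SF fr5)))
                (set (reg:SF fr8) (plus:SF (reg:SF fr8) (reg:SF fr7)))])
*/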
8364 static void
8365 pa_combine_instructions (void)
8367 rtx anchor, new;
8369 /* This can get expensive since the basic algorithm is on the
8370 order of O(n^2) (or worse). Only do it for -O2 or higher
8371 levels of optimization. */
8372 if (optimize < 2)
8373 return;
8375 /* Walk down the list of insns looking for "anchor" insns which
8376 may be combined with "floating" insns. As the name implies,
8377 "anchor" instructions don't move, while "floating" insns may
8378 move around. */
8379 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8380 new = make_insn_raw (new);
8382 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8384 enum attr_pa_combine_type anchor_attr;
8385 enum attr_pa_combine_type floater_attr;
8387 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8388 Also ignore any special USE insns. */
8389 if ((GET_CODE (anchor) != INSN
8390 && GET_CODE (anchor) != JUMP_INSN
8391 && GET_CODE (anchor) != CALL_INSN)
8392 || GET_CODE (PATTERN (anchor)) == USE
8393 || GET_CODE (PATTERN (anchor)) == CLOBBER
8394 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8395 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8396 continue;
8398 anchor_attr = get_attr_pa_combine_type (anchor);
8399 /* See if anchor is an insn suitable for combination. */
8400 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8401 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8402 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8403 && ! forward_branch_p (anchor)))
8405 rtx floater;
8407 for (floater = PREV_INSN (anchor);
8408 floater;
8409 floater = PREV_INSN (floater))
8411 if (GET_CODE (floater) == NOTE
8412 || (GET_CODE (floater) == INSN
8413 && (GET_CODE (PATTERN (floater)) == USE
8414 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8415 continue;
8417 /* Anything except a regular INSN will stop our search. */
8418 if (GET_CODE (floater) != INSN
8419 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8420 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8422 floater = NULL_RTX;
8423 break;
8426 /* See if FLOATER is suitable for combination with the
8427 anchor. */
8428 floater_attr = get_attr_pa_combine_type (floater);
8429 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8430 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8431 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8432 && floater_attr == PA_COMBINE_TYPE_FMPY))
8434 /* If ANCHOR and FLOATER can be combined, then we're
8435 done with this pass. */
8436 if (pa_can_combine_p (new, anchor, floater, 0,
8437 SET_DEST (PATTERN (floater)),
8438 XEXP (SET_SRC (PATTERN (floater)), 0),
8439 XEXP (SET_SRC (PATTERN (floater)), 1)))
8440 break;
8443 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8444 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8446 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8448 if (pa_can_combine_p (new, anchor, floater, 0,
8449 SET_DEST (PATTERN (floater)),
8450 XEXP (SET_SRC (PATTERN (floater)), 0),
8451 XEXP (SET_SRC (PATTERN (floater)), 1)))
8452 break;
8454 else
8456 if (pa_can_combine_p (new, anchor, floater, 0,
8457 SET_DEST (PATTERN (floater)),
8458 SET_SRC (PATTERN (floater)),
8459 SET_SRC (PATTERN (floater))))
8460 break;
8465 /* If we didn't find anything on the backwards scan try forwards. */
8466 if (!floater
8467 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8468 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8470 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8472 if (GET_CODE (floater) == NOTE
8473 || (GET_CODE (floater) == INSN
8474 && (GET_CODE (PATTERN (floater)) == USE
8475 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8477 continue;
8479 /* Anything except a regular INSN will stop our search. */
8480 if (GET_CODE (floater) != INSN
8481 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8482 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8484 floater = NULL_RTX;
8485 break;
8488 /* See if FLOATER is suitable for combination with the
8489 anchor. */
8490 floater_attr = get_attr_pa_combine_type (floater);
8491 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8492 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8493 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8494 && floater_attr == PA_COMBINE_TYPE_FMPY))
8496 /* If ANCHOR and FLOATER can be combined, then we're
8497 done with this pass. */
8498 if (pa_can_combine_p (new, anchor, floater, 1,
8499 SET_DEST (PATTERN (floater)),
8500 XEXP (SET_SRC (PATTERN (floater)),
8501 0),
8502 XEXP (SET_SRC (PATTERN (floater)),
8503 1)))
8504 break;
8509 /* FLOATER will be nonzero if we found a suitable floating
8510 insn for combination with ANCHOR. */
8511 if (floater
8512 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8513 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8515 /* Emit the new instruction and delete the old anchor. */
8516 emit_insn_before (gen_rtx_PARALLEL
8517 (VOIDmode,
8518 gen_rtvec (2, PATTERN (anchor),
8519 PATTERN (floater))),
8520 anchor);
8522 PUT_CODE (anchor, NOTE);
8523 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8524 NOTE_SOURCE_FILE (anchor) = 0;
8526 /* Emit a special USE insn for FLOATER, then delete
8527 the floating insn. */
8528 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8529 delete_insn (floater);
8531 continue;
8533 else if (floater
8534 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8536 rtx temp;
8537 /* Emit the new_jump instruction and delete the old anchor. */
8538 temp
8539 = emit_jump_insn_before (gen_rtx_PARALLEL
8540 (VOIDmode,
8541 gen_rtvec (2, PATTERN (anchor),
8542 PATTERN (floater))),
8543 anchor);
8545 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8546 PUT_CODE (anchor, NOTE);
8547 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8548 NOTE_SOURCE_FILE (anchor) = 0;
8550 /* Emit a special USE insn for FLOATER, then delete
8551 the floating insn. */
8552 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8553 delete_insn (floater);
8554 continue;
8560 static int
8561 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8562 rtx src1, rtx src2)
8564 int insn_code_number;
8565 rtx start, end;
8567 /* Create a PARALLEL with the patterns of ANCHOR and
8568 FLOATER, try to recognize it, then test constraints
8569 for the resulting pattern.
8571 If the pattern doesn't match or the constraints
8572 aren't met keep searching for a suitable floater
8573 insn. */
8574 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8575 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8576 INSN_CODE (new) = -1;
8577 insn_code_number = recog_memoized (new);
8578 if (insn_code_number < 0
8579 || (extract_insn (new), ! constrain_operands (1)))
8580 return 0;
8582 if (reversed)
8584 start = anchor;
8585 end = floater;
8587 else
8589 start = floater;
8590 end = anchor;
8593 /* There are up to three operands to consider. One
8594 output and two inputs.
8596 The output must not be used between FLOATER & ANCHOR
8597 exclusive. The inputs must not be set between
8598 FLOATER and ANCHOR exclusive. */
8600 if (reg_used_between_p (dest, start, end))
8601 return 0;
8603 if (reg_set_between_p (src1, start, end))
8604 return 0;
8606 if (reg_set_between_p (src2, start, end))
8607 return 0;
8609 /* If we get here, then everything is good. */
8610 return 1;
8613 /* Return nonzero if references for INSN are delayed.
8615 Millicode insns are actually function calls with some special
8616 constraints on arguments and register usage.
8618 Millicode calls always expect their arguments in the integer argument
8619 registers, and always return their result in %r29 (ret1). They
8620 are expected to clobber their arguments, %r1, %r29, and the return
8621 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8623 This function tells reorg that the references to arguments and
8624 millicode calls do not appear to happen until after the millicode call.
8625 This allows reorg to put insns which set the argument registers into the
8626 delay slot of the millicode call -- thus they act more like traditional
8627 CALL_INSNs.
8629 Note we can not consider side effects of the insn to be delayed because
8630 the branch and link insn will clobber the return pointer. If we happened
8631 to use the return pointer in the delay slot of the call, then we lose.
8633 get_attr_type will try to recognize the given insn, so make sure to
8634 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8635 in particular. */
8637 insn_refs_are_delayed (rtx insn)
8639 return ((GET_CODE (insn) == INSN
8640 && GET_CODE (PATTERN (insn)) != SEQUENCE
8641 && GET_CODE (PATTERN (insn)) != USE
8642 && GET_CODE (PATTERN (insn)) != CLOBBER
8643 && get_attr_type (insn) == TYPE_MILLI));
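/* As an illustrative sketch of the effect (millicode routine and
   operand hypothetical), reorg may produce

	bl $$divI,%r31
	ldi 7,%r25		; argument setup in the delay slot

   because the millicode call's reference to the argument register is
   treated as happening after the branch-and-link itself.  */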
8646 /* On the HP-PA the value is found in register(s) 28(-29), unless
8647 the mode is SF or DF. Then the value is returned in fr4 (32).
8649 This must perform the same promotions as PROMOTE_MODE, else
8650 PROMOTE_FUNCTION_RETURN will not work correctly.
8652 Small structures must be returned in a PARALLEL on PA64 in order
8653 to match the HP Compiler ABI. */
8656 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8658 enum machine_mode valmode;
8660 /* Aggregates with a size less than or equal to 128 bits are returned
8661 in GR 28(-29). They are left justified. The pad bits are undefined.
8662 Larger aggregates are returned in memory. */
8663 if (TARGET_64BIT && AGGREGATE_TYPE_P (valtype))
8665 rtx loc[2];
8666 int i, offset = 0;
8667 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8669 for (i = 0; i < ub; i++)
8671 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8672 gen_rtx_REG (DImode, 28 + i),
8673 GEN_INT (offset));
8674 offset += 8;
8677 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8680 if ((INTEGRAL_TYPE_P (valtype)
8681 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8682 || POINTER_TYPE_P (valtype))
8683 valmode = word_mode;
8684 else
8685 valmode = TYPE_MODE (valtype);
8687 if (TREE_CODE (valtype) == REAL_TYPE
8688 && TYPE_MODE (valtype) != TFmode
8689 && !TARGET_SOFT_FLOAT)
8690 return gen_rtx_REG (valmode, 32);
8692 return gen_rtx_REG (valmode, 28);
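/* As an illustrative sketch, a 12-byte aggregate on the 64-bit target
   is returned as

     (parallel:BLK [(expr_list (reg:DI 28) (const_int 0))
                    (expr_list (reg:DI 29) (const_int 8))])

   while an SFmode result comes back simply as (reg:SF 32).  */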
8695 /* Return the location of a parameter that is passed in a register or NULL
8696 if the parameter has any component that is passed in memory.
8698 This is new code and will be pushed into the net sources after
8699 further testing.
8701 ??? We might want to restructure this so that it looks more like other
8702 ports. */
8704 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8705 int named ATTRIBUTE_UNUSED)
8707 int max_arg_words = (TARGET_64BIT ? 8 : 4);
8708 int alignment = 0;
8709 int arg_size;
8710 int fpr_reg_base;
8711 int gpr_reg_base;
8712 rtx retval;
8714 if (mode == VOIDmode)
8715 return NULL_RTX;
8717 arg_size = FUNCTION_ARG_SIZE (mode, type);
8719 /* If this arg would be passed partially or totally on the stack, then
8720 this routine should return zero. FUNCTION_ARG_PARTIAL_NREGS will
8721 handle arguments which are split between regs and stack slots if
8722 the ABI mandates split arguments. */
8723 if (! TARGET_64BIT)
8725 /* The 32-bit ABI does not split arguments. */
8726 if (cum->words + arg_size > max_arg_words)
8727 return NULL_RTX;
8729 else
8731 if (arg_size > 1)
8732 alignment = cum->words & 1;
8733 if (cum->words + alignment >= max_arg_words)
8734 return NULL_RTX;
8737 /* The 32bit ABIs and the 64bit ABIs are rather different,
8738 particularly in their handling of FP registers. We might
8739 be able to cleverly share code between them, but I'm not
8740 going to bother in the hope that splitting them up results
8741 in code that is more easily understood. */
8743 if (TARGET_64BIT)
8745 /* Advance the base registers to their current locations.
8747 Remember, gprs grow towards smaller register numbers while
8748 fprs grow to higher register numbers. Also remember that
8749 although FP regs are 32-bit addressable, we pretend that
8750 the registers are 64 bits wide. */
8751 gpr_reg_base = 26 - cum->words;
8752 fpr_reg_base = 32 + cum->words;
8754 /* Arguments wider than one word and small aggregates need special
8755 treatment. */
8756 if (arg_size > 1
8757 || mode == BLKmode
8758 || (type && AGGREGATE_TYPE_P (type)))
8760 /* Double-extended precision (80-bit), quad-precision (128-bit)
8761 and aggregates including complex numbers are aligned on
8762 128-bit boundaries. The first eight 64-bit argument slots
8763 are associated one-to-one, with general registers r26
8764 through r19, and also with floating-point registers fr4
8765 through fr11. Arguments larger than one word are always
8766 passed in general registers.
8768 Using a PARALLEL with a word mode register results in left
8769 justified data on a big-endian target. */
8771 rtx loc[8];
8772 int i, offset = 0, ub = arg_size;
8774 /* Align the base register. */
8775 gpr_reg_base -= alignment;
8777 ub = MIN (ub, max_arg_words - cum->words - alignment);
8778 for (i = 0; i < ub; i++)
8780 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8781 gen_rtx_REG (DImode, gpr_reg_base),
8782 GEN_INT (offset));
8783 gpr_reg_base -= 1;
8784 offset += 8;
8787 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
8790 else
8792 /* If the argument is larger than a word, then we know precisely
8793 which registers we must use. */
8794 if (arg_size > 1)
8796 if (cum->words)
8798 gpr_reg_base = 23;
8799 fpr_reg_base = 38;
8801 else
8803 gpr_reg_base = 25;
8804 fpr_reg_base = 34;
8807 /* Structures 5 to 8 bytes in size are passed in the general
8808 registers in the same manner as other non-floating-point
8809 objects. The data is right-justified and zero-extended
8810 to 64 bits.
8812 This is magic. Normally, using a PARALLEL results in left
8813 justified data on a big-endian target. However, using a
8814 single double-word register provides the required right
8815 justification for 5 to 8 byte structures. This has nothing
8816 to do with the direction of padding specified for the argument.
8817 It has to do with how the data is widened and shifted into
8818 and from the register.
8820 Aside from adding load_multiple and store_multiple patterns,
8821 this is the only way that I have found to obtain right
8822 justification of BLKmode data when it has a size greater
8823 than one word. Splitting the operation into two SImode loads
8824 or returning a DImode REG results in left justified data. */
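/* Sketch of the result for a hypothetical 6-byte struct passed as the
   first argument (cum->words == 0, so gpr_reg_base == 25):

     (parallel:BLK [(expr_list (reg:DI 25) (const_int 0))])

   The single doubleword register is what yields the right
   justification described above.  */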
8825 if (mode == BLKmode)
8827 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8828 gen_rtx_REG (DImode, gpr_reg_base),
8829 const0_rtx);
8830 return gen_rtx_PARALLEL (mode, gen_rtvec (1, loc));
8833 else
8835 /* We have a single word (32 bits). A simple computation
8836 will get us the register #s we need. */
8837 gpr_reg_base = 26 - cum->words;
8838 fpr_reg_base = 32 + 2 * cum->words;
8842 /* Determine if the argument needs to be passed in both general and
8843 floating point registers. */
8844 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
8845 /* If we are doing soft-float with portable runtime, then there
8846 is no need to worry about FP regs. */
8847 && !TARGET_SOFT_FLOAT
8848 /* The parameter must be some kind of float, else we can just
8849 pass it in integer registers. */
8850 && FLOAT_MODE_P (mode)
8851 /* The target function must not have a prototype. */
8852 && cum->nargs_prototype <= 0
8853 /* libcalls do not need to pass items in both FP and general
8854 registers. */
8855 && type != NULL_TREE
8856 /* All this hair applies to "outgoing" args only. This includes
8857 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
8858 && !cum->incoming)
8859 /* Also pass outgoing floating arguments in both registers in indirect
8860 calls with the 32 bit ABI and the HP assembler since there is no
8861 way to specify the argument locations in static functions.
8862 || (!TARGET_64BIT
8863 && !TARGET_GAS
8864 && !cum->incoming
8865 && cum->indirect
8866 && FLOAT_MODE_P (mode)))
8868 retval
8869 = gen_rtx_PARALLEL
8870 (mode,
8871 gen_rtvec (2,
8872 gen_rtx_EXPR_LIST (VOIDmode,
8873 gen_rtx_REG (mode, fpr_reg_base),
8874 const0_rtx),
8875 gen_rtx_EXPR_LIST (VOIDmode,
8876 gen_rtx_REG (mode, gpr_reg_base),
8877 const0_rtx)));
8879 else
8881 /* See if we should pass this parameter in a general register. */
8882 if (TARGET_SOFT_FLOAT
8883 /* Indirect calls in the normal 32bit ABI require all arguments
8884 to be passed in general registers. */
8885 || (!TARGET_PORTABLE_RUNTIME
8886 && !TARGET_64BIT
8887 && !TARGET_ELF32
8888 && cum->indirect)
8889 /* If the parameter is not a floating point parameter, then
8890 it belongs in GPRs. */
8891 || !FLOAT_MODE_P (mode))
8892 retval = gen_rtx_REG (mode, gpr_reg_base);
8893 else
8894 retval = gen_rtx_REG (mode, fpr_reg_base);
8896 return retval;
8900 /* If this arg would be passed totally in registers or totally on the stack,
8901 then this routine should return zero. It is currently called only for
8902 the 64-bit target. */
8904 function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8905 tree type, int named ATTRIBUTE_UNUSED)
8907 unsigned int max_arg_words = 8;
8908 unsigned int offset = 0;
8910 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
8911 offset = 1;
8913 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
8914 /* Arg fits fully into registers. */
8915 return 0;
8916 else if (cum->words + offset >= max_arg_words)
8917 /* Arg fully on the stack. */
8918 return 0;
8919 else
8920 /* Arg is split. */
8921 return max_arg_words - cum->words - offset;
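/* For example (illustrative numbers): with cum->words == 6 and a
   4-word argument needing no alignment pad, 6 + 4 > 8 and 6 < 8, so
   the argument is split and 8 - 6 = 2 registers are reported, with
   the remaining two words going to the stack.  */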
8925 /* Return 1 if this is a comparison operator. This allows the use of
8926 MATCH_OPERATOR to recognize all the branch insns. */
8929 cmpib_comparison_operator (rtx op, enum machine_mode mode)
8931 return ((mode == VOIDmode || GET_MODE (op) == mode)
8932 && (GET_CODE (op) == EQ
8933 || GET_CODE (op) == NE
8934 || GET_CODE (op) == GT
8935 || GET_CODE (op) == GTU
8936 || GET_CODE (op) == GE
8937 || GET_CODE (op) == LT
8938 || GET_CODE (op) == LE
8939 || GET_CODE (op) == LEU));
8942 /* On hpux10, the linker will give an error if we have a reference
8943 in the read-only data section to a symbol defined in a shared
8944 library. Therefore, expressions that might require a reloc can
8945 not be placed in the read-only data section. */
8947 static void
8948 pa_select_section (tree exp, int reloc,
8949 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
8951 if (TREE_CODE (exp) == VAR_DECL
8952 && TREE_READONLY (exp)
8953 && !TREE_THIS_VOLATILE (exp)
8954 && DECL_INITIAL (exp)
8955 && (DECL_INITIAL (exp) == error_mark_node
8956 || TREE_CONSTANT (DECL_INITIAL (exp)))
8957 && !reloc)
8958 readonly_data_section ();
8959 else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
8960 && !(TREE_CODE (exp) == STRING_CST && flag_writable_strings)
8961 && !reloc)
8962 readonly_data_section ();
8963 else
8964 data_section ();
8967 static void
8968 pa_globalize_label (FILE *stream, const char *name)
8970 /* We only handle DATA objects here; functions are globalized in
8971 ASM_DECLARE_FUNCTION_NAME. */
8972 if (! FUNCTION_NAME_P (name))
8974 fputs ("\t.EXPORT ", stream);
8975 assemble_name (stream, name);
8976 fputs (",DATA\n", stream);
8979 #include "gt-pa.h"