1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
29 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
34 #include "insn-attr.h"
45 #include "integrate.h"
48 #include "target-def.h"
51 extern int rtx_equal_function_value_matters
;
53 /* Specify which cpu to schedule for. */
55 enum processor_type alpha_cpu
;
56 static const char * const alpha_cpu_name
[] =
61 /* Specify how accurate floating-point traps need to be. */
63 enum alpha_trap_precision alpha_tp
;
65 /* Specify the floating-point rounding mode. */
67 enum alpha_fp_rounding_mode alpha_fprm
;
69 /* Specify which things cause traps. */
71 enum alpha_fp_trap_mode alpha_fptm
;
73 /* Strings decoded into the above options. */
75 const char *alpha_cpu_string
; /* -mcpu= */
76 const char *alpha_tune_string
; /* -mtune= */
77 const char *alpha_tp_string
; /* -mtrap-precision=[p|s|i] */
78 const char *alpha_fprm_string
; /* -mfp-rounding-mode=[n|m|c|d] */
79 const char *alpha_fptm_string
; /* -mfp-trap-mode=[n|u|su|sui] */
80 const char *alpha_mlat_string
; /* -mmemory-latency= */
82 /* Save information from a "cmpxx" operation until the branch or scc is
85 struct alpha_compare alpha_compare
;
87 /* Non-zero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
90 static int inside_function
= FALSE
;
92 /* The number of cycles of latency we should assume on memory reads. */
94 int alpha_memory_latency
= 3;
96 /* Whether the function needs the GP. */
98 static int alpha_function_needs_gp
;
100 /* The alias set for prologue/epilogue register save/restore. */
102 static int alpha_sr_alias_set
;
104 /* The assembler name of the current function. */
106 static const char *alpha_fnname
;
108 /* The next explicit relocation sequence number. */
109 int alpha_next_sequence_number
= 1;
111 /* The literal and gpdisp sequence numbers for this insn, as printed
112 by %# and %* respectively. */
113 int alpha_this_literal_sequence_number
;
114 int alpha_this_gpdisp_sequence_number
;
116 /* Declarations of static functions. */
117 static bool decl_in_text_section
119 static bool local_symbol_p
121 static void alpha_set_memflags_1
122 PARAMS ((rtx
, int, int, int));
123 static rtx alpha_emit_set_const_1
124 PARAMS ((rtx
, enum machine_mode
, HOST_WIDE_INT
, int));
125 static void alpha_expand_unaligned_load_words
126 PARAMS ((rtx
*out_regs
, rtx smem
, HOST_WIDE_INT words
, HOST_WIDE_INT ofs
));
127 static void alpha_expand_unaligned_store_words
128 PARAMS ((rtx
*out_regs
, rtx smem
, HOST_WIDE_INT words
, HOST_WIDE_INT ofs
));
129 static void alpha_sa_mask
130 PARAMS ((unsigned long *imaskP
, unsigned long *fmaskP
));
131 static int find_lo_sum
132 PARAMS ((rtx
*, void *));
133 static int alpha_does_function_need_gp
135 static int alpha_ra_ever_killed
137 static const char *get_trap_mode_suffix
139 static const char *get_round_mode_suffix
141 static rtx set_frame_related_p
143 static const char *alpha_lookup_xfloating_lib_func
144 PARAMS ((enum rtx_code
));
145 static int alpha_compute_xfloating_mode_arg
146 PARAMS ((enum rtx_code
, enum alpha_fp_rounding_mode
));
147 static void alpha_emit_xfloating_libcall
148 PARAMS ((const char *, rtx
, rtx
[], int, rtx
));
149 static rtx alpha_emit_xfloating_compare
150 PARAMS ((enum rtx_code
, rtx
, rtx
));
151 static void alpha_output_function_end_prologue
153 static int alpha_adjust_cost
154 PARAMS ((rtx
, rtx
, rtx
, int));
155 static int alpha_issue_rate
157 static int alpha_variable_issue
158 PARAMS ((FILE *, int, rtx
, int));
160 #if TARGET_ABI_UNICOSMK
161 static void alpha_init_machine_status
162 PARAMS ((struct function
*p
));
163 static void alpha_mark_machine_status
164 PARAMS ((struct function
*p
));
165 static void alpha_free_machine_status
166 PARAMS ((struct function
*p
));
169 static void unicosmk_output_deferred_case_vectors
PARAMS ((FILE *));
170 static void unicosmk_gen_dsib
PARAMS ((unsigned long *imaskP
));
171 static void unicosmk_output_ssib
PARAMS ((FILE *, const char *));
172 static int unicosmk_need_dex
PARAMS ((rtx
));
174 /* Get the number of args of a function in one of two ways. */
175 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
176 #define NUM_ARGS current_function_args_info.num_args
178 #define NUM_ARGS current_function_args_info
184 /* Initialize the GCC target structure. */
185 #if TARGET_ABI_OPEN_VMS
186 const struct attribute_spec vms_attribute_table
[];
187 static unsigned int vms_section_type_flags
PARAMS ((tree
, const char *, int));
188 static void vms_asm_named_section
PARAMS ((const char *, unsigned int));
189 static void vms_asm_out_constructor
PARAMS ((rtx
, int));
190 static void vms_asm_out_destructor
PARAMS ((rtx
, int));
191 # undef TARGET_ATTRIBUTE_TABLE
192 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
193 # undef TARGET_SECTION_TYPE_FLAGS
194 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
197 #if TARGET_ABI_UNICOSMK
198 static void unicosmk_asm_named_section
PARAMS ((const char *, unsigned int));
199 static void unicosmk_insert_attributes
PARAMS ((tree
, tree
*));
200 static unsigned int unicosmk_section_type_flags
PARAMS ((tree
, const char *,
202 # undef TARGET_INSERT_ATTRIBUTES
203 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
204 # undef TARGET_SECTION_TYPE_FLAGS
205 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
208 #undef TARGET_ASM_ALIGNED_HI_OP
209 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
210 #undef TARGET_ASM_ALIGNED_DI_OP
211 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
213 /* Default unaligned ops are provided for ELF systems. To get unaligned
214 data for non-ELF systems, we have to turn off auto alignment. */
215 #ifndef OBJECT_FORMAT_ELF
216 #undef TARGET_ASM_UNALIGNED_HI_OP
217 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
218 #undef TARGET_ASM_UNALIGNED_SI_OP
219 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
220 #undef TARGET_ASM_UNALIGNED_DI_OP
221 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
224 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
225 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
227 #undef TARGET_SCHED_ADJUST_COST
228 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
229 #undef TARGET_SCHED_ISSUE_RATE
230 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
231 #undef TARGET_SCHED_VARIABLE_ISSUE
232 #define TARGET_SCHED_VARIABLE_ISSUE alpha_variable_issue
234 struct gcc_target targetm
= TARGET_INITIALIZER
;
236 /* Parse target option strings. */
242 static const struct cpu_table
{
243 const char *const name
;
244 const enum processor_type processor
;
247 #define EV5_MASK (MASK_CPU_EV5)
248 #define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
249 { "ev4", PROCESSOR_EV4
, 0 },
250 { "ev45", PROCESSOR_EV4
, 0 },
251 { "21064", PROCESSOR_EV4
, 0 },
252 { "ev5", PROCESSOR_EV5
, EV5_MASK
},
253 { "21164", PROCESSOR_EV5
, EV5_MASK
},
254 { "ev56", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
},
255 { "21164a", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
},
256 { "pca56", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
257 { "21164PC",PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
258 { "21164pc",PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
259 { "ev6", PROCESSOR_EV6
, EV6_MASK
},
260 { "21264", PROCESSOR_EV6
, EV6_MASK
},
261 { "ev67", PROCESSOR_EV6
, EV6_MASK
|MASK_CIX
},
262 { "21264a", PROCESSOR_EV6
, EV6_MASK
|MASK_CIX
},
266 /* Unicos/Mk doesn't have shared libraries. */
267 if (TARGET_ABI_UNICOSMK
&& flag_pic
)
269 warning ("-f%s ignored for Unicos/Mk (not supported)",
270 (flag_pic
> 1) ? "PIC" : "pic");
274 /* On Unicos/Mk, the native compiler consistenly generates /d suffices for
275 floating-point instructions. Make that the default for this target. */
276 if (TARGET_ABI_UNICOSMK
)
277 alpha_fprm
= ALPHA_FPRM_DYN
;
279 alpha_fprm
= ALPHA_FPRM_NORM
;
281 alpha_tp
= ALPHA_TP_PROG
;
282 alpha_fptm
= ALPHA_FPTM_N
;
284 /* We cannot use su and sui qualifiers for conversion instructions on
285 Unicos/Mk. I'm not sure if this is due to assembler or hardware
286 limitations. Right now, we issue a warning if -mieee is specified
287 and then ignore it; eventually, we should either get it right or
288 disable the option altogether. */
292 if (TARGET_ABI_UNICOSMK
)
293 warning ("-mieee not supported on Unicos/Mk");
296 alpha_tp
= ALPHA_TP_INSN
;
297 alpha_fptm
= ALPHA_FPTM_SU
;
301 if (TARGET_IEEE_WITH_INEXACT
)
303 if (TARGET_ABI_UNICOSMK
)
304 warning ("-mieee-with-inexact not supported on Unicos/Mk");
307 alpha_tp
= ALPHA_TP_INSN
;
308 alpha_fptm
= ALPHA_FPTM_SUI
;
314 if (! strcmp (alpha_tp_string
, "p"))
315 alpha_tp
= ALPHA_TP_PROG
;
316 else if (! strcmp (alpha_tp_string
, "f"))
317 alpha_tp
= ALPHA_TP_FUNC
;
318 else if (! strcmp (alpha_tp_string
, "i"))
319 alpha_tp
= ALPHA_TP_INSN
;
321 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string
);
324 if (alpha_fprm_string
)
326 if (! strcmp (alpha_fprm_string
, "n"))
327 alpha_fprm
= ALPHA_FPRM_NORM
;
328 else if (! strcmp (alpha_fprm_string
, "m"))
329 alpha_fprm
= ALPHA_FPRM_MINF
;
330 else if (! strcmp (alpha_fprm_string
, "c"))
331 alpha_fprm
= ALPHA_FPRM_CHOP
;
332 else if (! strcmp (alpha_fprm_string
,"d"))
333 alpha_fprm
= ALPHA_FPRM_DYN
;
335 error ("bad value `%s' for -mfp-rounding-mode switch",
339 if (alpha_fptm_string
)
341 if (strcmp (alpha_fptm_string
, "n") == 0)
342 alpha_fptm
= ALPHA_FPTM_N
;
343 else if (strcmp (alpha_fptm_string
, "u") == 0)
344 alpha_fptm
= ALPHA_FPTM_U
;
345 else if (strcmp (alpha_fptm_string
, "su") == 0)
346 alpha_fptm
= ALPHA_FPTM_SU
;
347 else if (strcmp (alpha_fptm_string
, "sui") == 0)
348 alpha_fptm
= ALPHA_FPTM_SUI
;
350 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string
);
354 = TARGET_CPU_DEFAULT
& MASK_CPU_EV6
? PROCESSOR_EV6
355 : (TARGET_CPU_DEFAULT
& MASK_CPU_EV5
? PROCESSOR_EV5
: PROCESSOR_EV4
);
357 if (alpha_cpu_string
)
359 for (i
= 0; cpu_table
[i
].name
; i
++)
360 if (! strcmp (alpha_cpu_string
, cpu_table
[i
].name
))
362 alpha_cpu
= cpu_table
[i
].processor
;
363 target_flags
&= ~ (MASK_BWX
| MASK_MAX
| MASK_FIX
| MASK_CIX
364 | MASK_CPU_EV5
| MASK_CPU_EV6
);
365 target_flags
|= cpu_table
[i
].flags
;
368 if (! cpu_table
[i
].name
)
369 error ("bad value `%s' for -mcpu switch", alpha_cpu_string
);
372 if (alpha_tune_string
)
374 for (i
= 0; cpu_table
[i
].name
; i
++)
375 if (! strcmp (alpha_tune_string
, cpu_table
[i
].name
))
377 alpha_cpu
= cpu_table
[i
].processor
;
380 if (! cpu_table
[i
].name
)
381 error ("bad value `%s' for -mcpu switch", alpha_tune_string
);
384 /* Do some sanity checks on the above options. */
386 if (TARGET_ABI_UNICOSMK
&& alpha_fptm
!= ALPHA_FPTM_N
)
388 warning ("trap mode not supported on Unicos/Mk");
389 alpha_fptm
= ALPHA_FPTM_N
;
392 if ((alpha_fptm
== ALPHA_FPTM_SU
|| alpha_fptm
== ALPHA_FPTM_SUI
)
393 && alpha_tp
!= ALPHA_TP_INSN
&& ! TARGET_CPU_EV6
)
395 warning ("fp software completion requires -mtrap-precision=i");
396 alpha_tp
= ALPHA_TP_INSN
;
401 /* Except for EV6 pass 1 (not released), we always have precise
402 arithmetic traps. Which means we can do software completion
403 without minding trap shadows. */
404 alpha_tp
= ALPHA_TP_PROG
;
407 if (TARGET_FLOAT_VAX
)
409 if (alpha_fprm
== ALPHA_FPRM_MINF
|| alpha_fprm
== ALPHA_FPRM_DYN
)
411 warning ("rounding mode not supported for VAX floats");
412 alpha_fprm
= ALPHA_FPRM_NORM
;
414 if (alpha_fptm
== ALPHA_FPTM_SUI
)
416 warning ("trap mode not supported for VAX floats");
417 alpha_fptm
= ALPHA_FPTM_SU
;
425 if (!alpha_mlat_string
)
426 alpha_mlat_string
= "L1";
428 if (ISDIGIT ((unsigned char)alpha_mlat_string
[0])
429 && (lat
= strtol (alpha_mlat_string
, &end
, 10), *end
== '\0'))
431 else if ((alpha_mlat_string
[0] == 'L' || alpha_mlat_string
[0] == 'l')
432 && ISDIGIT ((unsigned char)alpha_mlat_string
[1])
433 && alpha_mlat_string
[2] == '\0')
435 static int const cache_latency
[][4] =
437 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
438 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
439 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
442 lat
= alpha_mlat_string
[1] - '0';
443 if (lat
<= 0 || lat
> 3 || cache_latency
[alpha_cpu
][lat
-1] == -1)
445 warning ("L%d cache latency unknown for %s",
446 lat
, alpha_cpu_name
[alpha_cpu
]);
450 lat
= cache_latency
[alpha_cpu
][lat
-1];
452 else if (! strcmp (alpha_mlat_string
, "main"))
454 /* Most current memories have about 370ns latency. This is
455 a reasonable guess for a fast cpu. */
460 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string
);
464 alpha_memory_latency
= lat
;
467 /* Default the definition of "small data" to 8 bytes. */
471 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
473 target_flags
|= MASK_SMALL_DATA
;
474 else if (flag_pic
== 2)
475 target_flags
&= ~MASK_SMALL_DATA
;
477 /* Align labels and loops for optimal branching. */
478 /* ??? Kludge these by not doing anything if we don't optimize and also if
479 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
480 if (optimize
> 0 && write_symbols
!= SDB_DEBUG
)
482 if (align_loops
<= 0)
484 if (align_jumps
<= 0)
487 if (align_functions
<= 0)
488 align_functions
= 16;
490 /* Acquire a unique set number for our register saves and restores. */
491 alpha_sr_alias_set
= new_alias_set ();
493 /* Register variables and functions with the garbage collector. */
495 #if TARGET_ABI_UNICOSMK
496 /* Set up function hooks. */
497 init_machine_status
= alpha_init_machine_status
;
498 mark_machine_status
= alpha_mark_machine_status
;
499 free_machine_status
= alpha_free_machine_status
;
503 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
511 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
513 if ((value
& 0xff) != 0 && (value
& 0xff) != 0xff)
519 /* Returns 1 if OP is either the constant zero or a register. If a
520 register, it must be in the proper mode unless MODE is VOIDmode. */
523 reg_or_0_operand (op
, mode
)
525 enum machine_mode mode
;
527 return op
== const0_rtx
|| register_operand (op
, mode
);
530 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
534 reg_or_6bit_operand (op
, mode
)
536 enum machine_mode mode
;
538 return ((GET_CODE (op
) == CONST_INT
539 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 64)
540 || register_operand (op
, mode
));
544 /* Return 1 if OP is an 8-bit constant or any register. */
547 reg_or_8bit_operand (op
, mode
)
549 enum machine_mode mode
;
551 return ((GET_CODE (op
) == CONST_INT
552 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100)
553 || register_operand (op
, mode
));
556 /* Return 1 if OP is an 8-bit constant. */
559 cint8_operand (op
, mode
)
561 enum machine_mode mode ATTRIBUTE_UNUSED
;
563 return ((GET_CODE (op
) == CONST_INT
564 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100));
567 /* Return 1 if the operand is a valid second operand to an add insn. */
570 add_operand (op
, mode
)
572 enum machine_mode mode
;
574 if (GET_CODE (op
) == CONST_INT
)
575 /* Constraints I, J, O and P are covered by K. */
576 return (CONST_OK_FOR_LETTER_P (INTVAL (op
), 'K')
577 || CONST_OK_FOR_LETTER_P (INTVAL (op
), 'L'));
579 return register_operand (op
, mode
);
582 /* Return 1 if the operand is a valid second operand to a sign-extending
586 sext_add_operand (op
, mode
)
588 enum machine_mode mode
;
590 if (GET_CODE (op
) == CONST_INT
)
591 return (CONST_OK_FOR_LETTER_P (INTVAL (op
), 'I')
592 || CONST_OK_FOR_LETTER_P (INTVAL (op
), 'O'));
594 return reg_not_elim_operand (op
, mode
);
597 /* Return 1 if OP is the constant 4 or 8. */
600 const48_operand (op
, mode
)
602 enum machine_mode mode ATTRIBUTE_UNUSED
;
604 return (GET_CODE (op
) == CONST_INT
605 && (INTVAL (op
) == 4 || INTVAL (op
) == 8));
608 /* Return 1 if OP is a valid first operand to an AND insn. */
611 and_operand (op
, mode
)
613 enum machine_mode mode
;
615 if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == VOIDmode
)
616 return (zap_mask (CONST_DOUBLE_LOW (op
))
617 && zap_mask (CONST_DOUBLE_HIGH (op
)));
619 if (GET_CODE (op
) == CONST_INT
)
620 return ((unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100
621 || (unsigned HOST_WIDE_INT
) ~ INTVAL (op
) < 0x100
622 || zap_mask (INTVAL (op
)));
624 return register_operand (op
, mode
);
627 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
630 or_operand (op
, mode
)
632 enum machine_mode mode
;
634 if (GET_CODE (op
) == CONST_INT
)
635 return ((unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100
636 || (unsigned HOST_WIDE_INT
) ~ INTVAL (op
) < 0x100);
638 return register_operand (op
, mode
);
641 /* Return 1 if OP is a constant that is the width, in bits, of an integral
642 mode smaller than DImode. */
645 mode_width_operand (op
, mode
)
647 enum machine_mode mode ATTRIBUTE_UNUSED
;
649 return (GET_CODE (op
) == CONST_INT
650 && (INTVAL (op
) == 8 || INTVAL (op
) == 16
651 || INTVAL (op
) == 32 || INTVAL (op
) == 64));
654 /* Return 1 if OP is a constant that is the width of an integral machine mode
655 smaller than an integer. */
658 mode_mask_operand (op
, mode
)
660 enum machine_mode mode ATTRIBUTE_UNUSED
;
662 #if HOST_BITS_PER_WIDE_INT == 32
663 if (GET_CODE (op
) == CONST_DOUBLE
)
664 return (CONST_DOUBLE_LOW (op
) == -1
665 && (CONST_DOUBLE_HIGH (op
) == -1
666 || CONST_DOUBLE_HIGH (op
) == 0));
668 if (GET_CODE (op
) == CONST_DOUBLE
)
669 return (CONST_DOUBLE_LOW (op
) == -1 && CONST_DOUBLE_HIGH (op
) == 0);
672 return (GET_CODE (op
) == CONST_INT
673 && (INTVAL (op
) == 0xff
674 || INTVAL (op
) == 0xffff
675 || INTVAL (op
) == (HOST_WIDE_INT
)0xffffffff
676 #if HOST_BITS_PER_WIDE_INT == 64
682 /* Return 1 if OP is a multiple of 8 less than 64. */
685 mul8_operand (op
, mode
)
687 enum machine_mode mode ATTRIBUTE_UNUSED
;
689 return (GET_CODE (op
) == CONST_INT
690 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 64
691 && (INTVAL (op
) & 7) == 0);
694 /* Return 1 if OP is the constant zero in floating-point. */
697 fp0_operand (op
, mode
)
699 enum machine_mode mode
;
701 return (GET_MODE (op
) == mode
702 && GET_MODE_CLASS (mode
) == MODE_FLOAT
&& op
== CONST0_RTX (mode
));
705 /* Return 1 if OP is the floating-point constant zero or a register. */
708 reg_or_fp0_operand (op
, mode
)
710 enum machine_mode mode
;
712 return fp0_operand (op
, mode
) || register_operand (op
, mode
);
715 /* Return 1 if OP is a hard floating-point register. */
718 hard_fp_register_operand (op
, mode
)
720 enum machine_mode mode
;
722 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
725 if (GET_CODE (op
) == SUBREG
)
726 op
= SUBREG_REG (op
);
727 return GET_CODE (op
) == REG
&& REGNO_REG_CLASS (REGNO (op
)) == FLOAT_REGS
;
730 /* Return 1 if OP is a hard general register. */
733 hard_int_register_operand (op
, mode
)
735 enum machine_mode mode
;
737 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
740 if (GET_CODE (op
) == SUBREG
)
741 op
= SUBREG_REG (op
);
742 return GET_CODE (op
) == REG
&& REGNO_REG_CLASS (REGNO (op
)) == GENERAL_REGS
;
745 /* Return 1 if OP is a register or a constant integer. */
749 reg_or_cint_operand (op
, mode
)
751 enum machine_mode mode
;
753 return (GET_CODE (op
) == CONST_INT
754 || register_operand (op
, mode
));
757 /* Return 1 if OP is something that can be reloaded into a register;
758 if it is a MEM, it need not be valid. */
761 some_operand (op
, mode
)
763 enum machine_mode mode
;
765 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
768 switch (GET_CODE (op
))
770 case REG
: case MEM
: case CONST_DOUBLE
: case CONST_INT
: case LABEL_REF
:
771 case SYMBOL_REF
: case CONST
:
775 return some_operand (SUBREG_REG (op
), VOIDmode
);
784 /* Likewise, but don't accept constants. */
787 some_ni_operand (op
, mode
)
789 enum machine_mode mode
;
791 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
794 if (GET_CODE (op
) == SUBREG
)
795 op
= SUBREG_REG (op
);
797 return (GET_CODE (op
) == REG
|| GET_CODE (op
) == MEM
);
800 /* Return 1 if OP is a valid operand for the source of a move insn. */
803 input_operand (op
, mode
)
805 enum machine_mode mode
;
807 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
810 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& GET_MODE (op
) != mode
)
813 switch (GET_CODE (op
))
818 if (TARGET_EXPLICIT_RELOCS
)
821 /* This handles both the Windows/NT and OSF cases. */
822 return mode
== ptr_mode
|| mode
== DImode
;
829 if (register_operand (op
, mode
))
831 /* ... fall through ... */
833 return ((TARGET_BWX
|| (mode
!= HImode
&& mode
!= QImode
))
834 && general_operand (op
, mode
));
837 return GET_MODE_CLASS (mode
) == MODE_FLOAT
&& op
== CONST0_RTX (mode
);
840 return mode
== QImode
|| mode
== HImode
|| add_operand (op
, mode
);
852 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
853 file, and in the same section as the current function. */
856 current_file_function_operand (op
, mode
)
858 enum machine_mode mode ATTRIBUTE_UNUSED
;
860 if (GET_CODE (op
) != SYMBOL_REF
)
863 /* Easy test for recursion. */
864 if (op
== XEXP (DECL_RTL (current_function_decl
), 0))
867 /* Otherwise, we need the DECL for the SYMBOL_REF, which we can't get.
868 So SYMBOL_REF_FLAG has been declared to imply that the function is
869 in the default text section. So we must also check that the current
870 function is also in the text section. */
871 if (SYMBOL_REF_FLAG (op
) && decl_in_text_section (current_function_decl
))
877 /* Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr. */
880 direct_call_operand (op
, mode
)
882 enum machine_mode mode
;
884 /* Must be defined in this file. */
885 if (! current_file_function_operand (op
, mode
))
888 /* If profiling is implemented via linker tricks, we can't jump
889 to the nogp alternate entry point. */
890 /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
891 but is approximately correct for the OSF ABIs. Don't know
892 what to do for VMS, NT, or UMK. */
893 if (! TARGET_PROFILING_NEEDS_GP
900 /* Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing
901 a variable known to be defined in this file. */
907 const char *str
= XSTR (op
, 0);
909 /* ??? SYMBOL_REF_FLAG is set for local function symbols, but we
910 run into problems with the rtl inliner in that the symbol was
911 once external, but is local after inlining, which results in
912 unrecognizable insns. */
914 return (CONSTANT_POOL_ADDRESS_P (op
)
915 /* If @, then ENCODE_SECTION_INFO sez it's local. */
917 /* If *$, then ASM_GENERATE_INTERNAL_LABEL sez it's local. */
918 || (str
[0] == '*' && str
[1] == '$'));
922 local_symbolic_operand (op
, mode
)
924 enum machine_mode mode
;
926 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
929 if (GET_CODE (op
) == LABEL_REF
)
932 if (GET_CODE (op
) == CONST
933 && GET_CODE (XEXP (op
, 0)) == PLUS
934 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
935 op
= XEXP (XEXP (op
, 0), 0);
937 if (GET_CODE (op
) != SYMBOL_REF
)
940 return local_symbol_p (op
);
943 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
944 known to be defined in this file in the small data area. */
947 small_symbolic_operand (op
, mode
)
949 enum machine_mode mode ATTRIBUTE_UNUSED
;
953 if (! TARGET_SMALL_DATA
)
956 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
959 if (GET_CODE (op
) == CONST
960 && GET_CODE (XEXP (op
, 0)) == PLUS
961 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
962 op
= XEXP (XEXP (op
, 0), 0);
964 if (GET_CODE (op
) != SYMBOL_REF
)
967 if (CONSTANT_POOL_ADDRESS_P (op
))
968 return GET_MODE_SIZE (get_pool_mode (op
)) <= (unsigned) g_switch_value
;
972 return str
[0] == '@' && str
[1] == 's';
976 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
977 not known (or known not) to be defined in this file. */
980 global_symbolic_operand (op
, mode
)
982 enum machine_mode mode
;
984 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
987 if (GET_CODE (op
) == CONST
988 && GET_CODE (XEXP (op
, 0)) == PLUS
989 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
990 op
= XEXP (XEXP (op
, 0), 0);
992 if (GET_CODE (op
) != SYMBOL_REF
)
995 return ! local_symbol_p (op
);
998 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
1001 call_operand (op
, mode
)
1003 enum machine_mode mode
;
1008 if (GET_CODE (op
) == REG
)
1011 return REGNO (op
) == 27;
1015 if (TARGET_ABI_UNICOSMK
)
1017 if (GET_CODE (op
) == SYMBOL_REF
)
1023 /* Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
1024 possibly with an offset. */
1027 symbolic_operand (op
, mode
)
1029 enum machine_mode mode
;
1031 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1033 if (GET_CODE (op
) == SYMBOL_REF
|| GET_CODE (op
) == LABEL_REF
)
1035 if (GET_CODE (op
) == CONST
1036 && GET_CODE (XEXP (op
,0)) == PLUS
1037 && GET_CODE (XEXP (XEXP (op
,0), 0)) == SYMBOL_REF
1038 && GET_CODE (XEXP (XEXP (op
,0), 1)) == CONST_INT
)
1043 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
1044 comparisons are valid in which insn. */
1047 alpha_comparison_operator (op
, mode
)
1049 enum machine_mode mode
;
1051 enum rtx_code code
= GET_CODE (op
);
1053 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1056 return (code
== EQ
|| code
== LE
|| code
== LT
1057 || code
== LEU
|| code
== LTU
);
1060 /* Return 1 if OP is a valid Alpha comparison operator against zero.
1061 Here we know which comparisons are valid in which insn. */
1064 alpha_zero_comparison_operator (op
, mode
)
1066 enum machine_mode mode
;
1068 enum rtx_code code
= GET_CODE (op
);
1070 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1073 return (code
== EQ
|| code
== NE
|| code
== LE
|| code
== LT
1074 || code
== LEU
|| code
== LTU
);
1077 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
1080 alpha_swapped_comparison_operator (op
, mode
)
1082 enum machine_mode mode
;
1084 enum rtx_code code
= GET_CODE (op
);
1086 if ((mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1087 || GET_RTX_CLASS (code
) != '<')
1090 code
= swap_condition (code
);
1091 return (code
== EQ
|| code
== LE
|| code
== LT
1092 || code
== LEU
|| code
== LTU
);
1095 /* Return 1 if OP is a signed comparison operation. */
1098 signed_comparison_operator (op
, mode
)
1100 enum machine_mode mode ATTRIBUTE_UNUSED
;
1102 enum rtx_code code
= GET_CODE (op
);
1104 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1107 return (code
== EQ
|| code
== NE
1108 || code
== LE
|| code
== LT
1109 || code
== GE
|| code
== GT
);
1112 /* Return 1 if OP is a valid Alpha floating point comparison operator.
1113 Here we know which comparisons are valid in which insn. */
1116 alpha_fp_comparison_operator (op
, mode
)
1118 enum machine_mode mode
;
1120 enum rtx_code code
= GET_CODE (op
);
1122 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1125 return (code
== EQ
|| code
== LE
|| code
== LT
|| code
== UNORDERED
);
1128 /* Return 1 if this is a divide or modulus operator. */
1131 divmod_operator (op
, mode
)
1133 enum machine_mode mode ATTRIBUTE_UNUSED
;
1135 switch (GET_CODE (op
))
1137 case DIV
: case MOD
: case UDIV
: case UMOD
:
1147 /* Return 1 if this memory address is a known aligned register plus
1148 a constant. It must be a valid address. This means that we can do
1149 this as an aligned reference plus some offset.
1151 Take into account what reload will do. */
1154 aligned_memory_operand (op
, mode
)
1156 enum machine_mode mode
;
1160 if (reload_in_progress
)
1163 if (GET_CODE (tmp
) == SUBREG
)
1164 tmp
= SUBREG_REG (tmp
);
1165 if (GET_CODE (tmp
) == REG
1166 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1168 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1174 if (GET_CODE (op
) != MEM
1175 || GET_MODE (op
) != mode
)
1179 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1180 sorts of constructs. Dig for the real base register. */
1181 if (reload_in_progress
1182 && GET_CODE (op
) == PLUS
1183 && GET_CODE (XEXP (op
, 0)) == PLUS
)
1184 base
= XEXP (XEXP (op
, 0), 0);
1187 if (! memory_address_p (mode
, op
))
1189 base
= (GET_CODE (op
) == PLUS
? XEXP (op
, 0) : op
);
1192 return (GET_CODE (base
) == REG
&& REGNO_POINTER_ALIGN (REGNO (base
)) >= 32);
1195 /* Similar, but return 1 if OP is a MEM which is not alignable. */
1198 unaligned_memory_operand (op
, mode
)
1200 enum machine_mode mode
;
1204 if (reload_in_progress
)
1207 if (GET_CODE (tmp
) == SUBREG
)
1208 tmp
= SUBREG_REG (tmp
);
1209 if (GET_CODE (tmp
) == REG
1210 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1212 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1218 if (GET_CODE (op
) != MEM
1219 || GET_MODE (op
) != mode
)
1223 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1224 sorts of constructs. Dig for the real base register. */
1225 if (reload_in_progress
1226 && GET_CODE (op
) == PLUS
1227 && GET_CODE (XEXP (op
, 0)) == PLUS
)
1228 base
= XEXP (XEXP (op
, 0), 0);
1231 if (! memory_address_p (mode
, op
))
1233 base
= (GET_CODE (op
) == PLUS
? XEXP (op
, 0) : op
);
1236 return (GET_CODE (base
) == REG
&& REGNO_POINTER_ALIGN (REGNO (base
)) < 32);
1239 /* Return 1 if OP is either a register or an unaligned memory location. */
1242 reg_or_unaligned_mem_operand (op
, mode
)
1244 enum machine_mode mode
;
1246 return register_operand (op
, mode
) || unaligned_memory_operand (op
, mode
);
1249 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
1252 any_memory_operand (op
, mode
)
1254 enum machine_mode mode ATTRIBUTE_UNUSED
;
1256 return (GET_CODE (op
) == MEM
1257 || (GET_CODE (op
) == SUBREG
&& GET_CODE (SUBREG_REG (op
)) == REG
)
1258 || (reload_in_progress
&& GET_CODE (op
) == REG
1259 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)
1260 || (reload_in_progress
&& GET_CODE (op
) == SUBREG
1261 && GET_CODE (SUBREG_REG (op
)) == REG
1262 && REGNO (SUBREG_REG (op
)) >= FIRST_PSEUDO_REGISTER
));
1265 /* Returns 1 if OP is not an eliminable register.
1267 This exists to cure a pathological abort in the s8addq (et al) patterns,
1269 long foo () { long t; bar(); return (long) &t * 26107; }
1271 which run afoul of a hack in reload to cure a (presumably) similar
1272 problem with lea-type instructions on other targets. But there is
1273 one of us and many of them, so work around the problem by selectively
1274 preventing combine from making the optimization. */
1277 reg_not_elim_operand (op
, mode
)
1279 enum machine_mode mode
;
1282 if (GET_CODE (op
) == SUBREG
)
1283 inner
= SUBREG_REG (op
);
1284 if (inner
== frame_pointer_rtx
|| inner
== arg_pointer_rtx
)
1287 return register_operand (op
, mode
);
1290 /* Return 1 is OP is a memory location that is not a reference (using
1291 an AND) to an unaligned location. Take into account what reload
1295 normal_memory_operand (op
, mode
)
1297 enum machine_mode mode ATTRIBUTE_UNUSED
;
1299 if (reload_in_progress
)
1302 if (GET_CODE (tmp
) == SUBREG
)
1303 tmp
= SUBREG_REG (tmp
);
1304 if (GET_CODE (tmp
) == REG
1305 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1307 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1309 /* This may not have been assigned an equivalent address if it will
1310 be eliminated. In that case, it doesn't matter what we do. */
1316 return GET_CODE (op
) == MEM
&& GET_CODE (XEXP (op
, 0)) != AND
;
1319 /* Accept a register, but not a subreg of any kind. This allows us to
1320 avoid pathological cases in reload wrt data movement common in
1321 int->fp conversion. */
1324 reg_no_subreg_operand (op
, mode
)
1326 enum machine_mode mode
;
1328 if (GET_CODE (op
) == SUBREG
)
1330 return register_operand (op
, mode
);
1333 /* Recognize an addition operation that includes a constant. Used to
1334 convince reload to canonize (plus (plus reg c1) c2) during register
1338 addition_operation (op
, mode
)
1340 enum machine_mode mode
;
1342 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1344 if (GET_CODE (op
) == PLUS
1345 && register_operand (XEXP (op
, 0), mode
)
1346 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1347 && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op
, 1)), 'K'))
1352 /* Return 1 if this function can directly return via $26. */
1357 return (! TARGET_ABI_OPEN_VMS
&& ! TARGET_ABI_UNICOSMK
1359 && alpha_sa_size () == 0
1360 && get_frame_size () == 0
1361 && current_function_outgoing_args_size
== 0
1362 && current_function_pretend_args_size
== 0);
1365 /* Return the ADDR_VEC associated with a tablejump insn. */
1368 alpha_tablejump_addr_vec (insn
)
1373 tmp
= JUMP_LABEL (insn
);
1376 tmp
= NEXT_INSN (tmp
);
1379 if (GET_CODE (tmp
) == JUMP_INSN
1380 && GET_CODE (PATTERN (tmp
)) == ADDR_DIFF_VEC
)
1381 return PATTERN (tmp
);
1385 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
1388 alpha_tablejump_best_label (insn
)
1391 rtx jump_table
= alpha_tablejump_addr_vec (insn
);
1392 rtx best_label
= NULL_RTX
;
1394 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
1395 there for edge frequency counts from profile data. */
1399 int n_labels
= XVECLEN (jump_table
, 1);
1400 int best_count
= -1;
1403 for (i
= 0; i
< n_labels
; i
++)
1407 for (j
= i
+ 1; j
< n_labels
; j
++)
1408 if (XEXP (XVECEXP (jump_table
, 1, i
), 0)
1409 == XEXP (XVECEXP (jump_table
, 1, j
), 0))
1412 if (count
> best_count
)
1413 best_count
= count
, best_label
= XVECEXP (jump_table
, 1, i
);
1417 return best_label
? best_label
: const0_rtx
;
1420 /* Return true if the function DECL will be placed in the default text
1422 /* ??? Ideally we'd be able to always move from a SYMBOL_REF back to the
1423 decl, as that would allow us to determine if two functions are in the
1424 same section, which is what we really want to know. */
1427 decl_in_text_section (decl
)
1430 return (DECL_SECTION_NAME (decl
) == NULL_TREE
1431 && ! (flag_function_sections
1432 || (targetm
.have_named_sections
1433 && DECL_ONE_ONLY (decl
))));
1436 /* If we are referencing a function that is static, make the SYMBOL_REF
1437 special. We use this to see indicate we can branch to this function
1438 without setting PV or restoring GP.
1440 If this is a variable that is known to be defined locally, add "@v"
1441 to the name. If in addition the variable is to go in .sdata/.sbss,
1442 then add "@s" instead. */
1445 alpha_encode_section_info (decl
)
1448 const char *symbol_str
;
1449 bool is_local
, is_small
;
1451 if (TREE_CODE (decl
) == FUNCTION_DECL
)
1453 /* We mark public functions once they are emitted; otherwise we
1454 don't know that they exist in this unit of translation. */
1455 if (TREE_PUBLIC (decl
))
1457 /* Do not mark functions that are not in .text; otherwise we
1458 don't know that they are near enough for a direct branch. */
1459 if (! decl_in_text_section (decl
))
1462 SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl
), 0)) = 1;
1466 /* Early out if we're not going to do anything with this data. */
1467 if (! TARGET_EXPLICIT_RELOCS
)
1470 /* Careful not to prod global register variables. */
1471 if (TREE_CODE (decl
) != VAR_DECL
1472 || GET_CODE (DECL_RTL (decl
)) != MEM
1473 || GET_CODE (XEXP (DECL_RTL (decl
), 0)) != SYMBOL_REF
)
1476 symbol_str
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
1478 /* A variable is considered "local" if it is defined in this module. */
1480 if (DECL_EXTERNAL (decl
))
1482 /* Linkonce and weak data is never local. */
1483 else if (DECL_ONE_ONLY (decl
) || DECL_WEAK (decl
))
1485 else if (! TREE_PUBLIC (decl
))
1487 /* If PIC, then assume that any global name can be overridden by
1488 symbols resolved from other modules. */
1491 /* Uninitialized COMMON variable may be unified with symbols
1492 resolved from other modules. */
1493 else if (DECL_COMMON (decl
)
1494 && (DECL_INITIAL (decl
) == NULL
1495 || DECL_INITIAL (decl
) == error_mark_node
))
1497 /* Otherwise we're left with initialized (or non-common) global data
1498 which is of necessity defined locally. */
1502 /* Determine if DECL will wind up in .sdata/.sbss. */
1505 if (DECL_SECTION_NAME (decl
))
1507 const char *section
= TREE_STRING_POINTER (DECL_SECTION_NAME (decl
));
1508 if (strcmp (section
, ".sdata") == 0
1509 || strcmp (section
, ".sbss") == 0)
1514 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (decl
));
1516 /* If the variable has already been defined in the output file, then it
1517 is too late to put it in sdata if it wasn't put there in the first
1518 place. The test is here rather than above, because if it is already
1519 in sdata, then it can stay there. */
1521 if (TREE_ASM_WRITTEN (decl
))
1524 /* If this is an incomplete type with size 0, then we can't put it in
1525 sdata because it might be too big when completed. */
1526 else if (size
> 0 && size
<= g_switch_value
)
1530 /* Finally, encode this into the symbol string. */
1537 if (symbol_str
[0] == '@')
1539 if (symbol_str
[1] == (is_small
? 's' : 'v'))
1544 len
= strlen (symbol_str
) + 1;
1545 newstr
= alloca (len
+ 2);
1548 newstr
[1] = (is_small
? 's' : 'v');
1549 memcpy (newstr
+ 2, symbol_str
, len
);
1551 string
= ggc_alloc_string (newstr
, len
+ 2 - 1);
1552 XSTR (XEXP (DECL_RTL (decl
), 0), 0) = string
;
1554 else if (symbol_str
[0] == '@')
1558 /* legitimate_address_p recognizes an RTL expression that is a valid
1559 memory address for an instruction. The MODE argument is the
1560 machine mode for the MEM expression that wants to use this address.
1562 For Alpha, we have either a constant address or the sum of a
1563 register and a constant address, or just a register. For DImode,
1564 any of those forms can be surrounded with an AND that clear the
1565 low-order three bits; this is an "unaligned" access. */
1568 alpha_legitimate_address_p (mode
, x
, strict
)
1569 enum machine_mode mode
;
1573 /* If this is an ldq_u type address, discard the outer AND. */
1575 && GET_CODE (x
) == AND
1576 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1577 && INTVAL (XEXP (x
, 1)) == -8)
1580 /* Discard non-paradoxical subregs. */
1581 if (GET_CODE (x
) == SUBREG
1582 && (GET_MODE_SIZE (GET_MODE (x
))
1583 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
1586 /* Unadorned general registers are valid. */
1589 ? STRICT_REG_OK_FOR_BASE_P (x
)
1590 : NONSTRICT_REG_OK_FOR_BASE_P (x
)))
1593 /* Constant addresses (i.e. +/- 32k) are valid. */
1594 if (CONSTANT_ADDRESS_P (x
))
1597 /* Register plus a small constant offset is valid. */
1598 if (GET_CODE (x
) == PLUS
)
1600 rtx ofs
= XEXP (x
, 1);
1603 /* Discard non-paradoxical subregs. */
1604 if (GET_CODE (x
) == SUBREG
1605 && (GET_MODE_SIZE (GET_MODE (x
))
1606 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
1612 && NONSTRICT_REG_OK_FP_BASE_P (x
)
1613 && GET_CODE (ofs
) == CONST_INT
)
1616 ? STRICT_REG_OK_FOR_BASE_P (x
)
1617 : NONSTRICT_REG_OK_FOR_BASE_P (x
))
1618 && CONSTANT_ADDRESS_P (ofs
))
1621 else if (GET_CODE (x
) == ADDRESSOF
1622 && GET_CODE (ofs
) == CONST_INT
)
1626 /* If we're managing explicit relocations, LO_SUM is valid. */
1627 else if (TARGET_EXPLICIT_RELOCS
&& GET_CODE (x
) == LO_SUM
)
1629 rtx ofs
= XEXP (x
, 1);
1632 /* Discard non-paradoxical subregs. */
1633 if (GET_CODE (x
) == SUBREG
1634 && (GET_MODE_SIZE (GET_MODE (x
))
1635 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
1638 /* Must have a valid base register. */
1641 ? STRICT_REG_OK_FOR_BASE_P (x
)
1642 : NONSTRICT_REG_OK_FOR_BASE_P (x
))))
1645 /* The symbol must be local. */
1646 if (local_symbolic_operand (ofs
, Pmode
))
1653 /* Try machine-dependent ways of modifying an illegitimate address
1654 to be legitimate. If we find one, return the new, valid address. */
1657 alpha_legitimize_address (x
, oldx
, mode
)
1659 rtx oldx ATTRIBUTE_UNUSED
;
1660 enum machine_mode mode ATTRIBUTE_UNUSED
;
1662 HOST_WIDE_INT addend
;
1664 /* If the address is (plus reg const_int) and the CONST_INT is not a
1665 valid offset, compute the high part of the constant and add it to
1666 the register. Then our address is (plus temp low-part-const). */
1667 if (GET_CODE (x
) == PLUS
1668 && GET_CODE (XEXP (x
, 0)) == REG
1669 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1670 && ! CONSTANT_ADDRESS_P (XEXP (x
, 1)))
1672 addend
= INTVAL (XEXP (x
, 1));
1677 /* If the address is (const (plus FOO const_int)), find the low-order
1678 part of the CONST_INT. Then load FOO plus any high-order part of the
1679 CONST_INT into a register. Our address is (plus reg low-part-const).
1680 This is done to reduce the number of GOT entries. */
1681 if (GET_CODE (x
) == CONST
1682 && GET_CODE (XEXP (x
, 0)) == PLUS
1683 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)
1685 addend
= INTVAL (XEXP (XEXP (x
, 0), 1));
1686 x
= force_reg (Pmode
, XEXP (XEXP (x
, 0), 0));
1690 /* If we have a (plus reg const), emit the load as in (2), then add
1691 the two registers, and finally generate (plus reg low-part-const) as
1693 if (GET_CODE (x
) == PLUS
1694 && GET_CODE (XEXP (x
, 0)) == REG
1695 && GET_CODE (XEXP (x
, 1)) == CONST
1696 && GET_CODE (XEXP (XEXP (x
, 1), 0)) == PLUS
1697 && GET_CODE (XEXP (XEXP (XEXP (x
, 1), 0), 1)) == CONST_INT
)
1699 addend
= INTVAL (XEXP (XEXP (XEXP (x
, 1), 0), 1));
1700 x
= expand_simple_binop (Pmode
, PLUS
, XEXP (x
, 0),
1701 XEXP (XEXP (XEXP (x
, 1), 0), 0),
1702 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
1706 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1707 if (TARGET_EXPLICIT_RELOCS
&& symbolic_operand (x
, Pmode
))
1710 if (local_symbolic_operand (x
, Pmode
))
1712 if (small_symbolic_operand (x
, Pmode
))
1713 scratch
= pic_offset_table_rtx
;
1718 scratch
= gen_reg_rtx (Pmode
);
1720 tmp
= gen_rtx_HIGH (Pmode
, x
);
1721 tmp
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, tmp
);
1722 insn
= emit_insn (gen_rtx_SET (VOIDmode
, scratch
, tmp
));
1723 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EQUAL
, tmp
,
1727 return gen_rtx_LO_SUM (Pmode
, scratch
, x
);
1731 scratch
= gen_reg_rtx (Pmode
);
1732 emit_insn (gen_movdi_er_high_g (scratch
, pic_offset_table_rtx
,
1734 /* ??? FIXME: Tag the use of scratch with a lituse. */
1743 HOST_WIDE_INT lowpart
= (addend
& 0xffff) - 2 * (addend
& 0x8000);
1744 HOST_WIDE_INT highpart
= addend
- lowpart
;
1745 x
= expand_simple_binop (Pmode
, PLUS
, x
, GEN_INT (highpart
),
1746 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
1747 return plus_constant (x
, lowpart
);
1751 /* Try a machine-dependent way of reloading an illegitimate address
1752 operand. If we find one, push the reload and return the new rtx. */
1755 alpha_legitimize_reload_address (x
, mode
, opnum
, type
, ind_levels
)
1757 enum machine_mode mode ATTRIBUTE_UNUSED
;
1760 int ind_levels ATTRIBUTE_UNUSED
;
1762 /* We must recognize output that we have already generated ourselves. */
1763 if (GET_CODE (x
) == PLUS
1764 && GET_CODE (XEXP (x
, 0)) == PLUS
1765 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
1766 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1767 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1769 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
1770 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
1775 /* We wish to handle large displacements off a base register by
1776 splitting the addend across an ldah and the mem insn. This
1777 cuts number of extra insns needed from 3 to 1. */
1778 if (GET_CODE (x
) == PLUS
1779 && GET_CODE (XEXP (x
, 0)) == REG
1780 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
1781 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x
, 0)))
1782 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1784 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
1785 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
1787 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1789 /* Check for 32-bit overflow. */
1790 if (high
+ low
!= val
)
1793 /* Reload the high part into a base reg; leave the low part
1794 in the mem directly. */
1795 x
= gen_rtx_PLUS (GET_MODE (x
),
1796 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
1800 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
1801 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
1809 /* REF is an alignable memory location. Place an aligned SImode
1810 reference into *PALIGNED_MEM and the number of bits to shift into
1811 *PBITNUM. SCRATCH is a free register for use in reloading out
1812 of range stack slots. */
1815 get_aligned_mem (ref
, paligned_mem
, pbitnum
)
1817 rtx
*paligned_mem
, *pbitnum
;
1820 HOST_WIDE_INT offset
= 0;
1822 if (GET_CODE (ref
) != MEM
)
1825 if (reload_in_progress
1826 && ! memory_address_p (GET_MODE (ref
), XEXP (ref
, 0)))
1828 base
= find_replacement (&XEXP (ref
, 0));
1830 if (! memory_address_p (GET_MODE (ref
), base
))
1835 base
= XEXP (ref
, 0);
1838 if (GET_CODE (base
) == PLUS
)
1839 offset
+= INTVAL (XEXP (base
, 1)), base
= XEXP (base
, 0);
1842 = widen_memory_access (ref
, SImode
, (offset
& ~3) - offset
);
1844 if (WORDS_BIG_ENDIAN
)
1845 *pbitnum
= GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref
))
1846 + (offset
& 3) * 8));
1848 *pbitnum
= GEN_INT ((offset
& 3) * 8);
1851 /* Similar, but just get the address. Handle the two reload cases.
1852 Add EXTRA_OFFSET to the address we return. */
1855 get_unaligned_address (ref
, extra_offset
)
1860 HOST_WIDE_INT offset
= 0;
1862 if (GET_CODE (ref
) != MEM
)
1865 if (reload_in_progress
1866 && ! memory_address_p (GET_MODE (ref
), XEXP (ref
, 0)))
1868 base
= find_replacement (&XEXP (ref
, 0));
1870 if (! memory_address_p (GET_MODE (ref
), base
))
1875 base
= XEXP (ref
, 0);
1878 if (GET_CODE (base
) == PLUS
)
1879 offset
+= INTVAL (XEXP (base
, 1)), base
= XEXP (base
, 0);
1881 return plus_constant (base
, offset
+ extra_offset
);
1884 /* Loading and storing HImode or QImode values to and from memory
1885 usually requires a scratch register. The exceptions are loading
1886 QImode and HImode from an aligned address to a general register
1887 unless byte instructions are permitted.
1889 We also cannot load an unaligned address or a paradoxical SUBREG
1890 into an FP register.
1892 We also cannot do integral arithmetic into FP regs, as might result
1893 from register elimination into a DImode fp register. */
1896 secondary_reload_class (class, mode
, x
, in
)
1897 enum reg_class
class;
1898 enum machine_mode mode
;
1902 if ((mode
== QImode
|| mode
== HImode
) && ! TARGET_BWX
)
1904 if (GET_CODE (x
) == MEM
1905 || (GET_CODE (x
) == REG
&& REGNO (x
) >= FIRST_PSEUDO_REGISTER
)
1906 || (GET_CODE (x
) == SUBREG
1907 && (GET_CODE (SUBREG_REG (x
)) == MEM
1908 || (GET_CODE (SUBREG_REG (x
)) == REG
1909 && REGNO (SUBREG_REG (x
)) >= FIRST_PSEUDO_REGISTER
))))
1911 if (!in
|| !aligned_memory_operand(x
, mode
))
1912 return GENERAL_REGS
;
1916 if (class == FLOAT_REGS
)
1918 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == AND
)
1919 return GENERAL_REGS
;
1921 if (GET_CODE (x
) == SUBREG
1922 && (GET_MODE_SIZE (GET_MODE (x
))
1923 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
1924 return GENERAL_REGS
;
1926 if (in
&& INTEGRAL_MODE_P (mode
)
1927 && ! (memory_operand (x
, mode
) || x
== const0_rtx
))
1928 return GENERAL_REGS
;
1934 /* Subfunction of the following function. Update the flags of any MEM
1935 found in part of X. */
1938 alpha_set_memflags_1 (x
, in_struct_p
, volatile_p
, unchanging_p
)
1940 int in_struct_p
, volatile_p
, unchanging_p
;
1944 switch (GET_CODE (x
))
1948 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
1949 alpha_set_memflags_1 (XVECEXP (x
, 0, i
), in_struct_p
, volatile_p
,
1954 alpha_set_memflags_1 (PATTERN (x
), in_struct_p
, volatile_p
,
1959 alpha_set_memflags_1 (SET_DEST (x
), in_struct_p
, volatile_p
,
1961 alpha_set_memflags_1 (SET_SRC (x
), in_struct_p
, volatile_p
,
1966 MEM_IN_STRUCT_P (x
) = in_struct_p
;
1967 MEM_VOLATILE_P (x
) = volatile_p
;
1968 RTX_UNCHANGING_P (x
) = unchanging_p
;
1969 /* Sadly, we cannot use alias sets because the extra aliasing
1970 produced by the AND interferes. Given that two-byte quantities
1971 are the only thing we would be able to differentiate anyway,
1972 there does not seem to be any point in convoluting the early
1973 out of the alias check. */
1981 /* Given INSN, which is either an INSN or a SEQUENCE generated to
1982 perform a memory operation, look for any MEMs in either a SET_DEST or
1983 a SET_SRC and copy the in-struct, unchanging, and volatile flags from
1984 REF into each of the MEMs found. If REF is not a MEM, don't do
1988 alpha_set_memflags (insn
, ref
)
1992 int in_struct_p
, volatile_p
, unchanging_p
;
1994 if (GET_CODE (ref
) != MEM
)
1997 in_struct_p
= MEM_IN_STRUCT_P (ref
);
1998 volatile_p
= MEM_VOLATILE_P (ref
);
1999 unchanging_p
= RTX_UNCHANGING_P (ref
);
2001 /* This is only called from alpha.md, after having had something
2002 generated from one of the insn patterns. So if everything is
2003 zero, the pattern is already up-to-date. */
2004 if (! in_struct_p
&& ! volatile_p
&& ! unchanging_p
)
2007 alpha_set_memflags_1 (insn
, in_struct_p
, volatile_p
, unchanging_p
);
2010 /* Try to output insns to set TARGET equal to the constant C if it can be
2011 done in less than N insns. Do all computations in MODE. Returns the place
2012 where the output has been placed if it can be done and the insns have been
2013 emitted. If it would take more than N insns, zero is returned and no
2014 insns and emitted. */
2017 alpha_emit_set_const (target
, mode
, c
, n
)
2019 enum machine_mode mode
;
2026 /* Try 1 insn, then 2, then up to N. */
2027 for (i
= 1; i
<= n
; i
++)
2028 if ((pat
= alpha_emit_set_const_1 (target
, mode
, c
, i
)) != 0)
2034 /* Internal routine for the above to check for N or below insns. */
2037 alpha_emit_set_const_1 (target
, mode
, c
, n
)
2039 enum machine_mode mode
;
2045 /* Use a pseudo if highly optimizing and still generating RTL. */
2047 = (flag_expensive_optimizations
&& rtx_equal_function_value_matters
2051 #if HOST_BITS_PER_WIDE_INT == 64
2052 /* We are only called for SImode and DImode. If this is SImode, ensure that
2053 we are sign extended to a full word. This does not make any sense when
2054 cross-compiling on a narrow machine. */
2057 c
= ((c
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2060 /* If this is a sign-extended 32-bit constant, we can do this in at most
2061 three insns, so do it if we have enough insns left. We always have
2062 a sign-extended 32-bit constant when compiling on a narrow machine. */
2064 if (HOST_BITS_PER_WIDE_INT
!= 64
2065 || c
>> 31 == -1 || c
>> 31 == 0)
2067 HOST_WIDE_INT low
= ((c
& 0xffff) ^ 0x8000) - 0x8000;
2068 HOST_WIDE_INT tmp1
= c
- low
;
2069 HOST_WIDE_INT high
= (((tmp1
>> 16) & 0xffff) ^ 0x8000) - 0x8000;
2070 HOST_WIDE_INT extra
= 0;
2072 /* If HIGH will be interpreted as negative but the constant is
2073 positive, we must adjust it to do two ldha insns. */
2075 if ((high
& 0x8000) != 0 && c
>= 0)
2079 high
= ((tmp1
>> 16) & 0xffff) - 2 * ((tmp1
>> 16) & 0x8000);
2082 if (c
== low
|| (low
== 0 && extra
== 0))
2084 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
2085 but that meant that we can't handle INT_MIN on 32-bit machines
2086 (like NT/Alpha), because we recurse indefinitely through
2087 emit_move_insn to gen_movdi. So instead, since we know exactly
2088 what we want, create it explicitly. */
2091 target
= gen_reg_rtx (mode
);
2092 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (c
)));
2095 else if (n
>= 2 + (extra
!= 0))
2097 temp
= copy_to_suggested_reg (GEN_INT (high
<< 16), subtarget
, mode
);
2100 temp
= expand_binop (mode
, add_optab
, temp
, GEN_INT (extra
<< 16),
2101 subtarget
, 0, OPTAB_WIDEN
);
2103 return expand_binop (mode
, add_optab
, temp
, GEN_INT (low
),
2104 target
, 0, OPTAB_WIDEN
);
2108 /* If we couldn't do it that way, try some other methods. But if we have
2109 no instructions left, don't bother. Likewise, if this is SImode and
2110 we can't make pseudos, we can't do anything since the expand_binop
2111 and expand_unop calls will widen and try to make pseudos. */
2114 || (mode
== SImode
&& ! rtx_equal_function_value_matters
))
2117 /* Next, see if we can load a related constant and then shift and possibly
2118 negate it to get the constant we want. Try this once each increasing
2119 numbers of insns. */
2121 for (i
= 1; i
< n
; i
++)
2123 /* First, see if minus some low bits, we've an easy load of
2126 new = ((c
& 0xffff) ^ 0x8000) - 0x8000;
2128 && (temp
= alpha_emit_set_const (subtarget
, mode
, c
- new, i
)) != 0)
2129 return expand_binop (mode
, add_optab
, temp
, GEN_INT (new),
2130 target
, 0, OPTAB_WIDEN
);
2132 /* Next try complementing. */
2133 if ((temp
= alpha_emit_set_const (subtarget
, mode
, ~ c
, i
)) != 0)
2134 return expand_unop (mode
, one_cmpl_optab
, temp
, target
, 0);
2136 /* Next try to form a constant and do a left shift. We can do this
2137 if some low-order bits are zero; the exact_log2 call below tells
2138 us that information. The bits we are shifting out could be any
2139 value, but here we'll just try the 0- and sign-extended forms of
2140 the constant. To try to increase the chance of having the same
2141 constant in more than one insn, start at the highest number of
2142 bits to shift, but try all possibilities in case a ZAPNOT will
2145 if ((bits
= exact_log2 (c
& - c
)) > 0)
2146 for (; bits
> 0; bits
--)
2147 if ((temp
= (alpha_emit_set_const
2148 (subtarget
, mode
, c
>> bits
, i
))) != 0
2149 || ((temp
= (alpha_emit_set_const
2151 ((unsigned HOST_WIDE_INT
) c
) >> bits
, i
)))
2153 return expand_binop (mode
, ashl_optab
, temp
, GEN_INT (bits
),
2154 target
, 0, OPTAB_WIDEN
);
2156 /* Now try high-order zero bits. Here we try the shifted-in bits as
2157 all zero and all ones. Be careful to avoid shifting outside the
2158 mode and to avoid shifting outside the host wide int size. */
2159 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
2160 confuse the recursive call and set all of the high 32 bits. */
2162 if ((bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
2163 - floor_log2 (c
) - 1 - (HOST_BITS_PER_WIDE_INT
< 64))) > 0)
2164 for (; bits
> 0; bits
--)
2165 if ((temp
= alpha_emit_set_const (subtarget
, mode
,
2167 || ((temp
= (alpha_emit_set_const
2169 ((c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1)),
2172 return expand_binop (mode
, lshr_optab
, temp
, GEN_INT (bits
),
2173 target
, 1, OPTAB_WIDEN
);
2175 /* Now try high-order 1 bits. We get that with a sign-extension.
2176 But one bit isn't enough here. Be careful to avoid shifting outside
2177 the mode and to avoid shifting outside the host wide int size. */
2179 if ((bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
2180 - floor_log2 (~ c
) - 2)) > 0)
2181 for (; bits
> 0; bits
--)
2182 if ((temp
= alpha_emit_set_const (subtarget
, mode
,
2184 || ((temp
= (alpha_emit_set_const
2186 ((c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1)),
2189 return expand_binop (mode
, ashr_optab
, temp
, GEN_INT (bits
),
2190 target
, 0, OPTAB_WIDEN
);
2193 #if HOST_BITS_PER_WIDE_INT == 64
2194 /* Finally, see if can load a value into the target that is the same as the
2195 constant except that all bytes that are 0 are changed to be 0xff. If we
2196 can, then we can do a ZAPNOT to obtain the desired constant. */
2199 for (i
= 0; i
< 64; i
+= 8)
2200 if ((new & ((HOST_WIDE_INT
) 0xff << i
)) == 0)
2201 new |= (HOST_WIDE_INT
) 0xff << i
;
2203 /* We are only called for SImode and DImode. If this is SImode, ensure that
2204 we are sign extended to a full word. */
2207 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
2209 if (new != c
&& new != -1
2210 && (temp
= alpha_emit_set_const (subtarget
, mode
, new, n
- 1)) != 0)
2211 return expand_binop (mode
, and_optab
, temp
, GEN_INT (c
| ~ new),
2212 target
, 0, OPTAB_WIDEN
);
2218 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2219 fall back to a straight forward decomposition. We do this to avoid
2220 exponential run times encountered when looking for longer sequences
2221 with alpha_emit_set_const. */
2224 alpha_emit_set_long_const (target
, c1
, c2
)
2226 HOST_WIDE_INT c1
, c2
;
2228 HOST_WIDE_INT d1
, d2
, d3
, d4
;
2230 /* Decompose the entire word */
2231 #if HOST_BITS_PER_WIDE_INT >= 64
2232 if (c2
!= -(c1
< 0))
2234 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
2236 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2237 c1
= (c1
- d2
) >> 32;
2238 d3
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
2240 d4
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2244 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
2246 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2250 d3
= ((c2
& 0xffff) ^ 0x8000) - 0x8000;
2252 d4
= ((c2
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2257 /* Construct the high word */
2260 emit_move_insn (target
, GEN_INT (d4
));
2262 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d3
)));
2265 emit_move_insn (target
, GEN_INT (d3
));
2267 /* Shift it into place */
2268 emit_move_insn (target
, gen_rtx_ASHIFT (DImode
, target
, GEN_INT (32)));
2270 /* Add in the low bits. */
2272 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d2
)));
2274 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d1
)));
2279 /* Expand a move instruction; return true if all work is done.
2280 We don't handle non-bwx subword loads here. */
2283 alpha_expand_mov (mode
, operands
)
2284 enum machine_mode mode
;
2287 /* If the output is not a register, the input must be. */
2288 if (GET_CODE (operands
[0]) == MEM
2289 && ! reg_or_0_operand (operands
[1], mode
))
2290 operands
[1] = force_reg (mode
, operands
[1]);
2292 if (TARGET_EXPLICIT_RELOCS
&& symbolic_operand (operands
[1], mode
))
2294 if (local_symbolic_operand (operands
[1], mode
))
2298 if (small_symbolic_operand (operands
[1], Pmode
))
2299 scratch
= pic_offset_table_rtx
;
2304 scratch
= no_new_pseudos
? operands
[0] : gen_reg_rtx (Pmode
);
2306 tmp
= gen_rtx_HIGH (Pmode
, operands
[1]);
2307 tmp
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, tmp
);
2308 insn
= emit_insn (gen_rtx_SET (VOIDmode
, scratch
, tmp
));
2309 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EQUAL
, tmp
,
2313 operands
[1] = gen_rtx_LO_SUM (Pmode
, scratch
, operands
[1]);
2318 emit_insn (gen_movdi_er_high_g (operands
[0], pic_offset_table_rtx
,
2319 operands
[1], const0_rtx
));
2324 /* Early out for non-constants and valid constants. */
2325 if (! CONSTANT_P (operands
[1]) || input_operand (operands
[1], mode
))
2328 /* Split large integers. */
2329 if (GET_CODE (operands
[1]) == CONST_INT
2330 || GET_CODE (operands
[1]) == CONST_DOUBLE
)
2332 HOST_WIDE_INT i0
, i1
;
2333 rtx temp
= NULL_RTX
;
2335 if (GET_CODE (operands
[1]) == CONST_INT
)
2337 i0
= INTVAL (operands
[1]);
2340 else if (HOST_BITS_PER_WIDE_INT
>= 64)
2342 i0
= CONST_DOUBLE_LOW (operands
[1]);
2347 i0
= CONST_DOUBLE_LOW (operands
[1]);
2348 i1
= CONST_DOUBLE_HIGH (operands
[1]);
2351 if (HOST_BITS_PER_WIDE_INT
>= 64 || i1
== -(i0
< 0))
2352 temp
= alpha_emit_set_const (operands
[0], mode
, i0
, 3);
2354 if (!temp
&& TARGET_BUILD_CONSTANTS
)
2355 temp
= alpha_emit_set_long_const (operands
[0], i0
, i1
);
2359 if (rtx_equal_p (operands
[0], temp
))
2366 /* Otherwise we've nothing left but to drop the thing to memory. */
2367 operands
[1] = force_const_mem (DImode
, operands
[1]);
2368 if (reload_in_progress
)
2370 emit_move_insn (operands
[0], XEXP (operands
[1], 0));
2371 operands
[1] = copy_rtx (operands
[1]);
2372 XEXP (operands
[1], 0) = operands
[0];
2375 operands
[1] = validize_mem (operands
[1]);
2379 /* Expand a non-bwx QImode or HImode move instruction;
2380 return true if all work is done. */
2383 alpha_expand_mov_nobwx (mode
, operands
)
2384 enum machine_mode mode
;
2387 /* If the output is not a register, the input must be. */
2388 if (GET_CODE (operands
[0]) == MEM
)
2389 operands
[1] = force_reg (mode
, operands
[1]);
2391 /* Handle four memory cases, unaligned and aligned for either the input
2392 or the output. The only case where we can be called during reload is
2393 for aligned loads; all other cases require temporaries. */
2395 if (GET_CODE (operands
[1]) == MEM
2396 || (GET_CODE (operands
[1]) == SUBREG
2397 && GET_CODE (SUBREG_REG (operands
[1])) == MEM
)
2398 || (reload_in_progress
&& GET_CODE (operands
[1]) == REG
2399 && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
)
2400 || (reload_in_progress
&& GET_CODE (operands
[1]) == SUBREG
2401 && GET_CODE (SUBREG_REG (operands
[1])) == REG
2402 && REGNO (SUBREG_REG (operands
[1])) >= FIRST_PSEUDO_REGISTER
))
2404 if (aligned_memory_operand (operands
[1], mode
))
2406 if (reload_in_progress
)
2408 emit_insn ((mode
== QImode
2409 ? gen_reload_inqi_help
2410 : gen_reload_inhi_help
)
2411 (operands
[0], operands
[1],
2412 gen_rtx_REG (SImode
, REGNO (operands
[0]))));
2416 rtx aligned_mem
, bitnum
;
2417 rtx scratch
= gen_reg_rtx (SImode
);
2419 get_aligned_mem (operands
[1], &aligned_mem
, &bitnum
);
2421 emit_insn ((mode
== QImode
2422 ? gen_aligned_loadqi
2423 : gen_aligned_loadhi
)
2424 (operands
[0], aligned_mem
, bitnum
, scratch
));
2429 /* Don't pass these as parameters since that makes the generated
2430 code depend on parameter evaluation order which will cause
2431 bootstrap failures. */
2433 rtx temp1
= gen_reg_rtx (DImode
);
2434 rtx temp2
= gen_reg_rtx (DImode
);
2435 rtx seq
= ((mode
== QImode
2436 ? gen_unaligned_loadqi
2437 : gen_unaligned_loadhi
)
2438 (operands
[0], get_unaligned_address (operands
[1], 0),
2441 alpha_set_memflags (seq
, operands
[1]);
2447 if (GET_CODE (operands
[0]) == MEM
2448 || (GET_CODE (operands
[0]) == SUBREG
2449 && GET_CODE (SUBREG_REG (operands
[0])) == MEM
)
2450 || (reload_in_progress
&& GET_CODE (operands
[0]) == REG
2451 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
)
2452 || (reload_in_progress
&& GET_CODE (operands
[0]) == SUBREG
2453 && GET_CODE (SUBREG_REG (operands
[0])) == REG
2454 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
))
2456 if (aligned_memory_operand (operands
[0], mode
))
2458 rtx aligned_mem
, bitnum
;
2459 rtx temp1
= gen_reg_rtx (SImode
);
2460 rtx temp2
= gen_reg_rtx (SImode
);
2462 get_aligned_mem (operands
[0], &aligned_mem
, &bitnum
);
2464 emit_insn (gen_aligned_store (aligned_mem
, operands
[1], bitnum
,
2469 rtx temp1
= gen_reg_rtx (DImode
);
2470 rtx temp2
= gen_reg_rtx (DImode
);
2471 rtx temp3
= gen_reg_rtx (DImode
);
2472 rtx seq
= ((mode
== QImode
2473 ? gen_unaligned_storeqi
2474 : gen_unaligned_storehi
)
2475 (get_unaligned_address (operands
[0], 0),
2476 operands
[1], temp1
, temp2
, temp3
));
2478 alpha_set_memflags (seq
, operands
[0]);
2487 /* Generate an unsigned DImode to FP conversion. This is the same code
2488 optabs would emit if we didn't have TFmode patterns.
2490 For SFmode, this is the only construction I've found that can pass
2491 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2492 intermediates will work, because you'll get intermediate rounding
2493 that ruins the end result. Some of this could be fixed by turning
2494 on round-to-positive-infinity, but that requires diddling the fpsr,
2495 which kills performance. I tried turning this around and converting
2496 to a negative number, so that I could turn on /m, but either I did
2497 it wrong or there's something else cause I wound up with the exact
2498 same single-bit error. There is a branch-less form of this same code:
2509 fcmoveq $f10,$f11,$f0
2511 I'm not using it because it's the same number of instructions as
2512 this branch-full form, and it has more serialized long latency
2513 instructions on the critical path.
2515 For DFmode, we can avoid rounding errors by breaking up the word
2516 into two pieces, converting them separately, and adding them back:
2518 LC0: .long 0,0x5f800000
2523 cpyse $f11,$f31,$f10
2524 cpyse $f31,$f11,$f11
2532 This doesn't seem to be a clear-cut win over the optabs form.
2533 It probably all depends on the distribution of numbers being
2534 converted -- in the optabs form, all but high-bit-set has a
2535 much lower minimum execution time. */
2538 alpha_emit_floatuns (operands
)
2541 rtx neglab
, donelab
, i0
, i1
, f0
, in
, out
;
2542 enum machine_mode mode
;
2545 in
= force_reg (DImode
, operands
[1]);
2546 mode
= GET_MODE (out
);
2547 neglab
= gen_label_rtx ();
2548 donelab
= gen_label_rtx ();
2549 i0
= gen_reg_rtx (DImode
);
2550 i1
= gen_reg_rtx (DImode
);
2551 f0
= gen_reg_rtx (mode
);
2553 emit_cmp_and_jump_insns (in
, const0_rtx
, LT
, const0_rtx
, DImode
, 0, neglab
);
2555 emit_insn (gen_rtx_SET (VOIDmode
, out
, gen_rtx_FLOAT (mode
, in
)));
2556 emit_jump_insn (gen_jump (donelab
));
2559 emit_label (neglab
);
2561 emit_insn (gen_lshrdi3 (i0
, in
, const1_rtx
));
2562 emit_insn (gen_anddi3 (i1
, in
, const1_rtx
));
2563 emit_insn (gen_iordi3 (i0
, i0
, i1
));
2564 emit_insn (gen_rtx_SET (VOIDmode
, f0
, gen_rtx_FLOAT (mode
, i0
)));
2565 emit_insn (gen_rtx_SET (VOIDmode
, out
, gen_rtx_PLUS (mode
, f0
, f0
)));
2567 emit_label (donelab
);
2570 /* Generate the comparison for a conditional branch. */
2573 alpha_emit_conditional_branch (code
)
2576 enum rtx_code cmp_code
, branch_code
;
2577 enum machine_mode cmp_mode
, branch_mode
= VOIDmode
;
2578 rtx op0
= alpha_compare
.op0
, op1
= alpha_compare
.op1
;
2581 if (alpha_compare
.fp_p
&& GET_MODE (op0
) == TFmode
)
2583 if (! TARGET_HAS_XFLOATING_LIBS
)
2586 /* X_floating library comparison functions return
2590 Convert the compare against the raw return value. */
2592 if (code
== UNORDERED
|| code
== ORDERED
)
2597 op0
= alpha_emit_xfloating_compare (cmp_code
, op0
, op1
);
2599 alpha_compare
.fp_p
= 0;
2601 if (code
== UNORDERED
)
2603 else if (code
== ORDERED
)
2609 /* The general case: fold the comparison code to the types of compares
2610 that we have, choosing the branch as necessary. */
2613 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
2615 /* We have these compares: */
2616 cmp_code
= code
, branch_code
= NE
;
2621 /* These must be reversed. */
2622 cmp_code
= reverse_condition (code
), branch_code
= EQ
;
2625 case GE
: case GT
: case GEU
: case GTU
:
2626 /* For FP, we swap them, for INT, we reverse them. */
2627 if (alpha_compare
.fp_p
)
2629 cmp_code
= swap_condition (code
);
2631 tem
= op0
, op0
= op1
, op1
= tem
;
2635 cmp_code
= reverse_condition (code
);
2644 if (alpha_compare
.fp_p
)
2647 if (flag_unsafe_math_optimizations
)
2649 /* When we are not as concerned about non-finite values, and we
2650 are comparing against zero, we can branch directly. */
2651 if (op1
== CONST0_RTX (DFmode
))
2652 cmp_code
= NIL
, branch_code
= code
;
2653 else if (op0
== CONST0_RTX (DFmode
))
2655 /* Undo the swap we probably did just above. */
2656 tem
= op0
, op0
= op1
, op1
= tem
;
2657 branch_code
= swap_condition (cmp_code
);
2663 /* ??? We mark the the branch mode to be CCmode to prevent the
2664 compare and branch from being combined, since the compare
2665 insn follows IEEE rules that the branch does not. */
2666 branch_mode
= CCmode
;
2673 /* The following optimizations are only for signed compares. */
2674 if (code
!= LEU
&& code
!= LTU
&& code
!= GEU
&& code
!= GTU
)
2676 /* Whee. Compare and branch against 0 directly. */
2677 if (op1
== const0_rtx
)
2678 cmp_code
= NIL
, branch_code
= code
;
2680 /* We want to use cmpcc/bcc when we can, since there is a zero delay
2681 bypass between logicals and br/cmov on EV5. But we don't want to
2682 force valid immediate constants into registers needlessly. */
2683 else if (GET_CODE (op1
) == CONST_INT
)
2685 HOST_WIDE_INT v
= INTVAL (op1
), n
= -v
;
2687 if (! CONST_OK_FOR_LETTER_P (v
, 'I')
2688 && (CONST_OK_FOR_LETTER_P (n
, 'K')
2689 || CONST_OK_FOR_LETTER_P (n
, 'L')))
2691 cmp_code
= PLUS
, branch_code
= code
;
2697 if (!reg_or_0_operand (op0
, DImode
))
2698 op0
= force_reg (DImode
, op0
);
2699 if (cmp_code
!= PLUS
&& !reg_or_8bit_operand (op1
, DImode
))
2700 op1
= force_reg (DImode
, op1
);
2703 /* Emit an initial compare instruction, if necessary. */
2705 if (cmp_code
!= NIL
)
2707 tem
= gen_reg_rtx (cmp_mode
);
2708 emit_move_insn (tem
, gen_rtx_fmt_ee (cmp_code
, cmp_mode
, op0
, op1
));
2711 /* Zero the operands. */
2712 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
2714 /* Return the branch comparison. */
2715 return gen_rtx_fmt_ee (branch_code
, branch_mode
, tem
, CONST0_RTX (cmp_mode
));
2718 /* Certain simplifications can be done to make invalid setcc operations
2719 valid. Return the final comparison, or NULL if we can't work. */
2722 alpha_emit_setcc (code
)
2725 enum rtx_code cmp_code
;
2726 rtx op0
= alpha_compare
.op0
, op1
= alpha_compare
.op1
;
2727 int fp_p
= alpha_compare
.fp_p
;
2730 /* Zero the operands. */
2731 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
2733 if (fp_p
&& GET_MODE (op0
) == TFmode
)
2735 if (! TARGET_HAS_XFLOATING_LIBS
)
2738 /* X_floating library comparison functions return
2742 Convert the compare against the raw return value. */
2744 if (code
== UNORDERED
|| code
== ORDERED
)
2749 op0
= alpha_emit_xfloating_compare (cmp_code
, op0
, op1
);
2753 if (code
== UNORDERED
)
2755 else if (code
== ORDERED
)
2761 if (fp_p
&& !TARGET_FIX
)
2764 /* The general case: fold the comparison code to the types of compares
2765 that we have, choosing the branch as necessary. */
2770 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
2772 /* We have these compares. */
2774 cmp_code
= code
, code
= NE
;
2778 if (!fp_p
&& op1
== const0_rtx
)
2783 cmp_code
= reverse_condition (code
);
2787 case GE
: case GT
: case GEU
: case GTU
:
2788 /* These normally need swapping, but for integer zero we have
2789 special patterns that recognize swapped operands. */
2790 if (!fp_p
&& op1
== const0_rtx
)
2792 code
= swap_condition (code
);
2794 cmp_code
= code
, code
= NE
;
2795 tmp
= op0
, op0
= op1
, op1
= tmp
;
2804 if (!register_operand (op0
, DImode
))
2805 op0
= force_reg (DImode
, op0
);
2806 if (!reg_or_8bit_operand (op1
, DImode
))
2807 op1
= force_reg (DImode
, op1
);
2810 /* Emit an initial compare instruction, if necessary. */
2811 if (cmp_code
!= NIL
)
2813 enum machine_mode mode
= fp_p
? DFmode
: DImode
;
2815 tmp
= gen_reg_rtx (mode
);
2816 emit_insn (gen_rtx_SET (VOIDmode
, tmp
,
2817 gen_rtx_fmt_ee (cmp_code
, mode
, op0
, op1
)));
2819 op0
= fp_p
? gen_lowpart (DImode
, tmp
) : tmp
;
2823 /* Return the setcc comparison. */
2824 return gen_rtx_fmt_ee (code
, DImode
, op0
, op1
);
2828 /* Rewrite a comparison against zero CMP of the form
2829 (CODE (cc0) (const_int 0)) so it can be written validly in
2830 a conditional move (if_then_else CMP ...).
2831 If both of the operands that set cc0 are non-zero we must emit
2832 an insn to perform the compare (it can't be done within
2833 the conditional move). */
2835 alpha_emit_conditional_move (cmp
, mode
)
2837 enum machine_mode mode
;
2839 enum rtx_code code
= GET_CODE (cmp
);
2840 enum rtx_code cmov_code
= NE
;
2841 rtx op0
= alpha_compare
.op0
;
2842 rtx op1
= alpha_compare
.op1
;
2843 int fp_p
= alpha_compare
.fp_p
;
2844 enum machine_mode cmp_mode
2845 = (GET_MODE (op0
) == VOIDmode
? DImode
: GET_MODE (op0
));
2846 enum machine_mode cmp_op_mode
= fp_p
? DFmode
: DImode
;
2847 enum machine_mode cmov_mode
= VOIDmode
;
2848 int local_fast_math
= flag_unsafe_math_optimizations
;
2851 /* Zero the operands. */
2852 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
2854 if (fp_p
!= FLOAT_MODE_P (mode
))
2856 enum rtx_code cmp_code
;
2861 /* If we have fp<->int register move instructions, do a cmov by
2862 performing the comparison in fp registers, and move the
2863 zero/non-zero value to integer registers, where we can then
2864 use a normal cmov, or vice-versa. */
2868 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
2869 /* We have these compares. */
2870 cmp_code
= code
, code
= NE
;
2874 /* This must be reversed. */
2875 cmp_code
= EQ
, code
= EQ
;
2878 case GE
: case GT
: case GEU
: case GTU
:
2879 /* These normally need swapping, but for integer zero we have
2880 special patterns that recognize swapped operands. */
2881 if (!fp_p
&& op1
== const0_rtx
)
2882 cmp_code
= code
, code
= NE
;
2885 cmp_code
= swap_condition (code
);
2887 tem
= op0
, op0
= op1
, op1
= tem
;
2895 tem
= gen_reg_rtx (cmp_op_mode
);
2896 emit_insn (gen_rtx_SET (VOIDmode
, tem
,
2897 gen_rtx_fmt_ee (cmp_code
, cmp_op_mode
,
2900 cmp_mode
= cmp_op_mode
= fp_p
? DImode
: DFmode
;
2901 op0
= gen_lowpart (cmp_op_mode
, tem
);
2902 op1
= CONST0_RTX (cmp_op_mode
);
2904 local_fast_math
= 1;
2907 /* We may be able to use a conditional move directly.
2908 This avoids emitting spurious compares. */
2909 if (signed_comparison_operator (cmp
, VOIDmode
)
2910 && (!fp_p
|| local_fast_math
)
2911 && (op0
== CONST0_RTX (cmp_mode
) || op1
== CONST0_RTX (cmp_mode
)))
2912 return gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
2914 /* We can't put the comparison inside the conditional move;
2915 emit a compare instruction and put that inside the
2916 conditional move. Make sure we emit only comparisons we have;
2917 swap or reverse as necessary. */
2924 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
2925 /* We have these compares: */
2929 /* This must be reversed. */
2930 code
= reverse_condition (code
);
2934 case GE
: case GT
: case GEU
: case GTU
:
2935 /* These must be swapped. */
2936 if (op1
!= CONST0_RTX (cmp_mode
))
2938 code
= swap_condition (code
);
2939 tem
= op0
, op0
= op1
, op1
= tem
;
2949 if (!reg_or_0_operand (op0
, DImode
))
2950 op0
= force_reg (DImode
, op0
);
2951 if (!reg_or_8bit_operand (op1
, DImode
))
2952 op1
= force_reg (DImode
, op1
);
2955 /* ??? We mark the branch mode to be CCmode to prevent the compare
2956 and cmov from being combined, since the compare insn follows IEEE
2957 rules that the cmov does not. */
2958 if (fp_p
&& !local_fast_math
)
2961 tem
= gen_reg_rtx (cmp_op_mode
);
2962 emit_move_insn (tem
, gen_rtx_fmt_ee (code
, cmp_op_mode
, op0
, op1
));
2963 return gen_rtx_fmt_ee (cmov_code
, cmov_mode
, tem
, CONST0_RTX (cmp_op_mode
));
2966 /* Simplify a conditional move of two constants into a setcc with
2967 arithmetic. This is done with a splitter since combine would
2968 just undo the work if done during code generation. It also catches
2969 cases we wouldn't have before cse. */
2972 alpha_split_conditional_move (code
, dest
, cond
, t_rtx
, f_rtx
)
2974 rtx dest
, cond
, t_rtx
, f_rtx
;
2976 HOST_WIDE_INT t
, f
, diff
;
2977 enum machine_mode mode
;
2978 rtx target
, subtarget
, tmp
;
2980 mode
= GET_MODE (dest
);
2985 if (((code
== NE
|| code
== EQ
) && diff
< 0)
2986 || (code
== GE
|| code
== GT
))
2988 code
= reverse_condition (code
);
2989 diff
= t
, t
= f
, f
= diff
;
2993 subtarget
= target
= dest
;
2996 target
= gen_lowpart (DImode
, dest
);
2997 if (! no_new_pseudos
)
2998 subtarget
= gen_reg_rtx (DImode
);
3003 if (f
== 0 && exact_log2 (diff
) > 0
3004 /* On EV6, we've got enough shifters to make non-arithmatic shifts
3005 viable over a longer latency cmove. On EV5, the E0 slot is a
3006 scarce resource, and on EV4 shift has the same latency as a cmove. */
3007 && (diff
<= 8 || alpha_cpu
== PROCESSOR_EV6
))
3009 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3010 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
, tmp
));
3012 tmp
= gen_rtx_ASHIFT (DImode
, subtarget
, GEN_INT (exact_log2 (t
)));
3013 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));
3015 else if (f
== 0 && t
== -1)
3017 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3018 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
, tmp
));
3020 emit_insn (gen_negdi2 (target
, subtarget
));
3022 else if (diff
== 1 || diff
== 4 || diff
== 8)
3026 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3027 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
, tmp
));
3030 emit_insn (gen_adddi3 (target
, subtarget
, GEN_INT (f
)));
3033 add_op
= GEN_INT (f
);
3034 if (sext_add_operand (add_op
, mode
))
3036 tmp
= gen_rtx_MULT (DImode
, subtarget
, GEN_INT (diff
));
3037 tmp
= gen_rtx_PLUS (DImode
, tmp
, add_op
);
3038 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));
3050 /* Look up the function X_floating library function name for the
3054 alpha_lookup_xfloating_lib_func (code
)
3059 const enum rtx_code code
;
3060 const char *const func
;
3063 static const struct xfloating_op vms_xfloating_ops
[] =
3065 { PLUS
, "OTS$ADD_X" },
3066 { MINUS
, "OTS$SUB_X" },
3067 { MULT
, "OTS$MUL_X" },
3068 { DIV
, "OTS$DIV_X" },
3069 { EQ
, "OTS$EQL_X" },
3070 { NE
, "OTS$NEQ_X" },
3071 { LT
, "OTS$LSS_X" },
3072 { LE
, "OTS$LEQ_X" },
3073 { GT
, "OTS$GTR_X" },
3074 { GE
, "OTS$GEQ_X" },
3075 { FIX
, "OTS$CVTXQ" },
3076 { FLOAT
, "OTS$CVTQX" },
3077 { UNSIGNED_FLOAT
, "OTS$CVTQUX" },
3078 { FLOAT_EXTEND
, "OTS$CVT_FLOAT_T_X" },
3079 { FLOAT_TRUNCATE
, "OTS$CVT_FLOAT_X_T" },
3082 static const struct xfloating_op osf_xfloating_ops
[] =
3084 { PLUS
, "_OtsAddX" },
3085 { MINUS
, "_OtsSubX" },
3086 { MULT
, "_OtsMulX" },
3087 { DIV
, "_OtsDivX" },
3094 { FIX
, "_OtsCvtXQ" },
3095 { FLOAT
, "_OtsCvtQX" },
3096 { UNSIGNED_FLOAT
, "_OtsCvtQUX" },
3097 { FLOAT_EXTEND
, "_OtsConvertFloatTX" },
3098 { FLOAT_TRUNCATE
, "_OtsConvertFloatXT" },
3101 const struct xfloating_op
*ops
;
3102 const long n
= ARRAY_SIZE (osf_xfloating_ops
);
3105 /* How irritating. Nothing to key off for the table. Hardcode
3106 knowledge of the G_floating routines. */
3107 if (TARGET_FLOAT_VAX
)
3109 if (TARGET_ABI_OPEN_VMS
)
3111 if (code
== FLOAT_EXTEND
)
3112 return "OTS$CVT_FLOAT_G_X";
3113 if (code
== FLOAT_TRUNCATE
)
3114 return "OTS$CVT_FLOAT_X_G";
3118 if (code
== FLOAT_EXTEND
)
3119 return "_OtsConvertFloatGX";
3120 if (code
== FLOAT_TRUNCATE
)
3121 return "_OtsConvertFloatXG";
3125 if (TARGET_ABI_OPEN_VMS
)
3126 ops
= vms_xfloating_ops
;
3128 ops
= osf_xfloating_ops
;
3130 for (i
= 0; i
< n
; ++i
)
3131 if (ops
[i
].code
== code
)
3137 /* Most X_floating operations take the rounding mode as an argument.
3138 Compute that here. */
3141 alpha_compute_xfloating_mode_arg (code
, round
)
3143 enum alpha_fp_rounding_mode round
;
3149 case ALPHA_FPRM_NORM
:
3152 case ALPHA_FPRM_MINF
:
3155 case ALPHA_FPRM_CHOP
:
3158 case ALPHA_FPRM_DYN
:
3164 /* XXX For reference, round to +inf is mode = 3. */
3167 if (code
== FLOAT_TRUNCATE
&& alpha_fptm
== ALPHA_FPTM_N
)
3173 /* Emit an X_floating library function call.
3175 Note that these functions do not follow normal calling conventions:
3176 TFmode arguments are passed in two integer registers (as opposed to
3177 indirect); TFmode return values appear in R16+R17.
3179 FUNC is the function name to call.
3180 TARGET is where the output belongs.
3181 OPERANDS are the inputs.
3182 NOPERANDS is the count of inputs.
3183 EQUIV is the expression equivalent for the function.
3187 alpha_emit_xfloating_libcall (func
, target
, operands
, noperands
, equiv
)
3194 rtx usage
= NULL_RTX
, tmp
, reg
;
3199 for (i
= 0; i
< noperands
; ++i
)
3201 switch (GET_MODE (operands
[i
]))
3204 reg
= gen_rtx_REG (TFmode
, regno
);
3209 reg
= gen_rtx_REG (DFmode
, regno
+ 32);
3214 if (GET_CODE (operands
[i
]) != CONST_INT
)
3218 reg
= gen_rtx_REG (DImode
, regno
);
3226 emit_move_insn (reg
, operands
[i
]);
3227 usage
= alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode
, reg
), usage
);
3230 switch (GET_MODE (target
))
3233 reg
= gen_rtx_REG (TFmode
, 16);
3236 reg
= gen_rtx_REG (DFmode
, 32);
3239 reg
= gen_rtx_REG (DImode
, 0);
3245 tmp
= gen_rtx_MEM (QImode
, gen_rtx_SYMBOL_REF (Pmode
, (char *) func
));
3246 tmp
= emit_call_insn (GEN_CALL_VALUE (reg
, tmp
, const0_rtx
,
3247 const0_rtx
, const0_rtx
));
3248 CALL_INSN_FUNCTION_USAGE (tmp
) = usage
;
3253 emit_libcall_block (tmp
, target
, reg
, equiv
);
3256 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3259 alpha_emit_xfloating_arith (code
, operands
)
3265 rtx out_operands
[3];
3267 func
= alpha_lookup_xfloating_lib_func (code
);
3268 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
3270 out_operands
[0] = operands
[1];
3271 out_operands
[1] = operands
[2];
3272 out_operands
[2] = GEN_INT (mode
);
3273 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, 3,
3274 gen_rtx_fmt_ee (code
, TFmode
, operands
[1],
3278 /* Emit an X_floating library function call for a comparison. */
3281 alpha_emit_xfloating_compare (code
, op0
, op1
)
3286 rtx out
, operands
[2];
3288 func
= alpha_lookup_xfloating_lib_func (code
);
3292 out
= gen_reg_rtx (DImode
);
3294 /* ??? Strange mode for equiv because what's actually returned
3295 is -1,0,1, not a proper boolean value. */
3296 alpha_emit_xfloating_libcall (func
, out
, operands
, 2,
3297 gen_rtx_fmt_ee (code
, CCmode
, op0
, op1
));
3302 /* Emit an X_floating library function call for a conversion. */
3305 alpha_emit_xfloating_cvt (code
, operands
)
3309 int noperands
= 1, mode
;
3310 rtx out_operands
[2];
3313 func
= alpha_lookup_xfloating_lib_func (code
);
3315 out_operands
[0] = operands
[1];
3320 mode
= alpha_compute_xfloating_mode_arg (code
, ALPHA_FPRM_CHOP
);
3321 out_operands
[1] = GEN_INT (mode
);
3324 case FLOAT_TRUNCATE
:
3325 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
3326 out_operands
[1] = GEN_INT (mode
);
3333 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, noperands
,
3334 gen_rtx_fmt_e (code
, GET_MODE (operands
[0]),
3338 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3339 OP[0] into OP[0,1]. Naturally, output operand ordering is
3343 alpha_split_tfmode_pair (operands
)
3346 if (GET_CODE (operands
[1]) == REG
)
3348 operands
[3] = gen_rtx_REG (DImode
, REGNO (operands
[1]) + 1);
3349 operands
[2] = gen_rtx_REG (DImode
, REGNO (operands
[1]));
3351 else if (GET_CODE (operands
[1]) == MEM
)
3353 operands
[3] = adjust_address (operands
[1], DImode
, 8);
3354 operands
[2] = adjust_address (operands
[1], DImode
, 0);
3356 else if (operands
[1] == CONST0_RTX (TFmode
))
3357 operands
[2] = operands
[3] = const0_rtx
;
3361 if (GET_CODE (operands
[0]) == REG
)
3363 operands
[1] = gen_rtx_REG (DImode
, REGNO (operands
[0]) + 1);
3364 operands
[0] = gen_rtx_REG (DImode
, REGNO (operands
[0]));
3366 else if (GET_CODE (operands
[0]) == MEM
)
3368 operands
[1] = adjust_address (operands
[0], DImode
, 8);
3369 operands
[0] = adjust_address (operands
[0], DImode
, 0);
3375 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3376 op2 is a register containing the sign bit, operation is the
3377 logical operation to be performed. */
3380 alpha_split_tfmode_frobsign (operands
, operation
)
3382 rtx (*operation
) PARAMS ((rtx
, rtx
, rtx
));
3384 rtx high_bit
= operands
[2];
3388 alpha_split_tfmode_pair (operands
);
3390 /* Detect three flavours of operand overlap. */
3392 if (rtx_equal_p (operands
[0], operands
[2]))
3394 else if (rtx_equal_p (operands
[1], operands
[2]))
3396 if (rtx_equal_p (operands
[0], high_bit
))
3403 emit_move_insn (operands
[0], operands
[2]);
3405 /* ??? If the destination overlaps both source tf and high_bit, then
3406 assume source tf is dead in its entirety and use the other half
3407 for a scratch register. Otherwise "scratch" is just the proper
3408 destination register. */
3409 scratch
= operands
[move
< 2 ? 1 : 3];
3411 emit_insn ((*operation
) (scratch
, high_bit
, operands
[3]));
3415 emit_move_insn (operands
[0], operands
[2]);
3417 emit_move_insn (operands
[1], scratch
);
3421 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3425 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3426 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3427 lda r3,X(r11) lda r3,X+2(r11)
3428 extwl r1,r3,r1 extql r1,r3,r1
3429 extwh r2,r3,r2 extqh r2,r3,r2
3430 or r1.r2.r1 or r1,r2,r1
3433 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3434 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3435 lda r3,X(r11) lda r3,X(r11)
3436 extll r1,r3,r1 extll r1,r3,r1
3437 extlh r2,r3,r2 extlh r2,r3,r2
3438 or r1.r2.r1 addl r1,r2,r1
3440 quad: ldq_u r1,X(r11)
3449 alpha_expand_unaligned_load (tgt
, mem
, size
, ofs
, sign
)
3451 HOST_WIDE_INT size
, ofs
;
3454 rtx meml
, memh
, addr
, extl
, exth
, tmp
, mema
;
3455 enum machine_mode mode
;
3457 meml
= gen_reg_rtx (DImode
);
3458 memh
= gen_reg_rtx (DImode
);
3459 addr
= gen_reg_rtx (DImode
);
3460 extl
= gen_reg_rtx (DImode
);
3461 exth
= gen_reg_rtx (DImode
);
3463 mema
= XEXP (mem
, 0);
3464 if (GET_CODE (mema
) == LO_SUM
)
3465 mema
= force_reg (Pmode
, mema
);
3467 /* AND addresses cannot be in any alias set, since they may implicitly
3468 alias surrounding code. Ideally we'd have some alias set that
3469 covered all types except those with alignment 8 or higher. */
3471 tmp
= change_address (mem
, DImode
,
3472 gen_rtx_AND (DImode
,
3473 plus_constant (mema
, ofs
),
3475 set_mem_alias_set (tmp
, 0);
3476 emit_move_insn (meml
, tmp
);
3478 tmp
= change_address (mem
, DImode
,
3479 gen_rtx_AND (DImode
,
3480 plus_constant (mema
, ofs
+ size
- 1),
3482 set_mem_alias_set (tmp
, 0);
3483 emit_move_insn (memh
, tmp
);
3485 if (WORDS_BIG_ENDIAN
&& sign
&& (size
== 2 || size
== 4))
3487 emit_move_insn (addr
, plus_constant (mema
, -1));
3489 emit_insn (gen_extqh_be (extl
, meml
, addr
));
3490 emit_insn (gen_extxl_be (exth
, memh
, GEN_INT (64), addr
));
3492 addr
= expand_binop (DImode
, ior_optab
, extl
, exth
, tgt
, 1, OPTAB_WIDEN
);
3493 addr
= expand_binop (DImode
, ashr_optab
, addr
, GEN_INT (64 - size
*8),
3494 addr
, 1, OPTAB_WIDEN
);
3496 else if (sign
&& size
== 2)
3498 emit_move_insn (addr
, plus_constant (mema
, ofs
+2));
3500 emit_insn (gen_extxl_le (extl
, meml
, GEN_INT (64), addr
));
3501 emit_insn (gen_extqh_le (exth
, memh
, addr
));
3503 /* We must use tgt here for the target. Alpha-vms port fails if we use
3504 addr for the target, because addr is marked as a pointer and combine
3505 knows that pointers are always sign-extended 32 bit values. */
3506 addr
= expand_binop (DImode
, ior_optab
, extl
, exth
, tgt
, 1, OPTAB_WIDEN
);
3507 addr
= expand_binop (DImode
, ashr_optab
, addr
, GEN_INT (48),
3508 addr
, 1, OPTAB_WIDEN
);
3512 if (WORDS_BIG_ENDIAN
)
3514 emit_move_insn (addr
, plus_constant (mema
, ofs
+size
-1));
3518 emit_insn (gen_extwh_be (extl
, meml
, addr
));
3523 emit_insn (gen_extlh_be (extl
, meml
, addr
));
3528 emit_insn (gen_extqh_be (extl
, meml
, addr
));
3535 emit_insn (gen_extxl_be (exth
, memh
, GEN_INT (size
*8), addr
));
3539 emit_move_insn (addr
, plus_constant (mema
, ofs
));
3540 emit_insn (gen_extxl_le (extl
, meml
, GEN_INT (size
*8), addr
));
3544 emit_insn (gen_extwh_le (exth
, memh
, addr
));
3549 emit_insn (gen_extlh_le (exth
, memh
, addr
));
3554 emit_insn (gen_extqh_le (exth
, memh
, addr
));
3563 addr
= expand_binop (mode
, ior_optab
, gen_lowpart (mode
, extl
),
3564 gen_lowpart (mode
, exth
), gen_lowpart (mode
, tgt
),
3569 emit_move_insn (tgt
, gen_lowpart(GET_MODE (tgt
), addr
));
3572 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3575 alpha_expand_unaligned_store (dst
, src
, size
, ofs
)
3577 HOST_WIDE_INT size
, ofs
;
3579 rtx dstl
, dsth
, addr
, insl
, insh
, meml
, memh
, dsta
;
3581 dstl
= gen_reg_rtx (DImode
);
3582 dsth
= gen_reg_rtx (DImode
);
3583 insl
= gen_reg_rtx (DImode
);
3584 insh
= gen_reg_rtx (DImode
);
3586 dsta
= XEXP (dst
, 0);
3587 if (GET_CODE (dsta
) == LO_SUM
)
3588 dsta
= force_reg (Pmode
, dsta
);
3590 /* AND addresses cannot be in any alias set, since they may implicitly
3591 alias surrounding code. Ideally we'd have some alias set that
3592 covered all types except those with alignment 8 or higher. */
3594 meml
= change_address (dst
, DImode
,
3595 gen_rtx_AND (DImode
,
3596 plus_constant (dsta
, ofs
),
3598 set_mem_alias_set (meml
, 0);
3600 memh
= change_address (dst
, DImode
,
3601 gen_rtx_AND (DImode
,
3602 plus_constant (dsta
, ofs
+ size
- 1),
3604 set_mem_alias_set (memh
, 0);
3606 emit_move_insn (dsth
, memh
);
3607 emit_move_insn (dstl
, meml
);
3608 if (WORDS_BIG_ENDIAN
)
3610 addr
= copy_addr_to_reg (plus_constant (dsta
, ofs
+size
-1));
3612 if (src
!= const0_rtx
)
3617 emit_insn (gen_inswl_be (insh
, gen_lowpart (HImode
,src
), addr
));
3620 emit_insn (gen_insll_be (insh
, gen_lowpart (SImode
,src
), addr
));
3623 emit_insn (gen_insql_be (insh
, gen_lowpart (DImode
,src
), addr
));
3626 emit_insn (gen_insxh (insl
, gen_lowpart (DImode
, src
),
3627 GEN_INT (size
*8), addr
));
3633 emit_insn (gen_mskxl_be (dsth
, dsth
, GEN_INT (0xffff), addr
));
3636 emit_insn (gen_mskxl_be (dsth
, dsth
, GEN_INT (0xffffffff), addr
));
3640 #if HOST_BITS_PER_WIDE_INT == 32
3641 rtx msk
= immed_double_const (0xffffffff, 0xffffffff, DImode
);
3643 rtx msk
= constm1_rtx
;
3645 emit_insn (gen_mskxl_be (dsth
, dsth
, msk
, addr
));
3650 emit_insn (gen_mskxh (dstl
, dstl
, GEN_INT (size
*8), addr
));
3654 addr
= copy_addr_to_reg (plus_constant (dsta
, ofs
));
3656 if (src
!= const0_rtx
)
3658 emit_insn (gen_insxh (insh
, gen_lowpart (DImode
, src
),
3659 GEN_INT (size
*8), addr
));
3664 emit_insn (gen_inswl_le (insl
, gen_lowpart (HImode
, src
), addr
));
3667 emit_insn (gen_insll_le (insl
, gen_lowpart (SImode
, src
), addr
));
3670 emit_insn (gen_insql_le (insl
, src
, addr
));
3675 emit_insn (gen_mskxh (dsth
, dsth
, GEN_INT (size
*8), addr
));
3680 emit_insn (gen_mskxl_le (dstl
, dstl
, GEN_INT (0xffff), addr
));
3683 emit_insn (gen_mskxl_le (dstl
, dstl
, GEN_INT (0xffffffff), addr
));
3687 #if HOST_BITS_PER_WIDE_INT == 32
3688 rtx msk
= immed_double_const (0xffffffff, 0xffffffff, DImode
);
3690 rtx msk
= constm1_rtx
;
3692 emit_insn (gen_mskxl_le (dstl
, dstl
, msk
, addr
));
3698 if (src
!= const0_rtx
)
3700 dsth
= expand_binop (DImode
, ior_optab
, insh
, dsth
, dsth
, 0, OPTAB_WIDEN
);
3701 dstl
= expand_binop (DImode
, ior_optab
, insl
, dstl
, dstl
, 0, OPTAB_WIDEN
);
3704 if (WORDS_BIG_ENDIAN
)
3706 emit_move_insn (meml
, dstl
);
3707 emit_move_insn (memh
, dsth
);
3711 /* Must store high before low for degenerate case of aligned. */
3712 emit_move_insn (memh
, dsth
);
3713 emit_move_insn (meml
, dstl
);
3717 /* The block move code tries to maximize speed by separating loads and
3718 stores at the expense of register pressure: we load all of the data
3719 before we store it back out. There are two secondary effects worth
3720 mentioning, that this speeds copying to/from aligned and unaligned
3721 buffers, and that it makes the code significantly easier to write. */
3723 #define MAX_MOVE_WORDS 8
3725 /* Load an integral number of consecutive unaligned quadwords. */
3728 alpha_expand_unaligned_load_words (out_regs
, smem
, words
, ofs
)
3731 HOST_WIDE_INT words
, ofs
;
3733 rtx
const im8
= GEN_INT (-8);
3734 rtx
const i64
= GEN_INT (64);
3735 rtx ext_tmps
[MAX_MOVE_WORDS
], data_regs
[MAX_MOVE_WORDS
+1];
3736 rtx sreg
, areg
, tmp
, smema
;
3739 smema
= XEXP (smem
, 0);
3740 if (GET_CODE (smema
) == LO_SUM
)
3741 smema
= force_reg (Pmode
, smema
);
3743 /* Generate all the tmp registers we need. */
3744 for (i
= 0; i
< words
; ++i
)
3746 data_regs
[i
] = out_regs
[i
];
3747 ext_tmps
[i
] = gen_reg_rtx (DImode
);
3749 data_regs
[words
] = gen_reg_rtx (DImode
);
3752 smem
= adjust_address (smem
, GET_MODE (smem
), ofs
);
3754 /* Load up all of the source data. */
3755 for (i
= 0; i
< words
; ++i
)
3757 tmp
= change_address (smem
, DImode
,
3758 gen_rtx_AND (DImode
,
3759 plus_constant (smema
, 8*i
),
3761 set_mem_alias_set (tmp
, 0);
3762 emit_move_insn (data_regs
[i
], tmp
);
3765 tmp
= change_address (smem
, DImode
,
3766 gen_rtx_AND (DImode
,
3767 plus_constant (smema
, 8*words
- 1),
3769 set_mem_alias_set (tmp
, 0);
3770 emit_move_insn (data_regs
[words
], tmp
);
3772 /* Extract the half-word fragments. Unfortunately DEC decided to make
3773 extxh with offset zero a noop instead of zeroing the register, so
3774 we must take care of that edge condition ourselves with cmov. */
3776 sreg
= copy_addr_to_reg (smema
);
3777 areg
= expand_binop (DImode
, and_optab
, sreg
, GEN_INT (7), NULL
,
3779 if (WORDS_BIG_ENDIAN
)
3780 emit_move_insn (sreg
, plus_constant (sreg
, 7));
3781 for (i
= 0; i
< words
; ++i
)
3783 if (WORDS_BIG_ENDIAN
)
3785 emit_insn (gen_extqh_be (data_regs
[i
], data_regs
[i
], sreg
));
3786 emit_insn (gen_extxl_be (ext_tmps
[i
], data_regs
[i
+1], i64
, sreg
));
3790 emit_insn (gen_extxl_le (data_regs
[i
], data_regs
[i
], i64
, sreg
));
3791 emit_insn (gen_extqh_le (ext_tmps
[i
], data_regs
[i
+1], sreg
));
3793 emit_insn (gen_rtx_SET (VOIDmode
, ext_tmps
[i
],
3794 gen_rtx_IF_THEN_ELSE (DImode
,
3795 gen_rtx_EQ (DImode
, areg
,
3797 const0_rtx
, ext_tmps
[i
])));
3800 /* Merge the half-words into whole words. */
3801 for (i
= 0; i
< words
; ++i
)
3803 out_regs
[i
] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
3804 ext_tmps
[i
], data_regs
[i
], 1, OPTAB_WIDEN
);
3808 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3809 may be NULL to store zeros. */
3812 alpha_expand_unaligned_store_words (data_regs
, dmem
, words
, ofs
)
3815 HOST_WIDE_INT words
, ofs
;
3817 rtx
const im8
= GEN_INT (-8);
3818 rtx
const i64
= GEN_INT (64);
3819 #if HOST_BITS_PER_WIDE_INT == 32
3820 rtx
const im1
= immed_double_const (0xffffffff, 0xffffffff, DImode
);
3822 rtx
const im1
= constm1_rtx
;
3824 rtx ins_tmps
[MAX_MOVE_WORDS
];
3825 rtx st_tmp_1
, st_tmp_2
, dreg
;
3826 rtx st_addr_1
, st_addr_2
, dmema
;
3829 dmema
= XEXP (dmem
, 0);
3830 if (GET_CODE (dmema
) == LO_SUM
)
3831 dmema
= force_reg (Pmode
, dmema
);
3833 /* Generate all the tmp registers we need. */
3834 if (data_regs
!= NULL
)
3835 for (i
= 0; i
< words
; ++i
)
3836 ins_tmps
[i
] = gen_reg_rtx(DImode
);
3837 st_tmp_1
= gen_reg_rtx(DImode
);
3838 st_tmp_2
= gen_reg_rtx(DImode
);
3841 dmem
= adjust_address (dmem
, GET_MODE (dmem
), ofs
);
3843 st_addr_2
= change_address (dmem
, DImode
,
3844 gen_rtx_AND (DImode
,
3845 plus_constant (dmema
, words
*8 - 1),
3847 set_mem_alias_set (st_addr_2
, 0);
3849 st_addr_1
= change_address (dmem
, DImode
,
3850 gen_rtx_AND (DImode
, dmema
, im8
));
3851 set_mem_alias_set (st_addr_1
, 0);
3853 /* Load up the destination end bits. */
3854 emit_move_insn (st_tmp_2
, st_addr_2
);
3855 emit_move_insn (st_tmp_1
, st_addr_1
);
3857 /* Shift the input data into place. */
3858 dreg
= copy_addr_to_reg (dmema
);
3859 if (WORDS_BIG_ENDIAN
)
3860 emit_move_insn (dreg
, plus_constant (dreg
, 7));
3861 if (data_regs
!= NULL
)
3863 for (i
= words
-1; i
>= 0; --i
)
3865 if (WORDS_BIG_ENDIAN
)
3867 emit_insn (gen_insql_be (ins_tmps
[i
], data_regs
[i
], dreg
));
3868 emit_insn (gen_insxh (data_regs
[i
], data_regs
[i
], i64
, dreg
));
3872 emit_insn (gen_insxh (ins_tmps
[i
], data_regs
[i
], i64
, dreg
));
3873 emit_insn (gen_insql_le (data_regs
[i
], data_regs
[i
], dreg
));
3876 for (i
= words
-1; i
> 0; --i
)
3878 ins_tmps
[i
-1] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
3879 ins_tmps
[i
-1], ins_tmps
[i
-1], 1,
3884 /* Split and merge the ends with the destination data. */
3885 if (WORDS_BIG_ENDIAN
)
3887 emit_insn (gen_mskxl_be (st_tmp_2
, st_tmp_2
, im1
, dreg
));
3888 emit_insn (gen_mskxh (st_tmp_1
, st_tmp_1
, i64
, dreg
));
3892 emit_insn (gen_mskxh (st_tmp_2
, st_tmp_2
, i64
, dreg
));
3893 emit_insn (gen_mskxl_le (st_tmp_1
, st_tmp_1
, im1
, dreg
));
3896 if (data_regs
!= NULL
)
3898 st_tmp_2
= expand_binop (DImode
, ior_optab
, st_tmp_2
, ins_tmps
[words
-1],
3899 st_tmp_2
, 1, OPTAB_WIDEN
);
3900 st_tmp_1
= expand_binop (DImode
, ior_optab
, st_tmp_1
, data_regs
[0],
3901 st_tmp_1
, 1, OPTAB_WIDEN
);
3905 if (WORDS_BIG_ENDIAN
)
3906 emit_move_insn (st_addr_1
, st_tmp_1
);
3908 emit_move_insn (st_addr_2
, st_tmp_2
);
3909 for (i
= words
-1; i
> 0; --i
)
3911 rtx tmp
= change_address (dmem
, DImode
,
3912 gen_rtx_AND (DImode
,
3913 plus_constant(dmema
,
3914 WORDS_BIG_ENDIAN
? i
*8-1 : i
*8),
3916 set_mem_alias_set (tmp
, 0);
3917 emit_move_insn (tmp
, data_regs
? ins_tmps
[i
-1] : const0_rtx
);
3919 if (WORDS_BIG_ENDIAN
)
3920 emit_move_insn (st_addr_2
, st_tmp_2
);
3922 emit_move_insn (st_addr_1
, st_tmp_1
);
3926 /* Expand string/block move operations.
3928 operands[0] is the pointer to the destination.
3929 operands[1] is the pointer to the source.
3930 operands[2] is the number of bytes to move.
3931 operands[3] is the alignment. */
3934 alpha_expand_block_move (operands
)
3937 rtx bytes_rtx
= operands
[2];
3938 rtx align_rtx
= operands
[3];
3939 HOST_WIDE_INT orig_bytes
= INTVAL (bytes_rtx
);
3940 HOST_WIDE_INT bytes
= orig_bytes
;
3941 HOST_WIDE_INT src_align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
3942 HOST_WIDE_INT dst_align
= src_align
;
3943 rtx orig_src
= operands
[1];
3944 rtx orig_dst
= operands
[0];
3945 rtx data_regs
[2 * MAX_MOVE_WORDS
+ 16];
3947 unsigned int i
, words
, ofs
, nregs
= 0;
3949 if (orig_bytes
<= 0)
3951 else if (orig_bytes
> MAX_MOVE_WORDS
* UNITS_PER_WORD
)
3954 /* Look for additional alignment information from recorded register info. */
3956 tmp
= XEXP (orig_src
, 0);
3957 if (GET_CODE (tmp
) == REG
)
3958 src_align
= MAX (src_align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
3959 else if (GET_CODE (tmp
) == PLUS
3960 && GET_CODE (XEXP (tmp
, 0)) == REG
3961 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
3963 unsigned HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
3964 unsigned int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
3968 if (a
>= 64 && c
% 8 == 0)
3970 else if (a
>= 32 && c
% 4 == 0)
3972 else if (a
>= 16 && c
% 2 == 0)
3977 tmp
= XEXP (orig_dst
, 0);
3978 if (GET_CODE (tmp
) == REG
)
3979 dst_align
= MAX (dst_align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
3980 else if (GET_CODE (tmp
) == PLUS
3981 && GET_CODE (XEXP (tmp
, 0)) == REG
3982 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
3984 unsigned HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
3985 unsigned int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
3989 if (a
>= 64 && c
% 8 == 0)
3991 else if (a
>= 32 && c
% 4 == 0)
3993 else if (a
>= 16 && c
% 2 == 0)
3998 /* Load the entire block into registers. */
3999 if (GET_CODE (XEXP (orig_src
, 0)) == ADDRESSOF
)
4001 enum machine_mode mode
;
4003 tmp
= XEXP (XEXP (orig_src
, 0), 0);
4005 /* Don't use the existing register if we're reading more than
4006 is held in the register. Nor if there is not a mode that
4007 handles the exact size. */
4008 mode
= mode_for_size (bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4010 && GET_MODE_SIZE (GET_MODE (tmp
)) >= bytes
)
4014 data_regs
[nregs
] = gen_lowpart (DImode
, tmp
);
4015 data_regs
[nregs
+ 1] = gen_highpart (DImode
, tmp
);
4019 data_regs
[nregs
++] = gen_lowpart (mode
, tmp
);
4024 /* No appropriate mode; fall back on memory. */
4025 orig_src
= replace_equiv_address (orig_src
,
4026 copy_addr_to_reg (XEXP (orig_src
, 0)));
4027 src_align
= GET_MODE_BITSIZE (GET_MODE (tmp
));
4031 if (src_align
>= 64 && bytes
>= 8)
4035 for (i
= 0; i
< words
; ++i
)
4036 data_regs
[nregs
+ i
] = gen_reg_rtx (DImode
);
4038 for (i
= 0; i
< words
; ++i
)
4039 emit_move_insn (data_regs
[nregs
+ i
],
4040 adjust_address (orig_src
, DImode
, ofs
+ i
* 8));
4047 if (src_align
>= 32 && bytes
>= 4)
4051 for (i
= 0; i
< words
; ++i
)
4052 data_regs
[nregs
+ i
] = gen_reg_rtx (SImode
);
4054 for (i
= 0; i
< words
; ++i
)
4055 emit_move_insn (data_regs
[nregs
+ i
],
4056 adjust_address (orig_src
, SImode
, ofs
+ i
* 4));
4067 for (i
= 0; i
< words
+1; ++i
)
4068 data_regs
[nregs
+ i
] = gen_reg_rtx (DImode
);
4070 alpha_expand_unaligned_load_words (data_regs
+ nregs
, orig_src
,
4078 if (! TARGET_BWX
&& bytes
>= 4)
4080 data_regs
[nregs
++] = tmp
= gen_reg_rtx (SImode
);
4081 alpha_expand_unaligned_load (tmp
, orig_src
, 4, ofs
, 0);
4088 if (src_align
>= 16)
4091 data_regs
[nregs
++] = tmp
= gen_reg_rtx (HImode
);
4092 emit_move_insn (tmp
, adjust_address (orig_src
, HImode
, ofs
));
4095 } while (bytes
>= 2);
4097 else if (! TARGET_BWX
)
4099 data_regs
[nregs
++] = tmp
= gen_reg_rtx (HImode
);
4100 alpha_expand_unaligned_load (tmp
, orig_src
, 2, ofs
, 0);
4108 data_regs
[nregs
++] = tmp
= gen_reg_rtx (QImode
);
4109 emit_move_insn (tmp
, adjust_address (orig_src
, QImode
, ofs
));
4116 if (nregs
> ARRAY_SIZE (data_regs
))
4119 /* Now save it back out again. */
4123 if (GET_CODE (XEXP (orig_dst
, 0)) == ADDRESSOF
)
4125 enum machine_mode mode
;
4126 tmp
= XEXP (XEXP (orig_dst
, 0), 0);
4128 mode
= mode_for_size (orig_bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4129 if (GET_MODE (tmp
) == mode
)
4133 emit_move_insn (tmp
, data_regs
[0]);
4138 else if (nregs
== 2 && mode
== TImode
)
4140 /* Undo the subregging done above when copying between
4141 two TImode registers. */
4142 if (GET_CODE (data_regs
[0]) == SUBREG
4143 && GET_MODE (SUBREG_REG (data_regs
[0])) == TImode
)
4144 emit_move_insn (tmp
, SUBREG_REG (data_regs
[0]));
4150 emit_move_insn (gen_lowpart (DImode
, tmp
), data_regs
[0]);
4151 emit_move_insn (gen_highpart (DImode
, tmp
), data_regs
[1]);
4155 emit_no_conflict_block (seq
, tmp
, data_regs
[0],
4156 data_regs
[1], NULL_RTX
);
4164 /* ??? If nregs > 1, consider reconstructing the word in regs. */
4165 /* ??? Optimize mode < dst_mode with strict_low_part. */
4167 /* No appropriate mode; fall back on memory. We can speed things
4168 up by recognizing extra alignment information. */
4169 orig_dst
= replace_equiv_address (orig_dst
,
4170 copy_addr_to_reg (XEXP (orig_dst
, 0)));
4171 dst_align
= GET_MODE_BITSIZE (GET_MODE (tmp
));
4174 /* Write out the data in whatever chunks reading the source allowed. */
4175 if (dst_align
>= 64)
4177 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
4179 emit_move_insn (adjust_address (orig_dst
, DImode
, ofs
),
4186 if (dst_align
>= 32)
4188 /* If the source has remaining DImode regs, write them out in
4190 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
4192 tmp
= expand_binop (DImode
, lshr_optab
, data_regs
[i
], GEN_INT (32),
4193 NULL_RTX
, 1, OPTAB_WIDEN
);
4195 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
),
4196 gen_lowpart (SImode
, data_regs
[i
]));
4197 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
+ 4),
4198 gen_lowpart (SImode
, tmp
));
4203 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == SImode
)
4205 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
),
4212 if (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
4214 /* Write out a remaining block of words using unaligned methods. */
4216 for (words
= 1; i
+ words
< nregs
; words
++)
4217 if (GET_MODE (data_regs
[i
+ words
]) != DImode
)
4221 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 8, ofs
);
4223 alpha_expand_unaligned_store_words (data_regs
+ i
, orig_dst
,
4230 /* Due to the above, this won't be aligned. */
4231 /* ??? If we have more than one of these, consider constructing full
4232 words in registers and using alpha_expand_unaligned_store_words. */
4233 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == SImode
)
4235 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 4, ofs
);
4240 if (dst_align
>= 16)
4241 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == HImode
)
4243 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
), data_regs
[i
]);
4248 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == HImode
)
4250 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 2, ofs
);
4255 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == QImode
)
4257 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), data_regs
[i
]);
4271 alpha_expand_block_clear (operands
)
4274 rtx bytes_rtx
= operands
[1];
4275 rtx align_rtx
= operands
[2];
4276 HOST_WIDE_INT orig_bytes
= INTVAL (bytes_rtx
);
4277 HOST_WIDE_INT bytes
= orig_bytes
;
4278 HOST_WIDE_INT align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
4279 HOST_WIDE_INT alignofs
= 0;
4280 rtx orig_dst
= operands
[0];
4282 int i
, words
, ofs
= 0;
4284 if (orig_bytes
<= 0)
4286 if (orig_bytes
> MAX_MOVE_WORDS
* UNITS_PER_WORD
)
4289 /* Look for stricter alignment. */
4290 tmp
= XEXP (orig_dst
, 0);
4291 if (GET_CODE (tmp
) == REG
)
4292 align
= MAX (align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
4293 else if (GET_CODE (tmp
) == PLUS
4294 && GET_CODE (XEXP (tmp
, 0)) == REG
4295 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
4297 HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
4298 int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
4303 align
= a
, alignofs
= 8 - c
% 8;
4305 align
= a
, alignofs
= 4 - c
% 4;
4307 align
= a
, alignofs
= 2 - c
% 2;
4310 else if (GET_CODE (tmp
) == ADDRESSOF
)
4312 enum machine_mode mode
;
4314 mode
= mode_for_size (bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4315 if (GET_MODE (XEXP (tmp
, 0)) == mode
)
4317 emit_move_insn (XEXP (tmp
, 0), const0_rtx
);
4321 /* No appropriate mode; fall back on memory. */
4322 orig_dst
= replace_equiv_address (orig_dst
, copy_addr_to_reg (tmp
));
4323 align
= GET_MODE_BITSIZE (GET_MODE (XEXP (tmp
, 0)));
4326 /* Handle an unaligned prefix first. */
4330 #if HOST_BITS_PER_WIDE_INT >= 64
4331 /* Given that alignofs is bounded by align, the only time BWX could
4332 generate three stores is for a 7 byte fill. Prefer two individual
4333 stores over a load/mask/store sequence. */
4334 if ((!TARGET_BWX
|| alignofs
== 7)
4336 && !(alignofs
== 4 && bytes
>= 4))
4338 enum machine_mode mode
= (align
>= 64 ? DImode
: SImode
);
4339 int inv_alignofs
= (align
>= 64 ? 8 : 4) - alignofs
;
4343 mem
= adjust_address (orig_dst
, mode
, ofs
- inv_alignofs
);
4344 set_mem_alias_set (mem
, 0);
4346 mask
= ~(~(HOST_WIDE_INT
)0 << (inv_alignofs
* 8));
4347 if (bytes
< alignofs
)
4349 mask
|= ~(HOST_WIDE_INT
)0 << ((inv_alignofs
+ bytes
) * 8);
4360 tmp
= expand_binop (mode
, and_optab
, mem
, GEN_INT (mask
),
4361 NULL_RTX
, 1, OPTAB_WIDEN
);
4363 emit_move_insn (mem
, tmp
);
4367 if (TARGET_BWX
&& (alignofs
& 1) && bytes
>= 1)
4369 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), const0_rtx
);
4374 if (TARGET_BWX
&& align
>= 16 && (alignofs
& 3) == 2 && bytes
>= 2)
4376 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
), const0_rtx
);
4381 if (alignofs
== 4 && bytes
>= 4)
4383 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
), const0_rtx
);
4389 /* If we've not used the extra lead alignment information by now,
4390 we won't be able to. Downgrade align to match what's left over. */
4393 alignofs
= alignofs
& -alignofs
;
4394 align
= MIN (align
, alignofs
* BITS_PER_UNIT
);
4398 /* Handle a block of contiguous long-words. */
4400 if (align
>= 64 && bytes
>= 8)
4404 for (i
= 0; i
< words
; ++i
)
4405 emit_move_insn (adjust_address (orig_dst
, DImode
, ofs
+ i
* 8),
4412 /* If the block is large and appropriately aligned, emit a single
4413 store followed by a sequence of stq_u insns. */
4415 if (align
>= 32 && bytes
> 16)
4419 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
), const0_rtx
);
4423 orig_dsta
= XEXP (orig_dst
, 0);
4424 if (GET_CODE (orig_dsta
) == LO_SUM
)
4425 orig_dsta
= force_reg (Pmode
, orig_dsta
);
4428 for (i
= 0; i
< words
; ++i
)
4431 = change_address (orig_dst
, DImode
,
4432 gen_rtx_AND (DImode
,
4433 plus_constant (orig_dsta
, ofs
+ i
*8),
4435 set_mem_alias_set (mem
, 0);
4436 emit_move_insn (mem
, const0_rtx
);
4439 /* Depending on the alignment, the first stq_u may have overlapped
4440 with the initial stl, which means that the last stq_u didn't
4441 write as much as it would appear. Leave those questionable bytes
4443 bytes
-= words
* 8 - 4;
4444 ofs
+= words
* 8 - 4;
4447 /* Handle a smaller block of aligned words. */
4449 if ((align
>= 64 && bytes
== 4)
4450 || (align
== 32 && bytes
>= 4))
4454 for (i
= 0; i
< words
; ++i
)
4455 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
+ i
* 4),
4462 /* An unaligned block uses stq_u stores for as many as possible. */
4468 alpha_expand_unaligned_store_words (NULL
, orig_dst
, words
, ofs
);
4474 /* Next clean up any trailing pieces. */
4476 #if HOST_BITS_PER_WIDE_INT >= 64
4477 /* Count the number of bits in BYTES for which aligned stores could
4480 for (i
= (TARGET_BWX
? 1 : 4); i
* BITS_PER_UNIT
<= align
; i
<<= 1)
4484 /* If we have appropriate alignment (and it wouldn't take too many
4485 instructions otherwise), mask out the bytes we need. */
4486 if (TARGET_BWX
? words
> 2 : bytes
> 0)
4493 mem
= adjust_address (orig_dst
, DImode
, ofs
);
4494 set_mem_alias_set (mem
, 0);
4496 mask
= ~(HOST_WIDE_INT
)0 << (bytes
* 8);
4498 tmp
= expand_binop (DImode
, and_optab
, mem
, GEN_INT (mask
),
4499 NULL_RTX
, 1, OPTAB_WIDEN
);
4501 emit_move_insn (mem
, tmp
);
4504 else if (align
>= 32 && bytes
< 4)
4509 mem
= adjust_address (orig_dst
, SImode
, ofs
);
4510 set_mem_alias_set (mem
, 0);
4512 mask
= ~(HOST_WIDE_INT
)0 << (bytes
* 8);
4514 tmp
= expand_binop (SImode
, and_optab
, mem
, GEN_INT (mask
),
4515 NULL_RTX
, 1, OPTAB_WIDEN
);
4517 emit_move_insn (mem
, tmp
);
4523 if (!TARGET_BWX
&& bytes
>= 4)
4525 alpha_expand_unaligned_store (orig_dst
, const0_rtx
, 4, ofs
);
4535 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
),
4539 } while (bytes
>= 2);
4541 else if (! TARGET_BWX
)
4543 alpha_expand_unaligned_store (orig_dst
, const0_rtx
, 2, ofs
);
4551 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), const0_rtx
);
4559 /* Adjust the cost of a scheduling dependency. Return the new cost of
4560 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4563 alpha_adjust_cost (insn
, link
, dep_insn
, cost
)
4570 enum attr_type insn_type
, dep_insn_type
;
4572 /* If the dependence is an anti-dependence, there is no cost. For an
4573 output dependence, there is sometimes a cost, but it doesn't seem
4574 worth handling those few cases. */
4576 if (REG_NOTE_KIND (link
) != 0)
4579 /* If we can't recognize the insns, we can't really do anything. */
4580 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
4583 insn_type
= get_attr_type (insn
);
4584 dep_insn_type
= get_attr_type (dep_insn
);
4586 /* Bring in the user-defined memory latency. */
4587 if (dep_insn_type
== TYPE_ILD
4588 || dep_insn_type
== TYPE_FLD
4589 || dep_insn_type
== TYPE_LDSYM
)
4590 cost
+= alpha_memory_latency
-1;
4595 /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
4596 being stored, we can sometimes lower the cost. */
4598 if ((insn_type
== TYPE_IST
|| insn_type
== TYPE_FST
)
4599 && (set
= single_set (dep_insn
)) != 0
4600 && GET_CODE (PATTERN (insn
)) == SET
4601 && rtx_equal_p (SET_DEST (set
), SET_SRC (PATTERN (insn
))))
4603 switch (dep_insn_type
)
4607 /* No savings here. */
4611 /* In these cases, we save one cycle. */
4615 /* In all other cases, we save two cycles. */
4616 return MAX (0, cost
- 2);
4620 /* Another case that needs adjustment is an arithmetic or logical
4621 operation. It's cost is usually one cycle, but we default it to
4622 two in the MD file. The only case that it is actually two is
4623 for the address in loads, stores, and jumps. */
4625 if (dep_insn_type
== TYPE_IADD
|| dep_insn_type
== TYPE_ILOG
)
4640 /* The final case is when a compare feeds into an integer branch;
4641 the cost is only one cycle in that case. */
4643 if (dep_insn_type
== TYPE_ICMP
&& insn_type
== TYPE_IBR
)
4648 /* And the lord DEC saith: "A special bypass provides an effective
4649 latency of 0 cycles for an ICMP or ILOG insn producing the test
4650 operand of an IBR or ICMOV insn." */
4652 if ((dep_insn_type
== TYPE_ICMP
|| dep_insn_type
== TYPE_ILOG
)
4653 && (set
= single_set (dep_insn
)) != 0)
4655 /* A branch only has one input. This must be it. */
4656 if (insn_type
== TYPE_IBR
)
4658 /* A conditional move has three, make sure it is the test. */
4659 if (insn_type
== TYPE_ICMOV
4660 && GET_CODE (set_src
= PATTERN (insn
)) == SET
4661 && GET_CODE (set_src
= SET_SRC (set_src
)) == IF_THEN_ELSE
4662 && rtx_equal_p (SET_DEST (set
), XEXP (set_src
, 0)))
4666 /* "The multiplier is unable to receive data from IEU bypass paths.
4667 The instruction issues at the expected time, but its latency is
4668 increased by the time it takes for the input data to become
4669 available to the multiplier" -- which happens in pipeline stage
4670 six, when results are comitted to the register file. */
4672 if (insn_type
== TYPE_IMUL
)
4674 switch (dep_insn_type
)
4676 /* These insns produce their results in pipeline stage five. */
4683 /* Other integer insns produce results in pipeline stage four. */
4691 /* There is additional latency to move the result of (most) FP
4692 operations anywhere but the FP register file. */
4694 if ((insn_type
== TYPE_FST
|| insn_type
== TYPE_FTOI
)
4695 && (dep_insn_type
== TYPE_FADD
||
4696 dep_insn_type
== TYPE_FMUL
||
4697 dep_insn_type
== TYPE_FCMOV
))
4703 /* Otherwise, return the default cost. */
4707 /* Function to initialize the issue rate used by the scheduler. */
4711 return (alpha_cpu
== PROCESSOR_EV4
? 2 : 4);
4715 alpha_variable_issue (dump
, verbose
, insn
, cim
)
4716 FILE *dump ATTRIBUTE_UNUSED
;
4717 int verbose ATTRIBUTE_UNUSED
;
4721 if (recog_memoized (insn
) < 0 || get_attr_type (insn
) == TYPE_MULTI
)
4728 /* Register global variables and machine-specific functions with the
4729 garbage collector. */
4731 #if TARGET_ABI_UNICOSMK
4733 alpha_init_machine_status (p
)
4737 (struct machine_function
*) xcalloc (1, sizeof (struct machine_function
));
4739 p
->machine
->first_ciw
= NULL_RTX
;
4740 p
->machine
->last_ciw
= NULL_RTX
;
4741 p
->machine
->ciw_count
= 0;
4742 p
->machine
->addr_list
= NULL_RTX
;
4746 alpha_mark_machine_status (p
)
4749 struct machine_function
*machine
= p
->machine
;
4753 ggc_mark_rtx (machine
->first_ciw
);
4754 ggc_mark_rtx (machine
->addr_list
);
4759 alpha_free_machine_status (p
)
4765 #endif /* TARGET_ABI_UNICOSMK */
4767 /* Functions to save and restore alpha_return_addr_rtx. */
4769 /* Start the ball rolling with RETURN_ADDR_RTX. */
4772 alpha_return_addr (count
, frame
)
4774 rtx frame ATTRIBUTE_UNUSED
;
4779 return get_hard_reg_initial_val (Pmode
, REG_RA
);
4782 /* Return or create a pseudo containing the gp value for the current
4783 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4786 alpha_gp_save_rtx ()
4788 return get_hard_reg_initial_val (DImode
, 29);
4792 alpha_ra_ever_killed ()
4796 #ifdef ASM_OUTPUT_MI_THUNK
4797 if (current_function_is_thunk
)
4800 if (!has_hard_reg_initial_val (Pmode
, REG_RA
))
4801 return regs_ever_live
[REG_RA
];
4803 push_topmost_sequence ();
4805 pop_topmost_sequence ();
4807 return reg_set_between_p (gen_rtx_REG (Pmode
, REG_RA
), top
, NULL_RTX
);
4811 /* Return the trap mode suffix applicable to the current
4812 instruction, or NULL. */
4815 get_trap_mode_suffix ()
4817 enum attr_trap_suffix s
= get_attr_trap_suffix (current_output_insn
);
4821 case TRAP_SUFFIX_NONE
:
4824 case TRAP_SUFFIX_SU
:
4825 if (alpha_fptm
>= ALPHA_FPTM_SU
)
4829 case TRAP_SUFFIX_SUI
:
4830 if (alpha_fptm
>= ALPHA_FPTM_SUI
)
4834 case TRAP_SUFFIX_V_SV
:
4842 case ALPHA_FPTM_SUI
:
4847 case TRAP_SUFFIX_V_SV_SVI
:
4856 case ALPHA_FPTM_SUI
:
4861 case TRAP_SUFFIX_U_SU_SUI
:
4870 case ALPHA_FPTM_SUI
:
4878 /* Return the rounding mode suffix applicable to the current
4879 instruction, or NULL. */
4882 get_round_mode_suffix ()
4884 enum attr_round_suffix s
= get_attr_round_suffix (current_output_insn
);
4888 case ROUND_SUFFIX_NONE
:
4890 case ROUND_SUFFIX_NORMAL
:
4893 case ALPHA_FPRM_NORM
:
4895 case ALPHA_FPRM_MINF
:
4897 case ALPHA_FPRM_CHOP
:
4899 case ALPHA_FPRM_DYN
:
4904 case ROUND_SUFFIX_C
:
4910 /* Print an operand. Recognize special options, documented below. */
4913 print_operand (file
, x
, code
)
4923 /* Print the assembler name of the current function. */
4924 assemble_name (file
, alpha_fnname
);
4929 const char *trap
= get_trap_mode_suffix ();
4930 const char *round
= get_round_mode_suffix ();
4933 fprintf (file
, (TARGET_AS_SLASH_BEFORE_SUFFIX
? "/%s%s" : "%s%s"),
4934 (trap
? trap
: ""), (round
? round
: ""));
4939 /* Generates single precision instruction suffix. */
4940 fputc ((TARGET_FLOAT_VAX
? 'f' : 's'), file
);
4944 /* Generates double precision instruction suffix. */
4945 fputc ((TARGET_FLOAT_VAX
? 'g' : 't'), file
);
4949 if (alpha_this_literal_sequence_number
== 0)
4950 alpha_this_literal_sequence_number
= alpha_next_sequence_number
++;
4951 fprintf (file
, "%d", alpha_this_literal_sequence_number
);
4955 if (alpha_this_gpdisp_sequence_number
== 0)
4956 alpha_this_gpdisp_sequence_number
= alpha_next_sequence_number
++;
4957 fprintf (file
, "%d", alpha_this_gpdisp_sequence_number
);
4961 if (GET_CODE (x
) == HIGH
)
4962 output_addr_const (file
, XEXP (x
, 0));
4964 output_operand_lossage ("invalid %%H value");
4968 /* If this operand is the constant zero, write it as "$31". */
4969 if (GET_CODE (x
) == REG
)
4970 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
4971 else if (x
== CONST0_RTX (GET_MODE (x
)))
4972 fprintf (file
, "$31");
4974 output_operand_lossage ("invalid %%r value");
4978 /* Similar, but for floating-point. */
4979 if (GET_CODE (x
) == REG
)
4980 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
4981 else if (x
== CONST0_RTX (GET_MODE (x
)))
4982 fprintf (file
, "$f31");
4984 output_operand_lossage ("invalid %%R value");
4988 /* Write the 1's complement of a constant. */
4989 if (GET_CODE (x
) != CONST_INT
)
4990 output_operand_lossage ("invalid %%N value");
4992 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
4996 /* Write 1 << C, for a constant C. */
4997 if (GET_CODE (x
) != CONST_INT
)
4998 output_operand_lossage ("invalid %%P value");
5000 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (HOST_WIDE_INT
) 1 << INTVAL (x
));
5004 /* Write the high-order 16 bits of a constant, sign-extended. */
5005 if (GET_CODE (x
) != CONST_INT
)
5006 output_operand_lossage ("invalid %%h value");
5008 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) >> 16);
5012 /* Write the low-order 16 bits of a constant, sign-extended. */
5013 if (GET_CODE (x
) != CONST_INT
)
5014 output_operand_lossage ("invalid %%L value");
5016 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5017 (INTVAL (x
) & 0xffff) - 2 * (INTVAL (x
) & 0x8000));
5021 /* Write mask for ZAP insn. */
5022 if (GET_CODE (x
) == CONST_DOUBLE
)
5024 HOST_WIDE_INT mask
= 0;
5025 HOST_WIDE_INT value
;
5027 value
= CONST_DOUBLE_LOW (x
);
5028 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
5033 value
= CONST_DOUBLE_HIGH (x
);
5034 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
5037 mask
|= (1 << (i
+ sizeof (int)));
5039 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, mask
& 0xff);
5042 else if (GET_CODE (x
) == CONST_INT
)
5044 HOST_WIDE_INT mask
= 0, value
= INTVAL (x
);
5046 for (i
= 0; i
< 8; i
++, value
>>= 8)
5050 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, mask
);
5053 output_operand_lossage ("invalid %%m value");
5057 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5058 if (GET_CODE (x
) != CONST_INT
5059 || (INTVAL (x
) != 8 && INTVAL (x
) != 16
5060 && INTVAL (x
) != 32 && INTVAL (x
) != 64))
5061 output_operand_lossage ("invalid %%M value");
5063 fprintf (file
, "%s",
5064 (INTVAL (x
) == 8 ? "b"
5065 : INTVAL (x
) == 16 ? "w"
5066 : INTVAL (x
) == 32 ? "l"
5071 /* Similar, except do it from the mask. */
5072 if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xff)
5073 fprintf (file
, "b");
5074 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xffff)
5075 fprintf (file
, "w");
5076 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xffffffff)
5077 fprintf (file
, "l");
5078 #if HOST_BITS_PER_WIDE_INT == 32
5079 else if (GET_CODE (x
) == CONST_DOUBLE
5080 && CONST_DOUBLE_HIGH (x
) == 0
5081 && CONST_DOUBLE_LOW (x
) == -1)
5082 fprintf (file
, "l");
5083 else if (GET_CODE (x
) == CONST_DOUBLE
5084 && CONST_DOUBLE_HIGH (x
) == -1
5085 && CONST_DOUBLE_LOW (x
) == -1)
5086 fprintf (file
, "q");
5088 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == -1)
5089 fprintf (file
, "q");
5090 else if (GET_CODE (x
) == CONST_DOUBLE
5091 && CONST_DOUBLE_HIGH (x
) == 0
5092 && CONST_DOUBLE_LOW (x
) == -1)
5093 fprintf (file
, "q");
5096 output_operand_lossage ("invalid %%U value");
5100 /* Write the constant value divided by 8 for little-endian mode or
5101 (56 - value) / 8 for big-endian mode. */
5103 if (GET_CODE (x
) != CONST_INT
5104 || (unsigned HOST_WIDE_INT
) INTVAL (x
) >= (WORDS_BIG_ENDIAN
5107 || (INTVAL (x
) & 7) != 0)
5108 output_operand_lossage ("invalid %%s value");
5110 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5112 ? (56 - INTVAL (x
)) / 8
5117 /* Same, except compute (64 - c) / 8 */
5119 if (GET_CODE (x
) != CONST_INT
5120 && (unsigned HOST_WIDE_INT
) INTVAL (x
) >= 64
5121 && (INTVAL (x
) & 7) != 8)
5122 output_operand_lossage ("invalid %%s value");
5124 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (64 - INTVAL (x
)) / 8);
5129 /* On Unicos/Mk systems: use a DEX expression if the symbol
5130 clashes with a register name. */
5131 int dex
= unicosmk_need_dex (x
);
5133 fprintf (file
, "DEX(%d)", dex
);
5135 output_addr_const (file
, x
);
5139 case 'C': case 'D': case 'c': case 'd':
5140 /* Write out comparison name. */
5142 enum rtx_code c
= GET_CODE (x
);
5144 if (GET_RTX_CLASS (c
) != '<')
5145 output_operand_lossage ("invalid %%C value");
5147 else if (code
== 'D')
5148 c
= reverse_condition (c
);
5149 else if (code
== 'c')
5150 c
= swap_condition (c
);
5151 else if (code
== 'd')
5152 c
= swap_condition (reverse_condition (c
));
5155 fprintf (file
, "ule");
5157 fprintf (file
, "ult");
5158 else if (c
== UNORDERED
)
5159 fprintf (file
, "un");
5161 fprintf (file
, "%s", GET_RTX_NAME (c
));
5166 /* Write the divide or modulus operator. */
5167 switch (GET_CODE (x
))
5170 fprintf (file
, "div%s", GET_MODE (x
) == SImode
? "l" : "q");
5173 fprintf (file
, "div%su", GET_MODE (x
) == SImode
? "l" : "q");
5176 fprintf (file
, "rem%s", GET_MODE (x
) == SImode
? "l" : "q");
5179 fprintf (file
, "rem%su", GET_MODE (x
) == SImode
? "l" : "q");
5182 output_operand_lossage ("invalid %%E value");
5188 /* Write "_u" for unaligned access. */
5189 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == AND
)
5190 fprintf (file
, "_u");
5194 if (GET_CODE (x
) == REG
)
5195 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
5196 else if (GET_CODE (x
) == MEM
)
5197 output_address (XEXP (x
, 0));
5199 output_addr_const (file
, x
);
5203 output_operand_lossage ("invalid %%xn code");
5208 print_operand_address (file
, addr
)
5213 HOST_WIDE_INT offset
= 0;
5215 if (GET_CODE (addr
) == AND
)
5216 addr
= XEXP (addr
, 0);
5218 if (GET_CODE (addr
) == PLUS
5219 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
5221 offset
= INTVAL (XEXP (addr
, 1));
5222 addr
= XEXP (addr
, 0);
5225 if (GET_CODE (addr
) == LO_SUM
)
5227 output_addr_const (file
, XEXP (addr
, 1));
5231 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
5234 addr
= XEXP (addr
, 0);
5235 if (GET_CODE (addr
) == REG
)
5236 basereg
= REGNO (addr
);
5237 else if (GET_CODE (addr
) == SUBREG
5238 && GET_CODE (SUBREG_REG (addr
)) == REG
)
5239 basereg
= subreg_regno (addr
);
5243 fprintf (file
, "($%d)\t\t!%s", basereg
,
5244 (basereg
== 29 ? "gprel" : "gprellow"));
5248 if (GET_CODE (addr
) == REG
)
5249 basereg
= REGNO (addr
);
5250 else if (GET_CODE (addr
) == SUBREG
5251 && GET_CODE (SUBREG_REG (addr
)) == REG
)
5252 basereg
= subreg_regno (addr
);
5253 else if (GET_CODE (addr
) == CONST_INT
)
5254 offset
= INTVAL (addr
);
5258 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
5259 fprintf (file
, "($%d)", basereg
);
5262 /* Emit RTL insns to initialize the variable parts of a trampoline at
5263 TRAMP. FNADDR is an RTX for the address of the function's pure
5264 code. CXT is an RTX for the static chain value for the function.
5266 The three offset parameters are for the individual template's
5267 layout. A JMPOFS < 0 indicates that the trampoline does not
5268 contain instructions at all.
5270 We assume here that a function will be called many more times than
5271 its address is taken (e.g., it might be passed to qsort), so we
5272 take the trouble to initialize the "hint" field in the JMP insn.
5273 Note that the hint field is PC (new) + 4 * bits 13:0. */
5276 alpha_initialize_trampoline (tramp
, fnaddr
, cxt
, fnofs
, cxtofs
, jmpofs
)
5277 rtx tramp
, fnaddr
, cxt
;
5278 int fnofs
, cxtofs
, jmpofs
;
5280 rtx temp
, temp1
, addr
;
5281 /* VMS really uses DImode pointers in memory at this point. */
5282 enum machine_mode mode
= TARGET_ABI_OPEN_VMS
? Pmode
: ptr_mode
;
5284 #ifdef POINTERS_EXTEND_UNSIGNED
5285 fnaddr
= convert_memory_address (mode
, fnaddr
);
5286 cxt
= convert_memory_address (mode
, cxt
);
5289 /* Store function address and CXT. */
5290 addr
= memory_address (mode
, plus_constant (tramp
, fnofs
));
5291 emit_move_insn (gen_rtx_MEM (mode
, addr
), fnaddr
);
5292 addr
= memory_address (mode
, plus_constant (tramp
, cxtofs
));
5293 emit_move_insn (gen_rtx_MEM (mode
, addr
), cxt
);
5295 /* This has been disabled since the hint only has a 32k range, and in
5296 no existing OS is the stack within 32k of the text segment. */
5297 if (0 && jmpofs
>= 0)
5299 /* Compute hint value. */
5300 temp
= force_operand (plus_constant (tramp
, jmpofs
+4), NULL_RTX
);
5301 temp
= expand_binop (DImode
, sub_optab
, fnaddr
, temp
, temp
, 1,
5303 temp
= expand_shift (RSHIFT_EXPR
, Pmode
, temp
,
5304 build_int_2 (2, 0), NULL_RTX
, 1);
5305 temp
= expand_and (gen_lowpart (SImode
, temp
), GEN_INT (0x3fff), 0);
5307 /* Merge in the hint. */
5308 addr
= memory_address (SImode
, plus_constant (tramp
, jmpofs
));
5309 temp1
= force_reg (SImode
, gen_rtx_MEM (SImode
, addr
));
5310 temp1
= expand_and (temp1
, GEN_INT (0xffffc000), NULL_RTX
);
5311 temp1
= expand_binop (SImode
, ior_optab
, temp1
, temp
, temp1
, 1,
5313 emit_move_insn (gen_rtx_MEM (SImode
, addr
), temp1
);
5316 #ifdef TRANSFER_FROM_TRAMPOLINE
5317 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__enable_execute_stack"),
5318 0, VOIDmode
, 1, addr
, Pmode
);
5322 emit_insn (gen_imb ());
5325 /* Determine where to put an argument to a function.
5326 Value is zero to push the argument on the stack,
5327 or a hard register in which to store the argument.
5329 MODE is the argument's machine mode.
5330 TYPE is the data type of the argument (as a tree).
5331 This is null for libcalls where that information may
5333 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5334 the preceding args and about the function being called.
5335 NAMED is nonzero if this argument is a named parameter
5336 (otherwise it is an extra parameter matching an ellipsis).
5338 On Alpha the first 6 words of args are normally in registers
5339 and the rest are pushed. */
5342 function_arg (cum
, mode
, type
, named
)
5343 CUMULATIVE_ARGS cum
;
5344 enum machine_mode mode
;
5346 int named ATTRIBUTE_UNUSED
;
5351 /* Set up defaults for FP operands passed in FP registers, and
5352 integral operands passed in integer registers. */
5354 && (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
5355 || GET_MODE_CLASS (mode
) == MODE_FLOAT
))
5360 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5361 the three platforms, so we can't avoid conditional compilation. */
5362 #if TARGET_ABI_OPEN_VMS
5364 if (mode
== VOIDmode
)
5365 return alpha_arg_info_reg_val (cum
);
5367 num_args
= cum
.num_args
;
5368 if (num_args
>= 6 || MUST_PASS_IN_STACK (mode
, type
))
5372 #if TARGET_ABI_UNICOSMK
5376 /* If this is the last argument, generate the call info word (CIW). */
5377 /* ??? We don't include the caller's line number in the CIW because
5378 I don't know how to determine it if debug infos are turned off. */
5379 if (mode
== VOIDmode
)
5388 for (i
= 0; i
< cum
.num_reg_words
&& i
< 5; i
++)
5389 if (cum
.reg_args_type
[i
])
5390 lo
|= (1 << (7 - i
));
5392 if (cum
.num_reg_words
== 6 && cum
.reg_args_type
[5])
5395 lo
|= cum
.num_reg_words
;
5397 #if HOST_BITS_PER_WIDE_INT == 32
5398 hi
= (cum
.num_args
<< 20) | cum
.num_arg_words
;
5400 lo
= lo
| ((HOST_WIDE_INT
) cum
.num_args
<< 52)
5401 | ((HOST_WIDE_INT
) cum
.num_arg_words
<< 32);
5404 ciw
= immed_double_const (lo
, hi
, DImode
);
5406 return gen_rtx_UNSPEC (DImode
, gen_rtvec (1, ciw
),
5407 UNSPEC_UMK_LOAD_CIW
);
5410 size
= ALPHA_ARG_SIZE (mode
, type
, named
);
5411 num_args
= cum
.num_reg_words
;
5412 if (MUST_PASS_IN_STACK (mode
, type
)
5413 || cum
.num_reg_words
+ size
> 6 || cum
.force_stack
)
5415 else if (type
&& TYPE_MODE (type
) == BLKmode
)
5419 reg1
= gen_rtx_REG (DImode
, num_args
+ 16);
5420 reg1
= gen_rtx_EXPR_LIST (DImode
, reg1
, const0_rtx
);
5422 /* The argument fits in two registers. Note that we still need to
5423 reserve a register for empty structures. */
5427 return gen_rtx_PARALLEL (mode
, gen_rtvec (1, reg1
));
5430 reg2
= gen_rtx_REG (DImode
, num_args
+ 17);
5431 reg2
= gen_rtx_EXPR_LIST (DImode
, reg2
, GEN_INT (8));
5432 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, reg1
, reg2
));
5442 /* VOID is passed as a special flag for "last argument". */
5443 if (type
== void_type_node
)
5445 else if (MUST_PASS_IN_STACK (mode
, type
))
5447 else if (FUNCTION_ARG_PASS_BY_REFERENCE (cum
, mode
, type
, named
))
5450 #endif /* TARGET_ABI_UNICOSMK */
5451 #endif /* TARGET_ABI_OPEN_VMS */
5453 return gen_rtx_REG (mode
, num_args
+ basereg
);
5457 alpha_build_va_list ()
5459 tree base
, ofs
, record
, type_decl
;
5461 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
5462 return ptr_type_node
;
5464 record
= make_lang_type (RECORD_TYPE
);
5465 type_decl
= build_decl (TYPE_DECL
, get_identifier ("__va_list_tag"), record
);
5466 TREE_CHAIN (record
) = type_decl
;
5467 TYPE_NAME (record
) = type_decl
;
5469 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5471 ofs
= build_decl (FIELD_DECL
, get_identifier ("__offset"),
5473 DECL_FIELD_CONTEXT (ofs
) = record
;
5475 base
= build_decl (FIELD_DECL
, get_identifier ("__base"),
5477 DECL_FIELD_CONTEXT (base
) = record
;
5478 TREE_CHAIN (base
) = ofs
;
5480 TYPE_FIELDS (record
) = base
;
5481 layout_type (record
);
5487 alpha_va_start (stdarg_p
, valist
, nextarg
)
5490 rtx nextarg ATTRIBUTE_UNUSED
;
5492 HOST_WIDE_INT offset
;
5493 tree t
, offset_field
, base_field
;
5495 if (TREE_CODE (TREE_TYPE (valist
)) == ERROR_MARK
)
5498 if (TARGET_ABI_UNICOSMK
)
5499 std_expand_builtin_va_start (stdarg_p
, valist
, nextarg
);
5501 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base
5502 up by 48, storing fp arg registers in the first 48 bytes, and the
5503 integer arg registers in the next 48 bytes. This is only done,
5504 however, if any integer registers need to be stored.
5506 If no integer registers need be stored, then we must subtract 48
5507 in order to account for the integer arg registers which are counted
5508 in argsize above, but which are not actually stored on the stack. */
5510 if (NUM_ARGS
<= 5 + stdarg_p
)
5511 offset
= TARGET_ABI_OPEN_VMS
? UNITS_PER_WORD
: 6 * UNITS_PER_WORD
;
5513 offset
= -6 * UNITS_PER_WORD
;
5515 if (TARGET_ABI_OPEN_VMS
)
5517 nextarg
= plus_constant (nextarg
, offset
);
5518 nextarg
= plus_constant (nextarg
, NUM_ARGS
* UNITS_PER_WORD
);
5519 t
= build (MODIFY_EXPR
, TREE_TYPE (valist
), valist
,
5520 make_tree (ptr_type_node
, nextarg
));
5521 TREE_SIDE_EFFECTS (t
) = 1;
5523 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5527 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
5528 offset_field
= TREE_CHAIN (base_field
);
5530 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
5531 valist
, base_field
);
5532 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
5533 valist
, offset_field
);
5535 t
= make_tree (ptr_type_node
, virtual_incoming_args_rtx
);
5536 t
= build (PLUS_EXPR
, ptr_type_node
, t
, build_int_2 (offset
, 0));
5537 t
= build (MODIFY_EXPR
, TREE_TYPE (base_field
), base_field
, t
);
5538 TREE_SIDE_EFFECTS (t
) = 1;
5539 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5541 t
= build_int_2 (NUM_ARGS
* UNITS_PER_WORD
, 0);
5542 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
, t
);
5543 TREE_SIDE_EFFECTS (t
) = 1;
5544 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5549 alpha_va_arg (valist
, type
)
5552 HOST_WIDE_INT tsize
;
5555 tree offset_field
, base_field
, addr_tree
, addend
;
5556 tree wide_type
, wide_ofs
;
5559 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
5560 return std_expand_builtin_va_arg (valist
, type
);
5562 tsize
= ((TREE_INT_CST_LOW (TYPE_SIZE (type
)) / BITS_PER_UNIT
+ 7) / 8) * 8;
5564 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
5565 offset_field
= TREE_CHAIN (base_field
);
5567 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
5568 valist
, base_field
);
5569 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
5570 valist
, offset_field
);
5572 wide_type
= make_signed_type (64);
5573 wide_ofs
= save_expr (build1 (CONVERT_EXPR
, wide_type
, offset_field
));
5577 if (TYPE_MODE (type
) == TFmode
|| TYPE_MODE (type
) == TCmode
)
5580 tsize
= UNITS_PER_WORD
;
5582 else if (FLOAT_TYPE_P (type
))
5584 tree fpaddend
, cond
;
5586 fpaddend
= fold (build (PLUS_EXPR
, TREE_TYPE (addend
),
5587 addend
, build_int_2 (-6*8, 0)));
5589 cond
= fold (build (LT_EXPR
, integer_type_node
,
5590 wide_ofs
, build_int_2 (6*8, 0)));
5592 addend
= fold (build (COND_EXPR
, TREE_TYPE (addend
), cond
,
5596 addr_tree
= build (PLUS_EXPR
, TREE_TYPE (base_field
),
5597 base_field
, addend
);
5599 addr
= expand_expr (addr_tree
, NULL_RTX
, Pmode
, EXPAND_NORMAL
);
5600 addr
= copy_to_reg (addr
);
5602 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
,
5603 build (PLUS_EXPR
, TREE_TYPE (offset_field
),
5604 offset_field
, build_int_2 (tsize
, 0)));
5605 TREE_SIDE_EFFECTS (t
) = 1;
5606 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5610 addr
= force_reg (Pmode
, addr
);
5611 addr
= gen_rtx_MEM (Pmode
, addr
);
/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptior to generate.  */

/* Nonzero if we need a stack procedure.  */
static int alpha_is_stack_procedure;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;
5641 /* Compute register masks for saved registers. */
5644 alpha_sa_mask (imaskP
, fmaskP
)
5645 unsigned long *imaskP
;
5646 unsigned long *fmaskP
;
5648 unsigned long imask
= 0;
5649 unsigned long fmask
= 0;
5652 #ifdef ASM_OUTPUT_MI_THUNK
5653 if (!current_function_is_thunk
)
5656 if (TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
5657 imask
|= (1L << HARD_FRAME_POINTER_REGNUM
);
5659 /* One for every register we have to save. */
5660 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
5661 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5662 && regs_ever_live
[i
] && i
!= REG_RA
5663 && (!TARGET_ABI_UNICOSMK
|| i
!= HARD_FRAME_POINTER_REGNUM
))
5668 fmask
|= (1L << (i
- 32));
5671 /* We need to restore these for the handler. */
5672 if (current_function_calls_eh_return
)
5676 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
5677 if (regno
== INVALID_REGNUM
)
5679 imask
|= 1L << regno
;
5683 if (!TARGET_ABI_UNICOSMK
)
5685 /* If any register spilled, then spill the return address also. */
5686 /* ??? This is required by the Digital stack unwind specification
5687 and isn't needed if we're doing Dwarf2 unwinding. */
5688 if (imask
|| fmask
|| alpha_ra_ever_killed ())
5689 imask
|= (1L << REG_RA
);
5703 #ifdef ASM_OUTPUT_MI_THUNK
5704 if (current_function_is_thunk
)
5709 if (TARGET_ABI_UNICOSMK
)
5711 for (i
= 9; i
< 15 && sa_size
== 0; i
++)
5712 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5713 && regs_ever_live
[i
])
5715 for (i
= 32 + 2; i
< 32 + 10 && sa_size
== 0; i
++)
5716 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5717 && regs_ever_live
[i
])
5722 /* One for every register we have to save. */
5723 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
5724 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5725 && regs_ever_live
[i
] && i
!= REG_RA
)
5730 if (TARGET_ABI_UNICOSMK
)
5732 /* We might not need to generate a frame if we don't make any calls
5733 (including calls to __T3E_MISMATCH if this is a vararg function),
5734 don't have any local variables which require stack slots, don't
5735 use alloca and have not determined that we need a frame for other
5738 alpha_is_stack_procedure
= sa_size
!= 0
5739 || alpha_ra_ever_killed ()
5740 || get_frame_size() != 0
5741 || current_function_outgoing_args_size
5742 || current_function_varargs
5743 || current_function_stdarg
5744 || current_function_calls_alloca
5745 || frame_pointer_needed
;
5747 /* Always reserve space for saving callee-saved registers if we
5748 need a frame as required by the calling convention. */
5749 if (alpha_is_stack_procedure
)
5752 else if (TARGET_ABI_OPEN_VMS
)
5754 /* Start by assuming we can use a register procedure if we don't
5755 make any calls (REG_RA not used) or need to save any
5756 registers and a stack procedure if we do. */
5757 alpha_is_stack_procedure
= sa_size
!= 0 || alpha_ra_ever_killed ();
5759 /* Decide whether to refer to objects off our PV via FP or PV.
5760 If we need FP for something else or if we receive a nonlocal
5761 goto (which expects PV to contain the value), we must use PV.
5762 Otherwise, start by assuming we can use FP. */
5763 vms_base_regno
= (frame_pointer_needed
5764 || current_function_has_nonlocal_label
5765 || alpha_is_stack_procedure
5766 || current_function_outgoing_args_size
5767 ? REG_PV
: HARD_FRAME_POINTER_REGNUM
);
5769 /* If we want to copy PV into FP, we need to find some register
5770 in which to save FP. */
5772 vms_save_fp_regno
= -1;
5773 if (vms_base_regno
== HARD_FRAME_POINTER_REGNUM
)
5774 for (i
= 0; i
< 32; i
++)
5775 if (! fixed_regs
[i
] && call_used_regs
[i
] && ! regs_ever_live
[i
])
5776 vms_save_fp_regno
= i
;
5778 if (vms_save_fp_regno
== -1)
5779 vms_base_regno
= REG_PV
, alpha_is_stack_procedure
= 1;
5781 /* Stack unwinding should be done via FP unless we use it for PV. */
5782 vms_unwind_regno
= (vms_base_regno
== REG_PV
5783 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
);
5785 /* If this is a stack procedure, allow space for saving FP and RA. */
5786 if (alpha_is_stack_procedure
)
5791 /* If some registers were saved but not RA, RA must also be saved,
5792 so leave space for it. */
5793 if (!TARGET_ABI_UNICOSMK
&& (sa_size
!= 0 || alpha_ra_ever_killed ()))
5796 /* Our size must be even (multiple of 16 bytes). */
5805 alpha_pv_save_size ()
5808 return alpha_is_stack_procedure
? 8 : 0;
5815 return vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
;
#if TARGET_ABI_OPEN_VMS

/* Machine attributes recognized on VMS.  All are decl attributes with no
   arguments and no handler.  */
const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "overlaid",   0, 0, true,  false, false, NULL },
  { "global",     0, 0, true,  false, false, NULL },
  { "initialize", 0, 0, true,  false, false, NULL },
  { NULL,         0, 0, false, false, false, NULL }
};

#endif
5832 find_lo_sum (px
, data
)
5834 void *data ATTRIBUTE_UNUSED
;
5836 return GET_CODE (*px
) == LO_SUM
;
5840 alpha_does_function_need_gp ()
5844 /* The GP being variable is an OSF abi thing. */
5845 if (! TARGET_ABI_OSF
)
5848 if (TARGET_PROFILING_NEEDS_GP
&& profile_flag
)
5851 #ifdef ASM_OUTPUT_MI_THUNK
5852 if (current_function_is_thunk
)
5856 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
5857 Even if we are a static function, we still need to do this in case
5858 our address is taken and passed to something like qsort. */
5860 push_topmost_sequence ();
5861 insn
= get_insns ();
5862 pop_topmost_sequence ();
5864 for (; insn
; insn
= NEXT_INSN (insn
))
5866 && GET_CODE (PATTERN (insn
)) != USE
5867 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
5869 enum attr_type type
= get_attr_type (insn
);
5870 if (type
== TYPE_LDSYM
|| type
== TYPE_JSR
)
5872 if (TARGET_EXPLICIT_RELOCS
5873 && for_each_rtx (&PATTERN (insn
), find_lo_sum
, NULL
) > 0)
5880 /* Write a version stamp. Don't write anything if we are running as a
5881 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
5888 alpha_write_verstamp (file
)
5889 FILE *file ATTRIBUTE_UNUSED
;
5892 fprintf (file
, "\t.verstamp %d %d\n", MS_STAMP
, LS_STAMP
);
5896 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
5900 set_frame_related_p ()
5902 rtx seq
= gen_sequence ();
5905 if (GET_CODE (seq
) == SEQUENCE
)
5907 int i
= XVECLEN (seq
, 0);
5909 RTX_FRAME_RELATED_P (XVECEXP (seq
, 0, i
)) = 1;
5910 return emit_insn (seq
);
5914 seq
= emit_insn (seq
);
5915 RTX_FRAME_RELATED_P (seq
) = 1;
5920 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
5922 /* Write function prologue. */
5924 /* On vms we have two kinds of functions:
5926 - stack frame (PROC_STACK)
5927 these are 'normal' functions with local vars and which are
5928 calling other functions
5929 - register frame (PROC_REGISTER)
5930 keeps all data in registers, needs no stack
5932 We must pass this to the assembler so it can generate the
5933 proper pdsc (procedure descriptor)
5934 This is done with the '.pdesc' command.
5936 On not-vms, we don't really differentiate between the two, as we can
5937 simply allocate stack without saving registers. */
5940 alpha_expand_prologue ()
5942 /* Registers to save. */
5943 unsigned long imask
= 0;
5944 unsigned long fmask
= 0;
5945 /* Stack space needed for pushing registers clobbered by us. */
5946 HOST_WIDE_INT sa_size
;
5947 /* Complete stack size needed. */
5948 HOST_WIDE_INT frame_size
;
5949 /* Offset from base reg to register save area. */
5950 HOST_WIDE_INT reg_offset
;
5954 sa_size
= alpha_sa_size ();
5956 frame_size
= get_frame_size ();
5957 if (TARGET_ABI_OPEN_VMS
)
5958 frame_size
= ALPHA_ROUND (sa_size
5959 + (alpha_is_stack_procedure
? 8 : 0)
5961 + current_function_pretend_args_size
);
5962 else if (TARGET_ABI_UNICOSMK
)
5963 /* We have to allocate space for the DSIB if we generate a frame. */
5964 frame_size
= ALPHA_ROUND (sa_size
5965 + (alpha_is_stack_procedure
? 48 : 0))
5966 + ALPHA_ROUND (frame_size
5967 + current_function_outgoing_args_size
);
5969 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
5971 + ALPHA_ROUND (frame_size
5972 + current_function_pretend_args_size
));
5974 if (TARGET_ABI_OPEN_VMS
)
5977 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
5979 alpha_sa_mask (&imask
, &fmask
);
5981 /* Emit an insn to reload GP, if needed. */
5984 alpha_function_needs_gp
= alpha_does_function_need_gp ();
5985 if (alpha_function_needs_gp
)
5986 emit_insn (gen_prologue_ldgp ());
5989 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
5990 the call to mcount ourselves, rather than having the linker do it
5991 magically in response to -pg. Since _mcount has special linkage,
5992 don't represent the call as a call. */
5993 if (TARGET_PROFILING_NEEDS_GP
&& profile_flag
)
5994 emit_insn (gen_prologue_mcount ());
5996 if (TARGET_ABI_UNICOSMK
)
5997 unicosmk_gen_dsib (&imask
);
5999 /* Adjust the stack by the frame size. If the frame size is > 4096
6000 bytes, we need to be sure we probe somewhere in the first and last
6001 4096 bytes (we can probably get away without the latter test) and
6002 every 8192 bytes in between. If the frame size is > 32768, we
6003 do this in a loop. Otherwise, we generate the explicit probe
6006 Note that we are only allowed to adjust sp once in the prologue. */
6008 if (frame_size
<= 32768)
6010 if (frame_size
> 4096)
6015 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
6018 while ((probed
+= 8192) < frame_size
);
6020 /* We only have to do this probe if we aren't saving registers. */
6021 if (sa_size
== 0 && probed
+ 4096 < frame_size
)
6022 emit_insn (gen_probe_stack (GEN_INT (-frame_size
)));
6025 if (frame_size
!= 0)
6026 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
6027 GEN_INT (TARGET_ABI_UNICOSMK
6033 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
6034 number of 8192 byte blocks to probe. We then probe each block
6035 in the loop and then set SP to the proper location. If the
6036 amount remaining is > 4096, we have to do one more probe if we
6037 are not saving any registers. */
6039 HOST_WIDE_INT blocks
= (frame_size
+ 4096) / 8192;
6040 HOST_WIDE_INT leftover
= frame_size
+ 4096 - blocks
* 8192;
6041 rtx ptr
= gen_rtx_REG (DImode
, 22);
6042 rtx count
= gen_rtx_REG (DImode
, 23);
6045 emit_move_insn (count
, GEN_INT (blocks
));
6046 emit_insn (gen_adddi3 (ptr
, stack_pointer_rtx
,
6047 GEN_INT (TARGET_ABI_UNICOSMK
? 4096 - 64 : 4096)));
6049 /* Because of the difficulty in emitting a new basic block this
6050 late in the compilation, generate the loop as a single insn. */
6051 emit_insn (gen_prologue_stack_probe_loop (count
, ptr
));
6053 if (leftover
> 4096 && sa_size
== 0)
6055 rtx last
= gen_rtx_MEM (DImode
, plus_constant (ptr
, -leftover
));
6056 MEM_VOLATILE_P (last
) = 1;
6057 emit_move_insn (last
, const0_rtx
);
6060 if (TARGET_ABI_WINDOWS_NT
)
6062 /* For NT stack unwind (done by 'reverse execution'), it's
6063 not OK to take the result of a loop, even though the value
6064 is already in ptr, so we reload it via a single operation
6065 and subtract it to sp.
6067 Yes, that's correct -- we have to reload the whole constant
6068 into a temporary via ldah+lda then subtract from sp. To
6069 ensure we get ldah+lda, we use a special pattern. */
6071 HOST_WIDE_INT lo
, hi
;
6072 lo
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
6073 hi
= frame_size
- lo
;
6075 emit_move_insn (ptr
, GEN_INT (hi
));
6076 emit_insn (gen_nt_lda (ptr
, GEN_INT (lo
)));
6077 seq
= emit_insn (gen_subdi3 (stack_pointer_rtx
, stack_pointer_rtx
,
6082 seq
= emit_insn (gen_adddi3 (stack_pointer_rtx
, ptr
,
6083 GEN_INT (-leftover
)));
6086 /* This alternative is special, because the DWARF code cannot
6087 possibly intuit through the loop above. So we invent this
6088 note it looks at instead. */
6089 RTX_FRAME_RELATED_P (seq
) = 1;
6091 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
6092 gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
6093 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
6094 GEN_INT (TARGET_ABI_UNICOSMK
6100 if (!TARGET_ABI_UNICOSMK
)
6102 /* Cope with very large offsets to the register save area. */
6103 sa_reg
= stack_pointer_rtx
;
6104 if (reg_offset
+ sa_size
> 0x8000)
6106 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
6109 if (low
+ sa_size
<= 0x8000)
6110 bias
= reg_offset
- low
, reg_offset
= low
;
6112 bias
= reg_offset
, reg_offset
= 0;
6114 sa_reg
= gen_rtx_REG (DImode
, 24);
6115 FRP (emit_insn (gen_adddi3 (sa_reg
, stack_pointer_rtx
,
6119 /* Save regs in stack order. Beginning with VMS PV. */
6120 if (TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
6122 mem
= gen_rtx_MEM (DImode
, stack_pointer_rtx
);
6123 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6124 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_PV
)));
6127 /* Save register RA next. */
6128 if (imask
& (1L << REG_RA
))
6130 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6131 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6132 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
6133 imask
&= ~(1L << REG_RA
);
6137 /* Now save any other registers required to be saved. */
6138 for (i
= 0; i
< 32; i
++)
6139 if (imask
& (1L << i
))
6141 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6142 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6143 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
6147 for (i
= 0; i
< 32; i
++)
6148 if (fmask
& (1L << i
))
6150 mem
= gen_rtx_MEM (DFmode
, plus_constant (sa_reg
, reg_offset
));
6151 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6152 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
6156 else if (TARGET_ABI_UNICOSMK
&& alpha_is_stack_procedure
)
6158 /* The standard frame on the T3E includes space for saving registers.
6159 We just have to use it. We don't have to save the return address and
6160 the old frame pointer here - they are saved in the DSIB. */
6163 for (i
= 9; i
< 15; i
++)
6164 if (imask
& (1L << i
))
6166 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
6168 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6169 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
6172 for (i
= 2; i
< 10; i
++)
6173 if (fmask
& (1L << i
))
6175 mem
= gen_rtx_MEM (DFmode
, plus_constant (hard_frame_pointer_rtx
,
6177 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6178 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
6183 if (TARGET_ABI_OPEN_VMS
)
6185 if (!alpha_is_stack_procedure
)
6186 /* Register frame procedures save the fp. */
6187 /* ??? Ought to have a dwarf2 save for this. */
6188 emit_move_insn (gen_rtx_REG (DImode
, vms_save_fp_regno
),
6189 hard_frame_pointer_rtx
);
6191 if (vms_base_regno
!= REG_PV
)
6192 emit_insn (gen_force_movdi (gen_rtx_REG (DImode
, vms_base_regno
),
6193 gen_rtx_REG (DImode
, REG_PV
)));
6195 if (vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
6196 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
6198 /* If we have to allocate space for outgoing args, do it now. */
6199 if (current_function_outgoing_args_size
!= 0)
6202 plus_constant (hard_frame_pointer_rtx
,
6204 (current_function_outgoing_args_size
)))));
6206 else if (!TARGET_ABI_UNICOSMK
)
6208 /* If we need a frame pointer, set it from the stack pointer. */
6209 if (frame_pointer_needed
)
6211 if (TARGET_CAN_FAULT_IN_PROLOGUE
)
6212 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
6214 /* This must always be the last instruction in the
6215 prologue, thus we emit a special move + clobber. */
6216 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx
,
6217 stack_pointer_rtx
, sa_reg
)));
6221 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
6222 the prologue, for exception handling reasons, we cannot do this for
6223 any insn that might fault. We could prevent this for mems with a
6224 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
6225 have to prevent all such scheduling with a blockage.
6227 Linux, on the other hand, never bothered to implement OSF/1's
6228 exception handling, and so doesn't care about such things. Anyone
6229 planning to use dwarf2 frame-unwind info can also omit the blockage. */
6231 if (! TARGET_CAN_FAULT_IN_PROLOGUE
)
6232 emit_insn (gen_blockage ());
6235 /* Output the textual info surrounding the prologue. */
6238 alpha_start_function (file
, fnname
, decl
)
6241 tree decl ATTRIBUTE_UNUSED
;
6243 unsigned long imask
= 0;
6244 unsigned long fmask
= 0;
6245 /* Stack space needed for pushing registers clobbered by us. */
6246 HOST_WIDE_INT sa_size
;
6247 /* Complete stack size needed. */
6248 HOST_WIDE_INT frame_size
;
6249 /* Offset from base reg to register save area. */
6250 HOST_WIDE_INT reg_offset
;
6251 char *entry_label
= (char *) alloca (strlen (fnname
) + 6);
6254 /* Don't emit an extern directive for functions defined in the same file. */
6255 if (TARGET_ABI_UNICOSMK
)
6258 name_tree
= get_identifier (fnname
);
6259 TREE_ASM_WRITTEN (name_tree
) = 1;
6262 alpha_fnname
= fnname
;
6263 sa_size
= alpha_sa_size ();
6265 frame_size
= get_frame_size ();
6266 if (TARGET_ABI_OPEN_VMS
)
6267 frame_size
= ALPHA_ROUND (sa_size
6268 + (alpha_is_stack_procedure
? 8 : 0)
6270 + current_function_pretend_args_size
);
6271 else if (TARGET_ABI_UNICOSMK
)
6272 frame_size
= ALPHA_ROUND (sa_size
6273 + (alpha_is_stack_procedure
? 48 : 0))
6274 + ALPHA_ROUND (frame_size
6275 + current_function_outgoing_args_size
);
6277 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
6279 + ALPHA_ROUND (frame_size
6280 + current_function_pretend_args_size
));
6282 if (TARGET_ABI_OPEN_VMS
)
6285 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
6287 alpha_sa_mask (&imask
, &fmask
);
6289 /* Ecoff can handle multiple .file directives, so put out file and lineno.
6290 We have to do that before the .ent directive as we cannot switch
6291 files within procedures with native ecoff because line numbers are
6292 linked to procedure descriptors.
6293 Outputting the lineno helps debugging of one line functions as they
6294 would otherwise get no line number at all. Please note that we would
6295 like to put out last_linenum from final.c, but it is not accessible. */
6297 if (write_symbols
== SDB_DEBUG
)
6299 #ifdef ASM_OUTPUT_SOURCE_FILENAME
6300 ASM_OUTPUT_SOURCE_FILENAME (file
,
6301 DECL_SOURCE_FILE (current_function_decl
));
6303 #ifdef ASM_OUTPUT_SOURCE_LINE
6304 if (debug_info_level
!= DINFO_LEVEL_TERSE
)
6305 ASM_OUTPUT_SOURCE_LINE (file
,
6306 DECL_SOURCE_LINE (current_function_decl
));
6310 /* Issue function start and label. */
6311 if (TARGET_ABI_OPEN_VMS
6312 || (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
))
6314 fputs ("\t.ent ", file
);
6315 assemble_name (file
, fnname
);
6318 /* If the function needs GP, we'll write the "..ng" label there.
6319 Otherwise, do it here. */
6320 if (TARGET_ABI_OSF
&& ! alpha_function_needs_gp
)
6323 assemble_name (file
, fnname
);
6324 fputs ("..ng:\n", file
);
6328 strcpy (entry_label
, fnname
);
6329 if (TARGET_ABI_OPEN_VMS
)
6330 strcat (entry_label
, "..en");
6332 /* For public functions, the label must be globalized by appending an
6333 additional colon. */
6334 if (TARGET_ABI_UNICOSMK
&& TREE_PUBLIC (decl
))
6335 strcat (entry_label
, ":");
6337 ASM_OUTPUT_LABEL (file
, entry_label
);
6338 inside_function
= TRUE
;
6340 if (TARGET_ABI_OPEN_VMS
)
6341 fprintf (file
, "\t.base $%d\n", vms_base_regno
);
6343 if (!TARGET_ABI_OPEN_VMS
&& !TARGET_ABI_UNICOSMK
&& TARGET_IEEE_CONFORMANT
6344 && !flag_inhibit_size_directive
)
6346 /* Set flags in procedure descriptor to request IEEE-conformant
6347 math-library routines. The value we set it to is PDSC_EXC_IEEE
6348 (/usr/include/pdsc.h). */
6349 fputs ("\t.eflag 48\n", file
);
6352 /* Set up offsets to alpha virtual arg/local debugging pointer. */
6353 alpha_auto_offset
= -frame_size
+ current_function_pretend_args_size
;
6354 alpha_arg_offset
= -frame_size
+ 48;
6356 /* Describe our frame. If the frame size is larger than an integer,
6357 print it as zero to avoid an assembler error. We won't be
6358 properly describing such a frame, but that's the best we can do. */
6359 if (TARGET_ABI_UNICOSMK
)
6361 else if (TARGET_ABI_OPEN_VMS
)
6363 fprintf (file
, "\t.frame $%d,", vms_unwind_regno
);
6364 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6365 frame_size
>= ((HOST_WIDE_INT
) 1 << 31) ? 0 : frame_size
);
6366 fputs (",$26,", file
);
6367 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, reg_offset
);
6370 else if (!flag_inhibit_size_directive
)
6372 fprintf (file
, "\t.frame $%d,",
6373 (frame_pointer_needed
6374 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
));
6375 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6376 frame_size
>= (1l << 31) ? 0 : frame_size
);
6377 fprintf (file
, ",$26,%d\n", current_function_pretend_args_size
);
6380 /* Describe which registers were spilled. */
6381 if (TARGET_ABI_UNICOSMK
)
6383 else if (TARGET_ABI_OPEN_VMS
)
6386 /* ??? Does VMS care if mask contains ra? The old code didn't
6387 set it, so I don't here. */
6388 fprintf (file
, "\t.mask 0x%lx,0\n", imask
& ~(1L << REG_RA
));
6390 fprintf (file
, "\t.fmask 0x%lx,0\n", fmask
);
6391 if (!alpha_is_stack_procedure
)
6392 fprintf (file
, "\t.fp_save $%d\n", vms_save_fp_regno
);
6394 else if (!flag_inhibit_size_directive
)
6398 fprintf (file
, "\t.mask 0x%lx,", imask
);
6399 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6400 frame_size
>= (1l << 31) ? 0 : reg_offset
- frame_size
);
6403 for (i
= 0; i
< 32; ++i
)
6404 if (imask
& (1L << i
))
6410 fprintf (file
, "\t.fmask 0x%lx,", fmask
);
6411 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6412 frame_size
>= (1l << 31) ? 0 : reg_offset
- frame_size
);
6417 #if TARGET_ABI_OPEN_VMS
6418 /* Ifdef'ed cause readonly_section and link_section are only
6420 readonly_section ();
6421 fprintf (file
, "\t.align 3\n");
6422 assemble_name (file
, fnname
); fputs ("..na:\n", file
);
6423 fputs ("\t.ascii \"", file
);
6424 assemble_name (file
, fnname
);
6425 fputs ("\\0\"\n", file
);
6428 fprintf (file
, "\t.align 3\n");
6429 fputs ("\t.name ", file
);
6430 assemble_name (file
, fnname
);
6431 fputs ("..na\n", file
);
6432 ASM_OUTPUT_LABEL (file
, fnname
);
6433 fprintf (file
, "\t.pdesc ");
6434 assemble_name (file
, fnname
);
6435 fprintf (file
, "..en,%s\n", alpha_is_stack_procedure
? "stack" : "reg");
6436 alpha_need_linkage (fnname
, 1);
6441 /* Emit the .prologue note at the scheduled end of the prologue. */
6444 alpha_output_function_end_prologue (file
)
6447 if (TARGET_ABI_UNICOSMK
)
6449 else if (TARGET_ABI_OPEN_VMS
)
6450 fputs ("\t.prologue\n", file
);
6451 else if (TARGET_ABI_WINDOWS_NT
)
6452 fputs ("\t.prologue 0\n", file
);
6453 else if (!flag_inhibit_size_directive
)
6454 fprintf (file
, "\t.prologue %d\n", alpha_function_needs_gp
);
/* Write function epilogue.  */

/* ??? At some point we will want to support full unwind, and so will
   need to mark the epilogue as well.  At the moment, we just confuse
   dwarf2out.  */
#undef FRP
#define FRP(exp) exp
6466 alpha_expand_epilogue ()
6468 /* Registers to save. */
6469 unsigned long imask
= 0;
6470 unsigned long fmask
= 0;
6471 /* Stack space needed for pushing registers clobbered by us. */
6472 HOST_WIDE_INT sa_size
;
6473 /* Complete stack size needed. */
6474 HOST_WIDE_INT frame_size
;
6475 /* Offset from base reg to register save area. */
6476 HOST_WIDE_INT reg_offset
;
6477 int fp_is_frame_pointer
, fp_offset
;
6478 rtx sa_reg
, sa_reg_exp
= NULL
;
6479 rtx sp_adj1
, sp_adj2
, mem
;
6483 sa_size
= alpha_sa_size ();
6485 frame_size
= get_frame_size ();
6486 if (TARGET_ABI_OPEN_VMS
)
6487 frame_size
= ALPHA_ROUND (sa_size
6488 + (alpha_is_stack_procedure
? 8 : 0)
6490 + current_function_pretend_args_size
);
6491 else if (TARGET_ABI_UNICOSMK
)
6492 frame_size
= ALPHA_ROUND (sa_size
6493 + (alpha_is_stack_procedure
? 48 : 0))
6494 + ALPHA_ROUND (frame_size
6495 + current_function_outgoing_args_size
);
6497 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
6499 + ALPHA_ROUND (frame_size
6500 + current_function_pretend_args_size
));
6502 if (TARGET_ABI_OPEN_VMS
)
6505 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
6507 alpha_sa_mask (&imask
, &fmask
);
6509 fp_is_frame_pointer
= ((TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
6510 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
));
6512 sa_reg
= stack_pointer_rtx
;
6514 if (current_function_calls_eh_return
)
6515 eh_ofs
= EH_RETURN_STACKADJ_RTX
;
6519 if (!TARGET_ABI_UNICOSMK
&& sa_size
)
6521 /* If we have a frame pointer, restore SP from it. */
6522 if ((TARGET_ABI_OPEN_VMS
6523 && vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
6524 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
))
6525 FRP (emit_move_insn (stack_pointer_rtx
, hard_frame_pointer_rtx
));
6527 /* Cope with very large offsets to the register save area. */
6528 if (reg_offset
+ sa_size
> 0x8000)
6530 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
6533 if (low
+ sa_size
<= 0x8000)
6534 bias
= reg_offset
- low
, reg_offset
= low
;
6536 bias
= reg_offset
, reg_offset
= 0;
6538 sa_reg
= gen_rtx_REG (DImode
, 22);
6539 sa_reg_exp
= plus_constant (stack_pointer_rtx
, bias
);
6541 FRP (emit_move_insn (sa_reg
, sa_reg_exp
));
6544 /* Restore registers in order, excepting a true frame pointer. */
6546 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6548 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6549 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
6552 imask
&= ~(1L << REG_RA
);
6554 for (i
= 0; i
< 32; ++i
)
6555 if (imask
& (1L << i
))
6557 if (i
== HARD_FRAME_POINTER_REGNUM
&& fp_is_frame_pointer
)
6558 fp_offset
= reg_offset
;
6561 mem
= gen_rtx_MEM (DImode
, plus_constant(sa_reg
, reg_offset
));
6562 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6563 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
6568 for (i
= 0; i
< 32; ++i
)
6569 if (fmask
& (1L << i
))
6571 mem
= gen_rtx_MEM (DFmode
, plus_constant(sa_reg
, reg_offset
));
6572 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6573 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
6577 else if (TARGET_ABI_UNICOSMK
&& alpha_is_stack_procedure
)
6579 /* Restore callee-saved general-purpose registers. */
6583 for (i
= 9; i
< 15; i
++)
6584 if (imask
& (1L << i
))
6586 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
6588 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6589 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
6593 for (i
= 2; i
< 10; i
++)
6594 if (fmask
& (1L << i
))
6596 mem
= gen_rtx_MEM (DFmode
, plus_constant(hard_frame_pointer_rtx
,
6598 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6599 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
6603 /* Restore the return address from the DSIB. */
6605 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
, -8));
6606 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6607 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
6610 if (frame_size
|| eh_ofs
)
6612 sp_adj1
= stack_pointer_rtx
;
6616 sp_adj1
= gen_rtx_REG (DImode
, 23);
6617 emit_move_insn (sp_adj1
,
6618 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, eh_ofs
));
6621 /* If the stack size is large, begin computation into a temporary
6622 register so as not to interfere with a potential fp restore,
6623 which must be consecutive with an SP restore. */
6624 if (frame_size
< 32768
6625 && ! (TARGET_ABI_UNICOSMK
&& current_function_calls_alloca
))
6626 sp_adj2
= GEN_INT (frame_size
);
6627 else if (TARGET_ABI_UNICOSMK
)
6629 sp_adj1
= gen_rtx_REG (DImode
, 23);
6630 FRP (emit_move_insn (sp_adj1
, hard_frame_pointer_rtx
));
6631 sp_adj2
= const0_rtx
;
6633 else if (frame_size
< 0x40007fffL
)
6635 int low
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
6637 sp_adj2
= plus_constant (sp_adj1
, frame_size
- low
);
6638 if (sa_reg_exp
&& rtx_equal_p (sa_reg_exp
, sp_adj2
))
6642 sp_adj1
= gen_rtx_REG (DImode
, 23);
6643 FRP (emit_move_insn (sp_adj1
, sp_adj2
));
6645 sp_adj2
= GEN_INT (low
);
6649 rtx tmp
= gen_rtx_REG (DImode
, 23);
6650 FRP (sp_adj2
= alpha_emit_set_const (tmp
, DImode
, frame_size
, 3));
6653 /* We can't drop new things to memory this late, afaik,
6654 so build it up by pieces. */
6655 FRP (sp_adj2
= alpha_emit_set_long_const (tmp
, frame_size
,
6656 -(frame_size
< 0)));
6662 /* From now on, things must be in order. So emit blockages. */
6664 /* Restore the frame pointer. */
6665 if (TARGET_ABI_UNICOSMK
)
6667 emit_insn (gen_blockage ());
6668 mem
= gen_rtx_MEM (DImode
,
6669 plus_constant (hard_frame_pointer_rtx
, -16));
6670 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6671 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
6673 else if (fp_is_frame_pointer
)
6675 emit_insn (gen_blockage ());
6676 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, fp_offset
));
6677 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6678 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
6680 else if (TARGET_ABI_OPEN_VMS
)
6682 emit_insn (gen_blockage ());
6683 FRP (emit_move_insn (hard_frame_pointer_rtx
,
6684 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
6687 /* Restore the stack pointer. */
6688 emit_insn (gen_blockage ());
6689 if (sp_adj2
== const0_rtx
)
6690 FRP (emit_move_insn (stack_pointer_rtx
, sp_adj1
));
6692 FRP (emit_move_insn (stack_pointer_rtx
,
6693 gen_rtx_PLUS (DImode
, sp_adj1
, sp_adj2
)));
6697 if (TARGET_ABI_OPEN_VMS
&& !alpha_is_stack_procedure
)
6699 emit_insn (gen_blockage ());
6700 FRP (emit_move_insn (hard_frame_pointer_rtx
,
6701 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
6703 else if (TARGET_ABI_UNICOSMK
&& !alpha_is_stack_procedure
)
6705 /* Decrement the frame pointer if the function does not have a
6708 emit_insn (gen_blockage ());
6709 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
6710 hard_frame_pointer_rtx
, GEN_INT (-1))));
6715 /* Output the rest of the textual info surrounding the epilogue. */
6718 alpha_end_function (file
, fnname
, decl
)
6721 tree decl ATTRIBUTE_UNUSED
;
6723 /* End the function. */
6724 if (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
)
6726 fputs ("\t.end ", file
);
6727 assemble_name (file
, fnname
);
6730 inside_function
= FALSE
;
6732 /* Show that we know this function if it is called again.
6734 Don't do this for global functions in object files destined for a
6735 shared library because the function may be overridden by the application
6736 or other libraries. Similarly, don't do this for weak functions.
6738 Don't do this for functions not defined in the .text section, as
6739 otherwise it's not unlikely that the destination is out of range
6740 for a direct branch. */
6742 if (!DECL_WEAK (current_function_decl
)
6743 && (!flag_pic
|| !TREE_PUBLIC (current_function_decl
))
6744 && decl_in_text_section (current_function_decl
))
6745 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl
), 0)) = 1;
6747 /* Output jump tables and the static subroutine information block. */
6748 if (TARGET_ABI_UNICOSMK
)
6750 unicosmk_output_ssib (file
, fnname
);
6751 unicosmk_output_deferred_case_vectors (file
);
6755 /* Debugging support. */
6759 /* Count the number of sdb related labels are generated (to find block
6760 start and end boundaries). */
6762 int sdb_label_count
= 0;
6764 /* Next label # for each statement. */
6766 static int sym_lineno
= 0;
6768 /* Count the number of .file directives, so that .loc is up to date. */
6770 static int num_source_filenames
= 0;
6772 /* Name of the file containing the current function. */
6774 static const char *current_function_file
= "";
6776 /* Offsets to alpha virtual arg/local debugging pointers. */
6778 long alpha_arg_offset
;
6779 long alpha_auto_offset
;
6781 /* Emit a new filename to a stream. */
6784 alpha_output_filename (stream
, name
)
6788 static int first_time
= TRUE
;
6789 char ltext_label_name
[100];
6794 ++num_source_filenames
;
6795 current_function_file
= name
;
6796 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
6797 output_quoted_string (stream
, name
);
6798 fprintf (stream
, "\n");
6799 if (!TARGET_GAS
&& write_symbols
== DBX_DEBUG
)
6800 fprintf (stream
, "\t#@stabs\n");
6803 else if (write_symbols
== DBX_DEBUG
)
6805 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name
, "Ltext", 0);
6806 fprintf (stream
, "%s", ASM_STABS_OP
);
6807 output_quoted_string (stream
, name
);
6808 fprintf (stream
, ",%d,0,0,%s\n", N_SOL
, <ext_label_name
[1]);
6811 else if (name
!= current_function_file
6812 && strcmp (name
, current_function_file
) != 0)
6814 if (inside_function
&& ! TARGET_GAS
)
6815 fprintf (stream
, "\t#.file\t%d ", num_source_filenames
);
6818 ++num_source_filenames
;
6819 current_function_file
= name
;
6820 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
6823 output_quoted_string (stream
, name
);
6824 fprintf (stream
, "\n");
6828 /* Emit a linenumber to a stream. */
6831 alpha_output_lineno (stream
, line
)
6835 if (write_symbols
== DBX_DEBUG
)
6837 /* mips-tfile doesn't understand .stabd directives. */
6839 fprintf (stream
, "$LM%d:\n%s%d,0,%d,$LM%d\n",
6840 sym_lineno
, ASM_STABN_OP
, N_SLINE
, line
, sym_lineno
);
6843 fprintf (stream
, "\n\t.loc\t%d %d\n", num_source_filenames
, line
);
6846 /* Structure to show the current status of registers and memory. */
6848 struct shadow_summary
6851 unsigned int i
: 31; /* Mask of int regs */
6852 unsigned int fp
: 31; /* Mask of fp regs */
6853 unsigned int mem
: 1; /* mem == imem | fpmem */
6857 static void summarize_insn
PARAMS ((rtx
, struct shadow_summary
*, int));
6858 static void alpha_handle_trap_shadows
PARAMS ((rtx
));
6860 /* Summary the effects of expression X on the machine. Update SUM, a pointer
6861 to the summary structure. SET is nonzero if the insn is setting the
6862 object, otherwise zero. */
6865 summarize_insn (x
, sum
, set
)
6867 struct shadow_summary
*sum
;
6870 const char *format_ptr
;
6876 switch (GET_CODE (x
))
6878 /* ??? Note that this case would be incorrect if the Alpha had a
6879 ZERO_EXTRACT in SET_DEST. */
6881 summarize_insn (SET_SRC (x
), sum
, 0);
6882 summarize_insn (SET_DEST (x
), sum
, 1);
6886 summarize_insn (XEXP (x
, 0), sum
, 1);
6890 summarize_insn (XEXP (x
, 0), sum
, 0);
6894 for (i
= ASM_OPERANDS_INPUT_LENGTH (x
) - 1; i
>= 0; i
--)
6895 summarize_insn (ASM_OPERANDS_INPUT (x
, i
), sum
, 0);
6899 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
6900 summarize_insn (XVECEXP (x
, 0, i
), sum
, 0);
6904 summarize_insn (SUBREG_REG (x
), sum
, 0);
6909 int regno
= REGNO (x
);
6910 unsigned long mask
= ((unsigned long) 1) << (regno
% 32);
6912 if (regno
== 31 || regno
== 63)
6918 sum
->defd
.i
|= mask
;
6920 sum
->defd
.fp
|= mask
;
6925 sum
->used
.i
|= mask
;
6927 sum
->used
.fp
|= mask
;
6938 /* Find the regs used in memory address computation: */
6939 summarize_insn (XEXP (x
, 0), sum
, 0);
6942 case CONST_INT
: case CONST_DOUBLE
:
6943 case SYMBOL_REF
: case LABEL_REF
: case CONST
:
6944 case SCRATCH
: case ASM_INPUT
:
6947 /* Handle common unary and binary ops for efficiency. */
6948 case COMPARE
: case PLUS
: case MINUS
: case MULT
: case DIV
:
6949 case MOD
: case UDIV
: case UMOD
: case AND
: case IOR
:
6950 case XOR
: case ASHIFT
: case ROTATE
: case ASHIFTRT
: case LSHIFTRT
:
6951 case ROTATERT
: case SMIN
: case SMAX
: case UMIN
: case UMAX
:
6952 case NE
: case EQ
: case GE
: case GT
: case LE
:
6953 case LT
: case GEU
: case GTU
: case LEU
: case LTU
:
6954 summarize_insn (XEXP (x
, 0), sum
, 0);
6955 summarize_insn (XEXP (x
, 1), sum
, 0);
6958 case NEG
: case NOT
: case SIGN_EXTEND
: case ZERO_EXTEND
:
6959 case TRUNCATE
: case FLOAT_EXTEND
: case FLOAT_TRUNCATE
: case FLOAT
:
6960 case FIX
: case UNSIGNED_FLOAT
: case UNSIGNED_FIX
: case ABS
:
6961 case SQRT
: case FFS
:
6962 summarize_insn (XEXP (x
, 0), sum
, 0);
6966 format_ptr
= GET_RTX_FORMAT (GET_CODE (x
));
6967 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
6968 switch (format_ptr
[i
])
6971 summarize_insn (XEXP (x
, i
), sum
, 0);
6975 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
6976 summarize_insn (XVECEXP (x
, i
, j
), sum
, 0);
6988 /* Ensure a sufficient number of `trapb' insns are in the code when
6989 the user requests code with a trap precision of functions or
6992 In naive mode, when the user requests a trap-precision of
6993 "instruction", a trapb is needed after every instruction that may
6994 generate a trap. This ensures that the code is resumption safe but
6997 When optimizations are turned on, we delay issuing a trapb as long
6998 as possible. In this context, a trap shadow is the sequence of
6999 instructions that starts with a (potentially) trap generating
7000 instruction and extends to the next trapb or call_pal instruction
7001 (but GCC never generates call_pal by itself). We can delay (and
7002 therefore sometimes omit) a trapb subject to the following
7005 (a) On entry to the trap shadow, if any Alpha register or memory
7006 location contains a value that is used as an operand value by some
7007 instruction in the trap shadow (live on entry), then no instruction
7008 in the trap shadow may modify the register or memory location.
7010 (b) Within the trap shadow, the computation of the base register
7011 for a memory load or store instruction may not involve using the
7012 result of an instruction that might generate an UNPREDICTABLE
7015 (c) Within the trap shadow, no register may be used more than once
7016 as a destination register. (This is to make life easier for the
7019 (d) The trap shadow may not include any branch instructions. */
7022 alpha_handle_trap_shadows (insns
)
7025 struct shadow_summary shadow
;
7026 int trap_pending
, exception_nesting
;
7030 exception_nesting
= 0;
7033 shadow
.used
.mem
= 0;
7034 shadow
.defd
= shadow
.used
;
7036 for (i
= insns
; i
; i
= NEXT_INSN (i
))
7038 if (GET_CODE (i
) == NOTE
)
7040 switch (NOTE_LINE_NUMBER (i
))
7042 case NOTE_INSN_EH_REGION_BEG
:
7043 exception_nesting
++;
7048 case NOTE_INSN_EH_REGION_END
:
7049 exception_nesting
--;
7054 case NOTE_INSN_EPILOGUE_BEG
:
7055 if (trap_pending
&& alpha_tp
>= ALPHA_TP_FUNC
)
7060 else if (trap_pending
)
7062 if (alpha_tp
== ALPHA_TP_FUNC
)
7064 if (GET_CODE (i
) == JUMP_INSN
7065 && GET_CODE (PATTERN (i
)) == RETURN
)
7068 else if (alpha_tp
== ALPHA_TP_INSN
)
7072 struct shadow_summary sum
;
7077 sum
.defd
= sum
.used
;
7079 switch (GET_CODE (i
))
7082 /* Annoyingly, get_attr_trap will abort on these. */
7083 if (GET_CODE (PATTERN (i
)) == USE
7084 || GET_CODE (PATTERN (i
)) == CLOBBER
)
7087 summarize_insn (PATTERN (i
), &sum
, 0);
7089 if ((sum
.defd
.i
& shadow
.defd
.i
)
7090 || (sum
.defd
.fp
& shadow
.defd
.fp
))
7092 /* (c) would be violated */
7096 /* Combine shadow with summary of current insn: */
7097 shadow
.used
.i
|= sum
.used
.i
;
7098 shadow
.used
.fp
|= sum
.used
.fp
;
7099 shadow
.used
.mem
|= sum
.used
.mem
;
7100 shadow
.defd
.i
|= sum
.defd
.i
;
7101 shadow
.defd
.fp
|= sum
.defd
.fp
;
7102 shadow
.defd
.mem
|= sum
.defd
.mem
;
7104 if ((sum
.defd
.i
& shadow
.used
.i
)
7105 || (sum
.defd
.fp
& shadow
.used
.fp
)
7106 || (sum
.defd
.mem
& shadow
.used
.mem
))
7108 /* (a) would be violated (also takes care of (b)) */
7109 if (get_attr_trap (i
) == TRAP_YES
7110 && ((sum
.defd
.i
& sum
.used
.i
)
7111 || (sum
.defd
.fp
& sum
.used
.fp
)))
7130 n
= emit_insn_before (gen_trapb (), i
);
7131 PUT_MODE (n
, TImode
);
7132 PUT_MODE (i
, TImode
);
7136 shadow
.used
.mem
= 0;
7137 shadow
.defd
= shadow
.used
;
7142 if ((exception_nesting
> 0 || alpha_tp
>= ALPHA_TP_FUNC
)
7143 && GET_CODE (i
) == INSN
7144 && GET_CODE (PATTERN (i
)) != USE
7145 && GET_CODE (PATTERN (i
)) != CLOBBER
7146 && get_attr_trap (i
) == TRAP_YES
)
7148 if (optimize
&& !trap_pending
)
7149 summarize_insn (PATTERN (i
), &shadow
, 0);
7155 /* Alpha can only issue instruction groups simultaneously if they are
7156 suitibly aligned. This is very processor-specific. */
7158 enum alphaev4_pipe
{
7165 enum alphaev5_pipe
{
7176 static enum alphaev4_pipe alphaev4_insn_pipe
PARAMS ((rtx
));
7177 static enum alphaev5_pipe alphaev5_insn_pipe
PARAMS ((rtx
));
7178 static rtx alphaev4_next_group
PARAMS ((rtx
, int *, int *));
7179 static rtx alphaev5_next_group
PARAMS ((rtx
, int *, int *));
7180 static rtx alphaev4_next_nop
PARAMS ((int *));
7181 static rtx alphaev5_next_nop
PARAMS ((int *));
7183 static void alpha_align_insns
7184 PARAMS ((rtx
, unsigned int, rtx (*)(rtx
, int *, int *), rtx (*)(int *)));
7186 static enum alphaev4_pipe
7187 alphaev4_insn_pipe (insn
)
7190 if (recog_memoized (insn
) < 0)
7192 if (get_attr_length (insn
) != 4)
7195 switch (get_attr_type (insn
))
7228 static enum alphaev5_pipe
7229 alphaev5_insn_pipe (insn
)
7232 if (recog_memoized (insn
) < 0)
7234 if (get_attr_length (insn
) != 4)
7237 switch (get_attr_type (insn
))
7277 /* IN_USE is a mask of the slots currently filled within the insn group.
7278 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
7279 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
7281 LEN is, of course, the length of the group in bytes. */
7284 alphaev4_next_group (insn
, pin_use
, plen
)
7286 int *pin_use
, *plen
;
7293 || GET_CODE (PATTERN (insn
)) == CLOBBER
7294 || GET_CODE (PATTERN (insn
)) == USE
)
7299 enum alphaev4_pipe pipe
;
7301 pipe
= alphaev4_insn_pipe (insn
);
7305 /* Force complex instructions to start new groups. */
7309 /* If this is a completely unrecognized insn, its an asm.
7310 We don't know how long it is, so record length as -1 to
7311 signal a needed realignment. */
7312 if (recog_memoized (insn
) < 0)
7315 len
= get_attr_length (insn
);
7319 if (in_use
& EV4_IB0
)
7321 if (in_use
& EV4_IB1
)
7326 in_use
|= EV4_IB0
| EV4_IBX
;
7330 if (in_use
& EV4_IB0
)
7332 if (!(in_use
& EV4_IBX
) || (in_use
& EV4_IB1
))
7340 if (in_use
& EV4_IB1
)
7350 /* Haifa doesn't do well scheduling branches. */
7351 if (GET_CODE (insn
) == JUMP_INSN
)
7355 insn
= next_nonnote_insn (insn
);
7357 if (!insn
|| ! INSN_P (insn
))
7360 /* Let Haifa tell us where it thinks insn group boundaries are. */
7361 if (GET_MODE (insn
) == TImode
)
7364 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
7369 insn
= next_nonnote_insn (insn
);
7377 /* IN_USE is a mask of the slots currently filled within the insn group.
7378 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
7379 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
7381 LEN is, of course, the length of the group in bytes. */
7384 alphaev5_next_group (insn
, pin_use
, plen
)
7386 int *pin_use
, *plen
;
7393 || GET_CODE (PATTERN (insn
)) == CLOBBER
7394 || GET_CODE (PATTERN (insn
)) == USE
)
7399 enum alphaev5_pipe pipe
;
7401 pipe
= alphaev5_insn_pipe (insn
);
7405 /* Force complex instructions to start new groups. */
7409 /* If this is a completely unrecognized insn, its an asm.
7410 We don't know how long it is, so record length as -1 to
7411 signal a needed realignment. */
7412 if (recog_memoized (insn
) < 0)
7415 len
= get_attr_length (insn
);
7418 /* ??? Most of the places below, we would like to abort, as
7419 it would indicate an error either in Haifa, or in the
7420 scheduling description. Unfortunately, Haifa never
7421 schedules the last instruction of the BB, so we don't
7422 have an accurate TI bit to go off. */
7424 if (in_use
& EV5_E0
)
7426 if (in_use
& EV5_E1
)
7431 in_use
|= EV5_E0
| EV5_E01
;
7435 if (in_use
& EV5_E0
)
7437 if (!(in_use
& EV5_E01
) || (in_use
& EV5_E1
))
7445 if (in_use
& EV5_E1
)
7451 if (in_use
& EV5_FA
)
7453 if (in_use
& EV5_FM
)
7458 in_use
|= EV5_FA
| EV5_FAM
;
7462 if (in_use
& EV5_FA
)
7468 if (in_use
& EV5_FM
)
7481 /* Haifa doesn't do well scheduling branches. */
7482 /* ??? If this is predicted not-taken, slotting continues, except
7483 that no more IBR, FBR, or JSR insns may be slotted. */
7484 if (GET_CODE (insn
) == JUMP_INSN
)
7488 insn
= next_nonnote_insn (insn
);
7490 if (!insn
|| ! INSN_P (insn
))
7493 /* Let Haifa tell us where it thinks insn group boundaries are. */
7494 if (GET_MODE (insn
) == TImode
)
7497 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
7502 insn
= next_nonnote_insn (insn
);
7511 alphaev4_next_nop (pin_use
)
7514 int in_use
= *pin_use
;
7517 if (!(in_use
& EV4_IB0
))
7522 else if ((in_use
& (EV4_IBX
|EV4_IB1
)) == EV4_IBX
)
7527 else if (TARGET_FP
&& !(in_use
& EV4_IB1
))
7540 alphaev5_next_nop (pin_use
)
7543 int in_use
= *pin_use
;
7546 if (!(in_use
& EV5_E1
))
7551 else if (TARGET_FP
&& !(in_use
& EV5_FA
))
7556 else if (TARGET_FP
&& !(in_use
& EV5_FM
))
7568 /* The instruction group alignment main loop. */
7571 alpha_align_insns (insns
, max_align
, next_group
, next_nop
)
7573 unsigned int max_align
;
7574 rtx (*next_group
) PARAMS ((rtx
, int *, int *));
7575 rtx (*next_nop
) PARAMS ((int *));
7577 /* ALIGN is the known alignment for the insn group. */
7579 /* OFS is the offset of the current insn in the insn group. */
7581 int prev_in_use
, in_use
, len
;
7584 /* Let shorten branches care for assigning alignments to code labels. */
7585 shorten_branches (insns
);
7587 if (align_functions
< 4)
7589 else if (align_functions
< max_align
)
7590 align
= align_functions
;
7594 ofs
= prev_in_use
= 0;
7596 if (GET_CODE (i
) == NOTE
)
7597 i
= next_nonnote_insn (i
);
7601 next
= (*next_group
) (i
, &in_use
, &len
);
7603 /* When we see a label, resync alignment etc. */
7604 if (GET_CODE (i
) == CODE_LABEL
)
7606 unsigned int new_align
= 1 << label_to_alignment (i
);
7608 if (new_align
>= align
)
7610 align
= new_align
< max_align
? new_align
: max_align
;
7614 else if (ofs
& (new_align
-1))
7615 ofs
= (ofs
| (new_align
-1)) + 1;
7620 /* Handle complex instructions special. */
7621 else if (in_use
== 0)
7623 /* Asms will have length < 0. This is a signal that we have
7624 lost alignment knowledge. Assume, however, that the asm
7625 will not mis-align instructions. */
7634 /* If the known alignment is smaller than the recognized insn group,
7635 realign the output. */
7636 else if ((int) align
< len
)
7638 unsigned int new_log_align
= len
> 8 ? 4 : 3;
7641 where
= prev
= prev_nonnote_insn (i
);
7642 if (!where
|| GET_CODE (where
) != CODE_LABEL
)
7645 /* Can't realign between a call and its gp reload. */
7646 if (! (TARGET_EXPLICIT_RELOCS
7647 && prev
&& GET_CODE (prev
) == CALL_INSN
))
7649 emit_insn_before (gen_realign (GEN_INT (new_log_align
)), where
);
7650 align
= 1 << new_log_align
;
7655 /* If the group won't fit in the same INT16 as the previous,
7656 we need to add padding to keep the group together. Rather
7657 than simply leaving the insn filling to the assembler, we
7658 can make use of the knowledge of what sorts of instructions
7659 were issued in the previous group to make sure that all of
7660 the added nops are really free. */
7661 else if (ofs
+ len
> (int) align
)
7663 int nop_count
= (align
- ofs
) / 4;
7666 /* Insert nops before labels, branches, and calls to truely merge
7667 the execution of the nops with the previous instruction group. */
7668 where
= prev_nonnote_insn (i
);
7671 if (GET_CODE (where
) == CODE_LABEL
)
7673 rtx where2
= prev_nonnote_insn (where
);
7674 if (where2
&& GET_CODE (where2
) == JUMP_INSN
)
7677 else if (GET_CODE (where
) == INSN
)
7684 emit_insn_before ((*next_nop
)(&prev_in_use
), where
);
7685 while (--nop_count
);
7689 ofs
= (ofs
+ len
) & (align
- 1);
7690 prev_in_use
= in_use
;
7695 /* Machine dependent reorg pass. */
7701 if (alpha_tp
!= ALPHA_TP_PROG
|| flag_exceptions
)
7702 alpha_handle_trap_shadows (insns
);
7704 /* Due to the number of extra trapb insns, don't bother fixing up
7705 alignment when trap precision is instruction. Moreover, we can
7706 only do our job when sched2 is run. */
7707 if (optimize
&& !optimize_size
7708 && alpha_tp
!= ALPHA_TP_INSN
7709 && flag_schedule_insns_after_reload
)
7711 if (alpha_cpu
== PROCESSOR_EV4
)
7712 alpha_align_insns (insns
, 8, alphaev4_next_group
, alphaev4_next_nop
);
7713 else if (alpha_cpu
== PROCESSOR_EV5
)
7714 alpha_align_insns (insns
, 16, alphaev5_next_group
, alphaev5_next_nop
);
7718 /* Check a floating-point value for validity for a particular machine mode. */
7720 static const char * const float_strings
[] =
7722 /* These are for FLOAT_VAX. */
7723 "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
7724 "-1.70141173319264430e+38",
7725 "2.93873587705571877e-39", /* 2^-128 */
7726 "-2.93873587705571877e-39",
7727 /* These are for the default broken IEEE mode, which traps
7728 on infinity or denormal numbers. */
7729 "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
7730 "-3.402823466385288598117e+38",
7731 "1.1754943508222875079687e-38", /* 2^-126 */
7732 "-1.1754943508222875079687e-38",
7735 static REAL_VALUE_TYPE float_values
[8];
7736 static int inited_float_values
= 0;
7739 check_float_value (mode
, d
, overflow
)
7740 enum machine_mode mode
;
7742 int overflow ATTRIBUTE_UNUSED
;
7745 if (TARGET_IEEE
|| TARGET_IEEE_CONFORMANT
|| TARGET_IEEE_WITH_INEXACT
)
7748 if (inited_float_values
== 0)
7751 for (i
= 0; i
< 8; i
++)
7752 float_values
[i
] = REAL_VALUE_ATOF (float_strings
[i
], DFmode
);
7754 inited_float_values
= 1;
7760 REAL_VALUE_TYPE
*fvptr
;
7762 if (TARGET_FLOAT_VAX
)
7763 fvptr
= &float_values
[0];
7765 fvptr
= &float_values
[4];
7767 memcpy (&r
, d
, sizeof (REAL_VALUE_TYPE
));
7768 if (REAL_VALUES_LESS (fvptr
[0], r
))
7770 memcpy (d
, &fvptr
[0], sizeof (REAL_VALUE_TYPE
));
7773 else if (REAL_VALUES_LESS (r
, fvptr
[1]))
7775 memcpy (d
, &fvptr
[1], sizeof (REAL_VALUE_TYPE
));
7778 else if (REAL_VALUES_LESS (dconst0
, r
)
7779 && REAL_VALUES_LESS (r
, fvptr
[2]))
7781 memcpy (d
, &dconst0
, sizeof (REAL_VALUE_TYPE
));
7784 else if (REAL_VALUES_LESS (r
, dconst0
)
7785 && REAL_VALUES_LESS (fvptr
[3], r
))
7787 memcpy (d
, &dconst0
, sizeof (REAL_VALUE_TYPE
));
7795 #if TARGET_ABI_OPEN_VMS
7797 /* Return the VMS argument type corresponding to MODE. */
7800 alpha_arg_type (mode
)
7801 enum machine_mode mode
;
7806 return TARGET_FLOAT_VAX
? FF
: FS
;
7808 return TARGET_FLOAT_VAX
? FD
: FT
;
7814 /* Return an rtx for an integer representing the VMS Argument Information
7818 alpha_arg_info_reg_val (cum
)
7819 CUMULATIVE_ARGS cum
;
7821 unsigned HOST_WIDE_INT regval
= cum
.num_args
;
7824 for (i
= 0; i
< 6; i
++)
7825 regval
|= ((int) cum
.atypes
[i
]) << (i
* 3 + 8);
7827 return GEN_INT (regval
);
7830 #include <splay-tree.h>
7832 /* Structure to collect function names for final output
7835 enum links_kind
{KIND_UNUSED
, KIND_LOCAL
, KIND_EXTERN
};
7840 enum links_kind kind
;
7843 static splay_tree alpha_links
;
7845 static int mark_alpha_links_node
PARAMS ((splay_tree_node
, void *));
7846 static void mark_alpha_links
PARAMS ((void *));
7847 static int alpha_write_one_linkage
PARAMS ((splay_tree_node
, void *));
7849 /* Protect alpha_links from garbage collection. */
7852 mark_alpha_links_node (node
, data
)
7853 splay_tree_node node
;
7854 void *data ATTRIBUTE_UNUSED
;
7856 struct alpha_links
*links
= (struct alpha_links
*) node
->value
;
7857 ggc_mark_rtx (links
->linkage
);
7862 mark_alpha_links (ptr
)
7865 splay_tree tree
= *(splay_tree
*) ptr
;
7866 splay_tree_foreach (tree
, mark_alpha_links_node
, NULL
);
7869 /* Make (or fake) .linkage entry for function call.
7871 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
7873 Return an SYMBOL_REF rtx for the linkage. */
7876 alpha_need_linkage (name
, is_local
)
7880 splay_tree_node node
;
7881 struct alpha_links
*al
;
7888 /* Is this name already defined? */
7890 node
= splay_tree_lookup (alpha_links
, (splay_tree_key
) name
);
7893 al
= (struct alpha_links
*) node
->value
;
7896 /* Defined here but external assumed. */
7897 if (al
->kind
== KIND_EXTERN
)
7898 al
->kind
= KIND_LOCAL
;
7902 /* Used here but unused assumed. */
7903 if (al
->kind
== KIND_UNUSED
)
7904 al
->kind
= KIND_LOCAL
;
7911 alpha_links
= splay_tree_new ((splay_tree_compare_fn
) strcmp
,
7912 (splay_tree_delete_key_fn
) free
,
7913 (splay_tree_delete_key_fn
) free
);
7914 ggc_add_root (&alpha_links
, 1, 1, mark_alpha_links
);
7917 al
= (struct alpha_links
*) xmalloc (sizeof (struct alpha_links
));
7918 name
= xstrdup (name
);
7920 /* Assume external if no definition. */
7921 al
->kind
= (is_local
? KIND_UNUSED
: KIND_EXTERN
);
7923 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
7924 get_identifier (name
);
7926 /* Construct a SYMBOL_REF for us to call. */
7928 size_t name_len
= strlen (name
);
7929 char *linksym
= alloca (name_len
+ 6);
7931 memcpy (linksym
+ 1, name
, name_len
);
7932 memcpy (linksym
+ 1 + name_len
, "..lk", 5);
7933 al
->linkage
= gen_rtx_SYMBOL_REF (Pmode
,
7934 ggc_alloc_string (linksym
, name_len
+ 5));
7937 splay_tree_insert (alpha_links
, (splay_tree_key
) name
,
7938 (splay_tree_value
) al
);
7944 alpha_write_one_linkage (node
, data
)
7945 splay_tree_node node
;
7948 const char *const name
= (const char *) node
->key
;
7949 struct alpha_links
*links
= (struct alpha_links
*) node
->value
;
7950 FILE *stream
= (FILE *) data
;
7952 if (links
->kind
== KIND_UNUSED
7953 || ! TREE_SYMBOL_REFERENCED (get_identifier (name
)))
7956 fprintf (stream
, "$%s..lk:\n", name
);
7957 if (links
->kind
== KIND_LOCAL
)
7959 /* Local and used, build linkage pair. */
7960 fprintf (stream
, "\t.quad %s..en\n", name
);
7961 fprintf (stream
, "\t.quad %s\n", name
);
7965 /* External and used, request linkage pair. */
7966 fprintf (stream
, "\t.linkage %s\n", name
);
7973 alpha_write_linkage (stream
)
7978 readonly_section ();
7979 fprintf (stream
, "\t.align 3\n");
7980 splay_tree_foreach (alpha_links
, alpha_write_one_linkage
, stream
);
7984 /* Given a decl, a section name, and whether the decl initializer
7985 has relocs, choose attributes for the section. */
7987 #define SECTION_VMS_OVERLAY SECTION_FORGET
7988 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
7989 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
7992 vms_section_type_flags (decl
, name
, reloc
)
7997 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
7999 if (decl
&& DECL_ATTRIBUTES (decl
)
8000 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl
)))
8001 flags
|= SECTION_VMS_OVERLAY
;
8002 if (decl
&& DECL_ATTRIBUTES (decl
)
8003 && lookup_attribute ("global", DECL_ATTRIBUTES (decl
)))
8004 flags
|= SECTION_VMS_GLOBAL
;
8005 if (decl
&& DECL_ATTRIBUTES (decl
)
8006 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl
)))
8007 flags
|= SECTION_VMS_INITIALIZE
;
8012 /* Switch to an arbitrary section NAME with attributes as specified
8013 by FLAGS. ALIGN specifies any known alignment requirements for
8014 the section; 0 if the default should be used. */
8017 vms_asm_named_section (name
, flags
)
8021 fputc ('\n', asm_out_file
);
8022 fprintf (asm_out_file
, ".section\t%s", name
);
8024 if (flags
& SECTION_VMS_OVERLAY
)
8025 fprintf (asm_out_file
, ",OVR");
8026 if (flags
& SECTION_VMS_GLOBAL
)
8027 fprintf (asm_out_file
, ",GBL");
8028 if (flags
& SECTION_VMS_INITIALIZE
)
8029 fprintf (asm_out_file
, ",NOMOD");
8030 if (flags
& SECTION_DEBUG
)
8031 fprintf (asm_out_file
, ",NOWRT");
8033 fputc ('\n', asm_out_file
);
8036 /* Record an element in the table of global constructors. SYMBOL is
8037 a SYMBOL_REF of the function to be called; PRIORITY is a number
8038 between 0 and MAX_INIT_PRIORITY.
8040 Differs from default_ctors_section_asm_out_constructor in that the
8041 width of the .ctors entry is always 64 bits, rather than the 32 bits
8042 used by a normal pointer. */
8045 vms_asm_out_constructor (symbol
, priority
)
8047 int priority ATTRIBUTE_UNUSED
;
8050 assemble_align (BITS_PER_WORD
);
8051 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
8055 vms_asm_out_destructor (symbol
, priority
)
8057 int priority ATTRIBUTE_UNUSED
;
8060 assemble_align (BITS_PER_WORD
);
8061 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
8066 alpha_need_linkage (name
, is_local
)
8067 const char *name ATTRIBUTE_UNUSED
;
8068 int is_local ATTRIBUTE_UNUSED
;
8073 #endif /* TARGET_ABI_OPEN_VMS */
8075 #if TARGET_ABI_UNICOSMK
8077 static void unicosmk_output_module_name
PARAMS ((FILE *));
8078 static void unicosmk_output_default_externs
PARAMS ((FILE *));
8079 static void unicosmk_output_dex
PARAMS ((FILE *));
8080 static void unicosmk_output_externs
PARAMS ((FILE *));
8081 static void unicosmk_output_addr_vec
PARAMS ((FILE *, rtx
));
8082 static const char *unicosmk_ssib_name
PARAMS ((void));
8083 static int unicosmk_special_name
PARAMS ((const char *));
8085 /* Define the offset between two registers, one to be eliminated, and the
8086 other its replacement, at the start of a routine. */
8089 unicosmk_initial_elimination_offset (from
, to
)
8095 fixed_size
= alpha_sa_size();
8096 if (fixed_size
!= 0)
8099 if (from
== FRAME_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
8101 else if (from
== ARG_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
8103 else if (from
== FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
8104 return (ALPHA_ROUND (current_function_outgoing_args_size
)
8105 + ALPHA_ROUND (get_frame_size()));
8106 else if (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
8107 return (ALPHA_ROUND (fixed_size
)
8108 + ALPHA_ROUND (get_frame_size()
8109 + current_function_outgoing_args_size
));
8114 /* Output the module name for .ident and .end directives. We have to strip
8115 directories and add make sure that the module name starts with a letter
8119 unicosmk_output_module_name (file
)
8124 /* Strip directories. */
8126 name
= strrchr (main_input_filename
, '/');
8130 name
= main_input_filename
;
8132 /* CAM only accepts module names that start with a letter or '$'. We
8133 prefix the module name with a '$' if necessary. */
8135 if (!ISALPHA (*name
))
8136 fprintf (file
, "$%s", name
);
8141 /* Output text that to appear at the beginning of an assembler file. */
8144 unicosmk_asm_file_start (file
)
8149 fputs ("\t.ident\t", file
);
8150 unicosmk_output_module_name (file
);
8151 fputs ("\n\n", file
);
8153 /* The Unicos/Mk assembler uses different register names. Instead of trying
8154 to support them, we simply use micro definitions. */
8156 /* CAM has different register names: rN for the integer register N and fN
8157 for the floating-point register N. Instead of trying to use these in
8158 alpha.md, we define the symbols $N and $fN to refer to the appropriate
8161 for (i
= 0; i
< 32; ++i
)
8162 fprintf (file
, "$%d <- r%d\n", i
, i
);
8164 for (i
= 0; i
< 32; ++i
)
8165 fprintf (file
, "$f%d <- f%d\n", i
, i
);
8169 /* The .align directive fill unused space with zeroes which does not work
8170 in code sections. We define the macro 'gcc@code@align' which uses nops
8171 instead. Note that it assumes that code sections always have the
8172 biggest possible alignment since . refers to the current offset from
8173 the beginning of the section. */
8175 fputs ("\t.macro gcc@code@align n\n", file
);
8176 fputs ("gcc@n@bytes = 1 << n\n", file
);
8177 fputs ("gcc@here = . % gcc@n@bytes\n", file
);
8178 fputs ("\t.if ne, gcc@here, 0\n", file
);
8179 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", file
);
8180 fputs ("\tbis r31,r31,r31\n", file
);
8181 fputs ("\t.endr\n", file
);
8182 fputs ("\t.endif\n", file
);
8183 fputs ("\t.endm gcc@code@align\n\n", file
);
8185 /* Output extern declarations which should always be visible. */
8186 unicosmk_output_default_externs (file
);
8188 /* Open a dummy section. We always need to be inside a section for the
8189 section-switching code to work correctly.
8190 ??? This should be a module id or something like that. I still have to
8191 figure out what the rules for those are. */
8192 fputs ("\n\t.psect\t$SG00000,data\n", file
);
8195 /* Output text to appear at the end of an assembler file. This includes all
8196 pending extern declarations and DEX expressions. */
8199 unicosmk_asm_file_end (file
)
8202 fputs ("\t.endp\n\n", file
);
8204 /* Output all pending externs. */
8206 unicosmk_output_externs (file
);
8208 /* Output dex definitions used for functions whose names conflict with
8211 unicosmk_output_dex (file
);
8213 fputs ("\t.end\t", file
);
8214 unicosmk_output_module_name (file
);
8218 /* Output the definition of a common variable. */
8221 unicosmk_output_common (file
, name
, size
, align
)
8228 printf ("T3E__: common %s\n", name
);
8231 fputs("\t.endp\n\n\t.psect ", file
);
8232 assemble_name(file
, name
);
8233 fprintf(file
, ",%d,common\n", floor_log2 (align
/ BITS_PER_UNIT
));
8234 fprintf(file
, "\t.byte\t0:%d\n", size
);
8236 /* Mark the symbol as defined in this module. */
8237 name_tree
= get_identifier (name
);
8238 TREE_ASM_WRITTEN (name_tree
) = 1;
8241 #define SECTION_PUBLIC SECTION_MACH_DEP
8242 #define SECTION_MAIN (SECTION_PUBLIC << 1)
8243 static int current_section_align
;
8246 unicosmk_section_type_flags (decl
, name
, reloc
)
8249 int reloc ATTRIBUTE_UNUSED
;
8251 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
8256 if (TREE_CODE (decl
) == FUNCTION_DECL
)
8258 current_section_align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
8259 if (align_functions_log
> current_section_align
)
8260 current_section_align
= align_functions_log
;
8262 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
)), "main"))
8263 flags
|= SECTION_MAIN
;
8266 current_section_align
= floor_log2 (DECL_ALIGN (decl
) / BITS_PER_UNIT
);
8268 if (TREE_PUBLIC (decl
))
8269 flags
|= SECTION_PUBLIC
;
8274 /* Generate a section name for decl and associate it with the
8278 unicosmk_unique_section (decl
, reloc
)
8280 int reloc ATTRIBUTE_UNUSED
;
8288 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
8289 STRIP_NAME_ENCODING (name
, name
);
8290 len
= strlen (name
);
8292 if (TREE_CODE (decl
) == FUNCTION_DECL
)
8296 /* It is essential that we prefix the section name here because
8297 otherwise the section names generated for constructors and
8298 destructors confuse collect2. */
8300 string
= alloca (len
+ 6);
8301 sprintf (string
, "code@%s", name
);
8302 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
8304 else if (TREE_PUBLIC (decl
))
8305 DECL_SECTION_NAME (decl
) = build_string (len
, name
);
8310 string
= alloca (len
+ 6);
8311 sprintf (string
, "data@%s", name
);
8312 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
8316 /* Switch to an arbitrary section NAME with attributes as specified
8317 by FLAGS. ALIGN specifies any known alignment requirements for
8318 the section; 0 if the default should be used. */
8321 unicosmk_asm_named_section (name
, flags
)
8327 /* Close the previous section. */
8329 fputs ("\t.endp\n\n", asm_out_file
);
8331 /* Find out what kind of section we are opening. */
8333 if (flags
& SECTION_MAIN
)
8334 fputs ("\t.start\tmain\n", asm_out_file
);
8336 if (flags
& SECTION_CODE
)
8338 else if (flags
& SECTION_PUBLIC
)
8343 if (current_section_align
!= 0)
8344 fprintf (asm_out_file
, "\t.psect\t%s,%d,%s\n", name
,
8345 current_section_align
, kind
);
8347 fprintf (asm_out_file
, "\t.psect\t%s,%s\n", name
, kind
);
8351 unicosmk_insert_attributes (decl
, attr_ptr
)
8353 tree
*attr_ptr ATTRIBUTE_UNUSED
;
8356 && (TREE_PUBLIC (decl
) || TREE_CODE (decl
) == FUNCTION_DECL
))
8357 UNIQUE_SECTION (decl
, 0);
8360 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
8361 in code sections because .align fill unused space with zeroes. */
8364 unicosmk_output_align (file
, align
)
8368 if (inside_function
)
8369 fprintf (file
, "\tgcc@code@align\t%d\n", align
);
8371 fprintf (file
, "\t.align\t%d\n", align
);
8374 /* Add a case vector to the current function's list of deferred case
8375 vectors. Case vectors have to be put into a separate section because CAM
8376 does not allow data definitions in code sections. */
8379 unicosmk_defer_case_vector (lab
, vec
)
8383 struct machine_function
*machine
= cfun
->machine
;
8385 vec
= gen_rtx_EXPR_LIST (VOIDmode
, lab
, vec
);
8386 machine
->addr_list
= gen_rtx_EXPR_LIST (VOIDmode
, vec
,
8387 machine
->addr_list
);
8390 /* Output a case vector. */
8393 unicosmk_output_addr_vec (file
, vec
)
8397 rtx lab
= XEXP (vec
, 0);
8398 rtx body
= XEXP (vec
, 1);
8399 int vlen
= XVECLEN (body
, 0);
8402 ASM_OUTPUT_INTERNAL_LABEL (file
, "L", CODE_LABEL_NUMBER (lab
));
8404 for (idx
= 0; idx
< vlen
; idx
++)
8406 ASM_OUTPUT_ADDR_VEC_ELT
8407 (file
, CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 0, idx
), 0)));
8411 /* Output current function's deferred case vectors. */
8414 unicosmk_output_deferred_case_vectors (file
)
8417 struct machine_function
*machine
= cfun
->machine
;
8420 if (machine
->addr_list
== NULL_RTX
)
8424 for (t
= machine
->addr_list
; t
; t
= XEXP (t
, 1))
8425 unicosmk_output_addr_vec (file
, XEXP (t
, 0));
8428 /* Set up the dynamic subprogram information block (DSIB) and update the
8429 frame pointer register ($15) for subroutines which have a frame. If the
8430 subroutine doesn't have a frame, simply increment $15. */
8433 unicosmk_gen_dsib (imaskP
)
8434 unsigned long * imaskP
;
8436 if (alpha_is_stack_procedure
)
8438 const char *ssib_name
;
8441 /* Allocate 64 bytes for the DSIB. */
8443 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
8445 emit_insn (gen_blockage ());
8447 /* Save the return address. */
8449 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 56));
8450 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8451 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
8452 (*imaskP
) &= ~(1L << REG_RA
);
8454 /* Save the old frame pointer. */
8456 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 48));
8457 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8458 FRP (emit_move_insn (mem
, hard_frame_pointer_rtx
));
8459 (*imaskP
) &= ~(1L << HARD_FRAME_POINTER_REGNUM
);
8461 emit_insn (gen_blockage ());
8463 /* Store the SSIB pointer. */
8465 ssib_name
= ggc_strdup (unicosmk_ssib_name ());
8466 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 32));
8467 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8469 FRP (emit_move_insn (gen_rtx_REG (DImode
, 5),
8470 gen_rtx_SYMBOL_REF (Pmode
, ssib_name
)));
8471 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 5)));
8473 /* Save the CIW index. */
8475 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 24));
8476 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8477 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 25)));
8479 emit_insn (gen_blockage ());
8481 /* Set the new frame pointer. */
8483 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
8484 stack_pointer_rtx
, GEN_INT (64))));
8489 /* Increment the frame pointer register to indicate that we do not
8492 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
8493 hard_frame_pointer_rtx
, GEN_INT (1))));
8497 #define SSIB_PREFIX "__SSIB_"
8498 #define SSIB_PREFIX_LEN 7
8500 /* Generate the name of the SSIB section for the current function. */
8503 unicosmk_ssib_name ()
8505 /* This is ok since CAM won't be able to deal with names longer than that
8508 static char name
[256];
8514 x
= DECL_RTL (cfun
->decl
);
8515 if (GET_CODE (x
) != MEM
)
8518 if (GET_CODE (x
) != SYMBOL_REF
)
8520 fnname
= XSTR (x
, 0);
8521 STRIP_NAME_ENCODING (fnname
, fnname
);
8523 len
= strlen (fnname
);
8524 if (len
+ SSIB_PREFIX_LEN
> 255)
8525 len
= 255 - SSIB_PREFIX_LEN
;
8527 strcpy (name
, SSIB_PREFIX
);
8528 strncpy (name
+ SSIB_PREFIX_LEN
, fnname
, len
);
8529 name
[len
+ SSIB_PREFIX_LEN
] = 0;
8534 /* Output the static subroutine information block for the current
8538 unicosmk_output_ssib (file
, fnname
)
8546 struct machine_function
*machine
= cfun
->machine
;
8549 fprintf (file
, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix
,
8550 unicosmk_ssib_name ());
8552 /* Some required stuff and the function name length. */
8554 len
= strlen (fnname
);
8555 fprintf (file
, "\t.quad\t^X20008%2.2X28\n", len
);
8558 ??? We don't do that yet. */
8560 fputs ("\t.quad\t0\n", file
);
8562 /* Function address. */
8564 fputs ("\t.quad\t", file
);
8565 assemble_name (file
, fnname
);
8568 fputs ("\t.quad\t0\n", file
);
8569 fputs ("\t.quad\t0\n", file
);
8572 ??? We do it the same way Cray CC does it but this could be
8575 for( i
= 0; i
< len
; i
++ )
8576 fprintf (file
, "\t.byte\t%d\n", (int)(fnname
[i
]));
8577 if( (len
% 8) == 0 )
8578 fputs ("\t.quad\t0\n", file
);
8580 fprintf (file
, "\t.bits\t%d : 0\n", (8 - (len
% 8))*8);
8582 /* All call information words used in the function. */
8584 for (x
= machine
->first_ciw
; x
; x
= XEXP (x
, 1))
8587 fprintf (file
, "\t.quad\t");
8588 #if HOST_BITS_PER_WIDE_INT == 32
8589 fprintf (file
, HOST_WIDE_INT_PRINT_DOUBLE_HEX
,
8590 CONST_DOUBLE_HIGH (ciw
), CONST_DOUBLE_LOW (ciw
));
8592 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (ciw
));
8594 fprintf (file
, "\n");
8598 /* Add a call information word (CIW) to the list of the current function's
8599 CIWs and return its index.
8601 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
8604 unicosmk_add_call_info_word (x
)
8608 struct machine_function
*machine
= cfun
->machine
;
8610 node
= gen_rtx_EXPR_LIST (VOIDmode
, x
, NULL_RTX
);
8611 if (machine
->first_ciw
== NULL_RTX
)
8612 machine
->first_ciw
= node
;
8614 XEXP (machine
->last_ciw
, 1) = node
;
8616 machine
->last_ciw
= node
;
8617 ++machine
->ciw_count
;
8619 return GEN_INT (machine
->ciw_count
8620 + strlen (current_function_name
)/8 + 5);
8623 static char unicosmk_section_buf
[100];
8626 unicosmk_text_section ()
8628 static int count
= 0;
8629 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
8631 return unicosmk_section_buf
;
8635 unicosmk_data_section ()
8637 static int count
= 1;
8638 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
8640 return unicosmk_section_buf
;
8643 /* The Cray assembler doesn't accept extern declarations for symbols which
8644 are defined in the same file. We have to keep track of all global
8645 symbols which are referenced and/or defined in a source file and output
8646 extern declarations for those which are referenced but not defined at
8649 /* List of identifiers for which an extern declaration might have to be
8652 struct unicosmk_extern_list
8654 struct unicosmk_extern_list
*next
;
8658 static struct unicosmk_extern_list
*unicosmk_extern_head
= 0;
8660 /* Output extern declarations which are required for every asm file. */
8663 unicosmk_output_default_externs (file
)
8666 static const char *const externs
[] =
8667 { "__T3E_MISMATCH" };
8672 n
= ARRAY_SIZE (externs
);
8674 for (i
= 0; i
< n
; i
++)
8675 fprintf (file
, "\t.extern\t%s\n", externs
[i
]);
8678 /* Output extern declarations for global symbols which are have been
8679 referenced but not defined. */
8682 unicosmk_output_externs (file
)
8685 struct unicosmk_extern_list
*p
;
8686 const char *real_name
;
8690 len
= strlen (user_label_prefix
);
8691 for (p
= unicosmk_extern_head
; p
!= 0; p
= p
->next
)
8693 /* We have to strip the encoding and possibly remove user_label_prefix
8694 from the identifier in order to handle -fleading-underscore and
8695 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
8696 STRIP_NAME_ENCODING (real_name
, p
->name
);
8697 if (len
&& p
->name
[0] == '*'
8698 && !memcmp (real_name
, user_label_prefix
, len
))
8701 name_tree
= get_identifier (real_name
);
8702 if (! TREE_ASM_WRITTEN (name_tree
))
8704 TREE_ASM_WRITTEN (name_tree
) = 1;
8705 fputs ("\t.extern\t", file
);
8706 assemble_name (file
, p
->name
);
8712 /* Record an extern. */
8715 unicosmk_add_extern (name
)
8718 struct unicosmk_extern_list
*p
;
8720 p
= (struct unicosmk_extern_list
*)
8721 permalloc (sizeof (struct unicosmk_extern_list
));
8722 p
->next
= unicosmk_extern_head
;
8724 unicosmk_extern_head
= p
;
8727 /* The Cray assembler generates incorrect code if identifiers which
8728 conflict with register names are used as instruction operands. We have
8729 to replace such identifiers with DEX expressions. */
8731 /* Structure to collect identifiers which have been replaced by DEX
8734 struct unicosmk_dex
{
8735 struct unicosmk_dex
*next
;
8739 /* List of identifiers which have been replaced by DEX expressions. The DEX
8740 number is determined by the position in the list. */
8742 static struct unicosmk_dex
*unicosmk_dex_list
= NULL
;
8744 /* The number of elements in the DEX list. */
8746 static int unicosmk_dex_count
= 0;
8748 /* Check if NAME must be replaced by a DEX expression. */
8751 unicosmk_special_name (name
)
8760 if (name
[0] != 'r' && name
[0] != 'f' && name
[0] != 'R' && name
[0] != 'F')
8766 return (name
[2] == '\0' || (ISDIGIT (name
[2]) && name
[3] == '\0'));
8769 return (name
[2] == '\0'
8770 || ((name
[2] == '0' || name
[2] == '1') && name
[3] == '\0'));
8773 return (ISDIGIT (name
[1]) && name
[2] == '\0');
8777 /* Return the DEX number if X must be replaced by a DEX expression and 0
8781 unicosmk_need_dex (x
)
8784 struct unicosmk_dex
*dex
;
8788 if (GET_CODE (x
) != SYMBOL_REF
)
8792 if (! unicosmk_special_name (name
))
8795 i
= unicosmk_dex_count
;
8796 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
8798 if (! strcmp (name
, dex
->name
))
8803 dex
= (struct unicosmk_dex
*) permalloc (sizeof (struct unicosmk_dex
));
8805 dex
->next
= unicosmk_dex_list
;
8806 unicosmk_dex_list
= dex
;
8808 ++unicosmk_dex_count
;
8809 return unicosmk_dex_count
;
8812 /* Output the DEX definitions for this file. */
8815 unicosmk_output_dex (file
)
8818 struct unicosmk_dex
*dex
;
8821 if (unicosmk_dex_list
== NULL
)
8824 fprintf (file
, "\t.dexstart\n");
8826 i
= unicosmk_dex_count
;
8827 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
8829 fprintf (file
, "\tDEX (%d) = ", i
);
8830 assemble_name (file
, dex
->name
);
8835 fprintf (file
, "\t.dexend\n");
8841 unicosmk_output_deferred_case_vectors (file
)
8842 FILE *file ATTRIBUTE_UNUSED
;
8846 unicosmk_gen_dsib (imaskP
)
8847 unsigned long * imaskP ATTRIBUTE_UNUSED
;
8851 unicosmk_output_ssib (file
, fnname
)
8852 FILE * file ATTRIBUTE_UNUSED
;
8853 const char * fnname ATTRIBUTE_UNUSED
;
8857 unicosmk_add_call_info_word (x
)
8858 rtx x ATTRIBUTE_UNUSED
;
8864 unicosmk_need_dex (x
)
8865 rtx x ATTRIBUTE_UNUSED
;
8870 #endif /* TARGET_ABI_UNICOSMK */