/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "splay-tree.h"
#include "pointer-set.h"
#include "hash-table.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;
/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
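/* Illustrative note (not in the original source): COSTS_N_INSNS (1) is 4,
   so in the size table above int_mult_di costs 6 while int_shift costs 4.
   A single shift (4) therefore beats a multiply (6), but two shifts (8)
   do not; this is how the fractional +N encourages cheaper insns "only
   if there's just one of them".  */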
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *);
static bool vms_valid_pointer_mode (enum machine_mode);
#else
#define vms_patch_builtins()  gcc_unreachable()
#endif
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
    const unsigned short line_size; /* in bytes */
    const unsigned short l1_size;   /* in kb.  */
    const unsigned short l2_size;   /* in kb.  */
  } cpu_table[] = {
    /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
       EV4/EV45 had 128k to 16M 32-byte direct Bcache.  LCA45
       had 64k to 8M 8-byte direct Bcache.  */
    { "ev4",	PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "21064",	PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "ev45",	PROCESSOR_EV4, 0, 32, 16, 16*1024 },

    /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
       and 1M to 16M 64 byte L3 (not modeled).
       PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
       PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache.  */
    { "ev5",	PROCESSOR_EV5, 0, 32, 8, 96 },
    { "21164",	PROCESSOR_EV5, 0, 32, 8, 96 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "21164a",	PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },

    /* EV6 had 64k 64 byte L1, 1M to 16M Bcache.  */
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 }
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int line_size = 0, l1_size = 0, l2_size = 0;
  int i;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
  /* Default to full IEEE compliance mode for Go language.  */
  if (strcmp (lang_hooks.name, "GNU Go") == 0
      && !(target_flags_explicit & MASK_IEEE))
    target_flags |= MASK_IEEE;

  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_cpu_string, cpu_table[i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table[i].processor;
	    line_size = cpu_table[i].line_size;
	    l1_size = cpu_table[i].l1_size;
	    l2_size = cpu_table[i].l2_size;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table[i].flags;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }
  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_tune_string, cpu_table[i].name))
	  {
	    alpha_tune = cpu_table[i].processor;
	    line_size = cpu_table[i].line_size;
	    l1_size = cpu_table[i].l1_size;
	    l2_size = cpu_table[i].l2_size;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mtune switch", alpha_tune_string);
    }
  if (line_size)
    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);
  if (l1_size)
    maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);
  if (l2_size)
    maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);
  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize.  */
  if (optimize > 0)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
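/* Illustrative examples (not in the original source): 0xffffffff00000000
   and 0x0000ff00000000ff qualify as zap masks, since every byte is either
   0x00 or 0xff; 0x0000000000000123 does not, because its low byte 0x23 is
   neither.  Such masks are exactly those realizable by the zap/zapnot
   byte-masking instructions.  */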
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc (REGNO (tmp));
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* The scalar modes supported differs from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (TARGET_ABI_OSF
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && crtl->outgoing_args_size == 0
	  && crtl->args.pretend_args_size == 0);
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname[symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST \
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clear the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
	  ? STRICT_REG_OK_FOR_BASE_P (x)
	  : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && CONST_INT_P (ofs))
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
	   && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
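/* Illustrative examples (not in the original source): ($1), 16($1), and a
   bare +/- 32k constant address are all accepted above; for DImode,
   (and (plus $1 (const_int 5)) (const_int -8)) is the ldq_u-style
   unaligned form; with explicit relocations, (lo_sum $1 sym) is valid
   when SYM is a local, dtp32, or tp32 symbol.  */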
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;

	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_get_thread_pointerdi (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_get_thread_pointerdi (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}

      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (can_create_pseudo_p ())
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (Pmode, x, low);
  }
}
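/* Worked example (illustrative, not from the original source): for
   addend = 0x12345, low = ((0x2345 ^ 0x8000) - 0x8000) = 0x2345 and
   high = 0x10000, so the address is rebuilt as one ldah-style add of
   the high part followed by a plain 16-bit displacement of 0x2345.  */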
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  enum machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}
/* Return true if ADDR has an effect that depends on the machine mode it
   is used for.  On the Alpha this is true only for the unaligned modes.
   We can simplify the test since we know that the address must be valid.  */

static bool
alpha_mode_dependent_address_p (const_rtx addr,
				addr_space_t as ATTRIBUTE_UNUSED)
{
  return GET_CODE (addr) == AND;
}
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
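/* Worked example (illustrative, not from the original source): for an
   address $1 + 0x12348000, low = -0x8000 and high = 0x12350000, and
   high + low == val, so reload materializes $1 + 0x12350000 in a base
   register while the mem keeps the in-range displacement -0x8000.  */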
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
		 bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
			      (enum rtx_code) outer_code, opno, speed)
		    + rtx_cost (XEXP (x, 1),
				(enum rtx_code) outer_code, opno, speed)
		    + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
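/* Illustrative note (not in the original source): the const48_operand
   case above is what prices "r1*8 + r2" as a single scaled add
   (s8addq): the MULT inside the PLUS is not costed separately, only
   its operands plus COSTS_N_INSNS (1) for the combined insn.  */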
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
	      <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}
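/* Worked example (illustrative, not from the original source): for a
   QImode ref at displacement 5 from an aligned base (with MEM_ALIGN of
   the ref itself below 32), offset = 5 & 3 = 1, so *PALIGNED_MEM is the
   SImode word at displacement 4 and *PBITNUM is 8: the byte sits eight
   bits into that word.  */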
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (Pmode, base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_INT_P (x)
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
	return NO_REGS;
      if (rclass == ALL_REGS)
	return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
	{
	  if (in_p)
	    {
	      if (!aligned_memory_operand (x, mode))
		sri->icode = direct_optab_handler (reload_in_optab, mode);
	    }
	  else
	    sri->icode = direct_optab_handler (reload_out_optab, mode);
	  return NO_REGS;
	}
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
	  && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (!MEM_P (x))
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
    else
      gcc_unreachable ();
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);
/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (!can_create_pseudo_p ())
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new_const,
				       i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const,
					 i, no_output);
	    if (!temp)
	      {
		new_const = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const,
					 i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const,
					 i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }

#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const,
				   n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
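/* Worked examples (illustrative, not from the original source):
   c = 0x12345678 decomposes as low = 0x5678, high = 0x1234, so it is
   built with two insns, roughly "ldah $r,0x1234($31); lda $r,0x5678($r)".
   For the ZAPNOT path, c = 0x000000ff000000ff has its zero bytes raised
   to 0xff giving new_const = -1, which loads in one insn; the final AND
   with the zap mask c is then a single zapnot.  */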
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns and emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, we
     can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straight forward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
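/* Worked example (illustrative, not from the original source): for
   c1 = 0x1234567890abcdef, d1 = -0x3211 (0xcdef sign-extended as 16
   bits), and d4/d3 decompose the high half; the emitted sequence is
   roughly ldah + lda for the high word, a 32-bit shift, then ldah + lda
   for the low 32 bits -- at most five insns for any 64-bit constant.  */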
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the low 64 bits.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (CONST_INT_P (x))
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
   we are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
	x = XEXP (XEXP (x, 0), 0);
      else
	return true;

      if (GET_CODE (x) != SYMBOL_REF)
	return true;

      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      goto do_integer;

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      if (mode == QImode)
		seq = gen_reload_inqi_aligned (operands[0], operands[1]);
	      else
		seq = gen_reload_inhi_aligned (operands[0], operands[1]);
	      emit_insn (seq);
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (REG_P (subtarget))
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      if (mode == QImode)
		seq = gen_aligned_loadqi (subtarget, aligned_mem,
					  bitnum, scratch);
	      else
		seq = gen_aligned_loadhi (subtarget, aligned_mem,
					  bitnum, scratch);
	      emit_insn (seq);

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, subtarget, ua;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (REG_P (subtarget))
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  ua = get_unaligned_address (operands[1]);
	  if (mode == QImode)
	    seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
	  else
	    seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
	{
	  rtx aligned_mem, bitnum;
	  rtx temp1 = gen_reg_rtx (SImode);
	  rtx temp2 = gen_reg_rtx (SImode);

	  get_aligned_mem (operands[0], &aligned_mem, &bitnum);

	  emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
					temp1, temp2));
	}
      else
	{
	  rtx temp1 = gen_reg_rtx (DImode);
	  rtx temp2 = gen_reg_rtx (DImode);
	  rtx temp3 = gen_reg_rtx (DImode);
	  rtx ua = get_unaligned_address (operands[0]);

	  if (mode == QImode)
	    seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
	  else
	    seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

	  alpha_set_memflags (seq, operands[0]);
	  emit_insn (seq);
	}
      return true;
    }

  return false;
}
2260 that is not naturally aligned. Emit instructions to load it. */
2263 alpha_expand_movmisalign (enum machine_mode mode
, rtx
*operands
)
2265 /* Honor misaligned loads, for those we promised to do so. */
2266 if (MEM_P (operands
[1]))
2270 if (register_operand (operands
[0], mode
))
2273 tmp
= gen_reg_rtx (mode
);
2275 alpha_expand_unaligned_load (tmp
, operands
[1], 8, 0, 0);
2276 if (tmp
!= operands
[0])
2277 emit_move_insn (operands
[0], tmp
);
2279 else if (MEM_P (operands
[0]))
2281 if (!reg_or_0_operand (operands
[1], mode
))
2282 operands
[1] = force_reg (mode
, operands
[1]);
2283 alpha_expand_unaligned_store (operands
[0], operands
[1], 8, 0);
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else cause I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

	...
	fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

   LC0: .long 0,0x5f800000

	...
	cpyse	$f11,$f31,$f10
	cpyse	$f31,$f11,$f11
	...

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */

void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
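/* Added commentary, not original source: the negative path above is the
   classic halve/round-to-odd trick, in plain C roughly:

     double floatuns_sketch (unsigned long x)
     {
       if ((long) x >= 0)
	 return (double) (long) x;		 - high bit clear
       long half = (long) ((x >> 1) | (x & 1)); - fold low bit in
       return (double) half * 2.0;		 - one final rounding
     }

   Folding the discarded low bit into the halved value keeps the single
   rounding step exact where a plain shift would lose a bit.  */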
/* Generate the comparison for a conditional branch.  */

void
alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
{
  enum rtx_code cmp_code, branch_code;
  enum machine_mode branch_mode = VOIDmode;
  enum rtx_code code = GET_CODE (operands[0]);
  rtx op0 = operands[1], op1 = operands[2];
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (cmp_mode == DFmode)
	{
	  cmp_code = swap_condition (code);
	  branch_code = NE;
	  tem = op0, op0 = op1, op1 = tem;
	}
      else
	{
	  cmp_code = reverse_condition (code);
	  branch_code = EQ;
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DFmode)
    {
      if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
	{
	  /* When we are not as concerned about non-finite values, and we
	     are comparing against zero, we can branch directly.  */
	  if (op1 == CONST0_RTX (DFmode))
	    cmp_code = UNKNOWN, branch_code = code;
	  else if (op0 == CONST0_RTX (DFmode))
	    {
	      /* Undo the swap we probably did just above.  */
	      tem = op0, op0 = op1, op1 = tem;
	      branch_code = swap_condition (cmp_code);
	      cmp_code = UNKNOWN;
	    }
	}
      else
	{
	  /* ??? We mark the branch mode to be CCmode to prevent the
	     compare and branch from being combined, since the compare
	     insn follows IEEE rules that the branch does not.  */
	  branch_mode = CCmode;
	}
    }
  else
    {
      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
	{
	  /* Whee.  Compare and branch against 0 directly.  */
	  if (op1 == const0_rtx)
	    cmp_code = UNKNOWN, branch_code = code;

	  /* If the constants doesn't fit into an immediate, but can
	     be generated by lda/ldah, we adjust the argument and
	     compare against zero, so we can use beq/bne directly.  */
	  /* ??? Don't do this when comparing against symbols, otherwise
	     we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
	     be declared false out of hand (at least for non-weak).  */
	  else if (CONST_INT_P (op1)
		   && (code == EQ || code == NE)
		   && !(symbolic_operand (op0, VOIDmode)
			|| (REG_P (op0) && REG_POINTER (op0))))
	    {
	      rtx n_op1 = GEN_INT (-INTVAL (op1));

	      if (! satisfies_constraint_I (op1)
		  && (satisfies_constraint_K (n_op1)
		      || satisfies_constraint_L (n_op1)))
		cmp_code = PLUS, branch_code = code, op1 = n_op1;
	    }
	}

      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != UNKNOWN)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Emit the branch instruction.  */
  tem = gen_rtx_SET (VOIDmode, pc_rtx,
		     gen_rtx_IF_THEN_ELSE (VOIDmode,
					   gen_rtx_fmt_ee (branch_code,
							   branch_mode, tem,
							   CONST0_RTX (cmp_mode)),
					   gen_rtx_LABEL_REF (VOIDmode,
							      operands[3]),
					   pc_rtx));
  emit_jump_insn (tem);
}
/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return the final comparison, or NULL if we can't work.  */

bool
alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
{
  enum rtx_code cmp_code;
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[2], op1 = operands[3];
  rtx tmp;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  if (cmp_mode == DFmode && !TARGET_FIX)
    return false;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

  cmp_code = UNKNOWN;
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      if (cmp_mode == DFmode)
	cmp_code = code, code = NE;
      break;

    case NE:
      if (cmp_mode == DImode && op1 == const0_rtx)
	break;
      /* FALLTHRU */

    case ORDERED:
      cmp_code = reverse_condition (code);
      code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
	 special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
	break;
      code = swap_condition (code);
      if (cmp_mode == DFmode)
	cmp_code = code, code = NE;
      tmp = op0, op0 = op1, op1 = tmp;
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!register_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != UNKNOWN)
    {
      tmp = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (VOIDmode, tmp,
			      gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));

      op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
      op1 = const0_rtx;
    }

  /* Emit the setcc instruction.  */
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
			  gen_rtx_fmt_ee (code, DImode, op0, op1)));
  return true;
}
/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are nonzero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);
  enum machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  enum machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);

  if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
    {
      enum rtx_code cmp_code;

      if (! TARGET_FIX)
	return 0;

      /* If we have fp<->int register move instructions, do a cmov by
	 performing the comparison in fp registers, and move the
	 zero/nonzero value to integer registers, where we can then
	 use a normal cmov, or vice-versa.  */

      switch (code)
	{
	case EQ: case LE: case LT: case LEU: case LTU:
	case UNORDERED:
	  /* We have these compares.  */
	  cmp_code = code, code = NE;
	  break;

	case NE:
	case ORDERED:
	  /* These must be reversed.  */
	  cmp_code = reverse_condition (code), code = EQ;
	  break;

	case GE: case GT: case GEU: case GTU:
	  /* These normally need swapping, but for integer zero we have
	     special patterns that recognize swapped operands.  */
	  if (cmp_mode == DImode && op1 == const0_rtx)
	    cmp_code = code, code = NE;
	  else
	    {
	      cmp_code = swap_condition (code);
	      code = NE;
	      tem = op0, op0 = op1, op1 = tem;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      if (cmp_mode == DImode)
	{
	  if (!reg_or_0_operand (op0, DImode))
	    op0 = force_reg (DImode, op0);
	  if (!reg_or_8bit_operand (op1, DImode))
	    op1 = force_reg (DImode, op1);
	}

      tem = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (VOIDmode, tem,
			      gen_rtx_fmt_ee (cmp_code, cmp_mode,
					      op0, op1)));

      cmp_mode = cmp_mode == DImode ? DFmode : DImode;
      op0 = gen_lowpart (cmp_mode, tem);
      op1 = CONST0_RTX (cmp_mode);
      cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
      local_fast_math = 1;
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (cmp_mode == DImode || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

  if (!can_create_pseudo_p ())
    return NULL_RTX;

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares: */
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
	 special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
	break;
      code = swap_condition (code);
      tem = op0, op0 = op1, op1 = tem;
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (cmp_mode == DFmode && !local_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
}
/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */

int
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
			      rtx t_rtx, rtx f_rtx)
{
  HOST_WIDE_INT t, f, diff;
  enum machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);
  t = INTVAL (t_rtx);
  f = INTVAL (f_rtx);
  diff = t - f;

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))
    {
      code = reverse_condition (code);
      diff = t, t = f, f = diff;
      diff = t - f;
    }

  subtarget = target = dest;
  if (mode != DImode)
    {
      target = gen_lowpart (DImode, dest);
      if (can_create_pseudo_p ())
	subtarget = gen_reg_rtx (DImode);
      else
	subtarget = target;
    }
  /* Below, we must be careful to use copy_rtx on target and subtarget
     in intermediate insns, as they may be a subreg rtx, which may not
     be shared.  */

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
	 viable over a longer latency cmove.  On EV5, the E0 slot is a
	 scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
			    GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else if (f == 0 && t == -1)
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
    }
  else if (diff == 1 || diff == 4 || diff == 8)
    {
      rtx add_op;

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      if (diff == 1)
	emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
      else
	{
	  add_op = GEN_INT (f);
	  if (sext_add_operand (add_op, mode))
	    {
	      tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
				  GEN_INT (diff));
	      tmp = gen_rtx_PLUS (DImode, tmp, add_op);
	      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
	    }
	  else
	    return 0;
	}
    }
  else
    return 0;

  return 1;
}
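/* Added commentary, not original source: the diff == 1/4/8 case above turns
   e.g. (cond ? 5 : 1) into  t = (cond != 0); t * 4 + 1;  which maps onto a
   setcc followed by a single s4addq instruction on Alpha.  */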
/* Look up the function X_floating library function name for the
   given operation.  */

struct GTY(()) xfloating_op
{
  const enum rtx_code code;
  const char *const GTY((skip)) osf_func;
  const char *const GTY((skip)) vms_func;
  rtx libcall;
};

static GTY(()) struct xfloating_op xfloating_ops[] =
{
  { PLUS,		"_OtsAddX", "OTS$ADD_X", 0 },
  { MINUS,		"_OtsSubX", "OTS$SUB_X", 0 },
  { MULT,		"_OtsMulX", "OTS$MUL_X", 0 },
  { DIV,		"_OtsDivX", "OTS$DIV_X", 0 },
  { EQ,			"_OtsEqlX", "OTS$EQL_X", 0 },
  { NE,			"_OtsNeqX", "OTS$NEQ_X", 0 },
  { LT,			"_OtsLssX", "OTS$LSS_X", 0 },
  { LE,			"_OtsLeqX", "OTS$LEQ_X", 0 },
  { GT,			"_OtsGtrX", "OTS$GTR_X", 0 },
  { GE,			"_OtsGeqX", "OTS$GEQ_X", 0 },
  { FIX,		"_OtsCvtXQ", "OTS$CVTXQ", 0 },
  { FLOAT,		"_OtsCvtQX", "OTS$CVTQX", 0 },
  { UNSIGNED_FLOAT,	"_OtsCvtQUX", "OTS$CVTQUX", 0 },
  { FLOAT_EXTEND,	"_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  { FLOAT_TRUNCATE,	"_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
};

static GTY(()) struct xfloating_op vax_cvt_ops[] =
{
  { FLOAT_EXTEND,	"_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  { FLOAT_TRUNCATE,	"_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
};

static rtx
alpha_lookup_xfloating_lib_func (enum rtx_code code)
{
  struct xfloating_op *ops = xfloating_ops;
  long n = ARRAY_SIZE (xfloating_ops);
  long i;

  gcc_assert (TARGET_HAS_XFLOATING_LIBS);

  /* How irritating.  Nothing to key off for the main table.  */
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
    {
      ops = vax_cvt_ops;
      n = ARRAY_SIZE (vax_cvt_ops);
    }

  for (i = 0; i < n; ++i, ++ops)
    if (ops->code == code)
      {
	rtx func = ops->libcall;
	if (!func)
	  {
	    func = init_one_libfunc (TARGET_ABI_OPEN_VMS
				     ? ops->vms_func : ops->osf_func);
	    ops->libcall = func;
	  }
	return func;
      }

  gcc_unreachable ();
}
/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

static int
alpha_compute_xfloating_mode_arg (enum rtx_code code,
				  enum alpha_fp_rounding_mode round)
{
  int mode;

  switch (round)
    {
    case ALPHA_FPRM_NORM:
      mode = 2;
      break;
    case ALPHA_FPRM_MINF:
      mode = 1;
      break;
    case ALPHA_FPRM_CHOP:
      mode = 0;
      break;
    case ALPHA_FPRM_DYN:
      mode = 4;
      break;
    default:
      gcc_unreachable ();

    /* XXX For reference, round to +inf is mode = 3.  */
    }

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
    mode |= 0x10000;

  return mode;
}
/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.
*/

static void
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
			      int noperands, rtx equiv)
{
  rtx usage = NULL_RTX, tmp, reg;
  int regno = 16, i;

  start_sequence ();

  for (i = 0; i < noperands; ++i)
    {
      switch (GET_MODE (operands[i]))
	{
	case TFmode:
	  reg = gen_rtx_REG (TFmode, regno);
	  regno += 2;
	  break;

	case DFmode:
	  reg = gen_rtx_REG (DFmode, regno + 32);
	  regno += 1;
	  break;

	case VOIDmode:
	  gcc_assert (CONST_INT_P (operands[i]));
	  /* FALLTHRU */
	case DImode:
	  reg = gen_rtx_REG (DImode, regno);
	  regno += 1;
	  break;

	default:
	  gcc_unreachable ();
	}

      emit_move_insn (reg, operands[i]);
      use_reg (&usage, reg);
    }

  switch (GET_MODE (target))
    {
    case TFmode:
      reg = gen_rtx_REG (TFmode, 16);
      break;
    case DFmode:
      reg = gen_rtx_REG (DFmode, 32);
      break;
    case DImode:
      reg = gen_rtx_REG (DImode, 0);
      break;
    default:
      gcc_unreachable ();
    }

  tmp = gen_rtx_MEM (QImode, func);
  tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
					const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  RTL_CONST_CALL_P (tmp) = 1;

  tmp = get_insns ();
  end_sequence ();

  emit_libcall_block (tmp, target, reg, equiv);
}
/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

void
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
{
  rtx func;
  int mode;
  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
				gen_rtx_fmt_ee (code, TFmode, operands[1],
						operands[2]));
}
/* Emit an X_floating library function call for a comparison.  */

static rtx
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
{
  enum rtx_code cmp_code, res_code;
  rtx func, out, operands[2], note;

  /* X_floating library comparison functions return
	   -1  unordered
	    0  false
	    1  true
     Convert the compare against the raw return value.  */

  cmp_code = *pcode;
  switch (cmp_code)
    {
    case UNORDERED:
      cmp_code = EQ;
      res_code = LT;
      break;
    case ORDERED:
      cmp_code = EQ;
      res_code = GE;
      break;
    case NE:
      res_code = NE;
      break;
    case EQ:
    case LT:
    case GT:
    case LE:
    case GE:
      res_code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  *pcode = res_code;

  func = alpha_lookup_xfloating_lib_func (cmp_code);

  operands[0] = op0;
  operands[1] = op1;
  out = gen_reg_rtx (DImode);

  /* What's actually returned is -1,0,1, not a proper boolean value.  */
  note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
  note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
  alpha_emit_xfloating_libcall (func, out, operands, 2, note);

  return out;
}
/* Emit an X_floating library function call for a conversion.  */

void
alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
{
  int noperands = 1, mode;
  rtx out_operands[2];
  rtx func;
  enum rtx_code code = orig_code;

  if (code == UNSIGNED_FIX)
    code = FIX;

  func = alpha_lookup_xfloating_lib_func (code);

  out_operands[0] = operands[1];

  switch (code)
    {
    case FIX:
      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    case FLOAT_TRUNCATE:
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    default:
      break;
    }

  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
				gen_rtx_fmt_e (orig_code,
					       GET_MODE (operands[0]),
					       operands[1]));
}
/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
   DImode moves from OP[2,3] to OP[0,1].  If FIXUP_OVERLAP is true,
   guarantee that the sequence
     set (OP[0] OP[2])
     set (OP[1] OP[3])
   is valid.  Naturally, output operand ordering is little-endian.
   This is used by *movtf_internal and *movti_internal.  */

void
alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
			bool fixup_overlap)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      gcc_assert (operands[1] == CONST0_RTX (mode));
      operands[2] = operands[3] = const0_rtx;
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);
      break;

    default:
      gcc_unreachable ();
    }

  if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
    {
      rtx tmp;
      tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
      tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
    }
}
/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */

void
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
{
  rtx high_bit = operands[2];
  rtx scratch;
  int move;

  alpha_split_tmode_pair (operands, TFmode, false);

  /* Detect three flavors of operand overlap.  */
  move = 1;
  if (rtx_equal_p (operands[0], operands[2]))
    move = 0;
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (rtx_equal_p (operands[0], high_bit))
	move = 2;
      else
	move = -1;
    }

  if (move < 0)
    emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

  if (move > 0)
    {
      emit_move_insn (operands[0], operands[2]);
      if (move > 1)
	emit_move_insn (operands[1], scratch);
    }
}
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

   unsigned:			    signed:
   word:   ldq_u  r1,X(r11)	    ldq_u  r1,X(r11)
	   ldq_u  r2,X+1(r11)	    ldq_u  r2,X+1(r11)
	   lda	  r3,X(r11)	    lda	   r3,X+2(r11)
	   extwl  r1,r3,r1	    extql  r1,r3,r1
	   extwh  r2,r3,r2	    extqh  r2,r3,r2
	   or	  r1,r2,r1	    or	   r1,r2,r1
				    sra	   r1,48,r1

   long:   ldq_u  r1,X(r11)	    ldq_u  r1,X(r11)
	   ldq_u  r2,X+3(r11)	    ldq_u  r2,X+3(r11)
	   lda	  r3,X(r11)	    lda	   r3,X(r11)
	   extll  r1,r3,r1	    extll  r1,r3,r1
	   extlh  r2,r3,r2	    extlh  r2,r3,r2
	   or	  r1,r2,r1	    addl   r1,r2,r1

   quad:   ldq_u  r1,X(r11)
	   ldq_u  r2,X+7(r11)
	   lda	  r3,X(r11)
	   extql  r1,r3,r1
	   extqh  r2,r3,r2
	   or	  r1,r2,r1
*/

void
alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
			     HOST_WIDE_INT ofs, int sign)
{
  rtx meml, memh, addr, extl, exth, tmp, mema;
  enum machine_mode mode;

  if (TARGET_BWX && size == 2)
    {
      meml = adjust_address (mem, QImode, ofs);
      memh = adjust_address (mem, QImode, ofs+1);
      extl = gen_reg_rtx (DImode);
      exth = gen_reg_rtx (DImode);
      emit_insn (gen_zero_extendqidi2 (extl, meml));
      emit_insn (gen_zero_extendqidi2 (exth, memh));
      exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
				  NULL, 1, OPTAB_LIB_WIDEN);
      addr = expand_simple_binop (DImode, IOR, extl, exth,
				  NULL, 1, OPTAB_LIB_WIDEN);

      if (sign && GET_MODE (tgt) != HImode)
	{
	  addr = gen_lowpart (HImode, addr);
	  emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
	}
      else
	{
	  if (GET_MODE (tgt) != DImode)
	    addr = gen_lowpart (GET_MODE (tgt), addr);
	  emit_move_insn (tgt, addr);
	}
      return;
    }

  meml = gen_reg_rtx (DImode);
  memh = gen_reg_rtx (DImode);
  addr = gen_reg_rtx (DImode);
  extl = gen_reg_rtx (DImode);
  exth = gen_reg_rtx (DImode);

  mema = XEXP (mem, 0);
  if (GET_CODE (mema) == LO_SUM)
    mema = force_reg (Pmode, mema);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  tmp = change_address (mem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (DImode, mema, ofs),
				     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (meml, tmp);

  tmp = change_address (mem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (DImode, mema,
						    ofs + size - 1),
				     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (memh, tmp);

  if (sign && size == 2)
    {
      emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));

      emit_insn (gen_extql (extl, meml, addr));
      emit_insn (gen_extqh (exth, memh, addr));

      /* We must use tgt here for the target.  Alpha-vms port fails if we use
	 addr for the target, because addr is marked as a pointer and combine
	 knows that pointers are always sign-extended 32-bit values.  */
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
			   addr, 1, OPTAB_WIDEN);
    }
  else
    {
      emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
      emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_extwh (exth, memh, addr));
	  mode = HImode;
	  break;
	case 4:
	  emit_insn (gen_extlh (exth, memh, addr));
	  mode = SImode;
	  break;
	case 8:
	  emit_insn (gen_extqh (exth, memh, addr));
	  mode = DImode;
	  break;
	default:
	  gcc_unreachable ();
	}

      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
			   gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
			   sign, OPTAB_WIDEN);
    }

  if (addr != tgt)
    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
}
/* Similarly, use ins and msk instructions to perform unaligned stores.  */

void
alpha_expand_unaligned_store (rtx dst, rtx src,
			      HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;

  if (TARGET_BWX && size == 2)
    {
      if (src != const0_rtx)
	{
	  dstl = gen_lowpart (QImode, src);
	  dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
				      NULL, 1, OPTAB_LIB_WIDEN);
	  dsth = gen_lowpart (QImode, dsth);
	}
      else
	dstl = dsth = const0_rtx;

      meml = adjust_address (dst, QImode, ofs);
      memh = adjust_address (dst, QImode, ofs+1);

      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
      return;
    }

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  dsta = XEXP (dst, 0);
  if (GET_CODE (dsta) == LO_SUM)
    dsta = force_reg (Pmode, dsta);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  meml = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (DImode, dsta, ofs),
				      GEN_INT (-8)));
  set_mem_alias_set (meml, 0);

  memh = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (DImode, dsta,
						     ofs + size - 1),
				      GEN_INT (-8)));
  set_mem_alias_set (memh, 0);

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);

  addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
			    GEN_INT (size*8), addr));

      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
	  break;
	case 4:
	  emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
	  break;
	case 8:
	  emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

  switch ((int) size)
    {
    case 2:
      emit_insn (gen_mskwl (dstl, dstl, addr));
      break;
    case 4:
      emit_insn (gen_mskll (dstl, dstl, addr));
      break;
    case 8:
      emit_insn (gen_mskql (dstl, dstl, addr));
      break;
    default:
      gcc_unreachable ();
    }

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
    }

  /* Must store high before low for degenerate case of aligned.  */
  emit_move_insn (memh, dsth);
  emit_move_insn (meml, dstl);
}
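/* Added commentary, not original source: for a quadword the store path
   above emits the Handbook read-modify-write idiom, roughly:

	ldq_u	r1,X(r11)	; load both containing quadwords
	ldq_u	r2,X+7(r11)
	insql/insqh		; shift the new data into position
	mskql/mskqh		; clear the bytes being replaced
	or			; merge
	stq_u	r2,X+7(r11)	; store high first: if the access turns out
	stq_u	r1,X(r11)	; to be aligned, both hit the same word
				; and the low store must win
*/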
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning, that this speeds copying to/from aligned and unaligned
   buffers, and that it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS	8

/* Load an integral number of consecutive unaligned quadwords.  */

static void
alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
				   HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  rtx sreg, areg, tmp, smema;
  HOST_WIDE_INT i;

  smema = XEXP (smem, 0);
  if (GET_CODE (smema) == LO_SUM)
    smema = force_reg (Pmode, smema);

  /* Generate all the tmp registers we need.  */
  for (i = 0; i < words; ++i)
    {
      data_regs[i] = out_regs[i];
      ext_tmps[i] = gen_reg_rtx (DImode);
    }
  data_regs[words] = gen_reg_rtx (DImode);

  if (ofs != 0)
    smem = adjust_address (smem, GET_MODE (smem), ofs);

  /* Load up all of the source data.  */
  for (i = 0; i < words; ++i)
    {
      tmp = change_address (smem, DImode,
			    gen_rtx_AND (DImode,
					 plus_constant (DImode, smema, 8*i),
					 im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (data_regs[i], tmp);
    }

  tmp = change_address (smem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (DImode, smema,
						    8*words - 1),
				     im8));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (data_regs[words], tmp);

  /* Extract the half-word fragments.  Unfortunately DEC decided to make
     extxh with offset zero a noop instead of zeroing the register, so
     we must take care of that edge condition ourselves with cmov.  */

  sreg = copy_addr_to_reg (smema);
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
		       1, OPTAB_WIDEN);
  for (i = 0; i < words; ++i)
    {
      emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
      emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
      emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
			      gen_rtx_IF_THEN_ELSE (DImode,
						    gen_rtx_EQ (DImode, areg,
								const0_rtx),
						    const0_rtx, ext_tmps[i])));
    }

  /* Merge the half-words into whole words.  */
  for (i = 0; i < words; ++i)
    {
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
				  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
    }
}
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

static void
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
				    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2, dmema;
  HOST_WIDE_INT i;

  dmema = XEXP (dmem, 0);
  if (GET_CODE (dmema) == LO_SUM)
    dmema = force_reg (Pmode, dmema);

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx(DImode);
  st_tmp_1 = gen_reg_rtx(DImode);
  st_tmp_2 = gen_reg_rtx(DImode);

  if (ofs != 0)
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);

  st_addr_2 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (DImode, dmema,
							  words*8 - 1),
					   im8));
  set_mem_alias_set (st_addr_2, 0);

  st_addr_1 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode, dmema, im8));
  set_mem_alias_set (st_addr_1, 0);

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (dmema);
  if (data_regs != NULL)
    {
      for (i = words-1; i >= 0; --i)
	{
	  emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
	  emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
	}
      for (i = words-1; i > 0; --i)
	{
	  ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
					ins_tmps[i-1], ins_tmps[i-1], 1,
					OPTAB_WIDEN);
	}
    }

  /* Split and merge the ends with the destination data.  */
  emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
  emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));

  if (data_regs != NULL)
    {
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
			       st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
			       st_tmp_1, 1, OPTAB_WIDEN);
    }

  /* Store it all.  */
  emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)
    {
      rtx tmp = change_address (dmem, DImode,
				gen_rtx_AND (DImode,
					     plus_constant (DImode,
							    dmema, i*8),
					     im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
    }
  emit_move_insn (st_addr_1, st_tmp_1);
}
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

int
alpha_expand_block_move (rtx operands[])
{
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  rtx tmp;
  unsigned int i, words, ofs, nregs = 0;

  if (orig_bytes <= 0)
    return 1;
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (REG_P (tmp))
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > src_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    src_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    src_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    src_align = 16;
	}
    }

  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > dst_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    dst_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    dst_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    dst_align = 16;
	}
    }

  ofs = 0;
  if (src_align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, DImode, ofs + i * 8));

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (src_align >= 32 && bytes >= 4)
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (SImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, SImode, ofs + i * 4));

      nregs += words;
      bytes -= words * 4;
      ofs += words * 4;
    }

  if (bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words+1; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
					 words, ofs);

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (! TARGET_BWX && bytes >= 4)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (src_align >= 16)
	{
	  do {
	    data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	    emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	  alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
      bytes -= 1;
      ofs += 1;
    }

  gcc_assert (nregs <= ARRAY_SIZE (data_regs));

  /* Now save it back out again.  */

  i = 0, ofs = 0;

  /* Write out the data in whatever chunks reading the source allowed.  */
  if (dst_align >= 64)
    {
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  emit_move_insn (adjust_address (orig_dst, DImode, ofs),
			  data_regs[i]);
	  ofs += 8;
	  i++;
	}
    }

  if (dst_align >= 32)
    {
      /* If the source has remaining DImode regs, write them out in
	 two pieces.  */
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  gen_lowpart (SImode, data_regs[i]));
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
			  gen_lowpart (SImode, tmp));
	  ofs += 8;
	  i++;
	}

      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  data_regs[i]);
	  ofs += 4;
	  i++;
	}
    }

  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
    {
      /* Write out a remaining block of words using unaligned methods.  */

      for (words = 1; i + words < nregs; words++)
	if (GET_MODE (data_regs[i + words]) != DImode)
	  break;

      if (words == 1)
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
      else
	alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
					    words, ofs);

      i += words;
      ofs += words * 8;
    }

  /* Due to the above, this won't be aligned.  */
  /* ??? If we have more than one of these, consider constructing full
     words in registers and using alpha_expand_unaligned_store_words.  */
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
    {
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
      ofs += 4;
      i++;
    }

  if (dst_align >= 16)
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
	i++;
	ofs += 2;
      }
  else
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
	i++;
	ofs += 2;
      }

  /* The remainder must be byte copies.  */
  while (i < nregs)
    {
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
      i++;
      ofs += 1;
    }

  return 1;
}
int
alpha_expand_block_clear (rtx operands[])
{
  rtx bytes_rtx	= operands[1];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT alignofs = 0;
  rtx orig_dst = operands[0];
  rtx tmp;
  int i, words, ofs = 0;

  if (orig_bytes <= 0)
    return 1;
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for stricter alignment.  */
  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > align)
	{
	  if (a >= 64)
	    align = a, alignofs = 8 - c % 8;
	  else if (a >= 32)
	    align = a, alignofs = 4 - c % 4;
	  else if (a >= 16)
	    align = a, alignofs = 2 - c % 2;
	}
    }

  /* Handle an unaligned prefix first.  */

  if (alignofs > 0)
    {
#if HOST_BITS_PER_WIDE_INT >= 64
      /* Given that alignofs is bounded by align, the only time BWX could
	 generate three stores is for a 7 byte fill.  Prefer two individual
	 stores over a load/mask/store sequence.  */
      if ((!TARGET_BWX || alignofs == 7)
	  && align >= 32
	  && !(alignofs == 4 && bytes >= 4))
	{
	  enum machine_mode mode = (align >= 64 ? DImode : SImode);
	  int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
	  if (bytes < alignofs)
	    {
	      mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
	      ofs += bytes;
	      bytes = 0;
	    }
	  else
	    {
	      bytes -= alignofs;
	      ofs += alignofs;
	    }
	  alignofs = 0;

	  tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	}
#endif

      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
	{
	  emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
	  bytes -= 1;
	  ofs += 1;
	  alignofs -= 1;
	}
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
	{
	  emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
	  bytes -= 2;
	  ofs += 2;
	  alignofs -= 2;
	}
      if (alignofs == 4 && bytes >= 4)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
	  bytes -= 4;
	  ofs += 4;
	  alignofs = 0;
	}

      /* If we've not used the extra lead alignment information by now,
	 we won't be able to.  Downgrade align to match what's left over.  */
      if (alignofs > 0)
	{
	  alignofs = alignofs & -alignofs;
	  align = MIN (align, alignofs * BITS_PER_UNIT);
	}
    }

  /* Handle a block of contiguous long-words.  */

  if (align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
			const0_rtx);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* If the block is large and appropriately aligned, emit a single
     store followed by a sequence of stq_u insns.  */

  if (align >= 32 && bytes > 16)
    {
      rtx orig_dsta;

      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
      bytes -= 4;
      ofs += 4;

      orig_dsta = XEXP (orig_dst, 0);
      if (GET_CODE (orig_dsta) == LO_SUM)
	orig_dsta = force_reg (Pmode, orig_dsta);

      words = bytes / 8;
      for (i = 0; i < words; ++i)
	{
	  rtx mem
	    = change_address (orig_dst, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (DImode, orig_dsta,
							  ofs + i*8),
					   GEN_INT (-8)));
	  set_mem_alias_set (mem, 0);
	  emit_move_insn (mem, const0_rtx);
	}

      /* Depending on the alignment, the first stq_u may have overlapped
	 with the initial stl, which means that the last stq_u didn't
	 write as much as it would appear.  Leave those questionable bytes
	 unaccounted for.  */
      bytes -= words * 8 - 4;
      ofs += words * 8 - 4;
    }

  /* Handle a smaller block of aligned words.  */

  if ((align >= 64 && bytes == 4)
      || (align == 32 && bytes >= 4))
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
			const0_rtx);

      bytes -= words * 4;
      ofs += words * 4;
    }

  /* An unaligned block uses stq_u stores for as many as possible.  */

  if (bytes >= 8)
    {
      words = bytes / 8;

      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* Next clean up any trailing pieces.  */

#if HOST_BITS_PER_WIDE_INT >= 64
  /* Count the number of bits in BYTES for which aligned stores could
     be emitted.  */
  words = 0;
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)
    if (bytes & i)
      words += 1;

  /* If we have appropriate alignment (and it wouldn't take too many
     instructions otherwise), mask out the bytes we need.  */
  if (TARGET_BWX ? words > 2 : bytes > 0)
    {
      if (align >= 64)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, DImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

	  tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
      else if (align >= 32 && bytes < 4)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, SImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

	  tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
    }
#endif

  if (!TARGET_BWX && bytes >= 4)
    {
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (align >= 16)
	{
	  do {
	    emit_move_insn (adjust_address (orig_dst, HImode, ofs),
			    const0_rtx);
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
      bytes -= 1;
      ofs += 1;
    }

  return 1;
}
/* Returns a mask so that zap(x, value) == x & mask.  */

rtx
alpha_expand_zap_mask (HOST_WIDE_INT value)
{
  rtx result;
  int i;

  if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      HOST_WIDE_INT mask = 0;

      for (i = 7; i >= 0; --i)
	{
	  mask <<= 8;
	  if (!((value >> i) & 1))
	    mask |= 0xff;
	}

      result = gen_int_mode (mask, DImode);
    }
  else
    {
      HOST_WIDE_INT mask_lo = 0, mask_hi = 0;

      gcc_assert (HOST_BITS_PER_WIDE_INT == 32);

      for (i = 7; i >= 4; --i)
	{
	  mask_hi <<= 8;
	  if (!((value >> i) & 1))
	    mask_hi |= 0xff;
	}

      for (i = 3; i >= 0; --i)
	{
	  mask_lo <<= 8;
	  if (!((value >> i) & 1))
	    mask_lo |= 0xff;
	}

      result = immed_double_const (mask_lo, mask_hi, DImode);
    }

  return result;
}
void
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
				   enum machine_mode mode,
				   rtx op0, rtx op1, rtx op2)
{
  op0 = gen_lowpart (mode, op0);

  if (op1 == const0_rtx)
    op1 = CONST0_RTX (mode);
  else
    op1 = gen_lowpart (mode, op1);

  if (op2 == const0_rtx)
    op2 = CONST0_RTX (mode);
  else
    op2 = gen_lowpart (mode, op2);

  emit_insn ((*gen) (op0, op1, op2));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  add_int_reg_note (x, REG_BR_PROB, very_unlikely);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_load_locked_si;
  else if (mode == DImode)
    fn = gen_load_locked_di;
  emit_insn (fn (reg, mem));
}

/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_store_conditional_si;
  else if (mode == DImode)
    fn = gen_store_conditional_di;
  emit_insn (fn (res, mem, val));
}

/* Subroutines of the atomic operation splitters.  Emit barriers
   as needed for the memory MODEL.  */

static void
alpha_pre_atomic_barrier (enum memmodel model)
{
  if (need_atomic_barrier_p (model, true))
    emit_insn (gen_memory_barrier ());
}

static void
alpha_post_atomic_barrier (enum memmodel model)
{
  if (need_atomic_barrier_p (model, false))
    emit_insn (gen_memory_barrier ());
}
/* A subroutine of the atomic operation splitters.  Emit an insxl
   instruction in MODE.  */

static rtx
emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
{
  rtx ret = gen_reg_rtx (DImode);
  rtx (*fn) (rtx, rtx, rtx);

  switch (mode)
    {
    case QImode:
      fn = gen_insbl;
      break;
    case HImode:
      fn = gen_inswl;
      break;
    case SImode:
      fn = gen_insll;
      break;
    case DImode:
      fn = gen_insql;
      break;
    default:
      gcc_unreachable ();
    }

  op1 = force_reg (mode, op1);
  emit_insn (fn (ret, op1, op2));

  return ret;
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before of after the operation.  SCRATCH is
   a scratch register.  */

void
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
		       rtx after, rtx scratch, enum memmodel model)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));

  alpha_pre_atomic_barrier (model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (DImode, label);

  if (before == NULL)
    before = scratch;
  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = gen_rtx_AND (mode, before, val);
      emit_insn (gen_rtx_SET (VOIDmode, val, x));

      x = gen_rtx_NOT (mode, val);
    }
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);
  if (after)
    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
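/* Added commentary, not original source: for, say, atomic_fetch_add the
   splitter above produces a load-locked/store-conditional loop of roughly
   this shape:

	mb			; pre barrier, if the model needs one
     1:	ldq_l	t,0(mem)	; emit_load_locked
	addq	t,val,s
	stq_c	s,0(mem)	; emit_store_conditional, s = success flag
	beq	s,1b		; emit_unlikely_jump
	mb			; post barrier, if the model needs one
*/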
/* Expand a compare and swap operation.  */

void
alpha_split_compare_and_swap (rtx operands[])
{
  rtx cond, retval, mem, oldval, newval;
  bool is_weak;
  enum memmodel mod_s, mod_f;
  enum machine_mode mode;
  rtx label1, label2, x;

  cond = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (operands[5] != const0_rtx);
  mod_s = (enum memmodel) INTVAL (operands[6]);
  mod_f = (enum memmodel) INTVAL (operands[7]);
  mode = GET_MODE (mem);

  alpha_pre_atomic_barrier (mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = gen_lowpart (DImode, retval);
  if (oldval == const0_rtx)
    {
      emit_move_insn (cond, const0_rtx);
      x = gen_rtx_NE (DImode, x, const0_rtx);
    }
  else
    {
      x = gen_rtx_EQ (DImode, x, oldval);
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_move_insn (cond, newval);
  emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));

  if (!is_weak)
    {
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (mod_f != MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  alpha_post_atomic_barrier (mod_s);

  if (mod_f == MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));
}
void
alpha_expand_compare_and_swap_12 (rtx operands[])
{
  rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
  enum machine_mode mode;
  rtx addr, align, wdst;
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);

  cond = operands[0];
  dst = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = operands[5];
  mod_s = operands[6];
  mod_f = operands[7];
  mode = GET_MODE (mem);

  /* We forced the address into a register via mem_noofs_operand.  */
  addr = XEXP (mem, 0);
  gcc_assert (register_operand (addr, DImode));

  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  oldval = convert_modes (DImode, mode, oldval, 1);

  if (newval != const0_rtx)
    newval = emit_insxl (mode, newval, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    gen = gen_atomic_compare_and_swapqi_1;
  else
    gen = gen_atomic_compare_and_swaphi_1;
  emit_insn (gen (cond, wdst, mem, oldval, newval, align,
		  is_weak, mod_s, mod_f));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_compare_and_swap_12 (rtx operands[])
{
  rtx cond, dest, orig_mem, oldval, newval, align, scratch;
  enum machine_mode mode;
  bool is_weak;
  enum memmodel mod_s, mod_f;
  rtx label1, label2, mem, addr, width, mask, x;

  cond = operands[0];
  dest = operands[1];
  orig_mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  align = operands[5];
  is_weak = (operands[6] != const0_rtx);
  mod_s = (enum memmodel) INTVAL (operands[7]);
  mod_f = (enum memmodel) INTVAL (operands[8]);
  scratch = operands[9];
  mode = GET_MODE (orig_mem);
  addr = XEXP (orig_mem, 0);

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  alpha_pre_atomic_barrier (mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  emit_insn (gen_extxl (dest, scratch, width, addr));

  if (oldval == const0_rtx)
    {
      emit_move_insn (cond, const0_rtx);
      x = gen_rtx_NE (DImode, dest, const0_rtx);
    }
  else
    {
      x = gen_rtx_EQ (DImode, dest, oldval);
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_insn (gen_mskxl (cond, scratch, mask, addr));

  if (newval != const0_rtx)
    emit_insn (gen_iordi3 (cond, cond, newval));

  emit_store_conditional (DImode, cond, mem, cond);

  if (!is_weak)
    {
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (mod_f != MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  alpha_post_atomic_barrier (mod_s);

  if (mod_f == MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));
}
/* Expand an atomic exchange operation.  */

void
alpha_split_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, scratch;
  enum memmodel model;
  enum machine_mode mode;
  rtx label, x, cond;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  scratch = operands[4];
  mode = GET_MODE (mem);
  cond = gen_lowpart (DImode, scratch);

  alpha_pre_atomic_barrier (model);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);
  emit_move_insn (scratch, val);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
void
alpha_expand_atomic_exchange_12 (rtx operands[])
{
  rtx dst, mem, val, model;
  enum machine_mode mode;
  rtx addr, align, wdst;
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx);

  dst = operands[0];
  mem = operands[1];
  val = operands[2];
  model = operands[3];
  mode = GET_MODE (mem);

  /* We forced the address into a register via mem_noofs_operand.  */
  addr = XEXP (mem, 0);
  gcc_assert (register_operand (addr, DImode));

  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  /* Insert val into the correct byte location within the word.  */
  if (val != const0_rtx)
    val = emit_insxl (mode, val, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    gen = gen_atomic_exchangeqi_1;
  else
    gen = gen_atomic_exchangehi_1;
  emit_insn (gen (wdst, mem, val, align, model));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_atomic_exchange_12 (rtx operands[])
{
  rtx dest, orig_mem, addr, val, align, scratch;
  rtx label, mem, width, mask, x;
  enum machine_mode mode;
  enum memmodel model;

  dest = operands[0];
  orig_mem = operands[1];
  val = operands[2];
  align = operands[3];
  model = (enum memmodel) INTVAL (operands[4]);
  scratch = operands[5];
  mode = GET_MODE (orig_mem);
  addr = XEXP (orig_mem, 0);

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  alpha_pre_atomic_barrier (model);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  emit_insn (gen_extxl (dest, scratch, width, addr));
  emit_insn (gen_mskxl (scratch, scratch, mask, addr));
  if (val != const0_rtx)
    emit_insn (gen_iordi3 (scratch, scratch, val));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type dep_insn_type;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */
  if (REG_NOTE_KIND (link) != 0)
    return cost;

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  dep_insn_type = get_attr_type (dep_insn);

  /* Bring in the user-defined memory latency.  */
  if (dep_insn_type == TYPE_ILD
      || dep_insn_type == TYPE_FLD
      || dep_insn_type == TYPE_LDSYM)
    cost += alpha_memory_latency-1;

  /* Everything else handled in DFA bypasses now.  */

  return cost;
}
/* The number of instructions that can be issued per cycle.  */

static int
alpha_issue_rate (void)
{
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
}

/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results extra work for the scheduler.

   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
   alternative schedules.  For EV5, we can choose between E0/E1 and
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */

static int
alpha_multipass_dfa_lookahead (void)
{
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
}
/* Machine-specific function data.  */

struct GTY(()) alpha_links;

struct GTY(()) machine_function
{
  const char *some_ld_name;

  /* For flag_reorder_blocks_and_partition.  */
  rtx gp_save_rtx;

  /* For VMS condition handlers.  */
  bool uses_condition_handler;

  /* Linkage entries.  */
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
    links;
};

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
alpha_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
/* Support for frame based VMS condition handlers.  */

/* A VMS condition handler may be established for a function with a call to
   __builtin_establish_vms_condition_handler, and cancelled with a call to
   __builtin_revert_vms_condition_handler.

   The VMS Condition Handling Facility knows about the existence of a handler
   from the procedure descriptor .handler field.  As with the VMS native
   compilers, we store the user specified handler's address at a fixed
   location in the stack frame and point the procedure descriptor at a
   common wrapper which fetches the real handler's address and issues an
   indirect call.

   The indirection wrapper is "__gcc_shell_handler", provided by libgcc.

   We force the procedure kind to PT_STACK, and the fixed frame location is
   fp+8, just before the register save area.  We use the handler_data field
   in the procedure descriptor to state the fp offset at which the installed
   handler address can be found.  */

#define VMS_COND_HANDLER_FP_OFFSET 8

/* Expand code to store the currently installed user VMS condition handler
   into TARGET and install HANDLER as the new condition handler.  */

void
alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
{
  rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
                                            VMS_COND_HANDLER_FP_OFFSET);

  rtx handler_slot
    = gen_rtx_MEM (DImode, handler_slot_address);

  emit_move_insn (target, handler_slot);
  emit_move_insn (handler_slot, handler);

  /* Notify the start/prologue/epilogue emitters that the condition handler
     slot is needed.  In addition to reserving the slot space, this will
     force the procedure kind to PT_STACK, to ensure that the
     hard_frame_pointer_rtx use above is correct.  */
  cfun->machine->uses_condition_handler = true;
}

/* Expand code to store the current VMS condition handler into TARGET and
   install the null condition handler.  */

void
alpha_expand_builtin_revert_vms_condition_handler (rtx target)
{
  /* We implement this by establishing a null condition handler, with the
     tiny side effect of setting uses_condition_handler.  This is a little
     bit pessimistic if no actual builtin_establish call is ever issued,
     which is not a real problem and expected never to happen anyway.  */

  alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
}
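
/* Usage illustration (added; not from the original file): the two builtins
   above are meant to pair up in user code roughly as follows, with
   my_handler an invented example name:

        void *prev = __builtin_establish_vms_condition_handler (my_handler);
        ... code running with my_handler installed ...
        prev = __builtin_revert_vms_condition_handler ();

   Both expanders funnel through the same fp+8 slot, which is why "revert"
   is implemented as "establish a null handler".  */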
/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

rtx
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}

/* Return or create a memory slot containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

rtx
alpha_gp_save_rtx (void)
{
  rtx seq, m = cfun->machine->gp_save_rtx;

  if (m == NULL)
    {
      start_sequence ();

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      seq = get_insns ();
      end_sequence ();

      /* We used to simply emit the sequence after entry_of_function.
         However this breaks the CFG if the first instruction in the
         first block is not the NOTE_INSN_BASIC_BLOCK, for example a
         label.  Emit the sequence properly on the edge.  We are only
         invoked from dw2_build_landing_pads and finish_eh_generation
         will call commit_edge_insertions thanks to a kludge.  */
      insert_insn_on_edge (seq,
                           single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));

      cfun->machine->gp_save_rtx = m;
    }

  return m;
}

static void
alpha_instantiate_decls (void)
{
  if (cfun->machine->gp_save_rtx != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->gp_save_rtx);
}

static int
alpha_ra_ever_killed (void)
{
  rtx top;

  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return (int) df_regs_ever_live_p (REG_RA);

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
}
/* Return the trap mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_trap_mode_suffix (void)
{
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);

  switch (s)
    {
    case TRAP_SUFFIX_NONE:
      return NULL;

    case TRAP_SUFFIX_SU:
      if (alpha_fptm >= ALPHA_FPTM_SU)
        return "su";
      return NULL;

    case TRAP_SUFFIX_SUI:
      if (alpha_fptm >= ALPHA_FPTM_SUI)
        return "sui";
      return NULL;

    case TRAP_SUFFIX_V_SV:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "v";
        case ALPHA_FPTM_SU:
        case ALPHA_FPTM_SUI:
          return "sv";
        default:
          gcc_unreachable ();
        }

    case TRAP_SUFFIX_V_SV_SVI:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "v";
        case ALPHA_FPTM_SU:
          return "sv";
        case ALPHA_FPTM_SUI:
          return "svi";
        default:
          gcc_unreachable ();
        }
      break;

    case TRAP_SUFFIX_U_SU_SUI:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "u";
        case ALPHA_FPTM_SU:
          return "su";
        case ALPHA_FPTM_SUI:
          return "sui";
        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
/* Return the rounding mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_round_mode_suffix (void)
{
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);

  switch (s)
    {
    case ROUND_SUFFIX_NONE:
      return NULL;

    case ROUND_SUFFIX_NORMAL:
      switch (alpha_fprm)
        {
        case ALPHA_FPRM_NORM:
          return NULL;
        case ALPHA_FPRM_MINF:
          return "m";
        case ALPHA_FPRM_CHOP:
          return "c";
        case ALPHA_FPRM_DYN:
          return "d";
        default:
          gcc_unreachable ();
        }
      break;

    case ROUND_SUFFIX_C:
      return "c";

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some movdi_er_tlsldm pattern.  */

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
/* Print an operand.  Recognize special options, documented below.  */

void
print_operand (FILE *file, rtx x, int code)
{
  int i;

  switch (code)
    {
    case '~':
      /* Print the assembler name of the current function.  */
      assemble_name (file, alpha_fnname);
      break;

    case '&':
      assemble_name (file, get_some_local_dynamic_name ());
      break;

    case '/':
      {
        const char *trap = get_trap_mode_suffix ();
        const char *round = get_round_mode_suffix ();

        if (trap || round)
          fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
        break;
      }

    case ',':
      /* Generates single precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
      break;

    case '-':
      /* Generates double precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
      break;

    case '#':
      if (alpha_this_literal_sequence_number == 0)
        alpha_this_literal_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_literal_sequence_number);
      break;

    case '*':
      if (alpha_this_gpdisp_sequence_number == 0)
        alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
      break;

    case 'H':
      if (GET_CODE (x) == HIGH)
        output_addr_const (file, XEXP (x, 0));
      else
        output_operand_lossage ("invalid %%H value");
      break;

    case 'J':
      {
        const char *lituse;

        if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
          {
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsgd";
          }
        else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
          {
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsldm";
          }
        else if (CONST_INT_P (x))
          lituse = "lituse_jsr";
        else
          {
            output_operand_lossage ("invalid %%J value");
            break;
          }

        if (x != const0_rtx)
          fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'j':
      {
        const char *lituse;

#ifdef HAVE_AS_JSRDIRECT_RELOCS
        lituse = "lituse_jsrdirect";
#else
        lituse = "lituse_jsr";
#endif

        gcc_assert (INTVAL (x) != 0);
        fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;
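
      /* Worked example of the identity above (added for illustration; not
         in the original source): the expression sign-extends the low 16
         bits.  For a low half of 0x5678, bit 15 is clear and the value is
         printed unchanged; for 0xabcd, bit 15 is set and
         0xabcd - 2*0x8000 = -0x5433, which is what a signed 16-bit
         displacement (as in lda) actually encodes.  */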
    case 'm':
      /* Write mask for ZAP insn.  */
      if (GET_CODE (x) == CONST_DOUBLE)
        {
          HOST_WIDE_INT mask = 0;
          HOST_WIDE_INT value;

          value = CONST_DOUBLE_LOW (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          value = CONST_DOUBLE_HIGH (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << (i + sizeof (int)));

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
        }

      else if (CONST_INT_P (x))
        {
          HOST_WIDE_INT mask = 0, value = INTVAL (x);

          for (i = 0; i < 8; i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
        }
      else
        output_operand_lossage ("invalid %%m value");
      break;

    case 'M':
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
      if (!CONST_INT_P (x)
          || (INTVAL (x) != 8 && INTVAL (x) != 16
              && INTVAL (x) != 32 && INTVAL (x) != 64))
        output_operand_lossage ("invalid %%M value");

      fprintf (file, "%s",
               (INTVAL (x) == 8 ? "b"
                : INTVAL (x) == 16 ? "w"
                : INTVAL (x) == 32 ? "l"
                : "q"));
      break;

    case 'U':
      /* Similar, except do it from the mask.  */
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT value = INTVAL (x);

          if (value == 0xff)
            {
              fputc ('b', file);
              break;
            }
          if (value == 0xffff)
            {
              fputc ('w', file);
              break;
            }
          if (value == 0xffffffff)
            {
              fputc ('l', file);
              break;
            }
          if (value == -1)
            {
              fputc ('q', file);
              break;
            }
        }
      else if (HOST_BITS_PER_WIDE_INT == 32
               && GET_CODE (x) == CONST_DOUBLE
               && CONST_DOUBLE_LOW (x) == 0xffffffff
               && CONST_DOUBLE_HIGH (x) == 0)
        {
          fputc ('l', file);
          break;
        }
      output_operand_lossage ("invalid %%U value");
      break;

    case 's':
      /* Write the constant value divided by 8.  */
      if (!CONST_INT_P (x)
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
      break;

    case 'S':
      /* Same, except compute (64 - c) / 8.  */
      if (!CONST_INT_P (x)
          && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          && (INTVAL (x) & 7) != 8)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
      break;

    case 'C': case 'D': case 'c': case 'd':
      /* Write out comparison name.  */
      {
        enum rtx_code c = GET_CODE (x);

        if (!COMPARISON_P (x))
          output_operand_lossage ("invalid %%C value");

        else if (code == 'D')
          c = reverse_condition (c);
        else if (code == 'c')
          c = swap_condition (c);
        else if (code == 'd')
          c = swap_condition (reverse_condition (c));

        if (c == LEU)
          fprintf (file, "ule");
        else if (c == LTU)
          fprintf (file, "ult");
        else if (c == UNORDERED)
          fprintf (file, "un");
        else
          fprintf (file, "%s", GET_RTX_NAME (c));
      }
      break;

    case 'E':
      /* Write the divide or modulus operator.  */
      switch (GET_CODE (x))
        {
        case DIV:
          fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UDIV:
          fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case MOD:
          fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UMOD:
          fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        default:
          output_operand_lossage ("invalid %%E value");
          break;
        }
      break;

    case 'A':
      /* Write "_u" for unaligned access.  */
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        fprintf (file, "_u");
      break;

    case 0:
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
        output_address (XEXP (x, 0));
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
        {
          switch (XINT (XEXP (x, 0), 1))
            {
            case UNSPEC_DTPREL:
            case UNSPEC_TPREL:
              output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              break;
            }
        }
      else
        output_addr_const (file, x);
      break;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
void
print_operand_address (FILE *file, rtx addr)
{
  int basereg = 31;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (addr) == AND)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS
      && CONST_INT_P (XEXP (addr, 1)))
    {
      offset = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    {
      const char *reloc16, *reloclo;
      rtx op1 = XEXP (addr, 1);

      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
        {
          op1 = XEXP (op1, 0);
          switch (XINT (op1, 1))
            {
            case UNSPEC_DTPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
              break;
            case UNSPEC_TPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              return;
            }

          output_addr_const (file, XVECEXP (op1, 0, 0));
        }
      else
        {
          reloc16 = "gprel";
          reloclo = "gprellow";
          output_addr_const (file, op1);
        }

      if (offset)
        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);

      addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
        {
        case REG:
          basereg = REGNO (addr);
          break;

        case SUBREG:
          basereg = subreg_regno (addr);
          break;

        default:
          gcc_unreachable ();
        }

      fprintf (file, "($%d)\t\t!%s", basereg,
               (basereg == 29 ? reloc16 : reloclo));
      return;
    }

  switch (GET_CODE (addr))
    {
    case REG:
      basereg = REGNO (addr);
      break;

    case SUBREG:
      basereg = subreg_regno (addr);
      break;

    case CONST_INT:
      offset = INTVAL (addr);
      break;

#if TARGET_ABI_OPEN_VMS
    case SYMBOL_REF:
      fprintf (file, "%s", XSTR (addr, 0));
      return;

    case CONST:
      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
               XSTR (XEXP (XEXP (addr, 0), 0), 0),
               INTVAL (XEXP (XEXP (addr, 0), 1)));
      return;
#endif

    default:
      gcc_unreachable ();
    }

  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
}
/* Emit RTL insns to initialize the variable parts of a trampoline at
   M_TRAMP.  FNDECL is target function's decl.  CHAIN_VALUE is an rtx
   for the static chain value for the function.  */

static void
alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr, mem, word1, word2;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (Pmode, fnaddr);
  chain_value = convert_memory_address (Pmode, chain_value);
#endif

  if (TARGET_ABI_OPEN_VMS)
    {
      const char *fnname;
      char *trname;

      /* Construct the name of the trampoline entry point.  */
      fnname = XSTR (fnaddr, 0);
      trname = (char *) alloca (strlen (fnname) + 5);
      strcpy (trname, fnname);
      strcat (trname, "..tr");
      fnname = ggc_alloc_string (trname, strlen (trname) + 1);
      word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);

      /* Trampoline (or "bounded") procedure descriptor is constructed from
         the function's procedure descriptor with certain fields zeroed IAW
         the VMS calling standard.  This is stored in the first quadword.  */
      word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
      word1 = expand_and (DImode, word1,
                          GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
                          NULL);
    }
  else
    {
      /* These 4 instructions are:
            ldq $1,24($27)
            ldq $27,16($27)
            jmp $31,($27),0
            nop
         We don't bother setting the HINT field of the jump; the nop
         is merely there for padding.  */
      word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
      word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
    }

  /* Store the first two words, as computed above.  */
  mem = adjust_address (m_tramp, DImode, 0);
  emit_move_insn (mem, word1);
  mem = adjust_address (m_tramp, DImode, 8);
  emit_move_insn (mem, word2);

  /* Store function address and static chain value.  */
  mem = adjust_address (m_tramp, Pmode, 16);
  emit_move_insn (mem, fnaddr);
  mem = adjust_address (m_tramp, Pmode, 24);
  emit_move_insn (mem, chain_value);

  if (TARGET_ABI_OSF)
    {
      emit_insn (gen_imb ());
#ifdef HAVE_ENABLE_EXECUTE_STACK
      emit_library_call (init_one_libfunc ("__enable_execute_stack"),
                         LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
    }
}
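
/* For reference (added for illustration): the OSF trampoline initialized
   above occupies 32 bytes, laid out as

        offset  0:  ldq $1,24($27) ; ldq $27,16($27)    (word1, code)
        offset  8:  jmp $31,($27),0 ; nop               (word2, code)
        offset 16:  target function address             (fnaddr)
        offset 24:  static chain value                  (chain_value)

   At run time $27 holds the trampoline's own address, so the two loads
   fetch the static chain into $1 and the real entry point into $27
   before the jump.  */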
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */

static rtx
alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
                    const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int basereg;
  int num_args;

  /* Don't get confused and pass small structures in FP registers.  */
  if (type && AGGREGATE_TYPE_P (type))
    basereg = 16;
  else
    {
#ifdef ENABLE_CHECKING
      /* With alpha_split_complex_arg, we shouldn't see any raw complex
         values here.  */
      gcc_assert (!COMPLEX_MODE_P (mode));
#endif

      /* Set up defaults for FP operands passed in FP registers, and
         integral operands passed in integer registers.  */
      if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
        basereg = 32 + 16;
      else
        basereg = 16;
    }

  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
     the two platforms, so we can't avoid conditional compilation.  */
#if TARGET_ABI_OPEN_VMS
    {
      if (mode == VOIDmode)
        return alpha_arg_info_reg_val (*cum);

      num_args = cum->num_args;
      if (num_args >= 6
          || targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
    }
#elif TARGET_ABI_OSF
    {
      if (*cum >= 6)
        return NULL_RTX;
      num_args = *cum;

      /* VOID is passed as a special flag for "last argument".  */
      if (type == void_type_node)
        basereg = 16;
      else if (targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
    }
#else
#error Unhandled ABI
#endif

  return gen_rtx_REG (mode, num_args + basereg);
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
                            const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool onstack = targetm.calls.must_pass_in_stack (mode, type);
  int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);

#if TARGET_ABI_OSF
  *cum += increment;
#else
  if (!onstack && cum->num_args < 6)
    cum->atypes[cum->num_args] = alpha_arg_type (mode);
  cum->num_args += increment;
#endif
}
static int
alpha_arg_partial_bytes (cumulative_args_t cum_v,
                         enum machine_mode mode ATTRIBUTE_UNUSED,
                         tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  int words = 0;
  CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);

#if TARGET_ABI_OPEN_VMS
  if (cum->num_args < 6
      && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - cum->num_args;
#elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - *cum;
#else
#error Unhandled ABI
#endif

  return words * UNITS_PER_WORD;
}
/* Return true if TYPE must be returned in memory, instead of in registers.  */

static bool
alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = VOIDmode;
  int size;

  if (type)
    {
      mode = TYPE_MODE (type);

      /* All aggregates are returned in memory, except on OpenVMS where
         records that fit 64 bits should be returned by immediate value
         as required by section 3.8.7.1 of the OpenVMS Calling Standard.  */
      if (TARGET_ABI_OPEN_VMS
          && TREE_CODE (type) != ARRAY_TYPE
          && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
        return false;

      if (AGGREGATE_TYPE_P (type))
        return true;
    }

  size = GET_MODE_SIZE (mode);
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */
      return true;

    case MODE_COMPLEX_FLOAT:
      /* We judge complex floats on the size of their element,
         not the size of the whole type.  */
      size = GET_MODE_UNIT_SIZE (mode);
      break;

    case MODE_INT:
    case MODE_FLOAT:
    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      break;

    default:
      /* ??? We get called on all sorts of random stuff from
         aggregate_value_p.  We must return something, but it's not
         clear what's safe to return.  Pretend it's a struct I
         wouldn't actually return.  */
      return true;
    }

  /* Otherwise types must fit in one register.  */
  return size > UNITS_PER_WORD;
}
/* Return true if TYPE should be passed by invisible reference.  */

static bool
alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
                         enum machine_mode mode,
                         const_tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  return mode == TFmode || mode == TCmode;
}
/* Define how to find the value returned by a function.  VALTYPE is the
   data type of the value (as a tree).  If the precise function being
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
   MODE is set instead of VALTYPE for libcalls.

   On Alpha the value is found in $0 for integer functions and
   $f0 for floating-point functions.  */

rtx
function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
                enum machine_mode mode)
{
  unsigned int regnum, dummy ATTRIBUTE_UNUSED;
  enum mode_class mclass;

  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));

  if (valtype)
    mode = TYPE_MODE (valtype);

  mclass = GET_MODE_CLASS (mode);
  switch (mclass)
    {
    case MODE_INT:
      /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
         where we have them returning both SImode and DImode.  */
      if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
        PROMOTE_MODE (mode, dummy, valtype);
      /* FALLTHRU */

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      regnum = 0;
      break;

    case MODE_FLOAT:
      regnum = 32;
      break;

    case MODE_COMPLEX_FLOAT:
      {
        enum machine_mode cmode = GET_MODE_INNER (mode);

        return gen_rtx_PARALLEL
          (VOIDmode,
           gen_rtvec (2,
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
                                         const0_rtx),
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
                                         GEN_INT (GET_MODE_SIZE (cmode)))));
      }

    case MODE_RANDOM:
      /* We should only reach here for BLKmode on VMS.  */
      gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
      regnum = 0;
      break;

    default:
      gcc_unreachable ();
    }

  return gen_rtx_REG (mode, regnum);
}
/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

static bool
alpha_split_complex_arg (const_tree type)
{
  return TYPE_MODE (type) != TCmode;
}

static tree
alpha_build_builtin_va_list (void)
{
  tree base, ofs, space, record, type_decl;

  if (TARGET_ABI_OPEN_VMS)
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
                          TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++?  SET_IS_AGGR_TYPE (record, 1); */

  /* Dummy field to prevent alignment warnings.  */
  space = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, NULL_TREE, integer_type_node);
  DECL_FIELD_CONTEXT (space) = record;
  DECL_ARTIFICIAL (space) = 1;
  DECL_IGNORED_P (space) = 1;

  ofs = build_decl (BUILTINS_LOCATION,
                    FIELD_DECL, get_identifier ("__offset"),
                    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;
  DECL_CHAIN (ofs) = space;
  /* ??? This is a hack, __offset is marked volatile to prevent
     DCE that confuses stdarg optimization and results in
     gcc.c-torture/execute/stdarg-1.c failure.  See PR 41089.  */
  TREE_THIS_VOLATILE (ofs) = 1;

  base = build_decl (BUILTINS_LOCATION,
                     FIELD_DECL, get_identifier ("__base"),
                     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  DECL_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  va_list_gpr_counter_field = ofs;

  return record;
}
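
/* Illustration (added; not part of the original file): on OSF the record
   built above corresponds roughly to the C declaration

        struct __va_list_tag
        {
          void *__base;    offset 0: start of the register save area
          int __offset;    offset 8: bytes of arguments consumed so far
          int (unnamed);   the dummy padding field created above
        };

   while on VMS va_list is just a plain pointer.  */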
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
   and constant additions.  */

static gimple
va_list_skip_additions (tree lhs)
{
  gimple stmt;

  for (;;)
    {
      enum tree_code code;

      stmt = SSA_NAME_DEF_STMT (lhs);

      if (gimple_code (stmt) == GIMPLE_PHI)
        return stmt;

      if (!is_gimple_assign (stmt)
          || gimple_assign_lhs (stmt) != lhs)
        return NULL;

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
        return stmt;
      code = gimple_assign_rhs_code (stmt);
      if (!CONVERT_EXPR_CODE_P (code)
          && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
              || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
              || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
        return stmt;

      lhs = gimple_assign_rhs1 (stmt);
    }
}
/* Check if LHS = RHS statement is
   LHS = *(ap.__base + ap.__offset + cst)
   or
   LHS = *(ap.__base
           + ((ap.__offset + cst <= 47)
              ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
   If the former, indicate that GPR registers are needed,
   if the latter, indicate that FPR registers are needed.

   Also look for LHS = (*ptr).field, where ptr is one of the forms
   listed above.

   On alpha, cfun->va_list_gpr_size is used as size of the needed
   regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
   registers are needed and bit 1 set if FPR registers are needed.
   Return true if va_list references should not be scanned for the
   current statement.  */

static bool
alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
{
  tree base, offset, rhs;
  int offset_arg = 1;
  gimple base_stmt;

  if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
      != GIMPLE_SINGLE_RHS)
    return false;

  rhs = gimple_assign_rhs1 (stmt);
  while (handled_component_p (rhs))
    rhs = TREE_OPERAND (rhs, 0);
  if (TREE_CODE (rhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
    return false;

  stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
  if (stmt == NULL
      || !is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return false;

  base = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (base) == SSA_NAME)
    {
      base_stmt = va_list_skip_additions (base);
      if (base_stmt
          && is_gimple_assign (base_stmt)
          && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
        base = gimple_assign_rhs1 (base_stmt);
    }

  if (TREE_CODE (base) != COMPONENT_REF
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
    {
      base = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (base) == SSA_NAME)
        {
          base_stmt = va_list_skip_additions (base);
          if (base_stmt
              && is_gimple_assign (base_stmt)
              && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
            base = gimple_assign_rhs1 (base_stmt);
        }

      if (TREE_CODE (base) != COMPONENT_REF
          || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
        return false;

      offset_arg = 0;
    }

  base = get_base_address (base);
  if (TREE_CODE (base) != VAR_DECL
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
    return false;

  offset = gimple_op (stmt, 1 + offset_arg);
  if (TREE_CODE (offset) == SSA_NAME)
    {
      gimple offset_stmt = va_list_skip_additions (offset);

      if (offset_stmt
          && gimple_code (offset_stmt) == GIMPLE_PHI)
        {
          HOST_WIDE_INT sub;
          gimple arg1_stmt, arg2_stmt;
          tree arg1, arg2;
          enum tree_code code1, code2;

          if (gimple_phi_num_args (offset_stmt) != 2)
            goto escapes;

          arg1_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
          arg2_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
          if (arg1_stmt == NULL
              || !is_gimple_assign (arg1_stmt)
              || arg2_stmt == NULL
              || !is_gimple_assign (arg2_stmt))
            goto escapes;

          code1 = gimple_assign_rhs_code (arg1_stmt);
          code2 = gimple_assign_rhs_code (arg2_stmt);
          if (code1 == COMPONENT_REF
              && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
            /* Do nothing.  */;
          else if (code2 == COMPONENT_REF
                   && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
            {
              gimple tem = arg1_stmt;
              code2 = code1;
              arg1_stmt = arg2_stmt;
              arg2_stmt = tem;
            }
          else
            goto escapes;

          if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
            goto escapes;

          sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
          if (code2 == MINUS_EXPR)
            sub = -sub;
          if (sub < -48 || sub > -32)
            goto escapes;

          arg1 = gimple_assign_rhs1 (arg1_stmt);
          arg2 = gimple_assign_rhs1 (arg2_stmt);
          if (TREE_CODE (arg2) == SSA_NAME)
            {
              arg2_stmt = va_list_skip_additions (arg2);
              if (arg2_stmt == NULL
                  || !is_gimple_assign (arg2_stmt)
                  || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
                goto escapes;
              arg2 = gimple_assign_rhs1 (arg2_stmt);
            }
          if (arg1 != arg2)
            goto escapes;

          if (TREE_CODE (arg1) != COMPONENT_REF
              || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
              || get_base_address (arg1) != base)
            goto escapes;

          /* Need floating point regs.  */
          cfun->va_list_fpr_size |= 2;
          return false;
        }
      if (offset_stmt
          && is_gimple_assign (offset_stmt)
          && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
        offset = gimple_assign_rhs1 (offset_stmt);
    }
  if (TREE_CODE (offset) != COMPONENT_REF
      || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
      || get_base_address (offset) != base)
    goto escapes;
  else
    /* Need general regs.  */
    cfun->va_list_fpr_size |= 1;
  return false;

 escapes:
  si->va_list_escapes = true;
  return false;
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.  */

static void
alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
                              tree type, int *pretend_size, int no_rtl)
{
  CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);

  /* Skip the current argument.  */
  targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
                                      true);

#if TARGET_ABI_OPEN_VMS
  /* For VMS, we allocate space for all 6 arg registers plus a count.

     However, if NO registers need to be saved, don't allocate any space.
     This is not only because we won't need the space, but because AP
     includes the current_pretend_args_size and we don't want to mess up
     any ap-relative addresses already made.  */
  if (cum.num_args < 6)
    {
      if (!no_rtl)
        {
          emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
          emit_insn (gen_arg_home ());
        }
      *pretend_size = 7 * UNITS_PER_WORD;
    }
#else
  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
     only push those that are remaining.  However, if NO registers need to
     be saved, don't allocate any space.  This is not only because we won't
     need the space, but because AP includes the current_pretend_args_size
     and we don't want to mess up any ap-relative addresses already made.

     If we are not to use the floating-point registers, save the integer
     registers where we would put the floating-point registers.  This is
     not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this rare
     case.  */
  if (cum >= 6)
    return;

  if (!no_rtl)
    {
      int count;
      alias_set_type set = get_varargs_alias_set ();
      rtx tmp;

      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
      if (count > 6 - cum)
        count = 6 - cum;

      /* Detect whether integer registers or floating-point registers
         are needed by the detected va_arg statements.  See above for
         how these values are computed.  Note that the "escape" value
         is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
         these bits set.  */
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);

      if (cfun->va_list_fpr_size & 1)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (Pmode, virtual_incoming_args_rtx,
                                            (cum + 6) * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum, tmp, count);
        }

      if (cfun->va_list_fpr_size & 2)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (Pmode, virtual_incoming_args_rtx,
                                            cum * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
        }
    }
  *pretend_size = 12 * UNITS_PER_WORD;
#endif
}
static void
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT offset;
  tree t, offset_field, base_field;

  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
    return;

  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
     up by 48, storing fp arg registers in the first 48 bytes, and the
     integer arg registers in the next 48 bytes.  This is only done,
     however, if any integer registers need to be stored.

     If no integer registers need be stored, then we must subtract 48
     in order to account for the integer arg registers which are counted
     in argsize above, but which are not actually stored on the stack.
     Must further be careful here about structures straddling the last
     integer argument register; that futzes with pretend_args_size,
     which changes the meaning of AP.  */

  if (NUM_ARGS < 6)
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  else
    offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;

  if (TARGET_ABI_OPEN_VMS)
    {
      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      base_field = TYPE_FIELDS (TREE_TYPE (valist));
      offset_field = DECL_CHAIN (base_field);

      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                           valist, base_field, NULL_TREE);
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                             valist, offset_field, NULL_TREE);

      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = fold_build_pointer_plus_hwi (t, offset);
      t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
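
/* Worked example for the offset computation above (added for
   illustration): for an OSF function declared f (int a, ...), NUM_ARGS is
   1, so offset = 6*8 = 48 and __base points 48 bytes past the incoming
   AP, skipping the FP register save block; __offset starts at
   NUM_ARGS * 8 = 8, just past the single named argument.  With 6 or more
   named args nothing was saved, so the 48 bytes are subtracted back out,
   adjusted by any pretend_args_size.  */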
static tree
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
                         gimple_seq *pre_p)
{
  tree type_size, ptr_type, addend, t, addr;
  gimple_seq internal_post;

  /* If the type could not be passed in registers, skip the block
     reserved for the registers.  */
  if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
    {
      t = build_int_cst (TREE_TYPE (offset), 6*8);
      gimplify_assign (offset,
                       build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
                       pre_p);
    }

  addend = offset;
  ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      tree real_part, imag_part, real_temp;

      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      /* Copy the value into a new temporary, lest the formal temporary
         be reused out from under us.  */
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      tree fpaddend, cond, fourtyeight;

      fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
                              addend, fourtyeight);
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
                            fpaddend, addend);
    }

  /* Build the final address and force that value into a temporary.  */
  addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
  internal_post = NULL;
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  gimple_seq_add_seq (pre_p, internal_post);

  /* Update the offset field.  */
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
    t = size_zero_node;
  else
    {
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));
    }

  t = fold_convert (TREE_TYPE (offset), t);
  gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
                   pre_p);

  return build_va_arg_indirect_ref (addr);
}
static tree
alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                       gimple_seq *post_p)
{
  tree offset_field, base_field, offset, base, t, r;
  bool indirect;

  if (TARGET_ABI_OPEN_VMS)
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  base_field = TYPE_FIELDS (va_list_type_node);
  offset_field = DECL_CHAIN (base_field);
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                       valist, base_field, NULL_TREE);
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                         valist, offset_field, NULL_TREE);

  /* Pull the fields of the structure out into temporaries.  Since we never
     modify the base field, we can use a formal temporary.  Sign-extend the
     offset field so that it's the proper width for pointer arithmetic.  */
  base = get_formal_tmp_var (base_field, pre_p);

  t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
  offset = get_initialized_tmp_var (t, pre_p, NULL);

  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type_for_mode (type, ptr_mode, true);

  /* Find the value.  Note that this will be a stable indirection, or
     a composite of stable indirections in the case of complex.  */
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);

  /* Stuff the offset temporary back into its field.  */
  gimplify_assign (unshare_expr (offset_field),
                   fold_convert (TREE_TYPE (offset_field), offset), pre_p);

  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
}
/* Builtins.  */

enum alpha_builtin
{
  ALPHA_BUILTIN_CMPBGE,
  ALPHA_BUILTIN_EXTBL,
  ALPHA_BUILTIN_EXTWL,
  ALPHA_BUILTIN_EXTLL,
  ALPHA_BUILTIN_EXTQL,
  ALPHA_BUILTIN_EXTWH,
  ALPHA_BUILTIN_EXTLH,
  ALPHA_BUILTIN_EXTQH,
  ALPHA_BUILTIN_INSBL,
  ALPHA_BUILTIN_INSWL,
  ALPHA_BUILTIN_INSLL,
  ALPHA_BUILTIN_INSQL,
  ALPHA_BUILTIN_INSWH,
  ALPHA_BUILTIN_INSLH,
  ALPHA_BUILTIN_INSQH,
  ALPHA_BUILTIN_MSKBL,
  ALPHA_BUILTIN_MSKWL,
  ALPHA_BUILTIN_MSKLL,
  ALPHA_BUILTIN_MSKQL,
  ALPHA_BUILTIN_MSKWH,
  ALPHA_BUILTIN_MSKLH,
  ALPHA_BUILTIN_MSKQH,
  ALPHA_BUILTIN_UMULH,
  ALPHA_BUILTIN_ZAP,
  ALPHA_BUILTIN_ZAPNOT,
  ALPHA_BUILTIN_AMASK,
  ALPHA_BUILTIN_IMPLVER,
  ALPHA_BUILTIN_RPCC,
  ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
  ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,

  /* TARGET_MAX */
  ALPHA_BUILTIN_MINUB8,
  ALPHA_BUILTIN_MINSB8,
  ALPHA_BUILTIN_MINUW4,
  ALPHA_BUILTIN_MINSW4,
  ALPHA_BUILTIN_MAXUB8,
  ALPHA_BUILTIN_MAXSB8,
  ALPHA_BUILTIN_MAXUW4,
  ALPHA_BUILTIN_MAXSW4,
  ALPHA_BUILTIN_PERR,
  ALPHA_BUILTIN_PKLB,
  ALPHA_BUILTIN_PKWB,
  ALPHA_BUILTIN_UNPKBL,
  ALPHA_BUILTIN_UNPKBW,

  /* TARGET_CIX */
  ALPHA_BUILTIN_CTTZ,
  ALPHA_BUILTIN_CTLZ,
  ALPHA_BUILTIN_CTPOP,

  ALPHA_BUILTIN_max
};
static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
  CODE_FOR_builtin_cmpbge,
  CODE_FOR_extbl,
  CODE_FOR_extwl,
  CODE_FOR_extll,
  CODE_FOR_extql,
  CODE_FOR_extwh,
  CODE_FOR_extlh,
  CODE_FOR_extqh,
  CODE_FOR_builtin_insbl,
  CODE_FOR_builtin_inswl,
  CODE_FOR_builtin_insll,
  CODE_FOR_insql,
  CODE_FOR_inswh,
  CODE_FOR_inslh,
  CODE_FOR_insqh,
  CODE_FOR_mskbl,
  CODE_FOR_mskwl,
  CODE_FOR_mskll,
  CODE_FOR_mskql,
  CODE_FOR_mskwh,
  CODE_FOR_msklh,
  CODE_FOR_mskqh,
  CODE_FOR_umuldi3_highpart,
  CODE_FOR_builtin_zap,
  CODE_FOR_builtin_zapnot,
  CODE_FOR_builtin_amask,
  CODE_FOR_builtin_implver,
  CODE_FOR_builtin_rpcc,
  CODE_FOR_builtin_establish_vms_condition_handler,
  CODE_FOR_builtin_revert_vms_condition_handler,

  /* TARGET_MAX */
  CODE_FOR_builtin_minub8,
  CODE_FOR_builtin_minsb8,
  CODE_FOR_builtin_minuw4,
  CODE_FOR_builtin_minsw4,
  CODE_FOR_builtin_maxub8,
  CODE_FOR_builtin_maxsb8,
  CODE_FOR_builtin_maxuw4,
  CODE_FOR_builtin_maxsw4,
  CODE_FOR_builtin_perr,
  CODE_FOR_builtin_pklb,
  CODE_FOR_builtin_pkwb,
  CODE_FOR_builtin_unpkbl,
  CODE_FOR_builtin_unpkbw,

  /* TARGET_CIX */
  CODE_FOR_ctzdi2,
  CODE_FOR_clzdi2,
  CODE_FOR_popcountdi2
};
struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};
static struct alpha_builtin_def const zero_arg_builtins[] = {
  { "__builtin_alpha_implver",  ALPHA_BUILTIN_IMPLVER,  0, true },
  { "__builtin_alpha_rpcc",     ALPHA_BUILTIN_RPCC,     0, false }
};

static struct alpha_builtin_def const one_arg_builtins[] = {
  { "__builtin_alpha_amask",    ALPHA_BUILTIN_AMASK,    0, true },
  { "__builtin_alpha_pklb",     ALPHA_BUILTIN_PKLB,     MASK_MAX, true },
  { "__builtin_alpha_pkwb",     ALPHA_BUILTIN_PKWB,     MASK_MAX, true },
  { "__builtin_alpha_unpkbl",   ALPHA_BUILTIN_UNPKBL,   MASK_MAX, true },
  { "__builtin_alpha_unpkbw",   ALPHA_BUILTIN_UNPKBW,   MASK_MAX, true },
  { "__builtin_alpha_cttz",     ALPHA_BUILTIN_CTTZ,     MASK_CIX, true },
  { "__builtin_alpha_ctlz",     ALPHA_BUILTIN_CTLZ,     MASK_CIX, true },
  { "__builtin_alpha_ctpop",    ALPHA_BUILTIN_CTPOP,    MASK_CIX, true }
};

static struct alpha_builtin_def const two_arg_builtins[] = {
  { "__builtin_alpha_cmpbge",   ALPHA_BUILTIN_CMPBGE,   0, true },
  { "__builtin_alpha_extbl",    ALPHA_BUILTIN_EXTBL,    0, true },
  { "__builtin_alpha_extwl",    ALPHA_BUILTIN_EXTWL,    0, true },
  { "__builtin_alpha_extll",    ALPHA_BUILTIN_EXTLL,    0, true },
  { "__builtin_alpha_extql",    ALPHA_BUILTIN_EXTQL,    0, true },
  { "__builtin_alpha_extwh",    ALPHA_BUILTIN_EXTWH,    0, true },
  { "__builtin_alpha_extlh",    ALPHA_BUILTIN_EXTLH,    0, true },
  { "__builtin_alpha_extqh",    ALPHA_BUILTIN_EXTQH,    0, true },
  { "__builtin_alpha_insbl",    ALPHA_BUILTIN_INSBL,    0, true },
  { "__builtin_alpha_inswl",    ALPHA_BUILTIN_INSWL,    0, true },
  { "__builtin_alpha_insll",    ALPHA_BUILTIN_INSLL,    0, true },
  { "__builtin_alpha_insql",    ALPHA_BUILTIN_INSQL,    0, true },
  { "__builtin_alpha_inswh",    ALPHA_BUILTIN_INSWH,    0, true },
  { "__builtin_alpha_inslh",    ALPHA_BUILTIN_INSLH,    0, true },
  { "__builtin_alpha_insqh",    ALPHA_BUILTIN_INSQH,    0, true },
  { "__builtin_alpha_mskbl",    ALPHA_BUILTIN_MSKBL,    0, true },
  { "__builtin_alpha_mskwl",    ALPHA_BUILTIN_MSKWL,    0, true },
  { "__builtin_alpha_mskll",    ALPHA_BUILTIN_MSKLL,    0, true },
  { "__builtin_alpha_mskql",    ALPHA_BUILTIN_MSKQL,    0, true },
  { "__builtin_alpha_mskwh",    ALPHA_BUILTIN_MSKWH,    0, true },
  { "__builtin_alpha_msklh",    ALPHA_BUILTIN_MSKLH,    0, true },
  { "__builtin_alpha_mskqh",    ALPHA_BUILTIN_MSKQH,    0, true },
  { "__builtin_alpha_umulh",    ALPHA_BUILTIN_UMULH,    0, true },
  { "__builtin_alpha_zap",      ALPHA_BUILTIN_ZAP,      0, true },
  { "__builtin_alpha_zapnot",   ALPHA_BUILTIN_ZAPNOT,   0, true },
  { "__builtin_alpha_minub8",   ALPHA_BUILTIN_MINUB8,   MASK_MAX, true },
  { "__builtin_alpha_minsb8",   ALPHA_BUILTIN_MINSB8,   MASK_MAX, true },
  { "__builtin_alpha_minuw4",   ALPHA_BUILTIN_MINUW4,   MASK_MAX, true },
  { "__builtin_alpha_minsw4",   ALPHA_BUILTIN_MINSW4,   MASK_MAX, true },
  { "__builtin_alpha_maxub8",   ALPHA_BUILTIN_MAXUB8,   MASK_MAX, true },
  { "__builtin_alpha_maxsb8",   ALPHA_BUILTIN_MAXSB8,   MASK_MAX, true },
  { "__builtin_alpha_maxuw4",   ALPHA_BUILTIN_MAXUW4,   MASK_MAX, true },
  { "__builtin_alpha_maxsw4",   ALPHA_BUILTIN_MAXSW4,   MASK_MAX, true },
  { "__builtin_alpha_perr",     ALPHA_BUILTIN_PERR,     MASK_MAX, true }
};
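
/* Usage illustration (added; not from the original source): with the
   tables above registered, user code can write, e.g.,

        unsigned long
        all_bytes_nonzero (unsigned long v)
        {
          return __builtin_alpha_cmpbge (0, v) == 0;
        }

   cmpbge sets bit i when byte i of its first operand is >= byte i of its
   second; with a first operand of 0 that marks exactly the zero bytes, so
   a result of 0 means every byte of v is nonzero.  The function name here
   is invented for the example.  */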
static GTY(()) tree alpha_dimode_u;
static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;

static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];

/* Return the alpha builtin for CODE.  */

static tree
alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= ALPHA_BUILTIN_max)
    return error_mark_node;
  return alpha_builtins[code];
}

/* Helper function of alpha_init_builtins.  Add the built-in specified
   by NAME, TYPE, CODE, and ECF.  */

static void
alpha_builtin_function (const char *name, tree ftype,
                        enum alpha_builtin code, unsigned ecf)
{
  tree decl = add_builtin_function (name, ftype, (int) code,
                                    BUILT_IN_MD, NULL, NULL_TREE);

  if (ecf & ECF_CONST)
    TREE_READONLY (decl) = 1;
  if (ecf & ECF_NOTHROW)
    TREE_NOTHROW (decl) = 1;

  alpha_builtins [(int) code] = decl;
}

/* Helper function of alpha_init_builtins.  Add the COUNT built-in
   functions pointed to by P, with function type FTYPE.  */

static void
alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
                    tree ftype)
{
  size_t i;

  for (i = 0; i < count; ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      alpha_builtin_function (p->name, ftype, p->code,
                              (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
}

static void
alpha_init_builtins (void)
{
  tree ftype;

  alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);

  ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);

  ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);

  ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
                                    alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);

  if (TARGET_ABI_OPEN_VMS)
    {
      ftype = build_function_type_list (ptr_type_node, ptr_type_node,
                                        NULL_TREE);
      alpha_builtin_function ("__builtin_establish_vms_condition_handler",
                              ftype,
                              ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
                              0);

      ftype = build_function_type_list (ptr_type_node, void_type_node,
                                        NULL_TREE);
      alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
                              ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);

      vms_patch_builtins ();
    }
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
alpha_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      enum machine_mode mode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 2

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg;
  call_expr_arg_iterator iter;
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;

  if (fcode >= ALPHA_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;

      if (arg == error_mark_node)
        return NULL_RTX;
      if (arity > MAX_ARGS)
        return NULL_RTX;

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
        op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
      arity++;
    }

  if (nonvoid)
    {
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0]);
      else
        pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, op[0], op[1]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
/* Several bits below assume HWI >= 64 bits.  This should be enforced
   by config.gcc.  */
#if HOST_BITS_PER_WIDE_INT < 64
# error "HOST_WIDE_INT too small"
#endif

/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
   with an 8-bit output vector.  OPINT contains the integer operands; bit N
   of OP_CONST is set if OPINT[N] is valid.  */

static tree
alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
{
  if (op_const == 3)
    {
      int i, val;
      for (i = 0, val = 0; i < 8; ++i)
        {
          unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
          unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
          if (c0 >= c1)
            val |= 1 << i;
        }
      return build_int_cst (alpha_dimode_u, val);
    }
  else if (op_const == 2 && opint[1] == 0)
    return build_int_cst (alpha_dimode_u, 0xff);
  return NULL;
}
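
/* Worked example (added for illustration): for opint[0] ==
   0x0102030405060708 and opint[1] == 0x0808080808080808, only byte 0 (the
   least significant, 0x08) satisfies c0 >= c1, so the folded result is
   0x01.  The op_const == 2 case relies on any unsigned byte being >= 0, so
   comparing against a zero second operand always yields 0xff.  */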
/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
   specialized form of an AND operation.  Other byte manipulation instructions
   are defined in terms of this instruction, so this is also used as a
   subroutine for other builtins.

   OP contains the tree operands; OPINT contains the extracted integer values.
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
   OPINT may be considered.  */

static tree
alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
                           long op_const)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT mask = 0;
      int i;

      for (i = 0; i < 8; ++i)
        if ((opint[1] >> i) & 1)
          mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);

      if (op_const & 1)
        return build_int_cst (alpha_dimode_u, opint[0] & mask);

      if (op)
        return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
                            build_int_cst (alpha_dimode_u, mask));
    }
  else if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (alpha_dimode_u, 0);
  return NULL;
}
/* Fold the builtins for the EXT family of instructions.  */

static tree
alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  long zap_const = 2;
  tree *zap_op = NULL;

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      loc *= BITS_PER_UNIT;

      if (loc != 0)
        {
          if (op_const & 1)
            {
              unsigned HOST_WIDE_INT temp = opint[0];
              if (is_high)
                temp <<= loc;
              else
                temp >>= loc;
              opint[0] = temp;
              zap_const = 3;
            }
        }
      else
        zap_op = op;
    }

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
}
/* Fold the builtins for the INS family of instructions.  */

static tree
alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (alpha_dimode_u, 0);

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT temp, loc, byteloc;
      tree *zap_op = NULL;

      loc = opint[1] & 7;
      bytemask <<= loc;

      temp = opint[0];
      if (is_high)
        {
          byteloc = (64 - (loc * 8)) & 0x3f;
          if (byteloc == 0)
            zap_op = op;
          else
            temp >>= byteloc;
          bytemask >>= 8;
        }
      else
        {
          byteloc = loc * 8;
          if (byteloc == 0)
            zap_op = op;
          else
            temp <<= byteloc;
        }

      opint[0] = temp;
      opint[1] = bytemask;
      return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
    }

  return NULL;
}
/* Fold the builtins for the MSK family of instructions.  */

static tree
alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      bytemask <<= loc;

      if (is_high)
        bytemask >>= 8;

      opint[1] = bytemask ^ 0xff;
    }

  return alpha_fold_builtin_zapnot (op, opint, op_const);
}
static tree
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
{
  tree op0 = fold_convert (vtype, op[0]);
  tree op1 = fold_convert (vtype, op[1]);
  tree val = fold_build2 (code, vtype, op0, op1);
  return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
}
static tree
alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp = 0;
  int i;

  if (op_const != 3)
    return NULL;

  for (i = 0; i < 8; ++i)
    {
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
      if (a >= b)
        temp += a - b;
      else
        temp += b - a;
    }

  return build_int_cst (alpha_dimode_u, temp);
}
static tree
alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 24) & 0xff00;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 8) & 0xff00;
  temp |= (opint[0] >> 16) & 0xff0000;
  temp |= (opint[0] >> 24) & 0xff000000;

  return build_int_cst (alpha_dimode_u, temp);
}
static tree
alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0xff00) << 24;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0x0000ff00) << 8;
  temp |= (opint[0] & 0x00ff0000) << 16;
  temp |= (opint[0] & 0xff000000) << 24;

  return build_int_cst (alpha_dimode_u, temp);
}
static tree
alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = exact_log2 (opint[0] & -opint[0]);

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = 64 - floor_log2 (opint[0]) - 1;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp, op;

  if (op_const == 0)
    return NULL;

  op = opint[0];
  temp = 0;
  while (op)
    temp++, op &= op - 1;

  return build_int_cst (alpha_dimode_u, temp);
}
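
/* Note (added for illustration): the loop above uses the classic
   clear-lowest-set-bit trick: op &= op - 1 removes one set bit per
   iteration, so the loop runs exactly popcount(op) times.  For example,
   op == 0b101100 terminates after three iterations with temp == 3.  */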
6966 /* Fold one of our builtin functions. */
6969 alpha_fold_builtin (tree fndecl
, int n_args
, tree
*op
,
6970 bool ignore ATTRIBUTE_UNUSED
)
6972 unsigned HOST_WIDE_INT opint
[MAX_ARGS
];
6976 if (n_args
> MAX_ARGS
)
6979 for (i
= 0; i
< n_args
; i
++)
      if (arg == error_mark_node)
	return NULL;

      opint[i] = 0;
      if (TREE_CODE (arg) == INTEGER_CST)
	{
	  op_const |= 1L << i;
	  opint[i] = int_cst_value (arg);
	}
    }

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case ALPHA_BUILTIN_CMPBGE:
      return alpha_fold_builtin_cmpbge (opint, op_const);

    case ALPHA_BUILTIN_EXTBL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_EXTWL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_EXTLL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_EXTQL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_EXTWH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_EXTLH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_EXTQH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_INSBL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_INSWL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_INSLL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_INSQL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_INSWH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_INSLH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_INSQH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_MSKBL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_MSKWL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_MSKLL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_MSKQL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_MSKWH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_MSKLH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_MSKQH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_ZAP:
      opint[1] ^= 0xff;
      /* FALLTHRU */
    case ALPHA_BUILTIN_ZAPNOT:
      return alpha_fold_builtin_zapnot (op, opint, op_const);

    case ALPHA_BUILTIN_MINUB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MINSB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MINUW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MINSW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
    case ALPHA_BUILTIN_MAXUB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MAXSB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MAXUW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MAXSW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);

    case ALPHA_BUILTIN_PERR:
      return alpha_fold_builtin_perr (opint, op_const);
    case ALPHA_BUILTIN_PKLB:
      return alpha_fold_builtin_pklb (opint, op_const);
    case ALPHA_BUILTIN_PKWB:
      return alpha_fold_builtin_pkwb (opint, op_const);
    case ALPHA_BUILTIN_UNPKBL:
      return alpha_fold_builtin_unpkbl (opint, op_const);
    case ALPHA_BUILTIN_UNPKBW:
      return alpha_fold_builtin_unpkbw (opint, op_const);

    case ALPHA_BUILTIN_CTTZ:
      return alpha_fold_builtin_cttz (opint, op_const);
    case ALPHA_BUILTIN_CTLZ:
      return alpha_fold_builtin_ctlz (opint, op_const);
    case ALPHA_BUILTIN_CTPOP:
      return alpha_fold_builtin_ctpop (opint, op_const);

    case ALPHA_BUILTIN_AMASK:
    case ALPHA_BUILTIN_IMPLVER:
    case ALPHA_BUILTIN_RPCC:
      /* None of these are foldable at compile-time.  */
    default:
      return NULL;
    }
}
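/* A worked illustration of the folding above (example values chosen
   here, not taken from the original source): ZAPNOT with mask 0x0f
   keeps the low four bytes of its first operand and clears the rest,
   so __builtin_alpha_zapnot (0x1122334455667788, 0x0f) folds to
   0x0000000055667788.  ZAP complements the mask first, which is why
   the ZAP case flips the low eight bits of opint[1] and then falls
   through to the ZAPNOT folder.  */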
static bool
alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  bool changed = false;
  gimple stmt = gsi_stmt (*gsi);
  tree call = gimple_call_fn (stmt);
  gimple new_stmt = NULL;

  if (call)
    {
      tree fndecl = gimple_call_fndecl (stmt);

      if (fndecl)
	{
	  tree arg0, arg1;

	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case ALPHA_BUILTIN_UMULH:
	      arg0 = gimple_call_arg (stmt, 0);
	      arg1 = gimple_call_arg (stmt, 1);

	      new_stmt
		= gimple_build_assign_with_ops (MULT_HIGHPART_EXPR,
						gimple_call_lhs (stmt),
						arg0, arg1);
	      break;

	    default:
	      break;
	    }
	}
    }

  if (new_stmt)
    {
      gsi_replace (gsi, new_stmt, true);
      changed = true;
    }

  return changed;
}
/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */

/* Nonzero if we need a stack procedure.  */
enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
static enum alpha_procedure_types alpha_procedure_type;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;

/* Compute register masks for saved registers.  */

static void
alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  unsigned int i;

  /* When outputting a thunk, we don't have valid register life info,
     but assemble_start_function wants to output .frame and .mask
     directives.  */
  if (cfun->is_thunk)
    {
      *imaskP = 0;
      *fmaskP = 0;
      return;
    }

  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
    imask |= (1UL << HARD_FRAME_POINTER_REGNUM);

  /* One for every register we have to save.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (! fixed_regs[i] && ! call_used_regs[i]
	&& df_regs_ever_live_p (i) && i != REG_RA)
      {
	if (i < 32)
	  imask |= (1UL << i);
	else
	  fmask |= (1UL << (i - 32));
      }

  /* We need to restore these for the handler.  */
  if (crtl->calls_eh_return)
    {
      for (i = 0; ; ++i)
	{
	  unsigned regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;
	  imask |= 1UL << regno;
	}
    }

  /* If any register spilled, then spill the return address also.  */
  /* ??? This is required by the Digital stack unwind specification
     and isn't needed if we're doing Dwarf2 unwinding.  */
  if (imask || fmask || alpha_ra_ever_killed ())
    imask |= (1UL << REG_RA);

  *imaskP = imask;
  *fmaskP = fmask;
}
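/* Illustrative example (hypothetical function, not from the original
   source): a routine that keeps $9 and $10 live across calls and
   clobbers the return address $26 ends up with
   imask == (1UL << 9) | (1UL << 10) | (1UL << 26) == 0x4000600,
   and fmask == 0 if no floating registers need saving.  */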
int
alpha_sa_size (void)
{
  unsigned long mask[2];
  int sa_size = 0;
  int i, j;

  alpha_sa_mask (&mask[0], &mask[1]);

  for (j = 0; j < 2; ++j)
    for (i = 0; i < 32; ++i)
      if ((mask[j] >> i) & 1)
	sa_size++;

  if (TARGET_ABI_OPEN_VMS)
    {
      /* Start with a stack procedure if we make any calls (REG_RA used), or
	 need a frame pointer, with a register procedure if we otherwise need
	 at least a slot, and with a null procedure in other cases.  */
      if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
	alpha_procedure_type = PT_STACK;
      else if (get_frame_size() != 0)
	alpha_procedure_type = PT_REGISTER;
      else
	alpha_procedure_type = PT_NULL;

      /* Don't reserve space for saving FP & RA yet.  Do that later after we've
	 made the final decision on stack procedure vs register procedure.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size -= 2;

      /* Decide whether to refer to objects off our PV via FP or PV.
	 If we need FP for something else or if we receive a nonlocal
	 goto (which expects PV to contain the value), we must use PV.
	 Otherwise, start by assuming we can use FP.  */

      vms_base_regno
	= (frame_pointer_needed
	   || cfun->has_nonlocal_label
	   || alpha_procedure_type == PT_STACK
	   || crtl->outgoing_args_size)
	  ? REG_PV : HARD_FRAME_POINTER_REGNUM;

      /* If we want to copy PV into FP, we need to find some register
	 in which to save FP.  */

      vms_save_fp_regno = -1;
      if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
	for (i = 0; i < 32; i++)
	  if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
	    vms_save_fp_regno = i;

      /* A VMS condition handler requires a stack procedure in our
	 implementation (not required by the calling standard).  */
      if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
	  || cfun->machine->uses_condition_handler)
	vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
      else if (alpha_procedure_type == PT_NULL)
	vms_base_regno = REG_PV;

      /* Stack unwinding should be done via FP unless we use it for PV.  */
      vms_unwind_regno = (vms_base_regno == REG_PV
			  ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

      /* If this is a stack procedure, allow space for saving FP, RA and
	 a condition handler slot if needed.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size += 2 + cfun->machine->uses_condition_handler;
    }
  else
    {
      /* Our size must be even (multiple of 16 bytes).  */
      if (sa_size & 1)
	sa_size++;
    }

  return sa_size * 8;
}
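/* Example of the evening-up above (numbers illustrative only): a
   non-VMS function saving $9, $10 and $26 has three occupied slots;
   rounding up to four slots makes the returned save-area size
   4 * 8 == 32 bytes, a multiple of 16 as the stack alignment
   requires.  */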
/* Define the offset between two registers, one to be eliminated,
   and the other its replacement, at the start of a routine.  */

HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
				  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = alpha_sa_size ();
  ret += ALPHA_ROUND (crtl->outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
			   + crtl->args.pretend_args_size)
	      - crtl->args.pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
#if TARGET_ABI_OPEN_VMS

/* Worker function for TARGET_CAN_ELIMINATE.  */

static bool
alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  /* We need the alpha_procedure_type to decide.  Evaluate it now.  */
  alpha_sa_size ();

  switch (alpha_procedure_type)
    {
    case PT_NULL:
      /* NULL procedures have no frame of their own and we only
	 know how to resolve from the current stack pointer.  */
      return to == STACK_POINTER_REGNUM;

    case PT_REGISTER:
    case PT_STACK:
      /* We always eliminate except to the stack pointer if there is no
	 usable frame pointer at hand.  */
      return (to != STACK_POINTER_REGNUM
	      || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
    }

  gcc_unreachable ();
}

/* FROM is to be eliminated for TO.  Return the offset so that TO+offset
   designates the same location as FROM.  */

HOST_WIDE_INT
alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
{
  /* The only possible attempts we ever expect are ARG or FRAME_PTR to
     HARD_FRAME or STACK_PTR.  We need the alpha_procedure_type to decide
     on the proper computations and will need the register save area size
     in most cases.  */

  HOST_WIDE_INT sa_size = alpha_sa_size ();

  /* PT_NULL procedures have no frame of their own and we only allow
     elimination to the stack pointer.  This is the argument pointer and we
     resolve the soft frame pointer to that as well.  */

  if (alpha_procedure_type == PT_NULL)
    return 0;

  /* For a PT_STACK procedure the frame layout looks as follows

                      -----> decreasing addresses

		   <             size rounded up to 16       |  likewise   >
     --------------#------------------------------+++--------------+++-------#
     incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
     --------------#---------------------------------------------------------#
                                   ^         ^              ^               ^
			      ARG_PTR FRAME_PTR HARD_FRAME_PTR       STACK_PTR

     PT_REGISTER procedures are similar in that they may have a frame of their
     own.  They have no regs-sa/pv/outgoing-args area.

     We first compute offset to HARD_FRAME_PTR, then add what we need to get
     to STACK_PTR if need be.  */

  {
    HOST_WIDE_INT offset;
    HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;

    switch (from)
      {
      case FRAME_POINTER_REGNUM:
	offset = ALPHA_ROUND (sa_size + pv_save_size);
	break;
      case ARG_POINTER_REGNUM:
	offset = (ALPHA_ROUND (sa_size + pv_save_size
			       + get_frame_size ()
			       + crtl->args.pretend_args_size)
		  - crtl->args.pretend_args_size);
	break;
      default:
	gcc_unreachable ();
      }

    if (to == STACK_POINTER_REGNUM)
      offset += ALPHA_ROUND (crtl->outgoing_args_size);

    return offset;
  }
}
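/* A worked instance of the layout above (numbers illustrative only):
   for a PT_STACK procedure with sa_size == 24, the 8-byte PV slot
   gives ALPHA_ROUND (24 + 8) == 32, so FRAME_PTR sits 32 bytes above
   HARD_FRAME_PTR; with get_frame_size () == 40 and no pretend args,
   ARG_PTR resolves to ALPHA_ROUND (24 + 8 + 40) == 80 bytes above
   HARD_FRAME_PTR, and reaching STACK_PTR further adds the rounded
   outgoing-args area.  */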
#define COMMON_OBJECT "common_object"

static tree
common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
		       tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
		       bool *no_add_attrs ATTRIBUTE_UNUSED)
{
  tree decl = *node;
  gcc_assert (DECL_P (decl));

  DECL_COMMON (decl) = 1;
  return NULL_TREE;
}

static const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { COMMON_OBJECT,   0, 1, true,  false, false, common_object_handler, false },
  { NULL,            0, 0, false, false, false, NULL, false }
};

void
vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
			       unsigned HOST_WIDE_INT size,
			       unsigned int align)
{
  tree attr = DECL_ATTRIBUTES (decl);
  fprintf (file, "%s", COMMON_ASM_OP);
  assemble_name (file, name);
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
  /* ??? Unlike on OSF/1, the alignment factor is not in log units.  */
  fprintf (file, ",%u", align / BITS_PER_UNIT);

  if (attr)
    {
      attr = lookup_attribute (COMMON_OBJECT, attr);
      if (attr)
	fprintf (file, ",%s",
		 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
    }

  fputc ('\n', file);
}

#undef COMMON_OBJECT

#endif
static int
find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
}

int
alpha_find_lo_sum_using_gp (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
}

static int
alpha_does_function_need_gp (void)
{
  rtx insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return 0;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    return 1;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (cfun->is_thunk)
    return 1;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (crtl->has_nonlocal_goto)
    return 1;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
	&& GET_CODE (PATTERN (insn)) != USE
	&& GET_CODE (PATTERN (insn)) != CLOBBER
	&& get_attr_usegp (insn))
      return 1;

  return 0;
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p (void)
{
  rtx seq = get_insns ();
  rtx insn;

  end_sequence ();

  if (!seq)
    return NULL_RTX;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = NEXT_INSN (insn);
	}
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())

/* Generates a store with the proper unwind info attached.  VALUE is
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
   contains SP+FRAME_BIAS, and that is the unwind info that should be
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */

static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
		    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem, insn;

  addr = plus_constant (Pmode, base_reg, base_ofs);
  mem = gen_frame_mem (DImode, addr);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
	{
	  addr = plus_constant (Pmode, stack_pointer_rtx,
				frame_bias + base_ofs);
	  mem = gen_rtx_MEM (DImode, addr);
	}

      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		    gen_rtx_SET (VOIDmode, mem, frame_reg));
    }
}

static void
emit_frame_store (unsigned int regno, rtx base_reg,
		  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}
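/* Typical use of the helpers above (sketch only, with illustrative
   arguments; the real calls appear later in alpha_expand_prologue):
   saving the return address at sp+16 with no bias is simply
   emit_frame_store (REG_RA, stack_pointer_rtx, 0, 16), which emits
   the store and attaches the unwind information in one step.  */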
/* Compute the frame size.  SIZE is the size of the "naked" frame
   and SA_SIZE is the size of the register save area.  */

static HOST_WIDE_INT
compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
{
  if (TARGET_ABI_OPEN_VMS)
    return ALPHA_ROUND (sa_size
			+ (alpha_procedure_type == PT_STACK ? 8 : 0)
			+ size
			+ crtl->args.pretend_args_size);
  else
    return ALPHA_ROUND (crtl->outgoing_args_size)
	   + sa_size
	   + ALPHA_ROUND (size
			  + crtl->args.pretend_args_size);
}
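/* A sample evaluation (illustrative numbers only): on OSF with 48
   bytes of outgoing args, sa_size == 32, a 40-byte naked frame and no
   pretend args, this returns
   ALPHA_ROUND (48) + 32 + ALPHA_ROUND (40) == 48 + 32 + 48 == 128.  */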
/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
	these are 'normal' functions with local vars and which are
	calling other functions
   - register frame (PROC_REGISTER)
	keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor)
   This is done with the '.pdesc' command.

   On not-vms, we don't really differentiate between the two, as we can
   simply allocate stack without saving registers.  */

void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size, sa_bias;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Probed stack size; it additionally includes the size of
     the "reserve region" if any.  */
  HOST_WIDE_INT probed_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;
  int i;

  sa_size = alpha_sa_size ();
  frame_size = compute_frame_size (get_frame_size (), sa_size);

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
	emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    emit_insn (gen_prologue_mcount ());
  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instruction.

     Note that we are only allowed to adjust sp once in the prologue.  */

  probed_size = frame_size;
  if (flag_stack_check)
    probed_size += STACK_CHECK_PROTECT;

  if (probed_size <= 32768)
    {
      if (probed_size > 4096)
	{
	  int probed;

	  for (probed = 4096; probed < probed_size; probed += 8192)
	    emit_insn (gen_probe_stack (GEN_INT (-probed)));

	  /* We only have to do this probe if we aren't saving registers or
	     if we are probing beyond the frame because of -fstack-check.  */
	  if ((sa_size == 0 && probed_size > probed - 4096)
	      || flag_stack_check)
	    emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
	}

      if (frame_size != 0)
	FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (-frame_size))));
    }
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
	 number of 8192 byte blocks to probe.  We then probe each block
	 in the loop and then set SP to the proper location.  If the
	 amount remaining is > 4096, we have to do one more probe if we
	 are not saving any registers or if we are probing beyond the
	 frame because of -fstack-check.  */

      HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
      HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));

      /* Because of the difficulty in emitting a new basic block this
	 late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
	{
	  rtx last = gen_rtx_MEM (DImode,
				  plus_constant (Pmode, ptr, -leftover));
	  MEM_VOLATILE_P (last) = 1;
	  emit_move_insn (last, const0_rtx);
	}

      if (flag_stack_check)
	{
	  /* If -fstack-check is specified we have to load the entire
	     constant into a register and subtract from the sp in one go,
	     because the probed stack size is not equal to the frame size.  */
	  HOST_WIDE_INT lo, hi;
	  lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
	  hi = frame_size - lo;

	  emit_move_insn (ptr, GEN_INT (hi));
	  emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
	  seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
				       ptr));
	}
      else
	{
	  seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
				       GEN_INT (-leftover)));
	}

      /* This alternative is special, because the DWARF code cannot
	 possibly intuit through the loop above.  So we invent this
	 note it looks at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      add_reg_note (seq, REG_FRAME_RELATED_EXPR,
		    gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				 plus_constant (Pmode, stack_pointer_rtx,
						-frame_size)));
    }
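  /* The ((x & 0xffff) ^ 0x8000) - 0x8000 idiom used above (and again
     further down in this file) sign-extends the low 16 bits of a
     constant, matching what an lda instruction will do at run time.
     Worked example (values illustrative): for frame_size == 0x1234abcd,
     lo == -0x5433 and hi == 0x12350000, so hi + lo reconstructs the
     original value from an ldah-shaped high part and an lda-shaped
     low part.  */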
  /* Cope with very large offsets to the register save area.  */
  sa_bias = 0;
  sa_reg = stack_pointer_rtx;
  if (reg_offset + sa_size > 0x8000)
    {
      int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
      rtx sa_bias_rtx;

      if (low + sa_size <= 0x8000)
	sa_bias = reg_offset - low, reg_offset = low;
      else
	sa_bias = reg_offset, reg_offset = 0;

      sa_reg = gen_rtx_REG (DImode, 24);
      sa_bias_rtx = GEN_INT (sa_bias);

      if (add_operand (sa_bias_rtx, DImode))
	emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
      else
	{
	  emit_move_insn (sa_reg, sa_bias_rtx);
	  emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
	}
    }
  /* Save regs in stack order.  Beginning with VMS PV.  */
  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
    emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

  /* Save register RA next.  */
  if (imask & (1UL << REG_RA))
    {
      emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
      imask &= ~(1UL << REG_RA);
      reg_offset += 8;
    }

  /* Now save any other registers required to be saved.  */
  for (i = 0; i < 31; i++)
    if (imask & (1UL << i))
      {
	emit_frame_store (i, sa_reg, sa_bias, reg_offset);
	reg_offset += 8;
      }

  for (i = 0; i < 31; i++)
    if (fmask & (1UL << i))
      {
	emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
	reg_offset += 8;
      }
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Register frame procedures save the fp.  */
      if (alpha_procedure_type == PT_REGISTER)
	{
	  rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
				     hard_frame_pointer_rtx);
	  add_reg_note (insn, REG_CFA_REGISTER, NULL);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
	emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
				    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
	  && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (crtl->outgoing_args_size != 0)
	{
	  rtx seq
	    = emit_move_insn (stack_pointer_rtx,
			      plus_constant
			      (Pmode, hard_frame_pointer_rtx,
			       - (ALPHA_ROUND
				  (crtl->outgoing_args_size))));

	  /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
	     if ! frame_pointer_needed.  Setting the bit will change the CFA
	     computation rule to use sp again, which would be wrong if we had
	     frame_pointer_needed, as this means sp might move unpredictably
	     later on.

	     Also, note that
	       frame_pointer_needed
	       => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
	     and
	       crtl->outgoing_args_size != 0
	       => alpha_procedure_type != PT_NULL,

	     so when we are not setting the bit here, we are guaranteed to
	     have emitted an FRP frame pointer update just before.  */
	  RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
	}
    }
  else
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
	{
	  if (TARGET_CAN_FAULT_IN_PROLOGUE)
	    FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
	  else
	    /* This must always be the last instruction in the
	       prologue, thus we emit a special move + clobber.  */
	    FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
					 stack_pointer_rtx, sa_reg)));
	}
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Output the textual info surrounding the prologue.  */

void
alpha_start_function (FILE *file, const char *fnname,
		      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  unsigned HOST_WIDE_INT frame_size;
  /* The maximum debuggable frame size.  */
  unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  char *tramp_label = (char *) alloca (strlen (fnname) + 6);
  int i;

#if TARGET_ABI_OPEN_VMS
  vms_start_function (fnname);
#endif

  alpha_fnname = fnname;
  sa_size = alpha_sa_size ();
  frame_size = compute_frame_size (get_frame_size (), sa_size);

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
	 Otherwise, do it here.  */
      if (TARGET_ABI_OSF
	  && ! alpha_function_needs_gp
	  && ! cfun->is_thunk)
	{
	  putc ('$', file);
	  assemble_name (file, fnname);
	  fputs ("..ng:\n", file);
	}
    }
  /* Nested functions on VMS that are potentially called via trampoline
     get a special transfer entry point that loads the called functions
     procedure descriptor and static chain.  */
  if (TARGET_ABI_OPEN_VMS
      && !TREE_PUBLIC (decl)
      && DECL_CONTEXT (decl)
      && !TYPE_P (DECL_CONTEXT (decl))
      && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
    {
      strcpy (tramp_label, fnname);
      strcat (tramp_label, "..tr");
      ASM_OUTPUT_LABEL (file, tramp_label);
      fprintf (file, "\tldq $1,24($27)\n");
      fprintf (file, "\tldq $27,16($27)\n");
    }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;

  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (TARGET_ABI_OSF
      && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
	 math-library routines.  The value we set it to is PDSC_EXC_IEEE
	 (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
  alpha_arg_offset = -frame_size + 48;

  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
	     HOST_WIDE_INT_PRINT_DEC "\n",
	     vms_unwind_regno,
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
	     (frame_pointer_needed
	      ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
	     frame_size >= max_frame_size ? 0 : frame_size,
	     crtl->args.pretend_args_size);

  /* Describe which registers were spilled.  */
  if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
	/* ??? Does VMS care if mask contains ra?  The old code didn't
	   set it, so I don't here.  */
	fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
	fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
	fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
	{
	  fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
		   frame_size >= max_frame_size ? 0 : reg_offset - frame_size);

	  for (i = 0; i < 32; ++i)
	    if (imask & (1UL << i))
	      reg_offset += 8;
	}

      if (fmask)
	fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
		 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
    }

#if TARGET_ABI_OPEN_VMS
  /* If a user condition handler has been installed at some point, emit
     the procedure descriptor bits to point the Condition Handling Facility
     at the indirection wrapper, and state the fp offset at which the user
     handler may be found.  */
  if (cfun->machine->uses_condition_handler)
    {
      fprintf (file, "\t.handler __gcc_shell_handler\n");
      fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
    }

#ifdef TARGET_VMS_CRASH_DEBUG
  /* Support of minimal traceback info.  */
  switch_to_section (readonly_data_section);
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  switch_to_section (text_section);
#endif
#endif /* TARGET_ABI_OPEN_VMS */
}
/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
	     alpha_function_needs_gp || cfun->is_thunk);
}
/* Write function epilogue.  */

void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem, reg, insn;
  rtx eh_ofs;
  rtx cfa_restores = NULL_RTX;
  int i;

  sa_size = alpha_sa_size ();
  frame_size = compute_frame_size (get_frame_size (), sa_size);

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
	reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
      else
	reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = (TARGET_ABI_OPEN_VMS
       ? alpha_procedure_type == PT_STACK
       : frame_pointer_needed);
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (crtl->calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;

  if (sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if (TARGET_ABI_OPEN_VMS
	  ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
	  : frame_pointer_needed)
	emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  HOST_WIDE_INT bias;

	  if (low + sa_size <= 0x8000)
	    bias = reg_offset - low, reg_offset = low;
	  else
	    bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 22);
	  sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);

	  emit_move_insn (sa_reg, sa_reg_exp);
	}

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
      reg = gen_rtx_REG (DImode, REG_RA);
      emit_move_insn (reg, mem);
      cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
	if (imask & (1UL << i))
	  {
	    if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
	      fp_offset = reg_offset;
	    else
	      {
		mem = gen_frame_mem (DImode,
				     plus_constant (Pmode, sa_reg,
						    reg_offset));
		reg = gen_rtx_REG (DImode, i);
		emit_move_insn (reg, mem);
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	      }
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; ++i)
	if (fmask & (1UL << i))
	  {
	    mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
							reg_offset));
	    reg = gen_rtx_REG (DFmode, i+32);
	    emit_move_insn (reg, mem);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	    reg_offset += 8;
	  }
    }

  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  emit_move_insn (sp_adj1,
			  gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
	}

      /* If the stack size is large, begin computation into a temporary
	 register so as not to interfere with a potential fp restore,
	 which must be consecutive with an SP restore.  */
      if (frame_size < 32768 && !cfun->calls_alloca)
	sp_adj2 = GEN_INT (frame_size);
      else if (frame_size < 0x40007fffL)
	{
	  int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

	  sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
	  if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
	    sp_adj1 = sa_reg;
	  else
	    {
	      sp_adj1 = gen_rtx_REG (DImode, 23);
	      emit_move_insn (sp_adj1, sp_adj2);
	    }
	  sp_adj2 = GEN_INT (low);
	}
      else
	{
	  rtx tmp = gen_rtx_REG (DImode, 23);
	  sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
	  if (!sp_adj2)
	    {
	      /* We can't drop new things to memory this late, afaik,
		 so build it up by pieces.  */
	      sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
						   -(frame_size < 0));
	      gcc_assert (sp_adj2);
	    }
	}

      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (fp_is_frame_pointer)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
						      fp_offset));
	  emit_move_insn (hard_frame_pointer_rtx, mem);
	  cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
					 hard_frame_pointer_rtx, cfa_restores);
	}
      else if (TARGET_ABI_OPEN_VMS)
	{
	  emit_insn (gen_blockage ());
	  emit_move_insn (hard_frame_pointer_rtx,
			  gen_rtx_REG (DImode, vms_save_fp_regno));
	  cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
					 hard_frame_pointer_rtx, cfa_restores);
	}

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
	insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
      else
	insn = emit_move_insn (stack_pointer_rtx,
			       gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
      REG_NOTES (insn) = cfa_restores;
      add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      gcc_assert (cfa_restores == NULL);

      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
	{
	  emit_insn (gen_blockage ());
	  insn = emit_move_insn (hard_frame_pointer_rtx,
				 gen_rtx_REG (DImode, vms_save_fp_regno));
	  add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
}
/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
  rtx insn;

  /* We output a nop after noreturn calls at the very end of the function to
     ensure that the return address always remains in the caller's code range,
     as not doing so might confuse unwinding engines.  */
  insn = get_last_insn ();
  if (!INSN_P (insn))
    insn = prev_active_insn (insn);
  if (insn && CALL_P (insn))
    output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);

#if TARGET_ABI_OPEN_VMS
  /* Write the linkage entries.  */
  alpha_write_linkage (file, fnname);
#endif

  /* End the function.  */
  if (TARGET_ABI_OPEN_VMS
      || !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;
}
#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			   tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this_rtx, insn, funexp;

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 17);
  else
    this_rtx = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (hi + lo == delta)
    {
      if (hi)
	emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
      if (lo)
	emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
					   delta, -(delta < 0));
      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
	{
	  if (hi)
	    emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
	}
      else
	{
	  tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
					    vcall_offset, -(vcall_offset < 0));
	  emit_insn (gen_adddi3 (tmp, tmp, tmp2));
	  lo = 0;
	}
      if (lo)
	tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
	tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */
/* Debugging support.  */

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;

/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
  else if (name != current_function_file
	   && strcmp (name, current_function_file) != 0)
    {
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}
/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};

/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
	 ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
	int regno = REGNO (x);
	unsigned long mask = ((unsigned long) 1) << (regno % 32);

	if (regno == 31 || regno == 63)
	  break;

	if (set)
	  {
	    if (regno < 32)
	      sum->defd.i |= mask;
	    else
	      sum->defd.fp |= mask;
	  }
	else
	  {
	    if (regno < 32)
	      sum->used.i  |= mask;
	    else
	      sum->used.fp |= mask;
	  }
      }
      break;

    case MEM:
      if (set)
	sum->defd.mem = 1;
      else
	sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	switch (format_ptr[i])
	  {
	  case 'e':
	    summarize_insn (XEXP (x, i), sum, 0);
	    break;

	  case 'E':
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      summarize_insn (XVECEXP (x, i, j), sum, 0);
	    break;

	  case 'i':
	    break;

	  default:
	    gcc_unreachable ();
	  }
    }
}
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */
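/* An illustrative shadow (assembly sketch written for this comment,
   not generated from real input):

	addt $f0,$f1,$f2	# may trap; shadow opens here
	mult $f3,$f4,$f5	# allowed to stay in the shadow
	trapb			# shadow closes

   The mult may remain in the shadow because it neither modifies a
   value live on entry (a), feeds a memory base register from a
   possibly-UNPREDICTABLE result (b), reuses a destination register
   (c), nor branches (d).  A second write to $f5, for instance, would
   force the trapb to be emitted before the offending insn.  */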
static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (NOTE_P (i))
	{
	  switch (NOTE_KIND (i))
	    {
	    case NOTE_INSN_EH_REGION_BEG:
	      exception_nesting++;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EH_REGION_END:
	      exception_nesting--;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	      if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
		goto close_shadow;
	      break;
	    }
	}
      else if (trap_pending)
	{
	  if (alpha_tp == ALPHA_TP_FUNC)
	    {
	      if (JUMP_P (i)
		  && GET_CODE (PATTERN (i)) == RETURN)
		goto close_shadow;
	    }
	  else if (alpha_tp == ALPHA_TP_INSN)
	    {
	      if (optimize > 0)
		{
		  struct shadow_summary sum;

		  sum.used.i = 0;
		  sum.used.fp = 0;
		  sum.used.mem = 0;
		  sum.defd = sum.used;

		  switch (GET_CODE (i))
		    {
		    case INSN:
		      /* Annoyingly, get_attr_trap will die on these.  */
		      if (GET_CODE (PATTERN (i)) == USE
			  || GET_CODE (PATTERN (i)) == CLOBBER)
			break;

		      summarize_insn (PATTERN (i), &sum, 0);

		      if ((sum.defd.i & shadow.defd.i)
			  || (sum.defd.fp & shadow.defd.fp))
			{
			  /* (c) would be violated */
			  goto close_shadow;
			}

		      /* Combine shadow with summary of current insn: */
		      shadow.used.i   |= sum.used.i;
		      shadow.used.fp  |= sum.used.fp;
		      shadow.used.mem |= sum.used.mem;
		      shadow.defd.i   |= sum.defd.i;
		      shadow.defd.fp  |= sum.defd.fp;
		      shadow.defd.mem |= sum.defd.mem;

		      if ((sum.defd.i & shadow.used.i)
			  || (sum.defd.fp & shadow.used.fp)
			  || (sum.defd.mem & shadow.used.mem))
			{
			  /* (a) would be violated (also takes care of (b)).  */
			  gcc_assert (get_attr_trap (i) != TRAP_YES
				      || (!(sum.defd.i & sum.used.i)
					  && !(sum.defd.fp & sum.used.fp)));

			  goto close_shadow;
			}
		      break;

		    case BARRIER:
		      /* __builtin_unreachable can expand to no code at all,
			 leaving (barrier) RTXes in the instruction stream.  */
		      goto close_shadow_notrapb;

		    case JUMP_INSN:
		    case CALL_INSN:
		    case CODE_LABEL:
		      goto close_shadow;

		    default:
		      gcc_unreachable ();
		    }
		}
	      else
		{
		close_shadow:
		  n = emit_insn_before (gen_trapb (), i);
		  PUT_MODE (n, TImode);
		  PUT_MODE (i, TImode);
		close_shadow_notrapb:
		  trap_pending = 0;
		  shadow.used.i = 0;
		  shadow.used.fp = 0;
		  shadow.used.mem = 0;
		  shadow.defd = shadow.used;
		}
	    }
	}

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
	  && NONJUMP_INSN_P (i)
	  && GET_CODE (PATTERN (i)) != USE
	  && GET_CODE (PATTERN (i)) != CLOBBER
	  && get_attr_trap (i) == TRAP_YES)
	{
	  if (optimize && !trap_pending)
	    summarize_insn (PATTERN (i), &shadow, 0);
	  trap_pending = 1;
	}
    }
}
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};

static enum alphaev4_pipe
alphaev4_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:		/* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:		/* fake */
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}

static enum alphaev5_pipe
alphaev5_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LDSYM:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_LD_L:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:		/* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
	{
	case EV4_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	case EV4_IBX:
	  if (in_use & EV4_IB0)
	    {
	      if (in_use & EV4_IB1)
		goto done;
	      in_use |= EV4_IB1;
	    }
	  else
	    in_use |= EV4_IB0 | EV4_IBX;
	  break;

	case EV4_IB0:
	  if (in_use & EV4_IB0)
	    {
	      if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
		goto done;
	      in_use |= EV4_IB1;
	    }
	  in_use |= EV4_IB0;
	  break;

	case EV4_IB1:
	  if (in_use & EV4_IB1)
	    goto done;
	  in_use |= EV4_IB1;
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (JUMP_P (insn))
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev5_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev5_pipe pipe;

      pipe = alphaev5_insn_pipe (insn);
      switch (pipe)
	{
	case EV5_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	/* ??? Most of the places below, we would like to assert never
	   happen, as it would indicate an error either in Haifa, or
	   in the scheduling description.  Unfortunately, Haifa never
	   schedules the last instruction of the BB, so we don't have
	   an accurate TI bit to go off.  */
	case EV5_E01:
	  if (in_use & EV5_E0)
	    {
	      if (in_use & EV5_E1)
		goto done;
	      in_use |= EV5_E1;
	    }
	  else
	    in_use |= EV5_E0 | EV5_E01;
	  break;

	case EV5_E0:
	  if (in_use & EV5_E0)
	    {
	      if (!(in_use & EV5_E01) || (in_use & EV5_E1))
		goto done;
	      in_use |= EV5_E1;
	    }
	  in_use |= EV5_E0;
	  break;

	case EV5_E1:
	  if (in_use & EV5_E1)
	    goto done;
	  in_use |= EV5_E1;
	  break;

	case EV5_FAM:
	  if (in_use & EV5_FA)
	    {
	      if (in_use & EV5_FM)
		goto done;
	      in_use |= EV5_FM;
	    }
	  else
	    in_use |= EV5_FA | EV5_FAM;
	  break;

	case EV5_FA:
	  if (in_use & EV5_FA)
	    goto done;
	  in_use |= EV5_FA;
	  break;

	case EV5_FM:
	  if (in_use & EV5_FM)
	    goto done;
	  in_use |= EV5_FM;
	  break;

	case EV5_NONE:
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      /* ??? If this is predicted not-taken, slotting continues, except
	 that no more IBR, FBR, or JSR insns may be slotted.  */
      if (JUMP_P (insn))
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}
/* The instruction group alignment main loop.  */

static void
alpha_align_insns (unsigned int max_align,
		   rtx (*next_group) (rtx, int *, int *),
		   rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx i, next;

  /* Let shorten branches care for assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (NOTE_P (i))
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (LABEL_P (i))
	{
	  unsigned int new_align = 1 << label_to_alignment (i);

	  if (new_align >= align)
	    {
	      align = new_align < max_align ? new_align : max_align;
	      ofs = 0;
	    }

	  else if (ofs & (new_align-1))
	    ofs = (ofs | (new_align-1)) + 1;
	  gcc_assert (!len);
	}

      /* Handle complex instructions specially.  */
      else if (in_use == 0)
	{
	  /* Asms will have length < 0.  This is a signal that we have
	     lost alignment knowledge.  Assume, however, that the asm
	     will not mis-align instructions.  */
	  if (len < 0)
	    {
	      ofs = 0;
	      align = 4;
	      len = 0;
	    }
	}

      /* If the known alignment is smaller than the recognized insn group,
	 realign the output.  */
      else if ((int) align < len)
	{
	  unsigned int new_log_align = len > 8 ? 4 : 3;
	  rtx prev, where;

	  where = prev = prev_nonnote_insn (i);
	  if (!where || !LABEL_P (where))
	    where = i;

	  /* Can't realign between a call and its gp reload.  */
	  if (! (TARGET_EXPLICIT_RELOCS
		 && prev && CALL_P (prev)))
	    {
	      emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
	      align = 1 << new_log_align;
	      ofs = 0;
	    }
	}

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
	ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
	 we need to add padding to keep the group together.  Rather
	 than simply leaving the insn filling to the assembler, we
	 can make use of the knowledge of what sorts of instructions
	 were issued in the previous group to make sure that all of
	 the added nops are really free.  */
      else if (ofs + len > (int) align)
	{
	  int nop_count = (align - ofs) / 4;
	  rtx where;

	  /* Insert nops before labels, branches, and calls to truly merge
	     the execution of the nops with the previous instruction group.  */
	  where = prev_nonnote_insn (i);
	  if (where)
	    {
	      if (LABEL_P (where))
		{
		  rtx where2 = prev_nonnote_insn (where);
		  if (where2 && JUMP_P (where2))
		    where = where2;
		}
	      else if (NONJUMP_INSN_P (where))
		where = i;
	    }
	  else
	    where = i;

	  do
	    emit_insn_before ((*next_nop)(&prev_in_use), where);
	  while (--nop_count);
	  ofs = 0;
	}

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}
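/* Worked example of the padding decision above (numbers illustrative
   only): with align == 16, ofs == 12 and an 8-byte group up next,
   ofs + len == 20 crosses the 16-byte boundary, so
   (align - ofs) / 4 == 1 nop is emitted to push the group to the next
   boundary, choosing a nop flavor that fills a genuinely idle slot of
   the previous group.  */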
/* Insert an unop between sibcall or noreturn function call and GP load.  */

static void
alpha_pad_function_end (void)
{
  rtx insn, next;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (!CALL_P (insn)
	  || !(SIBLING_CALL_P (insn)
	       || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
	continue;

      /* Make sure we do not split a call and its corresponding
	 CALL_ARG_LOCATION note.  */
      next = NEXT_INSN (insn);
      if (next == NULL)
	continue;
      if (BARRIER_P (next))
	{
	  next = NEXT_INSN (next);
	  if (next == NULL)
	    continue;
	}
      if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
	insn = next;

      next = next_active_insn (insn);
      if (next)
	{
	  rtx pat = PATTERN (next);

	  if (GET_CODE (pat) == SET
	      && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
	      && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
	    emit_insn_after (gen_unop (), insn);
	}
    }
}
/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  /* Workaround for a linker error that triggers when an exception
     handler immediately follows a sibcall or a noreturn function.

In the sibcall case:

   The instruction stream from an object file:

 1d8:   00 00 fb 6b     jmp     (t12)
 1dc:   00 00 ba 27     ldah    gp,0(ra)
 1e0:   00 00 bd 23     lda     gp,0(gp)
 1e4:   00 00 7d a7     ldq     t12,0(gp)
 1e8:   00 40 5b 6b     jsr     ra,(t12),1ec <__funcZ+0x1ec>

   was converted in the final link pass to:

   12003aa88:   67 fa ff c3     br      120039428 <...>
   12003aa8c:   00 00 fe 2f     unop
   12003aa90:   00 00 fe 2f     unop
   12003aa94:   48 83 7d a7     ldq     t12,-31928(gp)
   12003aa98:   00 40 5b 6b     jsr     ra,(t12),12003aa9c <__func+0x1ec>

And in the noreturn case:

   The instruction stream from an object file:

  54:   00 40 5b 6b     jsr     ra,(t12),58 <__func+0x58>
  58:   00 00 ba 27     ldah    gp,0(ra)
  5c:   00 00 bd 23     lda     gp,0(gp)
  60:   00 00 7d a7     ldq     t12,0(gp)
  64:   00 40 5b 6b     jsr     ra,(t12),68 <__func+0x68>

   was converted in the final link pass to:

   fdb24:       a0 03 40 d3     bsr     ra,fe9a8 <_called_func+0x8>
   fdb28:       00 00 fe 2f     unop
   fdb2c:       00 00 fe 2f     unop
   fdb30:       30 82 7d a7     ldq     t12,-32208(gp)
   fdb34:       00 40 5b 6b     jsr     ra,(t12),fdb38 <__func+0x68>

     GP load instructions were wrongly cleared by the linker relaxation
     pass.  This workaround prevents removal of GP loads by inserting
     an unop instruction between a sibcall or noreturn function call and
     exception handler prologue.  */

  if (current_function_has_exception_handlers ())
    alpha_pad_function_end ();

  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows ();

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_tune == PROCESSOR_EV4)
	alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_tune == PROCESSOR_EV5)
	alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
    }
}
static void
alpha_file_start (void)
{
  default_file_start ();

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (TARGET_ABI_OSF)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
	arch = "ev6";
      else if (TARGET_MAX)
	arch = "pca56";
      else if (TARGET_BWX)
	arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
	arch = "ev5";
      else
	arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
9443 /* Since we don't have a .dynbss section, we should not allow global
9444 relocations in the .rodata section. */
9447 alpha_elf_reloc_rw_mask (void)
9449 return flag_pic
? 3 : 2;
9452 /* Return a section for X. The only special thing we do here is to
9453 honor small data. */
9456 alpha_elf_select_rtx_section (enum machine_mode mode
, rtx x
,
9457 unsigned HOST_WIDE_INT align
)
9459 if (TARGET_SMALL_DATA
&& GET_MODE_SIZE (mode
) <= g_switch_value
)
9460 /* ??? Consider using mergeable sdata sections. */
9461 return sdata_section
;
9463 return default_elf_select_rtx_section (mode
, x
, align
);
static unsigned int
alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags (decl, name, reloc);
  return flags;
}
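/* For example, a decl placed in ".sdata.foo" or ".sbss" is tagged
   SECTION_SMALL in addition to the default flags, so that it can be
   addressed with a 16-bit displacement from $gp.  */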
/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum reloc_kind
{
  KIND_LINKAGE,
  KIND_CODEADDR
};

struct GTY(()) alpha_links
{
  rtx func;
  rtx linkage;
  enum reloc_kind rkind;
};
#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}
/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
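/* As a worked example: a call passing two single-float arguments with
   VAX floating enabled would yield

	regval = 2 | (FF << 8) | (FF << 11)

   i.e. the argument count sits in the low bits and each 3-bit field
   starting at bit 8 encodes one of the first six argument types.  */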
/* Return a SYMBOL_REF representing the reference to the .linkage entry
   of function FUNC built for calls made from CFUNDECL.  LFLAG is 1 if
   this is the reference to the linkage pointer value, 0 if this is the
   reference to the function entry value.  RFLAG is 1 if this is a reduced
   reference (code address only), 0 if this is a full reference.  */

rtx
alpha_use_linkage (rtx func, bool lflag, bool rflag)
{
  struct alpha_links *al = NULL;
  const char *name = XSTR (func, 0);

  if (cfun->machine->links)
    {
      splay_tree_node lnode;

      /* Is this name already defined?  */
      lnode = splay_tree_lookup (cfun->machine->links, (splay_tree_key) name);
      if (lnode)
	al = (struct alpha_links *) lnode->value;
    }
  else
    cfun->machine->links = splay_tree_new_ggc
      ((splay_tree_compare_fn) strcmp,
       ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
       ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);

  if (al == NULL)
    {
      size_t buf_len;
      char *linksym;
      tree id;

      if (name[0] == '*')
	name++;

      /* Follow transparent alias, as this is used for CRTL translations.  */
      id = maybe_get_identifier (name);
      if (id)
	{
	  while (IDENTIFIER_TRANSPARENT_ALIAS (id))
	    id = TREE_CHAIN (id);
	  name = IDENTIFIER_POINTER (id);
	}

      buf_len = strlen (name) + 8 + 9;
      linksym = (char *) alloca (buf_len);
      snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);

      al = ggc_alloc_alpha_links ();
      al->func = func;
      al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));

      splay_tree_insert (cfun->machine->links,
			 (splay_tree_key) ggc_strdup (name),
			 (splay_tree_value) al);
    }

  al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
  else
    return al->linkage;
}
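/* For example, the fourth function in a translation unit referring to
   "foo" gets a linkage symbol named "$3..foo..lk".  With LFLAG set the
   caller is handed a MEM at <linkage>+8, the linkage-pointer half of
   the pair; otherwise it gets the SYMBOL_REF of the entry itself.  */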
static int
alpha_write_one_linkage (splay_tree_node node, void *data)
{
  const char *const name = (const char *) node->key;
  struct alpha_links *link = (struct alpha_links *) node->value;
  FILE *stream = (FILE *) data;

  ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
  if (link->rkind == KIND_CODEADDR)
    {
      /* External and used, request code address.  */
      fprintf (stream, "\t.code_address ");
    }
  else
    {
      if (!SYMBOL_REF_EXTERNAL_P (link->func)
	  && SYMBOL_REF_LOCAL_P (link->func))
	{
	  /* Locally defined, build linkage pair.  */
	  fprintf (stream, "\t.quad %s..en\n", name);
	  fprintf (stream, "\t.quad ");
	}
      else
	{
	  /* External, request linkage pair.  */
	  fprintf (stream, "\t.linkage ");
	}
    }
  assemble_name (stream, name);
  fputs ("\n", stream);

  return 0;
}
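/* The output for a full (non-reduced) reference to a locally defined
   function "foo" would look like:

	$1..foo..lk:
		.quad foo..en
		.quad foo

   whereas an external function is emitted as a ".linkage foo" request
   and a code-address-only reference as ".code_address foo".  */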
void
alpha_write_linkage (FILE *stream, const char *funname)
{
  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

#ifdef TARGET_VMS_CRASH_DEBUG
  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
#endif

  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
	   alpha_procedure_type == PT_STACK ? "stack"
	   : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (cfun->machine->links)
    {
      splay_tree_foreach (cfun->machine->links, alpha_write_one_linkage, stream);
      /* splay_tree_delete (func->links); */
    }
}
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
		       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

#else
rtx
alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
		   bool lflag ATTRIBUTE_UNUSED,
		   bool rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
      abort_libfunc = init_one_libfunc ("decc$abort");
      memcmp_libfunc = init_one_libfunc ("decc$memcmp");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}
/* On the Alpha, we use this to disable the floating-point registers
   when they don't exist.  */

static void
alpha_conditional_register_usage (void)
{
  int i;
  if (! TARGET_FPREGS)
    for (i = 32; i < 63; i++)
      fixed_regs[i] = call_used_regs[i] = 1;
}
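/* Note that the loop stops at register 62: register 63 is $f31, the
   hardwired floating-point zero register, which is already marked
   fixed unconditionally.  */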
/* Canonicalize a comparison from one we don't have to one we do have.  */

static void
alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			       bool op0_preserve_value)
{
  if (!op0_preserve_value
      && (*code == GE || *code == GT || *code == GEU || *code == GTU)
      && (REG_P (*op1) || *op1 == const0_rtx))
    {
      rtx tem = *op0;
      *op0 = *op1;
      *op1 = tem;
      *code = (int)swap_condition ((enum rtx_code)*code);
    }

  if ((*code == LT || *code == LTU)
      && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
    {
      *code = *code == LT ? LE : LEU;
      *op1 = GEN_INT (255);
    }
}
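/* Two sample rewrites: "GT x, y" becomes "LT y, x" when the first
   operand need not be preserved, since Alpha provides cmplt but no
   cmpgt; and "LT x, 256" becomes "LE x, 255" so that the constant
   fits the 8-bit literal field of the compare instructions.  */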
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL alpha_builtin_decl
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif
/* Use 16-bit section anchors.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"