/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "tree-pass.h"
#include "gimple-iterator.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"

/* This file should be included last.  */
#include "target-def.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};
/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
static int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),          /* fp_add */
    COSTS_N_INSNS (6),          /* fp_mult */
    COSTS_N_INSNS (34),         /* fp_div_sf */
    COSTS_N_INSNS (63),         /* fp_div_df */
    COSTS_N_INSNS (23),         /* int_mult_si */
    COSTS_N_INSNS (23),         /* int_mult_di */
    COSTS_N_INSNS (2),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (97),         /* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (15),         /* fp_div_sf */
    COSTS_N_INSNS (22),         /* fp_div_df */
    COSTS_N_INSNS (8),          /* int_mult_si */
    COSTS_N_INSNS (12),         /* int_mult_di */
    COSTS_N_INSNS (1) + 1,      /* int_shift */
    COSTS_N_INSNS (1),          /* int_cmov */
    COSTS_N_INSNS (83),         /* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),          /* fp_add */
    COSTS_N_INSNS (4),          /* fp_mult */
    COSTS_N_INSNS (12),         /* fp_div_sf */
    COSTS_N_INSNS (15),         /* fp_div_df */
    COSTS_N_INSNS (7),          /* int_mult_si */
    COSTS_N_INSNS (7),          /* int_mult_di */
    COSTS_N_INSNS (1),          /* int_shift */
    COSTS_N_INSNS (2),          /* int_cmov */
    COSTS_N_INSNS (86),         /* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),            /* fp_add */
  COSTS_N_INSNS (1),            /* fp_mult */
  COSTS_N_INSNS (1),            /* fp_div_sf */
  COSTS_N_INSNS (1) + 1,        /* fp_div_df */
  COSTS_N_INSNS (1) + 1,        /* int_mult_si */
  COSTS_N_INSNS (1) + 2,        /* int_mult_di */
  COSTS_N_INSNS (1),            /* int_shift */
  COSTS_N_INSNS (1),            /* int_cmov */
  COSTS_N_INSNS (6),            /* int_div */
};
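
/* For example, fp_div_df above is COSTS_N_INSNS (1) + 1: the fractional
   +1 makes a double-precision divide look slightly worse than one plain
   insn, so when optimizing for size we still prefer a cheaper equivalent
   when one exists, without pretending the divide costs two whole insns.
   (Illustrative note added in editing.)  */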
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
static void alpha_handle_trap_shadows (void);
static void alpha_align_insns (void);
static void alpha_override_options_after_change (void);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *);
static bool vms_valid_pointer_mode (scalar_int_mode);
#else
#define vms_patch_builtins()  gcc_unreachable()
#endif
static unsigned int
rest_of_handle_trap_shadows (void)
{
  alpha_handle_trap_shadows ();
  return 0;
}
namespace {

const pass_data pass_data_handle_trap_shadows =
{
  RTL_PASS,                     /* type */
  "trap_shadows",               /* name */
  OPTGROUP_NONE,                /* optinfo_flags */
  TV_NONE,                      /* tv_id */
  0,                            /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_df_finish,               /* todo_flags_finish */
};
class pass_handle_trap_shadows : public rtl_opt_pass
{
public:
  pass_handle_trap_shadows(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_handle_trap_shadows ();
    }

}; // class pass_handle_trap_shadows
} // anon namespace

rtl_opt_pass *
make_pass_handle_trap_shadows (gcc::context *ctxt)
{
  return new pass_handle_trap_shadows (ctxt);
}
static unsigned int
rest_of_align_insns (void)
{
  alpha_align_insns ();
  return 0;
}
namespace {

const pass_data pass_data_align_insns =
{
  RTL_PASS,                     /* type */
  "align_insns",                /* name */
  OPTGROUP_NONE,                /* optinfo_flags */
  TV_NONE,                      /* tv_id */
  0,                            /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_df_finish,               /* todo_flags_finish */
};
class pass_align_insns : public rtl_opt_pass
{
public:
  pass_align_insns(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_align_insns, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* Due to the number of extra trapb insns, don't bother fixing up
         alignment when trap precision is instruction.  Moreover, we can
         only do our job when sched2 is run.  */
      return ((alpha_tune == PROCESSOR_EV4
               || alpha_tune == PROCESSOR_EV5)
              && optimize && !optimize_size
              && alpha_tp != ALPHA_TP_INSN
              && flag_schedule_insns_after_reload);
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_align_insns ();
    }

}; // class pass_align_insns
} // anon namespace

rtl_opt_pass *
make_pass_align_insns (gcc::context *ctxt)
{
  return new pass_align_insns (ctxt);
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
    const unsigned short line_size;  /* in bytes */
    const unsigned short l1_size;    /* in kb.  */
    const unsigned short l2_size;    /* in kb.  */
  } cpu_table[] = {
    /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
       EV4/EV45 had 128k to 16M 32-byte direct Bcache.  LCA45
       had 64k to 8M 8-byte direct Bcache.  */
    { "ev4",    PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "21064",  PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "ev45",   PROCESSOR_EV4, 0, 32, 16, 16*1024 },

    /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
       and 1M to 16M 64 byte L3 (not modeled).
       PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
       PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache.  */
    { "ev5",    PROCESSOR_EV5, 0, 32, 8, 96 },
    { "21164",  PROCESSOR_EV5, 0, 32, 8, 96 },
    { "ev56",   PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "pca56",  PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },

    /* EV6 had 64k 64 byte L1, 1M to 16M Bcache.  */
    { "ev6",    PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "21264",  PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "ev67",   PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 },
    { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 }
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int line_size = 0, l1_size = 0, l2_size = 0;
  int i;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
  /* Default to full IEEE compliance mode for Go language.  */
  if (strcmp (lang_hooks.name, "GNU Go") == 0
      && !(target_flags_explicit & MASK_IEEE))
    target_flags |= MASK_IEEE;
  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value %qs for -mfp-rounding-mode switch",
               alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_cpu_string, cpu_table [i].name))
          {
            alpha_tune = alpha_cpu = cpu_table[i].processor;
            line_size = cpu_table[i].line_size;
            l1_size = cpu_table[i].l1_size;
            l2_size = cpu_table[i].l2_size;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
            target_flags |= cpu_table[i].flags;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }
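
  /* As a worked example: -mcpu=pca56 selects PROCESSOR_EV5 for both
     alpha_cpu and alpha_tune, records the 64-byte line and 16k L1 from
     the table, and rewrites the ISA flags to exactly MASK_BWX|MASK_MAX,
     clearing any FIX or CIX bits left over from the defaults.
     (Illustrative note added in editing.)  */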
  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
        if (! strcmp (alpha_tune_string, cpu_table [i].name))
          {
            alpha_tune = cpu_table[i].processor;
            line_size = cpu_table[i].line_size;
            l1_size = cpu_table[i].l1_size;
            l2_size = cpu_table[i].l2_size;
            break;
          }
      if (i == ct_size)
        error ("bad value %qs for -mtune switch", alpha_tune_string);
    }
  if (line_size)
    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);
  if (l1_size)
    maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);
  if (l2_size)
    maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
                           global_options.x_param_values,
                           global_options_set.x_param_values);
  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }
  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }
  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
        {
          warning (0, "rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
        }
      if (alpha_fptm == ALPHA_FPTM_SUI)
        {
          warning (0, "trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
        }
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
        warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char)alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
      {
        static int const cache_latency[][4] =
        {
          { 3, 30, -1 },        /* ev4 -- Bcache is a guess */
          { 2, 12, 38 },        /* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },        /* ev6 -- Bcache from DS20 LMbench.  */
        };

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
          {
            warning (0, "L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_tune]);
            lat = 3;
          }
        else
          lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        lat = 150;
      }
    else
      {
        warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
        lat = 3;
      }

    alpha_memory_latency = lat;
  }
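
  /* Example: with -mtune=ev6, -mmemory-latency=L2 reads 12 cycles out of
     the table above, "-mmemory-latency=main" uses the main-memory guess,
     and a bare number such as -mmemory-latency=5 is used as-is.
     (Illustrative note added in editing.)  */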
  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;
  alpha_override_options_after_change ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Implement targetm.override_options_after_change.  */

static void
alpha_override_options_after_change (void)
{
  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize.  */
  if (optimize > 0)
    {
      if (align_loops <= 0)
        align_loops = 16;
      if (align_jumps <= 0)
        align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

static int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
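
/* For instance, 0x00000000ffff00ff is a zap mask (every byte is 0x00 or
   0xff), while 0x00000000ffff00f0 is not, because of the 0xf0 byte.
   (Illustrative note added in editing.)  */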
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (SUBREG_P (tmp))
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc (REGNO (tmp));
          if (op == 0)
            return 0;
          op = eliminate_regs (op, VOIDmode, NULL_RTX);
        }
    }
  return op;
}
/* The scalar modes supported differs from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (scalar_mode mode)
{
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
    case E_SImode:
    case E_DImode:
    case E_TImode: /* via optabs.c */
      return true;

    case E_SFmode:
    case E_DFmode:
      return true;

    case E_TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (TARGET_ABI_OSF
          && reload_completed
          && alpha_sa_size () == 0
          && get_frame_size () == 0
          && crtl->outgoing_args_size == 0
          && crtl->args.pretend_args_size == 0);
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  /* COMMON symbols are never small data.  */
  if (TREE_CODE (exp) == VAR_DECL && DECL_COMMON (exp))
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = DECL_SECTION_NAME (exp);
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
         in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
        return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (scalar_int_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname[symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST \
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
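
/* A VMS linkage symbol is simply one whose name ends in "..lk", for
   example "printf..lk"; the CONST arm of the macro also accepts
   (const (plus sym const_int)) forms wrapping such a symbol.
   (Illustrative note added in editing.)  */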
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (SUBREG_P (x)
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
          ? STRICT_REG_OK_FOR_BASE_P (x)
          : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (SUBREG_P (x)
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        x = SUBREG_REG (x);

      if (REG_P (x))
        {
          if (! strict
              && NONSTRICT_REG_OK_FP_BASE_P (x)
              && CONST_INT_P (ofs))
            return true;
          if ((strict
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
              && CONSTANT_ADDRESS_P (ofs))
            return true;
        }
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
           && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
        return true;

      if (GET_CODE (x) == LO_SUM)
        {
          rtx ofs = XEXP (x, 1);
          x = XEXP (x, 0);

          /* Discard non-paradoxical subregs.  */
          if (SUBREG_P (x)
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
            x = SUBREG_REG (x);

          /* Must have a valid base register.  */
          if (! (REG_P (x)
                 && (strict
                     ? STRICT_REG_OK_FOR_BASE_P (x)
                     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
            return false;

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode)
              || dtp32_symbolic_operand (ofs, Pmode)
              || tp32_symbolic_operand (ofs, Pmode))
            return true;
        }
    }

  return false;
}
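
/* Examples of addresses accepted above: (reg $16); (plus (reg $16)
   (const_int 64)); a small-data symbol when -msmall-data is in effect;
   (lo_sum (reg $1) sym) under explicit relocs; and, for DImode only,
   (and (plus (reg $16) (const_int 5)) (const_int -8)) as used by ldq_u.
   (Illustrative list added in editing.)  */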
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }
  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }
  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, dest, seq;
      rtx_insn *insn;

      switch (tls_symbolic_operand_type (x))
        {
        case TLS_MODEL_NONE:
          break;

        case TLS_MODEL_GLOBAL_DYNAMIC:
          {
            start_sequence ();

            r0 = gen_rtx_REG (Pmode, 0);
            r16 = gen_rtx_REG (Pmode, 16);
            tga = get_tls_get_addr ();
            dest = gen_reg_rtx (Pmode);
            seq = GEN_INT (alpha_next_sequence_number++);

            emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
            rtx val = gen_call_value_osf_tlsgd (r0, tga, seq);
            insn = emit_call_insn (val);
            RTL_CONST_CALL_P (insn) = 1;
            use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

            insn = get_insns ();
            end_sequence ();

            emit_libcall_block (insn, dest, r0, x);
            return dest;
          }
        case TLS_MODEL_LOCAL_DYNAMIC:
          {
            start_sequence ();

            r0 = gen_rtx_REG (Pmode, 0);
            r16 = gen_rtx_REG (Pmode, 16);
            tga = get_tls_get_addr ();
            scratch = gen_reg_rtx (Pmode);
            seq = GEN_INT (alpha_next_sequence_number++);

            emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
            rtx val = gen_call_value_osf_tlsldm (r0, tga, seq);
            insn = emit_call_insn (val);
            RTL_CONST_CALL_P (insn) = 1;
            use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

            insn = get_insns ();
            end_sequence ();

            eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                  UNSPEC_TLSLDM_CALL);
            emit_libcall_block (insn, scratch, r0, eqv);

            eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
            eqv = gen_rtx_CONST (Pmode, eqv);

            if (alpha_tls_size == 64)
              {
                dest = gen_reg_rtx (Pmode);
                emit_insn (gen_rtx_SET (dest, eqv));
                emit_insn (gen_adddi3 (dest, dest, scratch));
                return dest;
              }
            if (alpha_tls_size == 32)
              {
                rtx temp = gen_rtx_HIGH (Pmode, eqv);
                temp = gen_rtx_PLUS (Pmode, scratch, temp);
                scratch = gen_reg_rtx (Pmode);
                emit_insn (gen_rtx_SET (scratch, temp));
              }
            return gen_rtx_LO_SUM (Pmode, scratch, eqv);
          }
        case TLS_MODEL_INITIAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);
          scratch = gen_reg_rtx (Pmode);
          dest = gen_reg_rtx (Pmode);

          emit_insn (gen_get_thread_pointerdi (tp));
          emit_insn (gen_rtx_SET (scratch, eqv));
          emit_insn (gen_adddi3 (dest, tp, scratch));
          return dest;
        case TLS_MODEL_LOCAL_EXEC:
          eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
          eqv = gen_rtx_CONST (Pmode, eqv);
          tp = gen_reg_rtx (Pmode);

          emit_insn (gen_get_thread_pointerdi (tp));
          if (alpha_tls_size == 32)
            {
              rtx temp = gen_rtx_HIGH (Pmode, eqv);
              temp = gen_rtx_PLUS (Pmode, tp, temp);
              tp = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (tp, temp));
            }
          return gen_rtx_LO_SUM (Pmode, tp, eqv);

        default:
          gcc_unreachable ();
        }
      if (local_symbolic_operand (x, Pmode))
        {
          if (small_symbolic_operand (x, Pmode))
            return x;
          else
            {
              if (can_create_pseudo_p ())
                scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (scratch, gen_rtx_HIGH (Pmode, x)));
              return gen_rtx_LO_SUM (Pmode, scratch, x);
            }
        }
    }

  return NULL;
 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                               (!can_create_pseudo_p () ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);

    return plus_constant (Pmode, x, low);
  }
}
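
/* Worked example of split_addend: for addend 0x2345678 we get
   low = 0x5678 and high = 0x2340000, leaving nothing over, so the
   result is (plus (plus X 0x2340000) 0x5678): one ldah plus a 16-bit
   displacement folded into the final address.
   (Example added in editing.)  */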
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                          machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}
/* Return true if ADDR has an effect that depends on the machine mode it
   is used for.  On the Alpha this is true only for the unaligned modes.
   We can simplify the test since we know that the address must be valid.  */

static bool
alpha_mode_dependent_address_p (const_rtx addr,
                                addr_space_t as ATTRIBUTE_UNUSED)
{
  return GET_CODE (addr) == AND;
}
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
static bool
some_small_symbolic_operand_int (rtx x)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
    {
      rtx x = *iter;
      /* Don't re-split.  */
      if (GET_CODE (x) == LO_SUM)
        iter.skip_subrtxes ();
      else if (small_symbolic_operand (x, Pmode))
        return true;
    }
  return false;
}
rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
    {
      rtx *ptr = *iter;
      rtx x = *ptr;
      /* Don't re-split.  */
      if (GET_CODE (x) == LO_SUM)
        iter.skip_subrtxes ();
      else if (small_symbolic_operand (x, Pmode))
        {
          *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
          iter.skip_subrtxes ();
        }
    }
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx_insn *insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
                                 machine_mode mode ATTRIBUTE_UNUSED,
                                 int opnum, int type,
                                 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && CONST_INT_P (XEXP (x, 1)))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
        return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                                      GEN_INT (high)),
                        GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
/* Return the cost of moving between registers of various classes.  Moving
   between FLOAT_REGS and anything else except float regs is expensive.
   In fact, we make it quite expensive because we really don't want to
   do these moves unless it is clearly worth it.  Optimizations may
   reduce the impact of not being able to allocate a pseudo to a
   hard register.  */

static int
alpha_register_move_cost (machine_mode /*mode*/,
                          reg_class_t from, reg_class_t to)
{
  if ((from == FLOAT_REGS) == (to == FLOAT_REGS))
    return 2;

  if (TARGET_FIX)
    return (from == FLOAT_REGS) ? 6 : 8;

  return 4 + 2 * alpha_memory_latency;
}
/* Return the cost of moving data of MODE from a register to
   or from memory.  On the Alpha, bump this up a bit.  */

static int
alpha_memory_move_cost (machine_mode /*mode*/, reg_class_t /*regclass*/,
                        bool /*in*/)
{
  return 2 * alpha_memory_latency;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno,
                 int *total, bool speed)
{
  int code = GET_CODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
         nearly anywhere with no cost.  If it is a valid operand for an
         ADD or AND, likewise return 0 if we know it will be used in that
         context.  Otherwise, return 2 since it might be used there later.
         All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST_WIDE_INT:
      if (x == CONST0_RTX (mode))
        *total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
               || (outer_code == AND && and_operand (x, VOIDmode)))
        *total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
        *total = 2;
      else
        *total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
        *total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
        /* Estimate of cost for call_pal rduniq.  */
        /* ??? How many insns do we emit here?  More than one...  */
        *total = COSTS_N_INSNS (15);
      else
        /* Otherwise we do a load from the GOT.  */
        *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;
    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
               && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
        {
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0), mode,
                              (enum rtx_code) outer_code, opno, speed)
                    + rtx_cost (XEXP (x, 1), mode,
                                (enum rtx_code) outer_code, opno, speed)
                    + COSTS_N_INSNS (1));
          return true;
        }
      return false;

    case MULT:
      if (float_mode_p)
        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
      else
        *total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) <= 3)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else
        *total = cost_data->int_cmov;
      return false;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (! float_mode_p)
        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
      else
        *total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
        *total = 0;
      else
        *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
              <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}
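
/* Example: for an HImode reference at 6($30) with no stronger alignment
   info, OFFSET is 2, *PALIGNED_MEM becomes the SImode word at 4($30),
   and *PBITNUM is 16 -- the halfword lives in bits 16..31 of that word.
   (Example added in editing.)  */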
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (Pmode, base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
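
/* E.g. for ADDR (plus $16 13) and OFS 1 this computes $16 + 6; only the
   low three bits of the result matter to callers, which use the value
   to line up the unaligned extract/insert sequences.
   (Example added in editing.)  */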
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_SCALAR_INT_P (x)
      || CONST_DOUBLE_P (x)
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
        return NO_REGS;
      if (rclass == ALL_REGS)
        return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
        {
          if (in_p)
            {
              if (!aligned_memory_operand (x, mode))
                sri->icode = direct_optab_handler (reload_in_optab, mode);
            }
          else
            sri->icode = direct_optab_handler (reload_out_optab, mode);
          return NO_REGS;
        }
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
          && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
        return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Implement TARGET_SECONDARY_MEMORY_NEEDED.

   If we are copying between general and FP registers, we need a memory
   location unless the FIX extension is available.  */

static bool
alpha_secondary_memory_needed (machine_mode, reg_class_t class1,
                               reg_class_t class2)
{
  return (!TARGET_FIX
          && ((class1 == FLOAT_REGS && class2 != FLOAT_REGS)
              || (class2 == FLOAT_REGS && class1 != FLOAT_REGS)));
}
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  If MODE is
   floating-point, use it.  Otherwise, widen to a word like the default.
   This is needed because we always store integers in FP registers in
   quadword format.  This whole area is very tricky!  */

static machine_mode
alpha_secondary_memory_needed_mode (machine_mode mode)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    return mode;
  if (GET_MODE_SIZE (mode) >= 4)
    return mode;
  return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
}
/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx_insn *insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  subrtx_var_iterator::array_type array;
  for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx x = *iter;
          if (MEM_P (x))
            {
              MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
              MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
              MEM_READONLY_P (x) = MEM_READONLY_P (ref);
              /* Sadly, we cannot use alias sets because the extra
                 aliasing produced by the AND interferes.  Given that
                 two-byte quantities are the only thing we would be
                 able to differentiate anyway, there does not seem to
                 be any point in convoluting the early out of the
                 memory check.  */
              iter.skip_subrtxes ();
            }
        }
    else
      gcc_unreachable ();
}
static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
                                 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */
static rtx
alpha_emit_set_const_1 (rtx target, machine_mode mode,
                        HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  */

  if (c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
        {
          extra = 0x4000;
          tmp1 -= 0x40000000;
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
        }

      if (c == low || (low == 0 && extra == 0))
        {
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

          if (no_output)
            return pc_rtx;
          if (target == NULL)
            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (target, GEN_INT (c)));
          return target;
        }
      else if (n >= 2 + (extra != 0))
        {
          if (no_output)
            return pc_rtx;
          if (!can_create_pseudo_p ())
            {
              emit_insn (gen_rtx_SET (target, GEN_INT (high << 16)));
              temp = target;
            }
          else
            temp = copy_to_suggested_reg (GEN_INT (high << 16),
                                          subtarget, mode);

          /* As of 2002-02-23, addsi3 is only available when not optimizing.
             This means that if we go through expand_binop, we'll try to
             generate extensions, etc, which will require new pseudos, which
             will fail during some split phases.  The SImode add patterns
             still exist, but are not named.  So build the insns by hand.  */

          if (extra != 0)
            {
              if (! subtarget)
                subtarget = gen_reg_rtx (mode);
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
              insn = gen_rtx_SET (subtarget, insn);
              emit_insn (insn);
              temp = subtarget;
            }

          if (target == NULL)
            target = gen_reg_rtx (mode);
          insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
          insn = gen_rtx_SET (target, insn);
          emit_insn (insn);
          return target;
        }
    }
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
         high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
        {
          temp = alpha_emit_set_const (subtarget, mode, c - new_const,
                                       i, no_output);
          if (temp)
            {
              if (no_output)
                return temp;
              return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
                                   target, 0, OPTAB_WIDEN);
            }
        }

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);
        }
      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c >> bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const,
                                         i, no_output);
            if (!temp && c < 0)
              {
                new_const = (unsigned HOST_WIDE_INT)c >> bits;
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (c) - 1);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const,
                                         i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                     target, 1, OPTAB_WIDEN);
              }
          }
      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
              - floor_log2 (~ c) - 2);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new_const = c << bits;
            temp = alpha_emit_set_const (subtarget, mode, new_const,
                                         i, no_output);
            if (!temp)
              {
                new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
                temp = alpha_emit_set_const (subtarget, mode, new_const,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }
    }
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const,
                                   n - 1, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_binop (mode, and_optab, temp,
                               GEN_INT (c | ~ new_const),
                               target, 0, OPTAB_WIDEN);
        }
    }

  return 0;
}
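
/* Worked example: c = 0x12345678 splits as high = 0x1234, low = 0x5678,
   so two insns suffice: ldah reg,0x1234($31) then lda reg,0x5678(reg).
   A constant like 0xff00ff0000000000 is instead reached through the
   ZAPNOT path: load -1 in one insn, then AND with the byte mask.
   (Example added in editing.)  */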
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, machine_mode mode,
                      HOST_WIDE_INT c, int n, bool no_output)
{
  machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, we
     can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
        return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
        {
          rtx_insn *insn;
          rtx set;

          if (no_output)
            return result;

          insn = get_last_insn ();
          set = single_set (insn);
          if (! CONSTANT_P (SET_SRC (set)))
            set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
          break;
        }
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
        result = orig_target;
      else if (mode != orig_mode)
        result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straight forward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
        emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
/* Given an integral CONST_INT or CONST_VECTOR, return the low 64 bits.  */

static HOST_WIDE_INT
alpha_extract_integer (rtx x)
{
  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  gcc_assert (CONST_INT_P (x));

  return INTVAL (x);
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
   we are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (machine_mode mode, rtx x)
{
  HOST_WIDE_INT i0;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
        x = XEXP (XEXP (x, 0), 0);
      else
        return true;

      if (GET_CODE (x) != SYMBOL_REF)
        return true;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_WIDE_INT:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      if (x == CONST0_RTX (mode))
        return true;
      gcc_assert (CONST_WIDE_INT_NUNITS (x) == 2);
      i0 = CONST_WIDE_INT_ELT (x, 1);
      if (alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) == NULL)
        return false;
      i0 = CONST_WIDE_INT_ELT (x, 0);
      goto do_integer;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
        return true;
      return false;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
        return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
        return false;
      if (GET_MODE_SIZE (mode) != 8)
        return false;
      /* FALLTHRU */

    case CONST_INT:
      if (TARGET_BUILD_CONSTANTS)
        return true;
      i0 = alpha_extract_integer (x);
    do_integer:
      return alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) != NULL;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0;
  rtx temp = NULL_RTX;

  i0 = alpha_extract_integer (operands[1]);

  temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
        emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
        {
          if (tmp == operands[0])
            return true;
          operands[1] = tmp;
          return false;
        }
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
        return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
        {
          if (reload_in_progress)
            {
              if (mode == QImode)
                seq = gen_reload_inqi_aligned (operands[0], operands[1]);
              else
                seq = gen_reload_inhi_aligned (operands[0], operands[1]);
              emit_insn (seq);
            }
          else
            {
              rtx aligned_mem, bitnum;
              rtx scratch = gen_reg_rtx (SImode);
              rtx subtarget;
              bool copyout;

              get_aligned_mem (operands[1], &aligned_mem, &bitnum);

              subtarget = operands[0];
              if (REG_P (subtarget))
                subtarget = gen_lowpart (DImode, subtarget), copyout = false;
              else
                subtarget = gen_reg_rtx (DImode), copyout = true;

              if (mode == QImode)
                seq = gen_aligned_loadqi (subtarget, aligned_mem,
                                          bitnum, scratch);
              else
                seq = gen_aligned_loadhi (subtarget, aligned_mem,
                                          bitnum, scratch);
              emit_insn (seq);

              if (copyout)
                emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
            }
        }
      else
        {
          /* Don't pass these as parameters since that makes the generated
             code depend on parameter evaluation order which will cause
             bootstrap failures.  */

          rtx temp1, temp2, subtarget, ua;
          bool copyout;

          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);

          subtarget = operands[0];
          if (REG_P (subtarget))
            subtarget = gen_lowpart (DImode, subtarget), copyout = false;
          else
            subtarget = gen_reg_rtx (DImode), copyout = true;

          ua = get_unaligned_address (operands[1]);
          if (mode == QImode)
            seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
          else
            seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

          alpha_set_memflags (seq, operands[1]);
          emit_insn (seq);

          if (copyout)
            emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
        }
      return true;
    }
2368 if (any_memory_operand (operands
[0], mode
))
2370 if (aligned_memory_operand (operands
[0], mode
))
2372 rtx aligned_mem
, bitnum
;
2373 rtx temp1
= gen_reg_rtx (SImode
);
2374 rtx temp2
= gen_reg_rtx (SImode
);
2376 get_aligned_mem (operands
[0], &aligned_mem
, &bitnum
);
2378 emit_insn (gen_aligned_store (aligned_mem
, operands
[1], bitnum
,
2383 rtx temp1
= gen_reg_rtx (DImode
);
2384 rtx temp2
= gen_reg_rtx (DImode
);
2385 rtx temp3
= gen_reg_rtx (DImode
);
2386 rtx ua
= get_unaligned_address (operands
[0]);
2389 seq
= gen_unaligned_storeqi (ua
, operands
[1], temp1
, temp2
, temp3
);
2391 seq
= gen_unaligned_storehi (ua
, operands
[1], temp1
, temp2
, temp3
);
2393 alpha_set_memflags (seq
, operands
[0]);
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
	tmp = operands[0];
      else
	tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
	emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else going on, because I wound up with
   the exact same single-bit error.  There is a branch-less form of this
   same code:

	srl     $16,1,$1
	and     $16,1,$2
	cmplt   $16,0,$3
	or      $1,$2,$2
	cmovge  $16,$16,$2
	itoft	$3,$f10
	itoft	$2,$f11
	cvtqs   $f11,$f11
	addss   $f11,$f11,$f0
	fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

   LC0: .long 0,0x5f800000

	itoft	$16,$f11
	lda	$2,LC0
	cmplt	$16,0,$1
	cpyse	$f11,$f31,$f10
	cpyse	$f31,$f11,$f11
	s4addq	$1,$2,$1
	lds	$f12,0($1)
	cvtqt	$f10,$f10
	cvtqt	$f11,$f11
	addt	$f12,$f10,$f0
	addt	$f0,$f11,$f0

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */

void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
/* Generate the comparison for a conditional branch.  */

void
alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
{
  enum rtx_code cmp_code, branch_code;
  machine_mode branch_mode = VOIDmode;
  enum rtx_code code = GET_CODE (operands[0]);
  rtx op0 = operands[1], op1 = operands[2];
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (cmp_mode == DFmode)
	{
	  cmp_code = swap_condition (code);
	  branch_code = NE;
	  std::swap (op0, op1);
	}
      else
	{
	  cmp_code = reverse_condition (code);
	  branch_code = EQ;
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DFmode)
    {
      if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
	{
	  /* When we are not as concerned about non-finite values, and we
	     are comparing against zero, we can branch directly.  */
	  if (op1 == CONST0_RTX (DFmode))
	    cmp_code = UNKNOWN, branch_code = code;
	  else if (op0 == CONST0_RTX (DFmode))
	    {
	      /* Undo the swap we probably did just above.  */
	      std::swap (op0, op1);
	      branch_code = swap_condition (cmp_code);
	      cmp_code = UNKNOWN;
	    }
	}
      else
	{
	  /* ??? We mark the branch mode to be CCmode to prevent the
	     compare and branch from being combined, since the compare
	     insn follows IEEE rules that the branch does not.  */
	  branch_mode = CCmode;
	}
    }
  else
    {
      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
	{
	  /* Whee.  Compare and branch against 0 directly.  */
	  if (op1 == const0_rtx)
	    cmp_code = UNKNOWN, branch_code = code;

	  /* If the constant doesn't fit into an immediate, but can
	     be generated by lda/ldah, we adjust the argument and
	     compare against zero, so we can use beq/bne directly.  */
	  /* ??? Don't do this when comparing against symbols, otherwise
	     we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
	     be declared false out of hand (at least for non-weak).  */
	  else if (CONST_INT_P (op1)
		   && (code == EQ || code == NE)
		   && !(symbolic_operand (op0, VOIDmode)
			|| (REG_P (op0) && REG_POINTER (op0))))
	    {
	      rtx n_op1 = GEN_INT (-INTVAL (op1));

	      if (! satisfies_constraint_I (op1)
		  && (satisfies_constraint_K (n_op1)
		      || satisfies_constraint_L (n_op1)))
		cmp_code = PLUS, branch_code = code, op1 = n_op1;
	    }
	}

      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != UNKNOWN)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Emit the branch instruction.  */
  tem = gen_rtx_SET (pc_rtx,
		     gen_rtx_IF_THEN_ELSE (VOIDmode,
					   gen_rtx_fmt_ee (branch_code,
							   branch_mode, tem,
							   CONST0_RTX (cmp_mode)),
					   gen_rtx_LABEL_REF (VOIDmode,
							      operands[3]),
					   pc_rtx));
  emit_jump_insn (tem);
}
/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return the final comparison, or NULL if we can't work.  */

bool
alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
{
  enum rtx_code cmp_code;
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[2], op1 = operands[3];
  rtx tmp;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  if (cmp_mode == DFmode && !TARGET_FIX)
    return false;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

  cmp_code = UNKNOWN;
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      if (cmp_mode == DFmode)
	cmp_code = code, code = NE;
      break;

    case NE:
      if (cmp_mode == DImode && op1 == const0_rtx)
	break;
      /* FALLTHRU */

    case ORDERED:
      cmp_code = reverse_condition (code);
      code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
	 special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
	break;
      code = swap_condition (code);
      if (cmp_mode == DFmode)
	cmp_code = code, code = NE;
      std::swap (op0, op1);
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!register_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != UNKNOWN)
    {
      tmp = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (tmp, gen_rtx_fmt_ee (cmp_code, cmp_mode,
						   op0, op1)));

      op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
      op1 = const0_rtx;
    }

  /* Emit the setcc instruction.  */
  emit_insn (gen_rtx_SET (operands[0], gen_rtx_fmt_ee (code, DImode,
						       op0, op1)));
  return true;
}
/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are nonzero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
alpha_emit_conditional_move (rtx cmp, machine_mode mode)
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);
  machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;
  rtx tem;

  if (cmp_mode == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      cmp_mode = DImode;
    }

  gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);

  if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
    {
      enum rtx_code cmp_code;

      if (! TARGET_FIX)
	return 0;

      /* If we have fp<->int register move instructions, do a cmov by
	 performing the comparison in fp registers, and move the
	 zero/nonzero value to integer registers, where we can then
	 use a normal cmov, or vice-versa.  */

      switch (code)
	{
	case EQ: case LE: case LT: case LEU: case LTU:
	case UNORDERED:
	  /* We have these compares.  */
	  cmp_code = code, code = NE;
	  break;

	case NE:
	case ORDERED:
	  /* These must be reversed.  */
	  cmp_code = reverse_condition (code), code = EQ;
	  break;

	case GE: case GT: case GEU: case GTU:
	  /* These normally need swapping, but for integer zero we have
	     special patterns that recognize swapped operands.  */
	  if (cmp_mode == DImode && op1 == const0_rtx)
	    cmp_code = code, code = NE;
	  else
	    {
	      cmp_code = swap_condition (code);
	      code = NE;
	      std::swap (op0, op1);
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      if (cmp_mode == DImode)
	{
	  if (!reg_or_0_operand (op0, DImode))
	    op0 = force_reg (DImode, op0);
	  if (!reg_or_8bit_operand (op1, DImode))
	    op1 = force_reg (DImode, op1);
	}

      tem = gen_reg_rtx (cmp_mode);
      emit_insn (gen_rtx_SET (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode,
						   op0, op1)));

      cmp_mode = cmp_mode == DImode ? E_DFmode : E_DImode;
      op0 = gen_lowpart (cmp_mode, tem);
      op1 = CONST0_RTX (cmp_mode);
      cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
      local_fast_math = 1;
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (cmp_mode == DImode || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

  if (!can_create_pseudo_p ())
    return NULL_RTX;

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares: */
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
	 special patterns that recognize swapped operands.  */
      if (cmp_mode == DImode && op1 == const0_rtx)
	break;
      code = swap_condition (code);
      std::swap (op0, op1);
      break;

    default:
      gcc_unreachable ();
    }

  if (cmp_mode == DImode)
    {
      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (cmp_mode == DFmode && !local_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
}
/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */

int
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
			      rtx t_rtx, rtx f_rtx)
{
  HOST_WIDE_INT t, f, diff;
  machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);
  t = INTVAL (t_rtx);
  f = INTVAL (f_rtx);
  diff = t - f;

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))
    {
      code = reverse_condition (code);
      std::swap (t, f);
      diff = -diff;
    }

  subtarget = target = dest;
  if (mode != DImode)
    {
      target = gen_lowpart (DImode, dest);
      if (can_create_pseudo_p ())
	subtarget = gen_reg_rtx (DImode);
      else
	subtarget = target;
    }
  /* Below, we must be careful to use copy_rtx on target and subtarget
     in intermediate insns, as they may be a subreg rtx, which may not
     be shared.  */

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
	 viable over a longer latency cmove.  On EV5, the E0 slot is a
	 scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
			    GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (target, tmp));
    }
  else if (f == 0 && t == -1)
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
    }
  else if (diff == 1 || diff == 4 || diff == 8)
    {
      rtx add_op;

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));

      if (diff == 1)
	emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
      else
	{
	  add_op = GEN_INT (f);
	  if (sext_add_operand (add_op, mode))
	    {
	      tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
				  GEN_INT (diff));
	      tmp = gen_rtx_PLUS (DImode, tmp, add_op);
	      emit_insn (gen_rtx_SET (target, tmp));
	    }
	  else
	    return 0;
	}
    }
  else
    return 0;

  return 1;
}
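
/* A standalone sketch (not GCC code; compile separately) of the
   transform performed by this splitter: a conditional move between two
   integer constants T and F becomes a 0/1 setcc scaled by T - F, which
   maps onto a shift or an s4addq/s8addq when the difference is a power
   of two or one of the scaled-add factors.  */
#if 0
#include <assert.h>

static long
cmov_const (int cond, long t, long f)
{
  long b = (cond != 0);		/* the setcc result, 0 or 1 */
  return b * (t - f) + f;	/* selects t when cond holds, else f */
}

int
main (void)
{
  assert (cmov_const (1, 12, 4) == 12);
  assert (cmov_const (0, 12, 4) == 4);
  return 0;
}
#endif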
/* Look up the function X_floating library function name for the
   given operation.  */

struct GTY(()) xfloating_op
{
  const enum rtx_code code;
  const char *const GTY((skip)) osf_func;
  const char *const GTY((skip)) vms_func;
  rtx libcall;
};

static GTY(()) struct xfloating_op xfloating_ops[] =
{
  { PLUS,		"_OtsAddX", "OTS$ADD_X", 0 },
  { MINUS,		"_OtsSubX", "OTS$SUB_X", 0 },
  { MULT,		"_OtsMulX", "OTS$MUL_X", 0 },
  { DIV,		"_OtsDivX", "OTS$DIV_X", 0 },
  { EQ,			"_OtsEqlX", "OTS$EQL_X", 0 },
  { NE,			"_OtsNeqX", "OTS$NEQ_X", 0 },
  { LT,			"_OtsLssX", "OTS$LSS_X", 0 },
  { LE,			"_OtsLeqX", "OTS$LEQ_X", 0 },
  { GT,			"_OtsGtrX", "OTS$GTR_X", 0 },
  { GE,			"_OtsGeqX", "OTS$GEQ_X", 0 },
  { FIX,		"_OtsCvtXQ", "OTS$CVTXQ", 0 },
  { FLOAT,		"_OtsCvtQX", "OTS$CVTQX", 0 },
  { UNSIGNED_FLOAT,	"_OtsCvtQUX", "OTS$CVTQUX", 0 },
  { FLOAT_EXTEND,	"_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  { FLOAT_TRUNCATE,	"_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
};

static GTY(()) struct xfloating_op vax_cvt_ops[] =
{
  { FLOAT_EXTEND,	"_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  { FLOAT_TRUNCATE,	"_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
};

static rtx
alpha_lookup_xfloating_lib_func (enum rtx_code code)
{
  struct xfloating_op *ops = xfloating_ops;
  long n = ARRAY_SIZE (xfloating_ops);
  long i;

  gcc_assert (TARGET_HAS_XFLOATING_LIBS);

  /* How irritating.  Nothing to key off for the main table.  */
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
    {
      ops = vax_cvt_ops;
      n = ARRAY_SIZE (vax_cvt_ops);
    }

  for (i = 0; i < n; ++i, ++ops)
    if (ops->code == code)
      {
	rtx func = ops->libcall;
	if (!func)
	  {
	    func = init_one_libfunc (TARGET_ABI_OPEN_VMS
				     ? ops->vms_func : ops->osf_func);
	    ops->libcall = func;
	  }
	return func;
      }

  gcc_unreachable ();
}
/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

static int
alpha_compute_xfloating_mode_arg (enum rtx_code code,
				  enum alpha_fp_rounding_mode round)
{
  int mode;

  switch (round)
    {
    case ALPHA_FPRM_NORM:
      mode = 2;
      break;
    case ALPHA_FPRM_MINF:
      mode = 1;
      break;
    case ALPHA_FPRM_CHOP:
      mode = 0;
      break;
    case ALPHA_FPRM_DYN:
      mode = 4;
      break;
    default:
      gcc_unreachable ();

    /* XXX For reference, round to +inf is mode = 3.  */
    }

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
    mode |= 0x10000;

  return mode;
}
/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.
*/

static void
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
			      int noperands, rtx equiv)
{
  rtx usage = NULL_RTX, reg;
  int regno = 16, i;

  start_sequence ();

  for (i = 0; i < noperands; ++i)
    {
      switch (GET_MODE (operands[i]))
	{
	case E_TFmode:
	  reg = gen_rtx_REG (TFmode, regno);
	  regno += 2;
	  break;

	case E_DFmode:
	  reg = gen_rtx_REG (DFmode, regno + 32);
	  regno += 1;
	  break;

	case E_VOIDmode:
	  gcc_assert (CONST_INT_P (operands[i]));
	  /* FALLTHRU */
	case E_DImode:
	  reg = gen_rtx_REG (DImode, regno);
	  regno += 1;
	  break;

	default:
	  gcc_unreachable ();
	}

      emit_move_insn (reg, operands[i]);
      use_reg (&usage, reg);
    }

  switch (GET_MODE (target))
    {
    case E_TFmode:
      reg = gen_rtx_REG (TFmode, 16);
      break;
    case E_DFmode:
      reg = gen_rtx_REG (DFmode, 32);
      break;
    case E_DImode:
      reg = gen_rtx_REG (DImode, 0);
      break;
    default:
      gcc_unreachable ();
    }

  rtx mem = gen_rtx_MEM (QImode, func);
  rtx_insn *tmp = emit_call_insn (gen_call_value (reg, mem, const0_rtx,
						  const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  RTL_CONST_CALL_P (tmp) = 1;

  tmp = get_insns ();
  end_sequence ();

  emit_libcall_block (tmp, target, reg, equiv);
}
/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

void
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
{
  rtx func;
  int mode;
  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
				gen_rtx_fmt_ee (code, TFmode, operands[1],
						operands[2]));
}
/* Emit an X_floating library function call for a comparison.  */

static rtx
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
{
  enum rtx_code cmp_code, res_code;
  rtx func, out, operands[2], note;

  /* X_floating library comparison functions return
	   -1  unordered
	    0  false
	    1  true
     Convert the compare against the raw return value.  */

  cmp_code = *pcode;
  switch (cmp_code)
    {
    case UNORDERED:
      cmp_code = EQ;
      res_code = LT;
      break;
    case ORDERED:
      cmp_code = EQ;
      res_code = GE;
      break;
    case NE:
      res_code = NE;
      break;
    case EQ:
    case LT:
    case GT:
    case LE:
    case GE:
      res_code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  *pcode = res_code;

  func = alpha_lookup_xfloating_lib_func (cmp_code);

  operands[0] = op0;
  operands[1] = op1;
  out = gen_reg_rtx (DImode);

  /* What's actually returned is -1,0,1, not a proper boolean value.  */
  note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
  note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
  alpha_emit_xfloating_libcall (func, out, operands, 2, note);

  return out;
}
3238 alpha_emit_xfloating_cvt (enum rtx_code orig_code
, rtx operands
[])
3240 int noperands
= 1, mode
;
3241 rtx out_operands
[2];
3243 enum rtx_code code
= orig_code
;
3245 if (code
== UNSIGNED_FIX
)
3248 func
= alpha_lookup_xfloating_lib_func (code
);
3250 out_operands
[0] = operands
[1];
3255 mode
= alpha_compute_xfloating_mode_arg (code
, ALPHA_FPRM_CHOP
);
3256 out_operands
[1] = GEN_INT (mode
);
3259 case FLOAT_TRUNCATE
:
3260 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
3261 out_operands
[1] = GEN_INT (mode
);
3268 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, noperands
,
3269 gen_rtx_fmt_e (orig_code
,
3270 GET_MODE (operands
[0]),
/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
   DImode moves from OP[2,3] to OP[0,1].  If FIXUP_OVERLAP is true,
   guarantee that the sequence
     set (OP[0] OP[2])
     set (OP[1] OP[3])
   is valid.  Naturally, output operand ordering is little-endian.
   This is used by *movtf_internal and *movti_internal.  */

void
alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
			bool fixup_overlap)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);
      break;

    CASE_CONST_SCALAR_INT:
    case CONST_DOUBLE:
      gcc_assert (operands[1] == CONST0_RTX (mode));
      operands[2] = operands[3] = const0_rtx;
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);
      break;

    default:
      gcc_unreachable ();
    }

  if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
    {
      std::swap (operands[0], operands[1]);
      std::swap (operands[2], operands[3]);
    }
}
/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */

void
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
{
  rtx high_bit = operands[2];
  rtx scratch;
  int move;

  alpha_split_tmode_pair (operands, TFmode, false);

  /* Detect three flavors of operand overlap.  */
  move = 1;
  if (rtx_equal_p (operands[0], operands[2]))
    move = 0;
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (rtx_equal_p (operands[0], high_bit))
	move = 2;
      else
	move = -1;
    }

  if (move < 0)
    emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

  if (move > 0)
    {
      emit_move_insn (operands[0], operands[2]);
      if (move > 1)
	emit_move_insn (operands[1], scratch);
    }
}
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

   unsigned:                       signed:
   word:   ldq_u  r1,X(r11)        ldq_u  r1,X(r11)
	   ldq_u  r2,X+1(r11)      ldq_u  r2,X+1(r11)
	   lda    r3,X(r11)        lda    r3,X+2(r11)
	   extwl  r1,r3,r1         extql  r1,r3,r1
	   extwh  r2,r3,r2         extqh  r2,r3,r2
	   or     r1,r2,r1         or     r1,r2,r1
				   sra    r1,48,r1

   long:   ldq_u  r1,X(r11)        ldq_u  r1,X(r11)
	   ldq_u  r2,X+3(r11)      ldq_u  r2,X+3(r11)
	   lda    r3,X(r11)        lda    r3,X(r11)
	   extll  r1,r3,r1         extll  r1,r3,r1
	   extlh  r2,r3,r2         extlh  r2,r3,r2
	   or     r1,r2,r1         addl   r1,r2,r1

   quad:   ldq_u  r1,X(r11)
	   ldq_u  r2,X+7(r11)
	   lda    r3,X(r11)
	   extql  r1,r3,r1
	   extqh  r2,r3,r2
	   or     r1,r2,r1
*/

void
alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
			     HOST_WIDE_INT ofs, int sign)
{
  rtx meml, memh, addr, extl, exth, tmp, mema;
  machine_mode mode;

  if (TARGET_BWX && size == 2)
    {
      meml = adjust_address (mem, QImode, ofs);
      memh = adjust_address (mem, QImode, ofs+1);
      extl = gen_reg_rtx (DImode);
      exth = gen_reg_rtx (DImode);
      emit_insn (gen_zero_extendqidi2 (extl, meml));
      emit_insn (gen_zero_extendqidi2 (exth, memh));
      exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
				  NULL, 1, OPTAB_LIB_WIDEN);
      addr = expand_simple_binop (DImode, IOR, extl, exth,
				  NULL, 1, OPTAB_LIB_WIDEN);

      if (sign && GET_MODE (tgt) != HImode)
	{
	  addr = gen_lowpart (HImode, addr);
	  emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
	}
      else
	{
	  if (GET_MODE (tgt) != DImode)
	    addr = gen_lowpart (GET_MODE (tgt), addr);
	  emit_move_insn (tgt, addr);
	}
      return;
    }

  meml = gen_reg_rtx (DImode);
  memh = gen_reg_rtx (DImode);
  addr = gen_reg_rtx (DImode);
  extl = gen_reg_rtx (DImode);
  exth = gen_reg_rtx (DImode);

  mema = XEXP (mem, 0);
  if (GET_CODE (mema) == LO_SUM)
    mema = force_reg (Pmode, mema);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  tmp = change_address (mem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (DImode, mema, ofs),
				     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (meml, tmp);

  tmp = change_address (mem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (DImode, mema,
						    ofs + size - 1),
				     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (memh, tmp);

  if (sign && size == 2)
    {
      emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));

      emit_insn (gen_extql (extl, meml, addr));
      emit_insn (gen_extqh (exth, memh, addr));

      /* We must use tgt here for the target.  Alpha-vms port fails if we use
	 addr for the target, because addr is marked as a pointer and combine
	 knows that pointers are always sign-extended 32-bit values.  */
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
			   addr, 1, OPTAB_WIDEN);
    }
  else
    {
      emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
      emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_extwh (exth, memh, addr));
	  mode = HImode;
	  break;
	case 4:
	  emit_insn (gen_extlh (exth, memh, addr));
	  mode = SImode;
	  break;
	case 8:
	  emit_insn (gen_extqh (exth, memh, addr));
	  mode = DImode;
	  break;
	default:
	  gcc_unreachable ();
	}

      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
			   gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
			   sign, OPTAB_WIDEN);
    }

  if (addr != tgt)
    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
}
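
/* A standalone host model (not GCC code; compile separately, assumes a
   little-endian host) of the ldq_u/extql/extqh merge expanded above:
   two aligned quadword loads shifted by the low address bits and OR'd,
   so no access ever crosses a quadword boundary.  */
#if 0
#include <stdint.h>
#include <string.h>
#include <assert.h>

static uint64_t
unaligned_load_q (const uint8_t *p)
{
  uintptr_t a = (uintptr_t) p;
  uint64_t lo, hi;
  memcpy (&lo, (const void *) (a & ~(uintptr_t) 7), 8);	      /* ldq_u p   */
  memcpy (&hi, (const void *) ((a + 7) & ~(uintptr_t) 7), 8); /* ldq_u p+7 */
  unsigned sh = (a & 7) * 8;
  /* extql / extqh; the sh == 0 special case mirrors the cmov fixup
     described in alpha_expand_unaligned_load_words below.  */
  return (lo >> sh) | (sh ? hi << (64 - sh) : 0);
}

int
main (void)
{
  uint8_t buf[16];
  uint64_t want;
  for (int i = 0; i < 16; i++)
    buf[i] = i + 1;
  memcpy (&want, buf + 3, 8);
  assert (unaligned_load_q (buf + 3) == want);
  return 0;
}
#endif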
/* Similarly, use ins and msk instructions to perform unaligned stores.  */

void
alpha_expand_unaligned_store (rtx dst, rtx src,
			      HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;

  if (TARGET_BWX && size == 2)
    {
      if (src != const0_rtx)
	{
	  dstl = gen_lowpart (QImode, src);
	  dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
				      NULL, 1, OPTAB_LIB_WIDEN);
	  dsth = gen_lowpart (QImode, dsth);
	}
      else
	dstl = dsth = const0_rtx;

      meml = adjust_address (dst, QImode, ofs);
      memh = adjust_address (dst, QImode, ofs+1);

      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
      return;
    }

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  dsta = XEXP (dst, 0);
  if (GET_CODE (dsta) == LO_SUM)
    dsta = force_reg (Pmode, dsta);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  meml = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (DImode, dsta, ofs),
				      GEN_INT (-8)));
  set_mem_alias_set (meml, 0);

  memh = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (DImode, dsta,
						     ofs + size - 1),
				      GEN_INT (-8)));
  set_mem_alias_set (memh, 0);

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);

  addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
			    GEN_INT (size*8), addr));

      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
	  break;
	case 4:
	  emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
	  break;
	case 8:
	  emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

  switch ((int) size)
    {
    case 2:
      emit_insn (gen_mskwl (dstl, dstl, addr));
      break;
    case 4:
      emit_insn (gen_mskll (dstl, dstl, addr));
      break;
    case 8:
      emit_insn (gen_mskql (dstl, dstl, addr));
      break;
    default:
      gcc_unreachable ();
    }

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
    }

  /* Must store high before low for degenerate case of aligned.  */
  emit_move_insn (memh, dsth);
  emit_move_insn (meml, dstl);
}
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning, that this speeds copying to/from aligned and unaligned
   buffers, and that it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS 8
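
/* A standalone sketch (not GCC code; compile separately) of the
   load-everything-then-store-everything strategy described above, for
   the largest case the expander handles (MAX_MOVE_WORDS quadwords).  */
#if 0
#include <stdint.h>
#include <stddef.h>

static void
block_move_words (uint64_t *dst, const uint64_t *src, size_t words)
{
  uint64_t r[8];		/* MAX_MOVE_WORDS "registers" */
  size_t i;

  /* Load phase: all reads complete before any write starts.  */
  for (i = 0; i < words; i++)
    r[i] = src[i];
  /* Store phase.  */
  for (i = 0; i < words; i++)
    dst[i] = r[i];
}

int
main (void)
{
  uint64_t a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, b[8];
  block_move_words (b, a, 8);
  return b[7] == 8 ? 0 : 1;
}
#endif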
/* Load an integral number of consecutive unaligned quadwords.  */

static void
alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
				   HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  rtx sreg, areg, tmp, smema;
  HOST_WIDE_INT i;

  smema = XEXP (smem, 0);
  if (GET_CODE (smema) == LO_SUM)
    smema = force_reg (Pmode, smema);

  /* Generate all the tmp registers we need.  */
  for (i = 0; i < words; ++i)
    {
      data_regs[i] = out_regs[i];
      ext_tmps[i] = gen_reg_rtx (DImode);
    }
  data_regs[words] = gen_reg_rtx (DImode);

  if (ofs != 0)
    smem = adjust_address (smem, GET_MODE (smem), ofs);

  /* Load up all of the source data.  */
  for (i = 0; i < words; ++i)
    {
      tmp = change_address (smem, DImode,
			    gen_rtx_AND (DImode,
					 plus_constant (DImode, smema, 8*i),
					 im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (data_regs[i], tmp);
    }

  tmp = change_address (smem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (DImode, smema,
						    8*words - 1),
				     im8));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (data_regs[words], tmp);

  /* Extract the half-word fragments.  Unfortunately DEC decided to make
     extxh with offset zero a noop instead of zeroing the register, so
     we must take care of that edge condition ourselves with cmov.  */

  sreg = copy_addr_to_reg (smema);
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
		       1, OPTAB_WIDEN);
  for (i = 0; i < words; ++i)
    {
      emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
      emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
      emit_insn (gen_rtx_SET (ext_tmps[i],
			      gen_rtx_IF_THEN_ELSE (DImode,
						    gen_rtx_EQ (DImode, areg,
								const0_rtx),
						    const0_rtx, ext_tmps[i])));
    }

  /* Merge the half-words into whole words.  */
  for (i = 0; i < words; ++i)
    {
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
				  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
    }
}
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

static void
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
				    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2, dmema;
  HOST_WIDE_INT i;

  dmema = XEXP (dmem, 0);
  if (GET_CODE (dmema) == LO_SUM)
    dmema = force_reg (Pmode, dmema);

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx (DImode);
  st_tmp_1 = gen_reg_rtx (DImode);
  st_tmp_2 = gen_reg_rtx (DImode);

  if (ofs != 0)
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);

  st_addr_2 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (DImode, dmema,
							  words*8 - 1),
					   im8));
  set_mem_alias_set (st_addr_2, 0);

  st_addr_1 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode, dmema, im8));
  set_mem_alias_set (st_addr_1, 0);

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (dmema);
  if (data_regs != NULL)
    {
      for (i = words-1; i >= 0; --i)
	{
	  emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
	  emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
	}
      for (i = words-1; i > 0; --i)
	{
	  ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
					ins_tmps[i-1], ins_tmps[i-1], 1,
					OPTAB_WIDEN);
	}
    }

  /* Split and merge the ends with the destination data.  */
  emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
  emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));

  if (data_regs != NULL)
    {
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
			       st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
			       st_tmp_1, 1, OPTAB_WIDEN);
    }

  /* Store it all.  */
  emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)
    {
      rtx tmp = change_address (dmem, DImode,
				gen_rtx_AND (DImode,
					     plus_constant (DImode,
							    dmema, i*8),
					     im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
    }
  emit_move_insn (st_addr_1, st_tmp_1);
}
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

int
alpha_expand_block_move (rtx operands[])
{
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  rtx tmp;
  unsigned int i, words, ofs, nregs = 0;

  if (orig_bytes <= 0)
    return 1;
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (REG_P (tmp))
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > src_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    src_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    src_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    src_align = 16;
	}
    }

  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > dst_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    dst_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    dst_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    dst_align = 16;
	}
    }

  ofs = 0;
  if (src_align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, DImode, ofs + i * 8));

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (src_align >= 32 && bytes >= 4)
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (SImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, SImode, ofs + i * 4));

      nregs += words;
      bytes -= words * 4;
      ofs += words * 4;
    }

  if (bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words+1; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
					 words, ofs);

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (! TARGET_BWX && bytes >= 4)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (src_align >= 16)
	{
	  do {
	    data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	    emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	  alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
      bytes -= 1;
      ofs += 1;
    }

  gcc_assert (nregs <= ARRAY_SIZE (data_regs));

  /* Now save it back out again.  */

  i = 0, ofs = 0;

  /* Write out the data in whatever chunks reading the source allowed.  */
  if (dst_align >= 64)
    {
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  emit_move_insn (adjust_address (orig_dst, DImode, ofs),
			  data_regs[i]);
	  ofs += 8;
	  i++;
	}
    }

  if (dst_align >= 32)
    {
      /* If the source has remaining DImode regs, write them out in
	 two pieces.  */
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  gen_lowpart (SImode, data_regs[i]));
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
			  gen_lowpart (SImode, tmp));
	  ofs += 8;
	  i++;
	}

      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  data_regs[i]);
	  ofs += 4;
	  i++;
	}
    }

  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
    {
      /* Write out a remaining block of words using unaligned methods.  */

      for (words = 1; i + words < nregs; words++)
	if (GET_MODE (data_regs[i + words]) != DImode)
	  break;

      if (words == 1)
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
      else
	alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
					    words, ofs);

      i += words;
      ofs += words * 8;
    }

  /* Due to the above, this won't be aligned.  */
  /* ??? If we have more than one of these, consider constructing full
     words in registers and using alpha_expand_unaligned_store_words.  */
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
    {
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
      ofs += 4;
      i++;
    }

  if (dst_align >= 16)
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
	i++;
	ofs += 2;
      }
  else
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
	i++;
	ofs += 2;
      }

  /* The remainder must be byte copies.  */
  while (i < nregs)
    {
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
      i++;
      ofs += 1;
    }

  return 1;
}
/* Expand string/block clear operations.  */

int
alpha_expand_block_clear (rtx operands[])
{
  rtx bytes_rtx	= operands[1];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT alignofs = 0;
  rtx orig_dst = operands[0];
  rtx tmp;
  int i, words, ofs = 0;

  if (orig_bytes <= 0)
    return 1;
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for stricter alignment.  */
  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && REG_P (XEXP (tmp, 0))
	   && CONST_INT_P (XEXP (tmp, 1)))
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > align)
	{
	  if (a >= 64)
	    align = a, alignofs = 8 - c % 8;
	  else if (a >= 32)
	    align = a, alignofs = 4 - c % 4;
	  else if (a >= 16)
	    align = a, alignofs = 2 - c % 2;
	}
    }

  /* Handle an unaligned prefix first.  */

  if (alignofs > 0)
    {
      /* Given that alignofs is bounded by align, the only time BWX could
	 generate three stores is for a 7 byte fill.  Prefer two individual
	 stores over a load/mask/store sequence.  */
      if ((!TARGET_BWX || alignofs == 7)
	  && align >= 32
	  && !(alignofs == 4 && bytes >= 4))
	{
	  machine_mode mode = (align >= 64 ? DImode : SImode);
	  int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT_M1U << (inv_alignofs * 8));
	  if (bytes < alignofs)
	    {
	      mask |= HOST_WIDE_INT_M1U << ((inv_alignofs + bytes) * 8);
	      ofs += bytes;
	      bytes = 0;
	    }
	  else
	    {
	      bytes -= alignofs;
	      ofs += alignofs;
	    }
	  alignofs = 0;

	  tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	}

      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
	{
	  emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
	  bytes -= 1;
	  ofs += 1;
	  alignofs -= 1;
	}
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
	{
	  emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
	  bytes -= 2;
	  ofs += 2;
	  alignofs -= 2;
	}
      if (alignofs == 4 && bytes >= 4)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
	  bytes -= 4;
	  ofs += 4;
	  alignofs = 0;
	}

      /* If we've not used the extra lead alignment information by now,
	 we won't be able to.  Downgrade align to match what's left over.  */
      if (alignofs > 0)
	{
	  alignofs = alignofs & -alignofs;
	  align = MIN (align, alignofs * BITS_PER_UNIT);
	}
    }

  /* Handle a block of contiguous long-words.  */

  if (align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
			const0_rtx);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* If the block is large and appropriately aligned, emit a single
     store followed by a sequence of stq_u insns.  */

  if (align >= 32 && bytes > 16)
    {
      rtx orig_dsta;

      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
      bytes -= 4;
      ofs += 4;

      orig_dsta = XEXP (orig_dst, 0);
      if (GET_CODE (orig_dsta) == LO_SUM)
	orig_dsta = force_reg (Pmode, orig_dsta);

      words = bytes / 8;
      for (i = 0; i < words; ++i)
	{
	  rtx mem
	    = change_address (orig_dst, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (DImode, orig_dsta,
							  ofs + i*8),
					   GEN_INT (-8)));
	  set_mem_alias_set (mem, 0);
	  emit_move_insn (mem, const0_rtx);
	}

      /* Depending on the alignment, the first stq_u may have overlapped
	 with the initial stl, which means that the last stq_u didn't
	 write as much as it would appear.  Leave those questionable bytes
	 unaccounted for.  */
      bytes -= words * 8 - 4;
      ofs += words * 8 - 4;
    }

  /* Handle a smaller block of aligned words.  */

  if ((align >= 64 && bytes == 4)
      || (align == 32 && bytes >= 4))
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
			const0_rtx);

      bytes -= words * 4;
      ofs += words * 4;
    }

  /* An unaligned block uses stq_u stores for as many as possible.  */

  if (bytes >= 8)
    {
      words = bytes / 8;

      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* Next clean up any trailing pieces.  */

  /* Count the number of bits in BYTES for which aligned stores could
     be emitted.  */
  words = 0;
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)
    if (bytes & i)
      words += 1;

  /* If we have appropriate alignment (and it wouldn't take too many
     instructions otherwise), mask out the bytes we need.  */
  if (TARGET_BWX ? words > 2 : bytes > 0)
    {
      if (align >= 64)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, DImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = HOST_WIDE_INT_M1U << (bytes * 8);

	  tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
      else if (align >= 32 && bytes < 4)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, SImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = HOST_WIDE_INT_M1U << (bytes * 8);

	  tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
    }

  if (!TARGET_BWX && bytes >= 4)
    {
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (align >= 16)
	{
	  do {
	    emit_move_insn (adjust_address (orig_dst, HImode, ofs),
			    const0_rtx);
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
      bytes -= 1;
      ofs += 1;
    }

  return 1;
}
/* Returns a mask so that zap(x, value) == x & mask.  */

rtx
alpha_expand_zap_mask (HOST_WIDE_INT value)
{
  rtx result;
  int i;
  HOST_WIDE_INT mask = 0;

  for (i = 7; i >= 0; --i)
    {
      mask <<= 8;
      if (!((value >> i) & 1))
	mask |= 0xff;
    }

  result = gen_int_mode (mask, DImode);
  return result;
}
void
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
				   machine_mode mode,
				   rtx op0, rtx op1, rtx op2)
{
  op0 = gen_lowpart (mode, op0);

  if (op1 == const0_rtx)
    op1 = CONST0_RTX (mode);
  else
    op1 = gen_lowpart (mode, op1);

  if (op2 == const0_rtx)
    op2 = CONST0_RTX (mode);
  else
    op2 = gen_lowpart (mode, op2);

  emit_insn ((*gen) (op0, op1, op2));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_load_locked_si;
  else if (mode == DImode)
    fn = gen_load_locked_di;
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_store_conditional_si;
  else if (mode == DImode)
    fn = gen_store_conditional_di;
  emit_insn (fn (res, mem, val));
}
/* Subroutines of the atomic operation splitters.  Emit barriers
   as needed for the memory MODEL.  */

static void
alpha_pre_atomic_barrier (enum memmodel model)
{
  if (need_atomic_barrier_p (model, true))
    emit_insn (gen_memory_barrier ());
}

static void
alpha_post_atomic_barrier (enum memmodel model)
{
  if (need_atomic_barrier_p (model, false))
    emit_insn (gen_memory_barrier ());
}
/* A subroutine of the atomic operation splitters.  Emit an insxl
   instruction in MODE.  */

static rtx
emit_insxl (machine_mode mode, rtx op1, rtx op2)
{
  rtx ret = gen_reg_rtx (DImode);
  rtx (*fn) (rtx, rtx, rtx);

  switch (mode)
    {
    case E_QImode:
      fn = gen_insbl;
      break;
    case E_HImode:
      fn = gen_inswl;
      break;
    case E_SImode:
      fn = gen_insll;
      break;
    case E_DImode:
      fn = gen_insql;
      break;
    default:
      gcc_unreachable ();
    }

  op1 = force_reg (mode, op1);
  emit_insn (fn (ret, op1, op2));

  return ret;
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  SCRATCH is
   a scratch register.  */

void
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
		       rtx after, rtx scratch, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));

  alpha_pre_atomic_barrier (model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (DImode, label);

  if (before == NULL)
    before = scratch;
  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = gen_rtx_AND (mode, before, val);
      emit_insn (gen_rtx_SET (val, x));

      x = gen_rtx_NOT (mode, val);
    }
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);
  if (after)
    emit_insn (gen_rtx_SET (after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (scratch, x));

  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
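
/* A standalone sketch (not GCC code; compile separately) of the loop
   shape emitted above, with C11 atomics standing in for ldq_l/stq_c:
   the store-conditional result plays the role of COND, and failure
   takes the unlikely branch back to the top.  */
#if 0
#include <stdatomic.h>
#include <stdio.h>

static int
atomic_fetch_add_model (atomic_int *mem, int val)
{
  int before, desired;
  do
    {
      before = atomic_load (mem);	/* load-locked   */
      desired = before + val;		/* the operation */
    }
  while (!atomic_compare_exchange_weak (mem, &before,
					desired));	/* store-conditional + retry */
  return before;
}

int
main (void)
{
  atomic_int v = 40;
  printf ("%d\n", atomic_fetch_add_model (&v, 2));	/* prints 40; v becomes 42 */
  return 0;
}
#endif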
/* Expand a compare and swap operation.  */

void
alpha_split_compare_and_swap (rtx operands[])
{
  rtx cond, retval, mem, oldval, newval;
  bool is_weak;
  enum memmodel mod_s, mod_f;
  machine_mode mode;
  rtx label1, label2, x;

  cond = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (operands[5] != const0_rtx);
  mod_s = memmodel_from_int (INTVAL (operands[6]));
  mod_f = memmodel_from_int (INTVAL (operands[7]));
  mode = GET_MODE (mem);

  alpha_pre_atomic_barrier (mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = gen_lowpart (DImode, retval);
  if (oldval == const0_rtx)
    {
      emit_move_insn (cond, const0_rtx);
      x = gen_rtx_NE (DImode, x, const0_rtx);
    }
  else
    {
      x = gen_rtx_EQ (DImode, x, oldval);
      emit_insn (gen_rtx_SET (cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_move_insn (cond, newval);
  emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));

  if (!is_weak)
    {
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  alpha_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));
}
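
/* A standalone sketch (not GCC code; compile separately) of the
   operation this splitter implements, expressed with C11 atomics.  The
   weak form may fail spuriously, which is why the splitter only emits
   the retry branch for the strong variant.  */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <assert.h>

static bool
cas_model (atomic_long *mem, long *expected, long desired, bool is_weak)
{
  return is_weak
	 ? atomic_compare_exchange_weak (mem, expected, desired)
	 : atomic_compare_exchange_strong (mem, expected, desired);
}

int
main (void)
{
  atomic_long v = 1;
  long expected = 1;
  assert (cas_model (&v, &expected, 2, false));
  assert (atomic_load (&v) == 2);
  return 0;
}
#endif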
void
alpha_expand_compare_and_swap_12 (rtx operands[])
{
  rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
  machine_mode mode;
  rtx addr, align, wdst;
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);

  cond = operands[0];
  dst = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = operands[5];
  mod_s = operands[6];
  mod_f = operands[7];
  mode = GET_MODE (mem);

  /* We forced the address into a register via mem_noofs_operand.  */
  addr = XEXP (mem, 0);
  gcc_assert (register_operand (addr, DImode));

  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  oldval = convert_modes (DImode, mode, oldval, 1);

  if (newval != const0_rtx)
    newval = emit_insxl (mode, newval, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    gen = gen_atomic_compare_and_swapqi_1;
  else
    gen = gen_atomic_compare_and_swaphi_1;
  emit_insn (gen (cond, wdst, mem, oldval, newval, align,
		  is_weak, mod_s, mod_f));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_compare_and_swap_12 (rtx operands[])
{
  rtx cond, dest, orig_mem, oldval, newval, align, scratch;
  machine_mode mode;
  bool is_weak;
  enum memmodel mod_s, mod_f;
  rtx label1, label2, mem, addr, width, mask, x;

  cond = operands[0];
  dest = operands[1];
  orig_mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  align = operands[5];
  is_weak = (operands[6] != const0_rtx);
  mod_s = memmodel_from_int (INTVAL (operands[7]));
  mod_f = memmodel_from_int (INTVAL (operands[8]));
  scratch = operands[9];
  mode = GET_MODE (orig_mem);
  addr = XEXP (orig_mem, 0);

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  alpha_pre_atomic_barrier (mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  emit_insn (gen_extxl (dest, scratch, width, addr));

  if (oldval == const0_rtx)
    {
      emit_move_insn (cond, const0_rtx);
      x = gen_rtx_NE (DImode, dest, const0_rtx);
    }
  else
    {
      x = gen_rtx_EQ (DImode, dest, oldval);
      emit_insn (gen_rtx_SET (cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_insn (gen_mskxl (cond, scratch, mask, addr));

  if (newval != const0_rtx)
    emit_insn (gen_iordi3 (cond, cond, newval));

  emit_store_conditional (DImode, cond, mem, cond);

  if (!is_weak)
    {
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  alpha_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));
}
/* Expand an atomic exchange operation.  */

void
alpha_split_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, scratch;
  enum memmodel model;
  machine_mode mode;
  rtx label, x, cond;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  scratch = operands[4];
  mode = GET_MODE (mem);
  cond = gen_lowpart (DImode, scratch);

  alpha_pre_atomic_barrier (model);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);
  emit_move_insn (scratch, val);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
void
alpha_expand_atomic_exchange_12 (rtx operands[])
{
  rtx dst, mem, val, model;
  machine_mode mode;
  rtx addr, align, wdst;
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx);

  dst = operands[0];
  mem = operands[1];
  val = operands[2];
  model = operands[3];
  mode = GET_MODE (mem);

  /* We forced the address into a register via mem_noofs_operand.  */
  addr = XEXP (mem, 0);
  gcc_assert (register_operand (addr, DImode));

  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  /* Insert val into the correct byte location within the word.  */
  if (val != const0_rtx)
    val = emit_insxl (mode, val, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    gen = gen_atomic_exchangeqi_1;
  else
    gen = gen_atomic_exchangehi_1;
  emit_insn (gen (wdst, mem, val, align, model));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_atomic_exchange_12 (rtx operands[])
{
  rtx dest, orig_mem, addr, val, align, scratch;
  rtx label, mem, width, mask, x;
  machine_mode mode;
  enum memmodel model;

  dest = operands[0];
  orig_mem = operands[1];
  val = operands[2];
  align = operands[3];
  model = (enum memmodel) INTVAL (operands[4]);
  scratch = operands[5];
  mode = GET_MODE (orig_mem);
  addr = XEXP (orig_mem, 0);

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  alpha_pre_atomic_barrier (model);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  emit_insn (gen_extxl (dest, scratch, width, addr));
  emit_insn (gen_mskxl (scratch, scratch, mask, addr));
  if (val != const0_rtx)
    emit_insn (gen_iordi3 (scratch, scratch, val));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label);

  alpha_post_atomic_barrier (model);
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
alpha_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
                   unsigned int)
{
  enum attr_type dep_insn_type;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */
  if (dep_type != 0)
    return cost;

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  dep_insn_type = get_attr_type (dep_insn);

  /* Bring in the user-defined memory latency.  */
  if (dep_insn_type == TYPE_ILD
      || dep_insn_type == TYPE_FLD
      || dep_insn_type == TYPE_LDSYM)
    cost += alpha_memory_latency - 1;

  /* Everything else handled in DFA bypasses now.  */

  return cost;
}

/* The number of instructions that can be issued per cycle.  */

static int
alpha_issue_rate (void)
{
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
}
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.

   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
   alternative schedules.  For EV5, we can choose between E0/E1 and
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */

static int
alpha_multipass_dfa_lookahead (void)
{
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
}

/* Machine-specific function data.  */

struct GTY(()) alpha_links;

struct GTY(()) machine_function
{
  /* For flag_reorder_blocks_and_partition.  */
  rtx gp_save_rtx;

  /* For VMS condition handlers.  */
  bool uses_condition_handler;

  /* Linkage entries.  */
  hash_map<nofree_string_hash, alpha_links *> *links;
};

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
alpha_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* Support for frame based VMS condition handlers.  */

/* A VMS condition handler may be established for a function with a call to
   __builtin_establish_vms_condition_handler, and cancelled with a call to
   __builtin_revert_vms_condition_handler.

   The VMS Condition Handling Facility knows about the existence of a handler
   from the procedure descriptor .handler field.  As the VMS native compilers
   do, we store the user specified handler's address at a fixed location in
   the stack frame and point the procedure descriptor at a common wrapper
   which fetches the real handler's address and issues an indirect call.

   The indirection wrapper is "__gcc_shell_handler", provided by libgcc.

   We force the procedure kind to PT_STACK, and the fixed frame location is
   fp+8, just before the register save area.  We use the handler_data field
   in the procedure descriptor to state the fp offset at which the installed
   handler address can be found.  */

#define VMS_COND_HANDLER_FP_OFFSET 8

/* Expand code to store the currently installed user VMS condition handler
   into TARGET and install HANDLER as the new condition handler.  */

void
alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
{
  rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
                                            VMS_COND_HANDLER_FP_OFFSET);

  rtx handler_slot
    = gen_rtx_MEM (DImode, handler_slot_address);

  emit_move_insn (target, handler_slot);
  emit_move_insn (handler_slot, handler);

  /* Notify the start/prologue/epilogue emitters that the condition handler
     slot is needed.  In addition to reserving the slot space, this will
     force the procedure kind to PT_STACK to ensure that the
     hard_frame_pointer_rtx use above is correct.  */
  cfun->machine->uses_condition_handler = true;
}
/* Expand code to store the current VMS condition handler into TARGET and
   install a null one in its place.  */

void
alpha_expand_builtin_revert_vms_condition_handler (rtx target)
{
  /* We implement this by establishing a null condition handler, with the
     tiny side effect of setting uses_condition_handler.  This is a little
     bit pessimistic if no actual builtin_establish call is ever issued,
     which is not a real problem and expected never to happen anyway.  */

  alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
}
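/* Illustrative sketch (not part of the compiler): from VMS user code the
   two builtins expanded above behave roughly as follows; the handler
   signature shown is hypothetical and stands in for the real VMS one:

     typedef void *vms_handler_t;

     void
     with_handler (vms_handler_t h)
     {
       // Save the previous handler and install H at fp+8; the procedure
       // descriptor's .handler field points at __gcc_shell_handler.
       vms_handler_t prev
         = __builtin_establish_vms_condition_handler (h);

       // ... code that may signal VMS conditions ...

       // Install a null handler again, i.e. cancel the user handler.
       __builtin_revert_vms_condition_handler ();
       (void) prev;
     }
   */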
/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

rtx
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}
/* Return or create a memory slot containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

rtx
alpha_gp_save_rtx (void)
{
  rtx m = cfun->machine->gp_save_rtx;

  if (m == NULL)
    {
      start_sequence ();

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      rtx_insn *seq = get_insns ();
      end_sequence ();

      /* We used to simply emit the sequence after entry_of_function.
         However this breaks the CFG if the first instruction in the
         first block is not the NOTE_INSN_BASIC_BLOCK, for example a
         label.  Emit the sequence properly on the edge.  We are only
         invoked from dw2_build_landing_pads and finish_eh_generation
         will call commit_edge_insertions thanks to a kludge.  */
      insert_insn_on_edge (seq,
                           single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));

      cfun->machine->gp_save_rtx = m;
    }

  return m;
}
static void
alpha_instantiate_decls (void)
{
  if (cfun->machine->gp_save_rtx != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->gp_save_rtx);
}
static int
alpha_ra_ever_killed (void)
{
  rtx_insn *top;

  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return (int) df_regs_ever_live_p (REG_RA);

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
}
/* Return the trap mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_trap_mode_suffix (void)
{
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);

  switch (s)
    {
    case TRAP_SUFFIX_NONE:
      return NULL;

    case TRAP_SUFFIX_SU:
      if (alpha_fptm >= ALPHA_FPTM_SU)
        return "su";
      return NULL;

    case TRAP_SUFFIX_SUI:
      if (alpha_fptm >= ALPHA_FPTM_SUI)
        return "sui";
      return NULL;

    case TRAP_SUFFIX_V_SV:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "v";
        case ALPHA_FPTM_SU:
        case ALPHA_FPTM_SUI:
          return "sv";
        default:
          gcc_unreachable ();
        }

    case TRAP_SUFFIX_V_SV_SVI:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "v";
        case ALPHA_FPTM_SU:
          return "sv";
        case ALPHA_FPTM_SUI:
          return "svi";
        default:
          gcc_unreachable ();
        }
      break;

    case TRAP_SUFFIX_U_SU_SUI:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "u";
        case ALPHA_FPTM_SU:
          return "su";
        case ALPHA_FPTM_SUI:
          return "sui";
        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
/* Return the rounding mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_round_mode_suffix (void)
{
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);

  switch (s)
    {
    case ROUND_SUFFIX_NONE:
      return NULL;

    case ROUND_SUFFIX_NORMAL:
      switch (alpha_fprm)
        {
        case ALPHA_FPRM_NORM:
          return NULL;
        case ALPHA_FPRM_MINF:
          return "m";
        case ALPHA_FPRM_CHOP:
          return "c";
        case ALPHA_FPRM_DYN:
          return "d";
        default:
          gcc_unreachable ();
        }
      break;

    case ROUND_SUFFIX_C:
      return "c";

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P.  */

static bool
alpha_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '/' || code == ',' || code == '-' || code == '~'
          || code == '#' || code == '*' || code == '&');
}
/* Implement TARGET_PRINT_OPERAND.  The alpha-specific
   operand codes are documented below.  */

static void
alpha_print_operand (FILE *file, rtx x, int code)
{
  int i;

  switch (code)
    {
    case '~':
      /* Print the assembler name of the current function.  */
      assemble_name (file, alpha_fnname);
      break;

    case '&':
      if (const char *name = get_some_local_dynamic_name ())
        assemble_name (file, name);
      else
        output_operand_lossage ("'%%&' used without any "
                                "local dynamic TLS references");
      break;

    case '/':
      /* Generates the instruction suffix.  The TRAP_SUFFIX and ROUND_SUFFIX
         attributes are examined to determine what is appropriate.  */
      {
        const char *trap = get_trap_mode_suffix ();
        const char *round = get_round_mode_suffix ();

        if (trap || round)
          fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
        break;
      }

    case ',':
      /* Generates single precision suffix for floating point
         instructions (s for IEEE, f for VAX).  */
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
      break;

    case '-':
      /* Generates double precision suffix for floating point
         instructions (t for IEEE, g for VAX).  */
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
      break;

    case '#':
      if (alpha_this_literal_sequence_number == 0)
        alpha_this_literal_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_literal_sequence_number);
      break;

    case '*':
      if (alpha_this_gpdisp_sequence_number == 0)
        alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
      break;

    case 'J':
      {
        const char *lituse;

        if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
          {
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsgd";
          }
        else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
          {
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsldm";
          }
        else if (CONST_INT_P (x))
          lituse = "lituse_jsr";
        else
          {
            output_operand_lossage ("invalid %%J value");
            break;
          }

        if (x != const0_rtx)
          fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'j':
      {
        const char *lituse;

#ifdef HAVE_AS_JSRDIRECT_RELOCS
        lituse = "lituse_jsrdirect";
#else
        lituse = "lituse_jsr";
#endif

        gcc_assert (INTVAL (x) != 0);
        fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, HOST_WIDE_INT_1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;

    case 'm':
      /* Write mask for ZAP insn.  */
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT mask = 0, value = INTVAL (x);

          for (i = 0; i < 8; i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
        }
      else
        output_operand_lossage ("invalid %%m value");
      break;

    case 'M':
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
      if (!mode_width_operand (x, VOIDmode))
        output_operand_lossage ("invalid %%M value");

      fprintf (file, "%s",
               (INTVAL (x) == 8 ? "b"
                : INTVAL (x) == 16 ? "w"
                : INTVAL (x) == 32 ? "l"
                : "q"));
      break;

    case 'U':
      /* Similar, except do it from the mask.  */
      if (CONST_INT_P (x))
        {
          HOST_WIDE_INT value = INTVAL (x);

          if (value == 0xff)
            {
              fputc ('b', file);
              break;
            }
          if (value == 0xffff)
            {
              fputc ('w', file);
              break;
            }
          if (value == 0xffffffff)
            {
              fputc ('l', file);
              break;
            }
          if (value == -1)
            {
              fputc ('q', file);
              break;
            }
        }

      output_operand_lossage ("invalid %%U value");
      break;

    case 's':
      /* Write the constant value divided by 8.  */
      if (!CONST_INT_P (x)
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
      break;

    case 'C': case 'D': case 'c': case 'd':
      /* Write out comparison name.  */
      {
        enum rtx_code c = GET_CODE (x);

        if (!COMPARISON_P (x))
          output_operand_lossage ("invalid %%C value");

        else if (code == 'D')
          c = reverse_condition (c);
        else if (code == 'c')
          c = swap_condition (c);
        else if (code == 'd')
          c = swap_condition (reverse_condition (c));

        if (c == LEU)
          fprintf (file, "ule");
        else if (c == LTU)
          fprintf (file, "ult");
        else if (c == UNORDERED)
          fprintf (file, "un");
        else
          fprintf (file, "%s", GET_RTX_NAME (c));
      }
      break;

    case 'E':
      /* Write the divide or modulus operator.  */
      switch (GET_CODE (x))
        {
        case DIV:
          fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UDIV:
          fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case MOD:
          fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UMOD:
          fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        default:
          output_operand_lossage ("invalid %%E value");
          break;
        }
      break;

    case 'A':
      /* Write "_u" for unaligned access.  */
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        fprintf (file, "_u");
      break;

    case 0:
      if (REG_P (x))
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
        output_address (GET_MODE (x), XEXP (x, 0));
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
        {
          switch (XINT (XEXP (x, 0), 1))
            {
            case UNSPEC_DTPREL:
            case UNSPEC_TPREL:
              output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              break;
            }
        }
      else
        output_addr_const (file, x);
      break;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
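/* Illustrative sketch (not part of the compiler): the %h and %L codes
   above split a constant into the high part used by ldah and a
   sign-extended low 16 bits used by lda.  The sign-extension expression
   can be checked in plain C:

     #include <assert.h>
     #include <stdint.h>

     static void
     check_lo16 (int64_t x)
     {
       // Same expression as the %L case: low 16 bits, sign-extended.
       int64_t lo = (x & 0xffff) - 2 * (x & 0x8000);
       assert (lo >= -32768 && lo <= 32767);
       // What remains after removing LO is a multiple of 0x10000,
       // i.e. reachable with an ldah immediate shifted left by 16.
       assert (((x - lo) & 0xffff) == 0);
     }
   */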
/* Implement TARGET_PRINT_OPERAND_ADDRESS.  */

static void
alpha_print_operand_address (FILE *file, machine_mode /*mode*/, rtx addr)
{
  int basereg = 31;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (addr) == AND)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS
      && CONST_INT_P (XEXP (addr, 1)))
    {
      offset = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    {
      const char *reloc16, *reloclo;
      rtx op1 = XEXP (addr, 1);

      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
        {
          op1 = XEXP (op1, 0);
          switch (XINT (op1, 1))
            {
            case UNSPEC_DTPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
              break;
            case UNSPEC_TPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              return;
            }

          output_addr_const (file, XVECEXP (op1, 0, 0));
        }
      else
        {
          reloc16 = "gprel";
          reloclo = "gprellow";
          output_addr_const (file, op1);
        }

      if (offset)
        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);

      addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
        {
        case REG:
          basereg = REGNO (addr);
          break;

        case SUBREG:
          basereg = subreg_regno (addr);
          break;

        default:
          gcc_unreachable ();
        }

      fprintf (file, "($%d)\t\t!%s", basereg,
               (basereg == 29 ? reloc16 : reloclo));
      return;
    }

  switch (GET_CODE (addr))
    {
    case REG:
      basereg = REGNO (addr);
      break;

    case SUBREG:
      basereg = subreg_regno (addr);
      break;

    case CONST_INT:
      offset = INTVAL (addr);
      break;

    case SYMBOL_REF:
      gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
      fprintf (file, "%s", XSTR (addr, 0));
      return;

    case CONST:
      gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
               XSTR (XEXP (XEXP (addr, 0), 0), 0),
               INTVAL (XEXP (XEXP (addr, 0), 1)));
      return;

    default:
      output_operand_lossage ("invalid operand address");
      return;
    }

  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
}
/* Emit RTL insns to initialize the variable parts of a trampoline at
   M_TRAMP.  FNDECL is target function's decl.  CHAIN_VALUE is an rtx
   for the static chain value for the function.  */

static void
alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr, mem, word1, word2;

  fnaddr = XEXP (DECL_RTL (fndecl), 0);

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (Pmode, fnaddr);
  chain_value = convert_memory_address (Pmode, chain_value);
#endif

  if (TARGET_ABI_OPEN_VMS)
    {
      const char *fnname;
      char *trname;

      /* Construct the name of the trampoline entry point.  */
      fnname = XSTR (fnaddr, 0);
      trname = (char *) alloca (strlen (fnname) + 5);
      strcpy (trname, fnname);
      strcat (trname, "..tr");
      fnname = ggc_alloc_string (trname, strlen (trname) + 1);
      word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);

      /* Trampoline (or "bounded") procedure descriptor is constructed from
         the function's procedure descriptor with certain fields zeroed IAW
         the VMS calling standard.  This is stored in the first quadword.  */
      word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
      word1 = expand_and (DImode, word1,
                          GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
                          NULL);
    }
  else
    {
      /* These 4 instructions are:
            ldq $1,24($27)
            ldq $27,16($27)
            jmp $31,($27),0
            nop
         We don't bother setting the HINT field of the jump; the nop
         is merely there for padding.  */
      word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
      word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
    }

  /* Store the first two words, as computed above.  */
  mem = adjust_address (m_tramp, DImode, 0);
  emit_move_insn (mem, word1);
  mem = adjust_address (m_tramp, DImode, 8);
  emit_move_insn (mem, word2);

  /* Store function address and static chain value.  */
  mem = adjust_address (m_tramp, Pmode, 16);
  emit_move_insn (mem, fnaddr);
  mem = adjust_address (m_tramp, Pmode, 24);
  emit_move_insn (mem, chain_value);

  if (TARGET_ABI_OSF)
    {
      emit_insn (gen_imb ());
#ifdef HAVE_ENABLE_EXECUTE_STACK
      emit_library_call (init_one_libfunc ("__enable_execute_stack"),
                         LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
#endif
    }
}
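/* Illustrative sketch (not part of the compiler): on OSF the 32-byte
   trampoline initialized above has this layout (hypothetical struct,
   for exposition only):

     #include <stdint.h>

     struct alpha_osf_trampoline
     {
       uint64_t insns01;  // 0xa77b0010a43b0018: ldq $1,24($27); ldq $27,16($27)
       uint64_t insns23;  // 0x47ff041f6bfb0000: jmp $31,($27),0; nop
       uint64_t fnaddr;   // offset 16: address of the target function
       uint64_t chain;    // offset 24: static chain value
     };

   At run time $27 points at the trampoline itself, so the two loads fetch
   the static chain into $1 and the real function address into $27 before
   jumping to it.  */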
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */

static rtx
alpha_function_arg (cumulative_args_t cum_v, machine_mode mode,
                    const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int basereg;
  int num_args;

  /* Don't get confused and pass small structures in FP registers.  */
  if (type && AGGREGATE_TYPE_P (type))
    basereg = 16;
  else
    {
      /* With alpha_split_complex_arg, we shouldn't see any raw complex
         values here.  */
      gcc_checking_assert (!COMPLEX_MODE_P (mode));

      /* Set up defaults for FP operands passed in FP registers, and
         integral operands passed in integer registers.  */
      if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
        basereg = 32 + 16;
      else
        basereg = 16;
    }

  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
     the two platforms, so we can't avoid conditional compilation.  */
#if TARGET_ABI_OPEN_VMS
    {
      if (mode == VOIDmode)
        return alpha_arg_info_reg_val (*cum);

      num_args = cum->num_args;
      if (num_args >= 6
          || targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
    }
#elif TARGET_ABI_OSF
    {
      if (*cum >= 6)
        return NULL_RTX;
      num_args = *cum;

      /* VOID is passed as a special flag for "last argument".  */
      if (type == void_type_node)
        basereg = 16;
      else if (targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
    }
#else
#error Unhandled ABI
#endif

  return gen_rtx_REG (mode, num_args + basereg);
}
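/* Illustrative sketch (not part of the compiler): for OSF the mapping
   implemented above amounts to "argument word I goes in $16+I, or in
   $f16+I when it is a floating-point scalar and FP registers are in
   use".  A hypothetical helper returning the hard register number:

     static int
     alpha_arg_regno (int i, int is_float)
     {
       if (i >= 6)
         return -1;                   // passed on the stack
       return is_float ? 32 + 16 + i  // $f16..$f21
                       : 16 + i;      // $16..$21
     }
   */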
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
alpha_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
                            const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool onstack = targetm.calls.must_pass_in_stack (mode, type);
  int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type);

#if TARGET_ABI_OSF
  *cum += increment;
#else
  if (!onstack && cum->num_args < 6)
    cum->atypes[cum->num_args] = alpha_arg_type (mode);
  cum->num_args += increment;
#endif
}
static int
alpha_arg_partial_bytes (cumulative_args_t cum_v,
                         machine_mode mode ATTRIBUTE_UNUSED,
                         tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  int words = 0;
  CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);

#if TARGET_ABI_OPEN_VMS
  if (cum->num_args < 6
      && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type))
    words = 6 - cum->num_args;
#elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type))
    words = 6 - *cum;
#else
#error Unhandled ABI
#endif

  return words * UNITS_PER_WORD;
}
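/* Illustrative sketch (not part of the compiler): an argument that starts
   in registers but does not fit entirely is split, and the hook above
   reports how many of its words stay in registers.  The same computation
   in plain C (names hypothetical):

     static int
     partial_reg_words (int num_args, int arg_size_words)
     {
       if (num_args < 6 && 6 < num_args + arg_size_words)
         return 6 - num_args;  // e.g. 4-word arg at slot 4 -> 2 words
       return 0;               // fully in registers or fully on the stack
     }
   */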
/* Return true if TYPE must be returned in memory, instead of in registers.  */

static bool
alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
{
  machine_mode mode = VOIDmode;
  int size;

  if (type)
    {
      mode = TYPE_MODE (type);

      /* All aggregates are returned in memory, except on OpenVMS where
         records that fit 64 bits should be returned by immediate value
         as required by section 3.8.7.1 of the OpenVMS Calling Standard.  */
      if (TARGET_ABI_OPEN_VMS
          && TREE_CODE (type) != ARRAY_TYPE
          && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
        return false;

      if (AGGREGATE_TYPE_P (type))
        return true;
    }

  size = GET_MODE_SIZE (mode);
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */
      return true;

    case MODE_COMPLEX_FLOAT:
      /* We judge complex floats on the size of their element,
         not the size of the whole type.  */
      size = GET_MODE_UNIT_SIZE (mode);
      break;

    case MODE_INT:
    case MODE_FLOAT:
    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      break;

    default:
      /* ??? We get called on all sorts of random stuff from
         aggregate_value_p.  We must return something, but it's not
         clear what's safe to return.  Pretend it's a struct I
         wouldn't actually return.  */
      return true;
    }

  /* Otherwise types must fit in one register.  */
  return size > UNITS_PER_WORD;
}
/* Return true if TYPE should be passed by invisible reference.  */

static bool
alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
                         machine_mode mode,
                         const_tree type ATTRIBUTE_UNUSED,
                         bool named)
{
  /* Pass float and _Complex float variable arguments by reference.
     This avoids 64-bit store from a FP register to a pretend args save area
     and subsequent 32-bit load from the saved location to a FP register.

     Note that 32-bit loads and stores to/from a FP register on alpha reorder
     bits to form a canonical 64-bit value in the FP register.  This fact
     invalidates the compiler assumption that a 32-bit FP value lives in the
     lower 32 bits of the passed 64-bit FP value, so loading the 32-bit value
     from the stored 64-bit location using a 32-bit FP load is invalid on
     alpha.

     This introduces a sort of ABI incompatibility, but until _Float32 was
     introduced, C-family languages promoted 32-bit float variable arguments
     to 64-bit double, and it was not allowed to pass float as a variable
     argument.  Passing _Complex float as a variable argument never
     worked on alpha.  Thus, we have no backward compatibility issues
     to worry about, and passing unpromoted _Float32 and _Complex float
     as a variable argument will actually work in the future.  */

  if (mode == SFmode || mode == SCmode)
    return !named;

  return mode == TFmode || mode == TCmode;
}
/* Define how to find the value returned by a function.  VALTYPE is the
   data type of the value (as a tree).  If the precise function being
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
   MODE is set instead of VALTYPE for libcalls.

   On Alpha the value is found in $0 for integer functions and
   $f0 for floating-point functions.  */

static rtx
alpha_function_value_1 (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
                        machine_mode mode)
{
  unsigned int regnum, dummy ATTRIBUTE_UNUSED;
  enum mode_class mclass;

  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));

  if (valtype)
    mode = TYPE_MODE (valtype);

  mclass = GET_MODE_CLASS (mode);
  switch (mclass)
    {
    case MODE_INT:
      /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
         where we have them returning both SImode and DImode.  */
      if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
        PROMOTE_MODE (mode, dummy, valtype);
      /* FALLTHRU */

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      regnum = 0;
      break;

    case MODE_FLOAT:
      regnum = 32;
      break;

    case MODE_COMPLEX_FLOAT:
      {
        machine_mode cmode = GET_MODE_INNER (mode);

        return gen_rtx_PARALLEL
          (VOIDmode,
           gen_rtvec (2,
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
                                         const0_rtx),
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
                                         GEN_INT (GET_MODE_SIZE (cmode)))));
      }

    case MODE_RANDOM:
      /* We should only reach here for BLKmode on VMS.  */
      gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
      regnum = 0;
      break;

    default:
      gcc_unreachable ();
    }

  return gen_rtx_REG (mode, regnum);
}
/* Implement TARGET_FUNCTION_VALUE.  */

static rtx
alpha_function_value (const_tree valtype, const_tree fn_decl_or_type,
                      bool /*outgoing*/)
{
  return alpha_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
}

/* Implement TARGET_LIBCALL_VALUE.  */

static rtx
alpha_libcall_value (machine_mode mode, const_rtx /*fun*/)
{
  return alpha_function_value_1 (NULL_TREE, NULL_TREE, mode);
}

/* Implement TARGET_FUNCTION_VALUE_REGNO_P.

   On the Alpha, $0 $1 and $f0 $f1 are the only registers thus used.  */

static bool
alpha_function_value_regno_p (const unsigned int regno)
{
  return (regno == 0 || regno == 1 || regno == 32 || regno == 33);
}

/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

static bool
alpha_split_complex_arg (const_tree type)
{
  return TYPE_MODE (type) != TCmode;
}
static tree
alpha_build_builtin_va_list (void)
{
  tree base, ofs, space, record, type_decl;

  if (TARGET_ABI_OPEN_VMS)
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
                          TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++? SET_IS_AGGR_TYPE (record, 1); */

  /* Dummy field to prevent alignment warnings.  */
  space = build_decl (BUILTINS_LOCATION,
                      FIELD_DECL, NULL_TREE, integer_type_node);
  DECL_FIELD_CONTEXT (space) = record;
  DECL_ARTIFICIAL (space) = 1;
  DECL_IGNORED_P (space) = 1;

  ofs = build_decl (BUILTINS_LOCATION,
                    FIELD_DECL, get_identifier ("__offset"),
                    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;
  DECL_CHAIN (ofs) = space;

  base = build_decl (BUILTINS_LOCATION,
                     FIELD_DECL, get_identifier ("__base"),
                     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  DECL_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  va_list_gpr_counter_field = ofs;
  return record;
}
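/* Illustrative sketch (not part of the compiler): the record built above
   corresponds to this C declaration of the OSF va_list (the trailing int
   is the unnamed dummy field that silences alignment warnings):

     typedef struct __va_list_tag
     {
       char *__base;   // register save area, then overflow arguments
       int __offset;   // byte offset of the next argument within it
       int __dummy;    // unnamed in the real record
     } __gnuc_va_list;
   */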
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
   and constant additions.  */

static gimple *
va_list_skip_additions (tree lhs)
{
  gimple *stmt;

  for (;;)
    {
      enum tree_code code;

      stmt = SSA_NAME_DEF_STMT (lhs);

      if (gimple_code (stmt) == GIMPLE_PHI)
        return stmt;

      if (!is_gimple_assign (stmt)
          || gimple_assign_lhs (stmt) != lhs)
        return NULL;

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
        return stmt;
      code = gimple_assign_rhs_code (stmt);
      if (!CONVERT_EXPR_CODE_P (code)
          && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
              || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
              || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
        return stmt;

      lhs = gimple_assign_rhs1 (stmt);
    }
}
/* Check if LHS = RHS statement is
   LHS = *(ap.__base + ap.__offset + cst)
   or
   LHS = *(ap.__base
           + ((ap.__offset + cst <= 47)
              ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
   If the former, indicate that GPR registers are needed,
   if the latter, indicate that FPR registers are needed.

   Also look for LHS = (*ptr).field, where ptr is one of the forms
   listed above.

   On alpha, cfun->va_list_gpr_size is used as size of the needed
   regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
   registers are needed and bit 1 set if FPR registers are needed.
   Return true if va_list references should not be scanned for the
   current statement.  */

static bool
alpha_stdarg_optimize_hook (struct stdarg_info *si, const gimple *stmt)
{
  tree base, offset, rhs;
  int offset_arg = 1;
  gimple *base_stmt;

  if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
      != GIMPLE_SINGLE_RHS)
    return false;

  rhs = gimple_assign_rhs1 (stmt);
  while (handled_component_p (rhs))
    rhs = TREE_OPERAND (rhs, 0);
  if (TREE_CODE (rhs) != MEM_REF
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
    return false;

  stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
  if (stmt == NULL
      || !is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return false;

  base = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (base) == SSA_NAME)
    {
      base_stmt = va_list_skip_additions (base);
      if (base_stmt
          && is_gimple_assign (base_stmt)
          && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
        base = gimple_assign_rhs1 (base_stmt);
    }

  if (TREE_CODE (base) != COMPONENT_REF
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
    {
      base = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (base) == SSA_NAME)
        {
          base_stmt = va_list_skip_additions (base);
          if (base_stmt
              && is_gimple_assign (base_stmt)
              && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
            base = gimple_assign_rhs1 (base_stmt);
        }

      if (TREE_CODE (base) != COMPONENT_REF
          || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
        return false;

      offset_arg = 0;
    }

  base = get_base_address (base);
  if (TREE_CODE (base) != VAR_DECL
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
    return false;

  offset = gimple_op (stmt, 1 + offset_arg);
  if (TREE_CODE (offset) == SSA_NAME)
    {
      gimple *offset_stmt = va_list_skip_additions (offset);

      if (offset_stmt
          && gimple_code (offset_stmt) == GIMPLE_PHI)
        {
          HOST_WIDE_INT sub;
          gimple *arg1_stmt, *arg2_stmt;
          tree arg1, arg2;
          enum tree_code code1, code2;

          if (gimple_phi_num_args (offset_stmt) != 2)
            goto escapes;

          arg1_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
          arg2_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
          if (arg1_stmt == NULL
              || !is_gimple_assign (arg1_stmt)
              || arg2_stmt == NULL
              || !is_gimple_assign (arg2_stmt))
            goto escapes;

          code1 = gimple_assign_rhs_code (arg1_stmt);
          code2 = gimple_assign_rhs_code (arg2_stmt);
          if (code1 == COMPONENT_REF
              && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
            /* Do nothing.  */;
          else if (code2 == COMPONENT_REF
                   && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
            {
              std::swap (arg1_stmt, arg2_stmt);
              code2 = code1;
            }
          else
            goto escapes;

          if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
            goto escapes;

          sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
          if (code2 == MINUS_EXPR)
            sub = -sub;
          if (sub < -48 || sub > -32)
            goto escapes;

          arg1 = gimple_assign_rhs1 (arg1_stmt);
          arg2 = gimple_assign_rhs1 (arg2_stmt);
          if (TREE_CODE (arg2) == SSA_NAME)
            {
              arg2_stmt = va_list_skip_additions (arg2);
              if (arg2_stmt == NULL
                  || !is_gimple_assign (arg2_stmt)
                  || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
                goto escapes;
              arg2 = gimple_assign_rhs1 (arg2_stmt);
            }
          if (arg1 != arg2)
            goto escapes;

          if (TREE_CODE (arg1) != COMPONENT_REF
              || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
              || get_base_address (arg1) != base)
            goto escapes;

          /* Need floating point regs.  */
          cfun->va_list_fpr_size |= 2;
          return false;
        }
      if (offset_stmt
          && is_gimple_assign (offset_stmt)
          && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
        offset = gimple_assign_rhs1 (offset_stmt);
    }
  if (TREE_CODE (offset) != COMPONENT_REF
      || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
      || get_base_address (offset) != base)
    goto escapes;
  else
    /* Need general regs.  */
    cfun->va_list_fpr_size |= 1;
  return false;

 escapes:
  si->va_list_escapes = true;
  return false;
}
/* Perform any needed actions for a function that is receiving a
   variable number of arguments.  */

static void
alpha_setup_incoming_varargs (cumulative_args_t pcum, machine_mode mode,
                              tree type, int *pretend_size, int no_rtl)
{
  CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);

  /* Skip the current argument.  */
  targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
                                      true);

#if TARGET_ABI_OPEN_VMS
  /* For VMS, we allocate space for all 6 arg registers plus a count.

     However, if NO registers need to be saved, don't allocate any space.
     This is not only because we won't need the space, but because AP
     includes the current_pretend_args_size and we don't want to mess up
     any ap-relative addresses already made.  */
  if (cum.num_args < 6)
    {
      if (!no_rtl)
        {
          emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
          emit_insn (gen_arg_home ());
        }
      *pretend_size = 7 * UNITS_PER_WORD;
    }
#else
  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
     only push those that are remaining.  However, if NO registers need to
     be saved, don't allocate any space.  This is not only because we won't
     need the space, but because AP includes the current_pretend_args_size
     and we don't want to mess up any ap-relative addresses already made.

     If we are not to use the floating-point registers, save the integer
     registers where we would put the floating-point registers.  This is
     not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this rare
     case.  */
  if (cum >= 6)
    return;

  if (!no_rtl)
    {
      int count;
      alias_set_type set = get_varargs_alias_set ();
      rtx tmp;

      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
      if (count > 6 - cum)
        count = 6 - cum;

      /* Detect whether integer registers or floating-point registers
         are needed by the detected va_arg statements.  See above for
         how these values are computed.  Note that the "escape" value
         is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
         these bits set.  */
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);

      if (cfun->va_list_fpr_size & 1)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (Pmode, virtual_incoming_args_rtx,
                                            (cum + 6) * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum, tmp, count);
        }

      if (cfun->va_list_fpr_size & 2)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (Pmode, virtual_incoming_args_rtx,
                                            cum * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
        }
    }
  *pretend_size = 12 * UNITS_PER_WORD;
#endif
}
static void
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT offset;
  tree t, offset_field, base_field;

  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
    return;

  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
     up by 48, storing fp arg registers in the first 48 bytes, and the
     integer arg registers in the next 48 bytes.  This is only done,
     however, if any integer registers need to be stored.

     If no integer registers need be stored, then we must subtract 48
     in order to account for the integer arg registers which are counted
     in argsize above, but which are not actually stored on the stack.
     Must further be careful here about structures straddling the last
     integer argument register; that futzes with pretend_args_size,
     which changes the meaning of AP.  */

  if (NUM_ARGS < 6)
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  else
    offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;

  if (TARGET_ABI_OPEN_VMS)
    {
      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      base_field = TYPE_FIELDS (TREE_TYPE (valist));
      offset_field = DECL_CHAIN (base_field);

      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                           valist, base_field, NULL_TREE);
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                             valist, offset_field, NULL_TREE);

      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = fold_build_pointer_plus_hwi (t, offset);
      t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
static tree
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
                         gimple_seq *pre_p)
{
  tree type_size, ptr_type, addend, t, addr;
  gimple_seq internal_post;

  /* If the type could not be passed in registers, skip the block
     reserved for the registers.  */
  if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
    {
      t = build_int_cst (TREE_TYPE (offset), 6*8);
      gimplify_assign (offset,
                       build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
                       pre_p);
    }

  addend = offset;
  ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      tree real_part, imag_part, real_temp;

      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      /* Copy the value into a new temporary, lest the formal temporary
         be reused out from under us.  */
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      tree fpaddend, cond, fourtyeight;

      fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
                              addend, fourtyeight);
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
                            fpaddend, addend);
    }

  /* Build the final address and force that value into a temporary.  */
  addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
  internal_post = NULL;
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  gimple_seq_add_seq (pre_p, internal_post);

  /* Update the offset field.  */
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
    t = size_zero_node;
  else
    {
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));
    }
  t = fold_convert (TREE_TYPE (offset), t);
  gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
                   pre_p);

  return build_va_arg_indirect_ref (addr);
}
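/* Illustrative sketch (not part of the compiler): the REAL_TYPE branch
   above encodes the fact that the FP register save area sits 48 bytes
   below the integer one.  The addend adjustment in plain C (names
   hypothetical):

     static long
     fp_arg_addend (long offset)
     {
       // While still inside the 48-byte register block, fetch the FP copy.
       return offset < 48 ? offset - 48 : offset;
     }

   so a double still in the register save area is read from
   ap.__base + ap.__offset - 48, while stack arguments are unaffected.  */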
static tree
alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                       gimple_seq *post_p)
{
  tree offset_field, base_field, offset, base, t, r;
  bool indirect;

  if (TARGET_ABI_OPEN_VMS)
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  base_field = TYPE_FIELDS (va_list_type_node);
  offset_field = DECL_CHAIN (base_field);
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                       valist, base_field, NULL_TREE);
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                         valist, offset_field, NULL_TREE);

  /* Pull the fields of the structure out into temporaries.  Since we never
     modify the base field, we can use a formal temporary.  Sign-extend the
     offset field so that it's the proper width for pointer arithmetic.  */
  base = get_formal_tmp_var (base_field, pre_p);

  t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
  offset = get_initialized_tmp_var (t, pre_p, NULL);

  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type_for_mode (type, ptr_mode, true);

  /* Find the value.  Note that this will be a stable indirection, or
     a composite of stable indirections in the case of complex.  */
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);

  /* Stuff the offset temporary back into its field.  */
  gimplify_assign (unshare_expr (offset_field),
                   fold_convert (TREE_TYPE (offset_field), offset), pre_p);

  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
}
enum alpha_builtin
{
  ALPHA_BUILTIN_CMPBGE,
  ALPHA_BUILTIN_EXTBL,
  ALPHA_BUILTIN_EXTWL,
  ALPHA_BUILTIN_EXTLL,
  ALPHA_BUILTIN_EXTQL,
  ALPHA_BUILTIN_EXTWH,
  ALPHA_BUILTIN_EXTLH,
  ALPHA_BUILTIN_EXTQH,
  ALPHA_BUILTIN_INSBL,
  ALPHA_BUILTIN_INSWL,
  ALPHA_BUILTIN_INSLL,
  ALPHA_BUILTIN_INSQL,
  ALPHA_BUILTIN_INSWH,
  ALPHA_BUILTIN_INSLH,
  ALPHA_BUILTIN_INSQH,
  ALPHA_BUILTIN_MSKBL,
  ALPHA_BUILTIN_MSKWL,
  ALPHA_BUILTIN_MSKLL,
  ALPHA_BUILTIN_MSKQL,
  ALPHA_BUILTIN_MSKWH,
  ALPHA_BUILTIN_MSKLH,
  ALPHA_BUILTIN_MSKQH,
  ALPHA_BUILTIN_UMULH,
  ALPHA_BUILTIN_ZAP,
  ALPHA_BUILTIN_ZAPNOT,
  ALPHA_BUILTIN_AMASK,
  ALPHA_BUILTIN_IMPLVER,
  ALPHA_BUILTIN_RPCC,
  ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
  ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,

  /* TARGET_MAX */
  ALPHA_BUILTIN_MINUB8,
  ALPHA_BUILTIN_MINSB8,
  ALPHA_BUILTIN_MINUW4,
  ALPHA_BUILTIN_MINSW4,
  ALPHA_BUILTIN_MAXUB8,
  ALPHA_BUILTIN_MAXSB8,
  ALPHA_BUILTIN_MAXUW4,
  ALPHA_BUILTIN_MAXSW4,
  ALPHA_BUILTIN_PERR,
  ALPHA_BUILTIN_PKLB,
  ALPHA_BUILTIN_PKWB,
  ALPHA_BUILTIN_UNPKBL,
  ALPHA_BUILTIN_UNPKBW,

  /* TARGET_CIX */
  ALPHA_BUILTIN_CTTZ,
  ALPHA_BUILTIN_CTLZ,
  ALPHA_BUILTIN_CTPOP,

  ALPHA_BUILTIN_max
};
static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
  CODE_FOR_builtin_cmpbge,
  CODE_FOR_extbl,
  CODE_FOR_extwl,
  CODE_FOR_extll,
  CODE_FOR_extql,
  CODE_FOR_extwh,
  CODE_FOR_extlh,
  CODE_FOR_extqh,
  CODE_FOR_builtin_insbl,
  CODE_FOR_builtin_inswl,
  CODE_FOR_builtin_insll,
  CODE_FOR_insql,
  CODE_FOR_inswh,
  CODE_FOR_inslh,
  CODE_FOR_insqh,
  CODE_FOR_mskbl,
  CODE_FOR_mskwl,
  CODE_FOR_mskll,
  CODE_FOR_mskql,
  CODE_FOR_mskwh,
  CODE_FOR_msklh,
  CODE_FOR_mskqh,
  CODE_FOR_umuldi3_highpart,
  CODE_FOR_builtin_zap,
  CODE_FOR_builtin_zapnot,
  CODE_FOR_builtin_amask,
  CODE_FOR_builtin_implver,
  CODE_FOR_builtin_rpcc,
  CODE_FOR_builtin_establish_vms_condition_handler,
  CODE_FOR_builtin_revert_vms_condition_handler,

  /* TARGET_MAX */
  CODE_FOR_builtin_minub8,
  CODE_FOR_builtin_minsb8,
  CODE_FOR_builtin_minuw4,
  CODE_FOR_builtin_minsw4,
  CODE_FOR_builtin_maxub8,
  CODE_FOR_builtin_maxsb8,
  CODE_FOR_builtin_maxuw4,
  CODE_FOR_builtin_maxsw4,
  CODE_FOR_builtin_perr,
  CODE_FOR_builtin_pklb,
  CODE_FOR_builtin_pkwb,
  CODE_FOR_builtin_unpkbl,
  CODE_FOR_builtin_unpkbw,

  /* TARGET_CIX */
  CODE_FOR_ctzdi2,
  CODE_FOR_clzdi2,
  CODE_FOR_popcountdi2
};
struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};
static struct alpha_builtin_def const zero_arg_builtins[] = {
  { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
  { "__builtin_alpha_rpcc",    ALPHA_BUILTIN_RPCC,    0, false }
};

static struct alpha_builtin_def const one_arg_builtins[] = {
  { "__builtin_alpha_amask",  ALPHA_BUILTIN_AMASK,  0,        true },
  { "__builtin_alpha_pklb",   ALPHA_BUILTIN_PKLB,   MASK_MAX, true },
  { "__builtin_alpha_pkwb",   ALPHA_BUILTIN_PKWB,   MASK_MAX, true },
  { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
  { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
  { "__builtin_alpha_cttz",   ALPHA_BUILTIN_CTTZ,   MASK_CIX, true },
  { "__builtin_alpha_ctlz",   ALPHA_BUILTIN_CTLZ,   MASK_CIX, true },
  { "__builtin_alpha_ctpop",  ALPHA_BUILTIN_CTPOP,  MASK_CIX, true }
};

static struct alpha_builtin_def const two_arg_builtins[] = {
  { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0,        true },
  { "__builtin_alpha_extbl",  ALPHA_BUILTIN_EXTBL,  0,        true },
  { "__builtin_alpha_extwl",  ALPHA_BUILTIN_EXTWL,  0,        true },
  { "__builtin_alpha_extll",  ALPHA_BUILTIN_EXTLL,  0,        true },
  { "__builtin_alpha_extql",  ALPHA_BUILTIN_EXTQL,  0,        true },
  { "__builtin_alpha_extwh",  ALPHA_BUILTIN_EXTWH,  0,        true },
  { "__builtin_alpha_extlh",  ALPHA_BUILTIN_EXTLH,  0,        true },
  { "__builtin_alpha_extqh",  ALPHA_BUILTIN_EXTQH,  0,        true },
  { "__builtin_alpha_insbl",  ALPHA_BUILTIN_INSBL,  0,        true },
  { "__builtin_alpha_inswl",  ALPHA_BUILTIN_INSWL,  0,        true },
  { "__builtin_alpha_insll",  ALPHA_BUILTIN_INSLL,  0,        true },
  { "__builtin_alpha_insql",  ALPHA_BUILTIN_INSQL,  0,        true },
  { "__builtin_alpha_inswh",  ALPHA_BUILTIN_INSWH,  0,        true },
  { "__builtin_alpha_inslh",  ALPHA_BUILTIN_INSLH,  0,        true },
  { "__builtin_alpha_insqh",  ALPHA_BUILTIN_INSQH,  0,        true },
  { "__builtin_alpha_mskbl",  ALPHA_BUILTIN_MSKBL,  0,        true },
  { "__builtin_alpha_mskwl",  ALPHA_BUILTIN_MSKWL,  0,        true },
  { "__builtin_alpha_mskll",  ALPHA_BUILTIN_MSKLL,  0,        true },
  { "__builtin_alpha_mskql",  ALPHA_BUILTIN_MSKQL,  0,        true },
  { "__builtin_alpha_mskwh",  ALPHA_BUILTIN_MSKWH,  0,        true },
  { "__builtin_alpha_msklh",  ALPHA_BUILTIN_MSKLH,  0,        true },
  { "__builtin_alpha_mskqh",  ALPHA_BUILTIN_MSKQH,  0,        true },
  { "__builtin_alpha_umulh",  ALPHA_BUILTIN_UMULH,  0,        true },
  { "__builtin_alpha_zap",    ALPHA_BUILTIN_ZAP,    0,        true },
  { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0,        true },
  { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
  { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
  { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
  { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
  { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
  { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
  { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
  { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
  { "__builtin_alpha_perr",   ALPHA_BUILTIN_PERR,   MASK_MAX, true }
};
static GTY(()) tree alpha_dimode_u;
static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;

static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];

/* Return the alpha builtin for CODE.  */

static tree
alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= ALPHA_BUILTIN_max)
    return error_mark_node;
  return alpha_builtins[code];
}
/* Helper function of alpha_init_builtins.  Add the built-in specified
   by NAME, TYPE, CODE, and ECF.  */

static void
alpha_builtin_function (const char *name, tree ftype,
                        enum alpha_builtin code, unsigned ecf)
{
  tree decl = add_builtin_function (name, ftype, (int) code,
                                    BUILT_IN_MD, NULL, NULL_TREE);

  if (ecf & ECF_CONST)
    TREE_READONLY (decl) = 1;
  if (ecf & ECF_NOTHROW)
    TREE_NOTHROW (decl) = 1;

  alpha_builtins [(int) code] = decl;
}
/* Helper function of alpha_init_builtins.  Add the COUNT built-in
   functions pointed to by P, with function type FTYPE.  */

static void
alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
                    tree ftype)
{
  size_t i;

  for (i = 0; i < count; ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      alpha_builtin_function (p->name, ftype, p->code,
                              (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
}
static void
alpha_init_builtins (void)
{
  tree ftype;

  alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);

  ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
                      ftype);

  ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);

  ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
                                    alpha_dimode_u, NULL_TREE);
  alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);

  if (TARGET_ABI_OPEN_VMS)
    {
      ftype = build_function_type_list (ptr_type_node, ptr_type_node,
                                        NULL_TREE);
      alpha_builtin_function ("__builtin_establish_vms_condition_handler",
                              ftype,
                              ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
                              0);

      ftype = build_function_type_list (ptr_type_node, void_type_node,
                                        NULL_TREE);
      alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
                              ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);

      vms_patch_builtins ();
    }
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
alpha_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      machine_mode mode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 2

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg;
  call_expr_arg_iterator iter;
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;

  if (fcode >= ALPHA_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;

      if (arg == error_mark_node)
        return NULL_RTX;
      if (arity > MAX_ARGS)
        return NULL_RTX;

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
        op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
      arity++;
    }

  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0]);
      else
        pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, op[0], op[1]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
   with an 8-bit output vector.  OPINT contains the integer operands; bit N
   of OP_CONST is set if OPINT[N] is valid.  */

static tree
alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
{
  if (op_const == 3)
    {
      int i, val;
      for (i = 0, val = 0; i < 8; ++i)
        {
          unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
          unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
          if (c0 >= c1)
            val |= 1 << i;
        }
      return build_int_cst (alpha_dimode_u, val);
    }
  else if (op_const == 2 && opint[1] == 0)
    return build_int_cst (alpha_dimode_u, 0xff);
  return NULL;
}
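/* Illustrative sketch (not part of the compiler): the fold above mirrors
   the hardware semantics of cmpbge, modelled in plain C:

     #include <stdint.h>

     static unsigned
     emul_cmpbge (uint64_t a, uint64_t b)
     {
       unsigned result = 0;
       for (int i = 0; i < 8; i++)
         if (((a >> (i * 8)) & 0xff) >= ((b >> (i * 8)) & 0xff))
           result |= 1u << i;  // byte I of A is unsigned >= byte I of B
       return result;          // 8-bit vector of per-byte comparisons
     }

   The op_const == 2 special case relies on every byte being
   unsigned-greater-or-equal to zero, so cmpbge (x, 0) is always 0xff.  */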
/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
   specialized form of an AND operation.  Other byte manipulation
   instructions are defined in terms of this instruction, so this is also
   used as a subroutine for other builtins.

   OP contains the tree operands; OPINT contains the extracted integer
   values.  Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null
   if only OPINT may be considered.  */

static tree
alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
                           long op_const)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT mask = 0;
      int i;

      for (i = 0; i < 8; ++i)
        if ((opint[1] >> i) & 1)
          mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);

      if (op_const & 1)
        return build_int_cst (alpha_dimode_u, opint[0] & mask);

      if (op)
        return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
                            build_int_cst (alpha_dimode_u, mask));
    }
  else if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (alpha_dimode_u, 0);
  return NULL;
}
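/* Illustrative sketch (not part of the compiler): zapnot expands an
   8-bit byte selector into a 64-bit AND mask exactly as the loop above
   does, which in plain C is:

     #include <stdint.h>

     static uint64_t
     emul_zapnot (uint64_t val, unsigned selector)
     {
       uint64_t mask = 0;
       for (int i = 0; i < 8; i++)
         if ((selector >> i) & 1)
           mask |= (uint64_t) 0xff << (i * 8);  // keep byte I
       return val & mask;
     }

   zap is the complement, zap (v, s) == zapnot (v, ~s & 0xff), which is
   why alpha_fold_builtin flips the selector before falling through to
   the ZAPNOT case.  */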
/* Fold the builtins for the EXT family of instructions.  */

static tree
alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  long zap_const = 2;
  tree *zap_op = NULL;

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      loc *= BITS_PER_UNIT;

      if (loc != 0)
        {
          if (op_const & 1)
            {
              unsigned HOST_WIDE_INT temp = opint[0];
              if (is_high)
                temp <<= loc;
              else
                temp >>= loc;
              opint[0] = temp;
              zap_const = 3;
            }
        }
      else
        zap_op = op;
    }

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
}

/* Fold the builtins for the INS family of instructions.  */

static tree
alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (alpha_dimode_u, 0);

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT temp, loc, byteloc;
      tree *zap_op = NULL;

      loc = opint[1] & 7;
      bytemask <<= loc;

      temp = opint[0];
      if (is_high)
        {
          byteloc = (64 - (loc * 8)) & 0x3f;
          if (byteloc == 0)
            zap_op = op;
          else
            temp >>= byteloc;
          bytemask >>= 8;
        }
      else
        {
          byteloc = loc * 8;
          if (byteloc == 0)
            zap_op = op;
          else
            temp <<= byteloc;
        }

      opint[0] = temp;
      opint[1] = bytemask;
      return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
    }

  return NULL;
}

/* Fold the builtins for the MSK family of instructions.  */

static tree
alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      bytemask <<= loc;

      if (is_high)
        bytemask >>= 8;

      opint[1] = bytemask ^ 0xff;
    }

  return alpha_fold_builtin_zapnot (op, opint, op_const);
}
static tree
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
{
  tree op0 = fold_convert (vtype, op[0]);
  tree op1 = fold_convert (vtype, op[1]);
  tree val = fold_build2 (code, vtype, op0, op1);
  return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
}
static tree
alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp = 0;
  int i;

  if (op_const != 3)
    return NULL;

  for (i = 0; i < 8; ++i)
    {
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
      if (a >= b)
        temp += a - b;
      else
        temp += b - a;
    }

  return build_int_cst (alpha_dimode_u, temp);
}
static tree
alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 24) & 0xff00;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 8) & 0xff00;
  temp |= (opint[0] >> 16) & 0xff0000;
  temp |= (opint[0] >> 24) & 0xff000000;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0xff00) << 24;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0x0000ff00) << 8;
  temp |= (opint[0] & 0x00ff0000) << 16;
  temp |= (opint[0] & 0xff000000) << 24;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = exact_log2 (opint[0] & -opint[0]);

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = 64 - floor_log2 (opint[0]) - 1;

  return build_int_cst (alpha_dimode_u, temp);
}

static tree
alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp, op;

  if (op_const == 0)
    return NULL;

  op = opint[0];
  temp = 0;
  while (op)
    temp++, op &= op - 1;

  return build_int_cst (alpha_dimode_u, temp);
}
/* Fold one of our builtin functions.  */

static tree
alpha_fold_builtin (tree fndecl, int n_args, tree *op,
                    bool ignore ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT opint[MAX_ARGS];
  long op_const = 0;
  int i;

  if (n_args > MAX_ARGS)
    return NULL;

  for (i = 0; i < n_args; i++)
    {
      tree arg = op[i];
      if (arg == error_mark_node)
        return NULL;

      opint[i] = 0;
      if (TREE_CODE (arg) == INTEGER_CST)
        {
          op_const |= 1L << i;
          opint[i] = int_cst_value (arg);
        }
    }

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case ALPHA_BUILTIN_CMPBGE:
      return alpha_fold_builtin_cmpbge (opint, op_const);

    case ALPHA_BUILTIN_EXTBL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_EXTWL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_EXTLL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_EXTQL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_EXTWH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_EXTLH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_EXTQH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_INSBL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_INSWL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_INSLL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_INSQL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_INSWH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_INSLH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_INSQH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_MSKBL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_MSKWL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_MSKLL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_MSKQL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_MSKWH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_MSKLH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_MSKQH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_ZAP:
      opint[1] ^= 0xff;
      /* FALLTHRU */
    case ALPHA_BUILTIN_ZAPNOT:
      return alpha_fold_builtin_zapnot (op, opint, op_const);

    case ALPHA_BUILTIN_MINUB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MINSB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MINUW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MINSW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
    case ALPHA_BUILTIN_MAXUB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MAXSB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MAXUW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MAXSW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);

    case ALPHA_BUILTIN_PERR:
      return alpha_fold_builtin_perr (opint, op_const);
    case ALPHA_BUILTIN_PKLB:
      return alpha_fold_builtin_pklb (opint, op_const);
    case ALPHA_BUILTIN_PKWB:
      return alpha_fold_builtin_pkwb (opint, op_const);
    case ALPHA_BUILTIN_UNPKBL:
      return alpha_fold_builtin_unpkbl (opint, op_const);
    case ALPHA_BUILTIN_UNPKBW:
      return alpha_fold_builtin_unpkbw (opint, op_const);

    case ALPHA_BUILTIN_CTTZ:
      return alpha_fold_builtin_cttz (opint, op_const);
    case ALPHA_BUILTIN_CTLZ:
      return alpha_fold_builtin_ctlz (opint, op_const);
    case ALPHA_BUILTIN_CTPOP:
      return alpha_fold_builtin_ctpop (opint, op_const);

    case ALPHA_BUILTIN_AMASK:
    case ALPHA_BUILTIN_IMPLVER:
    case ALPHA_BUILTIN_RPCC:
      /* None of these are foldable at compile-time.  */
    default:
      return NULL;
    }
}
static bool
alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  bool changed = false;
  gimple *stmt = gsi_stmt (*gsi);
  tree call = gimple_call_fn (stmt);
  gimple *new_stmt = NULL;

  if (call)
    {
      tree fndecl = gimple_call_fndecl (stmt);

      if (fndecl)
        {
          tree arg0, arg1;

          switch (DECL_FUNCTION_CODE (fndecl))
            {
            case ALPHA_BUILTIN_UMULH:
              arg0 = gimple_call_arg (stmt, 0);
              arg1 = gimple_call_arg (stmt, 1);

              new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                              MULT_HIGHPART_EXPR, arg0, arg1);
              break;
            default:
              break;
            }
        }
    }

  if (new_stmt)
    {
      gsi_replace (gsi, new_stmt, true);
      changed = true;
    }

  return changed;
}
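/* MULT_HIGHPART_EXPR, substituted for the UMULH builtin above, denotes the
   upper 64 bits of the full 128-bit unsigned product.  A sketch only,
   excluded from the build, using GCC's unsigned __int128 extension.  */
#if 0
#include <stdio.h>

static unsigned long
umulh (unsigned long a, unsigned long b)
{
  return (unsigned long) (((unsigned __int128) a * b) >> 64);
}

int
main (void)
{
  /* 2^32 * 2^32 = 2^64, so the high half of the product is exactly 1.  */
  printf ("%lu\n", umulh (1UL << 32, 1UL << 32));
  return 0;
}
#endif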
/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */

/* Nonzero if we need a stack procedure.  */
enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
static enum alpha_procedure_types alpha_procedure_type;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;

/* Compute register masks for saved registers.  */

static void
alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  unsigned int i;

  /* When outputting a thunk, we don't have valid register life info,
     but assemble_start_function wants to output .frame and .mask
     directives.  */
  if (cfun->is_thunk)
    {
      *imaskP = 0;
      *fmaskP = 0;
      return;
    }

  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
    imask |= (1UL << HARD_FRAME_POINTER_REGNUM);

  /* One for every register we have to save.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (! fixed_regs[i] && ! call_used_regs[i]
        && df_regs_ever_live_p (i) && i != REG_RA)
      {
        if (i < 32)
          imask |= (1UL << i);
        else
          fmask |= (1UL << (i - 32));
      }

  /* We need to restore these for the handler.  */
  if (crtl->calls_eh_return)
    {
      for (i = 0; ; ++i)
        {
          unsigned regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;
          imask |= 1UL << regno;
        }
    }

  /* If any register spilled, then spill the return address also.  */
  /* ??? This is required by the Digital stack unwind specification
     and isn't needed if we're doing Dwarf2 unwinding.  */
  if (imask || fmask || alpha_ra_ever_killed ())
    imask |= (1UL << REG_RA);

  *imaskP = imask;
  *fmaskP = fmask;
}
static int
alpha_sa_size (void)
{
  unsigned long mask[2];
  int sa_size = 0;
  int i, j;

  alpha_sa_mask (&mask[0], &mask[1]);

  for (j = 0; j < 2; ++j)
    for (i = 0; i < 32; ++i)
      if ((mask[j] >> i) & 1)
        sa_size++;

  if (TARGET_ABI_OPEN_VMS)
    {
      /* Start with a stack procedure if we make any calls (REG_RA used), or
         need a frame pointer, with a register procedure if we otherwise need
         at least a slot, and with a null procedure in other cases.  */
      if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
        alpha_procedure_type = PT_STACK;
      else if (get_frame_size() != 0)
        alpha_procedure_type = PT_REGISTER;
      else
        alpha_procedure_type = PT_NULL;

      /* Don't reserve space for saving FP & RA yet.  Do that later after we've
         made the final decision on stack procedure vs register procedure.  */
      if (alpha_procedure_type == PT_STACK)
        sa_size -= 2;

      /* Decide whether to refer to objects off our PV via FP or PV.
         If we need FP for something else or if we receive a nonlocal
         goto (which expects PV to contain the value), we must use PV.
         Otherwise, start by assuming we can use FP.  */

      vms_base_regno
        = (frame_pointer_needed
           || cfun->has_nonlocal_label
           || alpha_procedure_type == PT_STACK
           || crtl->outgoing_args_size)
          ? REG_PV : HARD_FRAME_POINTER_REGNUM;

      /* If we want to copy PV into FP, we need to find some register
         in which to save FP.  */

      vms_save_fp_regno = -1;
      if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
        for (i = 0; i < 32; i++)
          if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
            vms_save_fp_regno = i;

      /* A VMS condition handler requires a stack procedure in our
         implementation. (not required by the calling standard).  */
      if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
          || cfun->machine->uses_condition_handler)
        vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
      else if (alpha_procedure_type == PT_NULL)
        vms_base_regno = REG_PV;

      /* Stack unwinding should be done via FP unless we use it for PV.  */
      vms_unwind_regno = (vms_base_regno == REG_PV
                          ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

      /* If this is a stack procedure, allow space for saving FP, RA and
         a condition handler slot if needed.  */
      if (alpha_procedure_type == PT_STACK)
        sa_size += 2 + cfun->machine->uses_condition_handler;
    }
  else
    {
      /* Our size must be even (multiple of 16 bytes).  */
      if (sa_size & 1)
        sa_size++;
    }

  return sa_size * 8;
}
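/* The save area size returned above stays a multiple of 16 bytes because
   the register count is padded to an even number before scaling by 8.
   A sketch only, excluded from the build; the count is hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int sa_size = 5;               /* five registers need saving */
  if (sa_size & 1)
    sa_size++;                   /* pad to an even count */
  printf ("%d\n", sa_size * 8);  /* 48 bytes, a multiple of 16 */
  return 0;
}
#endif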
/* Define the offset between two registers, one to be eliminated,
   and the other its replacement, at the start of a routine.  */

HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
                                  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = alpha_sa_size ();
  ret += ALPHA_ROUND (crtl->outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
                           + crtl->args.pretend_args_size)
              - crtl->args.pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

#if TARGET_ABI_OPEN_VMS

/* Worker function for TARGET_CAN_ELIMINATE.  */

static bool
alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  /* We need the alpha_procedure_type to decide.  Evaluate it now.  */
  alpha_sa_size ();

  switch (alpha_procedure_type)
    {
    case PT_NULL:
      /* NULL procedures have no frame of their own and we only
         know how to resolve from the current stack pointer.  */
      return to == STACK_POINTER_REGNUM;

    case PT_REGISTER:
    case PT_STACK:
      /* We always eliminate except to the stack pointer if there is no
         usable frame pointer at hand.  */
      return (to != STACK_POINTER_REGNUM
              || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
    }

  gcc_unreachable ();
}
/* FROM is to be eliminated for TO.  Return the offset so that TO+offset
   designates the same location as FROM.  */

HOST_WIDE_INT
alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
{
  /* The only possible attempts we ever expect are ARG or FRAME_PTR to
     HARD_FRAME or STACK_PTR.  We need the alpha_procedure_type to decide
     on the proper computations and will need the register save area size
     in most cases.  */

  HOST_WIDE_INT sa_size = alpha_sa_size ();

  /* PT_NULL procedures have no frame of their own and we only allow
     elimination to the stack pointer. This is the argument pointer and we
     resolve the soft frame pointer to that as well.  */

  if (alpha_procedure_type == PT_NULL)
    return 0;

  /* For a PT_STACK procedure the frame layout looks as follows

                      -----> decreasing addresses

                   < size rounded up to 16  |  likewise  >
     --------------#------------------------------+++--------------+++-------#
     incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
     --------------#---------------------------------------------------------#
                   ^                ^                   ^                    ^
              ARG_PTR          FRAME_PTR          HARD_FRAME_PTR        STACK_PTR

     PT_REGISTER procedures are similar in that they may have a frame of their
     own. They have no regs-sa/pv/outgoing-args area.

     We first compute offset to HARD_FRAME_PTR, then add what we need to get
     to STACK_PTR if need be.  */

  {
    HOST_WIDE_INT offset;
    HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;

    switch (from)
      {
      case FRAME_POINTER_REGNUM:
        offset = ALPHA_ROUND (sa_size + pv_save_size);
        break;
      case ARG_POINTER_REGNUM:
        offset = (ALPHA_ROUND (sa_size + pv_save_size
                               + get_frame_size ()
                               + crtl->args.pretend_args_size)
                  - crtl->args.pretend_args_size);
        break;
      default:
        gcc_unreachable ();
      }

    if (to == STACK_POINTER_REGNUM)
      offset += ALPHA_ROUND (crtl->outgoing_args_size);

    return offset;
  }
}
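/* A worked instance of the FRAME_PTR elimination above (a sketch only,
   excluded from the build; the sizes are hypothetical and ALPHA_ROUND is
   assumed to round up to a multiple of 16).  */
#if 0
#include <stdio.h>

#define ROUND16(x) (((x) + 15) & ~15L)

int
main (void)
{
  long sa_size = 24;       /* register save area */
  long pv_save_size = 8;   /* a PT_STACK procedure also saves the PV */

  /* FRAME_POINTER_REGNUM case: offset to HARD_FRAME_PTR.  */
  printf ("%ld\n", ROUND16 (sa_size + pv_save_size));  /* prints 32 */
  return 0;
}
#endif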
#define COMMON_OBJECT "common_object"

static tree
common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
                       tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
                       bool *no_add_attrs ATTRIBUTE_UNUSED)
{
  tree decl = *node;
  gcc_assert (DECL_P (decl));

  DECL_COMMON (decl) = 1;
  return NULL_TREE;
}

static const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity, exclusions } */
  { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false,
    NULL },
  { NULL, 0, 0, false, false, false, NULL, false, NULL }
};

void
vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
                               unsigned HOST_WIDE_INT size,
                               unsigned int align)
{
  tree attr = DECL_ATTRIBUTES (decl);
  fprintf (file, "%s", COMMON_ASM_OP);
  assemble_name (file, name);
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
  /* ??? Unlike on OSF/1, the alignment factor is not in log units.  */
  fprintf (file, ",%u", align / BITS_PER_UNIT);

  if (attr)
    {
      attr = lookup_attribute (COMMON_OBJECT, attr);
      if (attr)
        fprintf (file, ",%s",
                 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
    }

  fputc ('\n', file);
}

#undef COMMON_OBJECT
#endif

static int
alpha_find_lo_sum_using_gp (rtx insn)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
    {
      const_rtx x = *iter;
      if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
        return 1;
    }
  return 0;
}

static int
alpha_does_function_need_gp (void)
{
  rtx_insn *insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return 0;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    return 1;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (cfun->is_thunk)
    return 1;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (crtl->has_nonlocal_goto)
    return 1;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && GET_CODE (PATTERN (insn)) != USE
        && GET_CODE (PATTERN (insn)) != CLOBBER
        && get_attr_usegp (insn))
      return 1;

  return 0;
}

/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx_insn *
set_frame_related_p (void)
{
  rtx_insn *seq = get_insns ();
  rtx_insn *insn;

  end_sequence ();

  if (!seq)
    return NULL;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
        {
          RTX_FRAME_RELATED_P (insn) = 1;
          insn = NEXT_INSN (insn);
        }
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
/* Generates a store with the proper unwind info attached.  VALUE is
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
   contains SP+FRAME_BIAS, and that is the unwind info that should be
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */

static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
                    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem;
  rtx_insn *insn;

  addr = plus_constant (Pmode, base_reg, base_ofs);
  mem = gen_frame_mem (DImode, addr);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
        {
          addr = plus_constant (Pmode, stack_pointer_rtx,
                                frame_bias + base_ofs);
          mem = gen_rtx_MEM (DImode, addr);
        }

      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                    gen_rtx_SET (mem, frame_reg));
    }
}

static void
emit_frame_store (unsigned int regno, rtx base_reg,
                  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}

/* Compute the frame size.  SIZE is the size of the "naked" frame
   and SA_SIZE is the size of the register save area.  */

static HOST_WIDE_INT
compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
{
  if (TARGET_ABI_OPEN_VMS)
    return ALPHA_ROUND (sa_size
                        + (alpha_procedure_type == PT_STACK ? 8 : 0)
                        + size
                        + crtl->args.pretend_args_size);
  else
    return ALPHA_ROUND (crtl->outgoing_args_size)
           + sa_size
           + ALPHA_ROUND (size
                          + crtl->args.pretend_args_size);
}
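/* A worked instance of the non-VMS branch above (a sketch only, excluded
   from the build; all component sizes are hypothetical and ALPHA_ROUND is
   assumed to round up to a multiple of 16).  */
#if 0
#include <stdio.h>

#define ROUND16(x) (((x) + 15) & ~15L)

int
main (void)
{
  long outgoing_args = 40, sa_size = 48, size = 100, pretend = 0;
  long frame_size = ROUND16 (outgoing_args) + sa_size
                    + ROUND16 (size + pretend);
  printf ("%ld\n", frame_size);  /* 48 + 48 + 112 = 208 */
  return 0;
}
#endif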
/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
        these are 'normal' functions with local vars and which are
        calling other functions
   - register frame (PROC_REGISTER)
        keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor)
   This is done with the '.pdesc' command.

   On not-vms, we don't really differentiate between the two, as we can
   simply allocate stack without saving registers.  */

void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size, sa_bias;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Probed stack size; it additionally includes the size of
     the "reserve region" if any.  */
  HOST_WIDE_INT probed_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;
  int i;

  sa_size = alpha_sa_size ();
  frame_size = compute_frame_size (get_frame_size (), sa_size);

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
        emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    emit_insn (gen_prologue_mcount ());

  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */

  probed_size = frame_size;
  if (flag_stack_check || flag_stack_clash_protection)
    probed_size += get_stack_check_protect ();

  if (probed_size <= 32768)
    {
      if (probed_size > 4096)
        {
          int probed;

          for (probed = 4096; probed < probed_size; probed += 8192)
            emit_insn (gen_probe_stack (GEN_INT (-probed)));

          /* We only have to do this probe if we aren't saving registers or
             if we are probing beyond the frame because of -fstack-check.  */
          if ((sa_size == 0 && probed_size > probed - 4096)
              || flag_stack_check || flag_stack_clash_protection)
            emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
        }

      if (frame_size != 0)
        FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                    GEN_INT (-frame_size))));
    }
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
         number of 8192 byte blocks to probe.  We then probe each block
         in the loop and then set SP to the proper location.  If the
         amount remaining is > 4096, we have to do one more probe if we
         are not saving any registers or if we are probing beyond the
         frame because of -fstack-check.  */

      HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
      HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx_insn *seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));

      /* Because of the difficulty in emitting a new basic block this
         late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if ((leftover > 4096 && sa_size == 0)
          || flag_stack_check || flag_stack_clash_protection)
        {
          rtx last = gen_rtx_MEM (DImode,
                                  plus_constant (Pmode, ptr, -leftover));
          MEM_VOLATILE_P (last) = 1;
          emit_move_insn (last, const0_rtx);
        }

      if (flag_stack_check || flag_stack_clash_protection)
        {
          /* If -fstack-check is specified we have to load the entire
             constant into a register and subtract from the sp in one go,
             because the probed stack size is not equal to the frame size.  */
          HOST_WIDE_INT lo, hi;
          lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
          hi = frame_size - lo;

          emit_move_insn (ptr, GEN_INT (hi));
          emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
          seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
                                       ptr));
        }
      else
        seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
                                     GEN_INT (-leftover)));

      /* This alternative is special, because the DWARF code cannot
         possibly intuit through the loop above.  So we invent this
         note it looks at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      add_reg_note (seq, REG_FRAME_RELATED_EXPR,
                    gen_rtx_SET (stack_pointer_rtx,
                                 plus_constant (Pmode, stack_pointer_rtx,
                                                -frame_size)));
    }

  /* Cope with very large offsets to the register save area.  */
  sa_bias = 0;
  sa_reg = stack_pointer_rtx;
  if (reg_offset + sa_size > 0x8000)
    {
      int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
      rtx sa_bias_rtx;

      if (low + sa_size <= 0x8000)
        sa_bias = reg_offset - low, reg_offset = low;
      else
        sa_bias = reg_offset, reg_offset = 0;

      sa_reg = gen_rtx_REG (DImode, 24);
      sa_bias_rtx = GEN_INT (sa_bias);

      if (add_operand (sa_bias_rtx, DImode))
        emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
      else
        {
          emit_move_insn (sa_reg, sa_bias_rtx);
          emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
        }
    }

  /* Save regs in stack order.  Beginning with VMS PV.  */
  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
    emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

  /* Save register RA next.  */
  if (imask & (1UL << REG_RA))
    {
      emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
      imask &= ~(1UL << REG_RA);
      reg_offset += 8;
    }

  /* Now save any other registers required to be saved.  */
  for (i = 0; i < 31; i++)
    if (imask & (1UL << i))
      {
        emit_frame_store (i, sa_reg, sa_bias, reg_offset);
        reg_offset += 8;
      }

  for (i = 0; i < 31; i++)
    if (fmask & (1UL << i))
      {
        emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
        reg_offset += 8;
      }

  if (TARGET_ABI_OPEN_VMS)
    {
      /* Register frame procedures save the fp.  */
      if (alpha_procedure_type == PT_REGISTER)
        {
          rtx_insn *insn
            = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
                              hard_frame_pointer_rtx);
          add_reg_note (insn, REG_CFA_REGISTER, NULL);
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
        emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
                                    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
          && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
        FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (crtl->outgoing_args_size != 0)
        {
          rtx_insn *seq
            = emit_move_insn (stack_pointer_rtx,
                              plus_constant
                              (Pmode, hard_frame_pointer_rtx,
                               - (ALPHA_ROUND
                                  (crtl->outgoing_args_size))));

          /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
             if ! frame_pointer_needed. Setting the bit will change the CFA
             computation rule to use sp again, which would be wrong if we had
             frame_pointer_needed, as this means sp might move unpredictably
             later on.

             Also, note that

               frame_pointer_needed
               => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM

             and

               crtl->outgoing_args_size != 0
               => alpha_procedure_type != PT_NULL,

             so when we are not setting the bit here, we are guaranteed to
             have emitted an FRP frame pointer update just before.  */
          RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
        }
    }
  else
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
        {
          if (TARGET_CAN_FAULT_IN_PROLOGUE)
            FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
          else
            /* This must always be the last instruction in the
               prologue, thus we emit a special move + clobber.  */
            FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
                                         stack_pointer_rtx, sa_reg)));
        }
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
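/* The lo/hi decomposition used in the -fstack-check path above splits a
   constant into a sign-extended 16-bit low part (an lda immediate) and a
   remainder, so that lo + hi reassembles the original.  A sketch only,
   excluded from the build; the frame size is hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  long frame_size = 0x12348765L;
  long lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;  /* -30875 */
  long hi = frame_size - lo;                            /* 0x12350000 */

  printf ("%d\n", lo + hi == frame_size);  /* prints 1 */
  return 0;
}
#endif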
/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Output the textual info surrounding the prologue.  */

void
alpha_start_function (FILE *file, const char *fnname,
                      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  unsigned HOST_WIDE_INT frame_size;
  /* The maximum debuggable frame size.  */
  unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  char *tramp_label = (char *) alloca (strlen (fnname) + 6);
  int i;

#if TARGET_ABI_OPEN_VMS
  vms_start_function (fnname);
#endif

  alpha_fnname = fnname;
  sa_size = alpha_sa_size ();
  frame_size = compute_frame_size (get_frame_size (), sa_size);

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
         Otherwise, do it here.  */
      if (TARGET_ABI_OSF
          && ! alpha_function_needs_gp
          && ! cfun->is_thunk)
        {
          putc ('$', file);
          assemble_name (file, fnname);
          fputs ("..ng:\n", file);
        }
    }
  /* Nested functions on VMS that are potentially called via trampoline
     get a special transfer entry point that loads the called functions
     procedure descriptor and static chain.  */
  if (TARGET_ABI_OPEN_VMS
      && !TREE_PUBLIC (decl)
      && DECL_CONTEXT (decl)
      && !TYPE_P (DECL_CONTEXT (decl))
      && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
    {
      strcpy (tramp_label, fnname);
      strcat (tramp_label, "..tr");
      ASM_OUTPUT_LABEL (file, tramp_label);
      fprintf (file, "\tldq $1,24($27)\n");
      fprintf (file, "\tldq $27,16($27)\n");
    }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;

  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (TARGET_ABI_OSF
      && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
         math-library routines.  The value we set it to is PDSC_EXC_IEEE
         (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
  alpha_arg_offset = -frame_size + 48;

  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
             HOST_WIDE_INT_PRINT_DEC "\n",
             vms_unwind_regno,
             frame_size >= (1UL << 31) ? 0 : frame_size,
             reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
             (frame_pointer_needed
              ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
             frame_size >= max_frame_size ? 0 : frame_size,
             crtl->args.pretend_args_size);

  /* Describe which registers were spilled.  */
  if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
        /* ??? Does VMS care if mask contains ra?  The old code didn't
           set it, so I don't here.  */
        fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
        fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
        fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
        {
          fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
                   frame_size >= max_frame_size ? 0 : reg_offset - frame_size);

          for (i = 0; i < 32; ++i)
            if (imask & (1UL << i))
              reg_offset += 8;
        }

      if (fmask)
        fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
                 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
    }

#if TARGET_ABI_OPEN_VMS
  /* If a user condition handler has been installed at some point, emit
     the procedure descriptor bits to point the Condition Handling Facility
     at the indirection wrapper, and state the fp offset at which the user
     handler may be found.  */
  if (cfun->machine->uses_condition_handler)
    {
      fprintf (file, "\t.handler __gcc_shell_handler\n");
      fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
    }

#ifdef TARGET_VMS_CRASH_DEBUG
  /* Support of minimal traceback info.  */
  switch_to_section (readonly_data_section);
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  switch_to_section (text_section);
#endif
#endif /* TARGET_ABI_OPEN_VMS */
}

/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
             alpha_function_needs_gp || cfun->is_thunk);
}
/* Write function epilogue.  */

void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem, reg, insn;
  rtx eh_ofs;
  rtx cfa_restores = NULL_RTX;
  int i;

  sa_size = alpha_sa_size ();
  frame_size = compute_frame_size (get_frame_size (), sa_size);

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
        reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
      else
        reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = (TARGET_ABI_OPEN_VMS
       ? alpha_procedure_type == PT_STACK
       : frame_pointer_needed);
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (crtl->calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;

  if (sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if (TARGET_ABI_OPEN_VMS
          ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
          : frame_pointer_needed)
        emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
        {
          int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
          HOST_WIDE_INT bias;

          if (low + sa_size <= 0x8000)
            bias = reg_offset - low, reg_offset = low;
          else
            bias = reg_offset, reg_offset = 0;

          sa_reg = gen_rtx_REG (DImode, 22);
          sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);

          emit_move_insn (sa_reg, sa_reg_exp);
        }

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
      reg = gen_rtx_REG (DImode, REG_RA);
      emit_move_insn (reg, mem);
      cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
        if (imask & (1UL << i))
          {
            if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
              fp_offset = reg_offset;
            else
              {
                mem = gen_frame_mem (DImode,
                                     plus_constant (Pmode, sa_reg,
                                                    reg_offset));
                reg = gen_rtx_REG (DImode, i);
                emit_move_insn (reg, mem);
                cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
                                               cfa_restores);
              }
            reg_offset += 8;
          }

      for (i = 0; i < 31; ++i)
        if (fmask & (1UL << i))
          {
            mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
                                                        reg_offset));
            reg = gen_rtx_REG (DFmode, i+32);
            emit_move_insn (reg, mem);
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
            reg_offset += 8;
          }
    }

  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
        {
          sp_adj1 = gen_rtx_REG (DImode, 23);
          emit_move_insn (sp_adj1,
                          gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
        }

      /* If the stack size is large, begin computation into a temporary
         register so as not to interfere with a potential fp restore,
         which must be consecutive with an SP restore.  */
      if (frame_size < 32768 && !cfun->calls_alloca)
        sp_adj2 = GEN_INT (frame_size);
      else if (frame_size < 0x40007fffL)
        {
          int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

          sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
          if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
            sp_adj1 = sa_reg;
          else
            {
              sp_adj1 = gen_rtx_REG (DImode, 23);
              emit_move_insn (sp_adj1, sp_adj2);
            }
          sp_adj2 = GEN_INT (low);
        }
      else
        {
          rtx tmp = gen_rtx_REG (DImode, 23);
          sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
          if (!sp_adj2)
            {
              /* We can't drop new things to memory this late, afaik,
                 so build it up by pieces.  */
              sp_adj2 = alpha_emit_set_long_const (tmp, frame_size);
              gcc_assert (sp_adj2);
            }
        }

      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (fp_is_frame_pointer)
        {
          emit_insn (gen_blockage ());
          mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
                                                      fp_offset));
          emit_move_insn (hard_frame_pointer_rtx, mem);
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                         hard_frame_pointer_rtx, cfa_restores);
        }
      else if (TARGET_ABI_OPEN_VMS)
        {
          emit_insn (gen_blockage ());
          emit_move_insn (hard_frame_pointer_rtx,
                          gen_rtx_REG (DImode, vms_save_fp_regno));
          cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                         hard_frame_pointer_rtx, cfa_restores);
        }

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
        insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
      else
        insn = emit_move_insn (stack_pointer_rtx,
                               gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
      REG_NOTES (insn) = cfa_restores;
      add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      gcc_assert (cfa_restores == NULL);

      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
        {
          emit_insn (gen_blockage ());
          insn = emit_move_insn (hard_frame_pointer_rtx,
                                 gen_rtx_REG (DImode, vms_save_fp_regno));
          add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
}
/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
  rtx_insn *insn;

  /* We output a nop after noreturn calls at the very end of the function to
     ensure that the return address always remains in the caller's code range,
     as not doing so might confuse unwinding engines.  */
  insn = get_last_insn ();
  if (!INSN_P (insn))
    insn = prev_active_insn (insn);
  if (insn && CALL_P (insn))
    output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);

#if TARGET_ABI_OPEN_VMS
  /* Write the linkage entries.  */
  alpha_write_linkage (file, fnname);
#endif

  /* End the function.  */
  if (TARGET_ABI_OPEN_VMS
      || !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;
}
#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                           tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this_rtx, funexp;
  rtx_insn *insn;

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 17);
  else
    this_rtx = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (hi + lo == delta)
    {
      if (hi)
        emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
      if (lo)
        emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0), delta);
      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
        {
          if (hi)
            emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
        }
      else
        {
          tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
                                            vcall_offset);
          emit_insn (gen_adddi3 (tmp, tmp, tmp2));
          lo = 0;
        }
      if (lo)
        tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
        tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */
/* Debugging support.  */

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;

/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }

  else if (name != current_function_file
           && strcmp (name, current_function_file) != 0)
    {
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}
/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;    /* Mask of int regs */
    unsigned int fp    : 31;    /* Mask of fp regs */
    unsigned int mem   :  1;    /* mem == imem | fpmem */
  } used, defd;
};

/* Summary the effects of expression X on the machine.  Update SUM, a pointer
   to the summary structure.  SET is nonzero if the insn is setting the
   object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
         ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
        summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
        int regno = REGNO (x);
        unsigned long mask = ((unsigned long) 1) << (regno % 32);

        if (regno == 31 || regno == 63)
          break;

        if (set)
          {
            if (regno < 32)
              sum->defd.i |= mask;
            else
              sum->defd.fp |= mask;
          }
        else
          {
            if (regno < 32)
              sum->used.i |= mask;
            else
              sum->used.fp |= mask;
          }
      }
      break;

    case MEM:
      if (set)
        sum->defd.mem = 1;
      else
        sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_WIDE_INT:  case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:       case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        switch (format_ptr[i])
          {
          case 'e':
            summarize_insn (XEXP (x, i), sum, 0);
            break;

          case 'E':
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              summarize_insn (XVECEXP (x, i, j), sum, 0);
            break;

          case 'i':
            break;

          default:
            gcc_unreachable ();
          }
    }
}
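/* The REG case above folds both register files into 31-bit masks: integer
   regs 0-30 set bit regno in .i, fp regs 32-62 set bit regno % 32 in .fp,
   and regs 31/63 (always zero) are ignored.  A sketch only, excluded from
   the build.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long mask27 = 1UL << (27 % 32);  /* $27  -> used.i  */
  unsigned long mask59 = 1UL << (59 % 32);  /* $f27 -> used.fp */
  printf ("0x%lx 0x%lx\n", mask27, mask59); /* both 0x8000000 */
  return 0;
}
#endif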
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */

static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx_insn *i, *n;

  trap_pending = 0;
  exception_nesting = 0;

  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (NOTE_P (i))
        {
          switch (NOTE_KIND (i))
            {
            case NOTE_INSN_EH_REGION_BEG:
              exception_nesting++;
              if (trap_pending)
                goto close_shadow;
              break;

            case NOTE_INSN_EH_REGION_END:
              exception_nesting--;
              if (trap_pending)
                goto close_shadow;
              break;

            case NOTE_INSN_EPILOGUE_BEG:
              if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
                goto close_shadow;
              break;
            }
        }
      else if (trap_pending)
        {
          if (alpha_tp == ALPHA_TP_FUNC)
            {
              if (JUMP_P (i)
                  && GET_CODE (PATTERN (i)) == RETURN)
                goto close_shadow;
            }
          else if (alpha_tp == ALPHA_TP_INSN)
            {
              if (optimize > 0)
                {
                  struct shadow_summary sum;

                  sum.used.i = 0;
                  sum.used.fp = 0;
                  sum.used.mem = 0;
                  sum.defd = sum.used;

                  switch (GET_CODE (i))
                    {
                    case INSN:
                      /* Annoyingly, get_attr_trap will die on these.  */
                      if (GET_CODE (PATTERN (i)) == USE
                          || GET_CODE (PATTERN (i)) == CLOBBER)
                        break;

                      summarize_insn (PATTERN (i), &sum, 0);

                      if ((sum.defd.i & shadow.defd.i)
                          || (sum.defd.fp & shadow.defd.fp))
                        {
                          /* (c) would be violated */
                          goto close_shadow;
                        }

                      /* Combine shadow with summary of current insn: */
                      shadow.used.i   |= sum.used.i;
                      shadow.used.fp  |= sum.used.fp;
                      shadow.used.mem |= sum.used.mem;
                      shadow.defd.i   |= sum.defd.i;
                      shadow.defd.fp  |= sum.defd.fp;
                      shadow.defd.mem |= sum.defd.mem;

                      if ((sum.defd.i & shadow.used.i)
                          || (sum.defd.fp & shadow.used.fp)
                          || (sum.defd.mem & shadow.used.mem))
                        {
                          /* (a) would be violated (also takes care of (b))  */
                          gcc_assert (get_attr_trap (i) != TRAP_YES
                                      || (!(sum.defd.i & sum.used.i)
                                          && !(sum.defd.fp & sum.used.fp)));

                          goto close_shadow;
                        }
                      break;

                    case BARRIER:
                      /* __builtin_unreachable can expand to no code at all,
                         leaving (barrier) RTXes in the instruction stream.  */
                      goto close_shadow_notrapb;

                    case JUMP_INSN:
                    case CALL_INSN:
                    case CODE_LABEL:
                      goto close_shadow;

                    default:
                      gcc_unreachable ();
                    }
                }
              else
                {
                close_shadow:
                  n = emit_insn_before (gen_trapb (), i);
                  PUT_MODE (n, TImode);
                  PUT_MODE (i, TImode);
                close_shadow_notrapb:
                  trap_pending = 0;
                  shadow.used.i = 0;
                  shadow.used.fp = 0;
                  shadow.used.mem = 0;
                  shadow.defd = shadow.used;
                }
            }
        }

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
          && NONJUMP_INSN_P (i)
          && GET_CODE (PATTERN (i)) != USE
          && GET_CODE (PATTERN (i)) != CLOBBER
          && get_attr_trap (i) == TRAP_YES)
        {
          if (optimize && !trap_pending)
            summarize_insn (PATTERN (i), &shadow, 0);
          trap_pending = 1;
        }
    }
}
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};

static enum alphaev4_pipe
alphaev4_insn_pipe (rtx_insn *insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:              /* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:            /* fake */
    case TYPE_FTOI:             /* fake */
    case TYPE_ITOF:             /* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}

static enum alphaev5_pipe
alphaev5_insn_pipe (rtx_insn *insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LDSYM:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_LD_L:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:             /* fake */
    case TYPE_ITOF:             /* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:            /* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */

static rtx_insn *
alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
        {
        case EV4_STOP:
          /* Force complex instructions to start new groups.  */
          if (in_use)
            goto done;

          /* If this is a completely unrecognized insn, it's an asm.
             We don't know how long it is, so record length as -1 to
             signal a needed realignment.  */
          if (recog_memoized (insn) < 0)
            len = -1;
          else
            len = get_attr_length (insn);
          goto next_and_done;

        case EV4_IBX:
          if (in_use & EV4_IB0)
            {
              if (in_use & EV4_IB1)
                goto done;
              in_use |= EV4_IB1;
            }
          else
            in_use |= EV4_IB0 | EV4_IBX;
          break;

        case EV4_IB0:
          if (in_use & EV4_IB0)
            {
              if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
                goto done;
              in_use |= EV4_IB1;
            }
          in_use |= EV4_IB0;
          break;

        case EV4_IB1:
          if (in_use & EV4_IB1)
            goto done;
          in_use |= EV4_IB1;
          break;

        default:
          gcc_unreachable ();
        }
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (JUMP_P (insn))
        goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
        goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
        goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
        goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}

/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */

static rtx_insn *
alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev5_pipe pipe;

      pipe = alphaev5_insn_pipe (insn);
      switch (pipe)
        {
        case EV5_STOP:
          /* Force complex instructions to start new groups.  */
          if (in_use)
            goto done;

          /* If this is a completely unrecognized insn, it's an asm.
             We don't know how long it is, so record length as -1 to
             signal a needed realignment.  */
          if (recog_memoized (insn) < 0)
            len = -1;
          else
            len = get_attr_length (insn);
          goto next_and_done;

        /* ??? Most of the places below, we would like to assert never
           happen, as it would indicate an error either in Haifa, or
           in the scheduling description.  Unfortunately, Haifa never
           schedules the last instruction of the BB, so we don't have
           an accurate TI bit to go off.  */
        case EV5_E01:
          if (in_use & EV5_E0)
            {
              if (in_use & EV5_E1)
                goto done;
              in_use |= EV5_E1;
            }
          else
            in_use |= EV5_E0 | EV5_E01;
          break;

        case EV5_E0:
          if (in_use & EV5_E0)
            {
              if (!(in_use & EV5_E01) || (in_use & EV5_E1))
                goto done;
              in_use |= EV5_E1;
            }
          in_use |= EV5_E0;
          break;

        case EV5_E1:
          if (in_use & EV5_E1)
            goto done;
          in_use |= EV5_E1;
          break;

        case EV5_FAM:
          if (in_use & EV5_FA)
            {
              if (in_use & EV5_FM)
                goto done;
              in_use |= EV5_FM;
            }
          else
            in_use |= EV5_FA | EV5_FAM;
          break;

        case EV5_FA:
          if (in_use & EV5_FA)
            goto done;
          in_use |= EV5_FA;
          break;

        case EV5_FM:
          if (in_use & EV5_FM)
            goto done;
          in_use |= EV5_FM;
          break;

        case EV5_NONE:
          break;

        default:
          gcc_unreachable ();
        }
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      /* ??? If this is predicted not-taken, slotting continues, except
         that no more IBR, FBR, or JSR insns may be slotted.  */
      if (JUMP_P (insn))
        goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
        goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
        goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
        goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}
/* The instruction group alignment main loop.  */

static void
alpha_align_insns_1 (unsigned int max_align,
                     rtx_insn *(*next_group) (rtx_insn *, int *, int *),
                     rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx_insn *i, *next;

  /* Let shorten branches care for assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (NOTE_P (i))
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (LABEL_P (i))
        {
          unsigned int new_align = 1 << label_to_alignment (i);

          if (new_align >= align)
            {
              align = new_align < max_align ? new_align : max_align;
              ofs = 0;
            }

          else if (ofs & (new_align-1))
            ofs = (ofs | (new_align-1)) + 1;
          gcc_assert (!len);
        }

      /* Handle complex instructions special.  */
      else if (in_use == 0)
        {
          /* Asms will have length < 0.  This is a signal that we have
             lost alignment knowledge.  Assume, however, that the asm
             will not mis-align instructions.  */
          if (len < 0)
            {
              ofs = 0;
              align = 4;
              len = 0;
            }
        }

      /* If the known alignment is smaller than the recognized insn group,
         realign the output.  */
      else if ((int) align < len)
        {
          unsigned int new_log_align = len > 8 ? 4 : 3;
          rtx_insn *prev, *where;

          where = prev = prev_nonnote_insn (i);
          if (!where || !LABEL_P (where))
            where = i;

          /* Can't realign between a call and its gp reload.  */
          if (! (TARGET_EXPLICIT_RELOCS
                 && prev && CALL_P (prev)))
            {
              emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
              align = 1 << new_log_align;
              ofs = 0;
            }
        }

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
        ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
         we need to add padding to keep the group together.  Rather
         than simply leaving the insn filling to the assembler, we
         can make use of the knowledge of what sorts of instructions
         were issued in the previous group to make sure that all of
         the added nops are really free.  */
      else if (ofs + len > (int) align)
        {
          int nop_count = (align - ofs) / 4;
          rtx_insn *where;

          /* Insert nops before labels, branches, and calls to truly merge
             the execution of the nops with the previous instruction group.  */
          where = prev_nonnote_insn (i);
          if (where)
            {
              if (LABEL_P (where))
                {
                  rtx_insn *where2 = prev_nonnote_insn (where);
                  if (where2 && JUMP_P (where2))
                    where = where2;
                }
              else if (NONJUMP_INSN_P (where))
                where = i;
            }
          else
            where = i;

          do
            emit_insn_before ((*next_nop)(&prev_in_use), where);
          while (--nop_count);
          ofs = 0;
        }

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}

static void
alpha_align_insns (void)
{
  if (alpha_tune == PROCESSOR_EV4)
    alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
  else if (alpha_tune == PROCESSOR_EV5)
    alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
  else
    gcc_unreachable ();
}
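/* The label-resync step above rounds the group offset up with a bit trick:
   OR-ing in new_align-1 and adding one lands on the next multiple of
   NEW_ALIGN.  A sketch only, excluded from the build; the values are
   hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int ofs = 12, new_align = 8;            /* group offset and alignment */
  if (ofs & (new_align - 1))
    ofs = (ofs | (new_align - 1)) + 1;    /* 12 -> 16 */
  printf ("%d\n", ofs);
  return 0;
}
#endif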
/* Insert an unop between sibcall or noreturn function call and GP load.  */

static void
alpha_pad_function_end (void)
{
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (!CALL_P (insn)
          || !(SIBLING_CALL_P (insn)
               || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
        continue;

      /* Make sure we do not split a call and its corresponding
         CALL_ARG_LOCATION note.  */
      next = NEXT_INSN (insn);
      if (next == NULL)
        continue;
      if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
        insn = next;

      next = next_active_insn (insn);
      if (next)
        {
          rtx pat = PATTERN (next);

          if (GET_CODE (pat) == SET
              && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
              && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
            emit_insn_after (gen_unop (), insn);
        }
    }
}

/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  /* Workaround for a linker error that triggers when an exception
     handler immediatelly follows a sibcall or a noreturn function.

In the sibcall case:

     The instruction stream from an object file:

 1d8:   00 00 fb 6b     jmp     (t12)
 1dc:   00 00 ba 27     ldah    gp,0(ra)
 1e0:   00 00 bd 23     lda     gp,0(gp)
 1e4:   00 00 7d a7     ldq     t12,0(gp)
 1e8:   00 40 5b 6b     jsr     ra,(t12),1ec <__funcZ+0x1ec>

     was converted in the final link pass to:

   12003aa88:   67 fa ff c3     br      120039428 <...>
   12003aa8c:   00 00 fe 2f     unop
   12003aa90:   00 00 fe 2f     unop
   12003aa94:   48 83 7d a7     ldq     t12,-31928(gp)
   12003aa98:   00 40 5b 6b     jsr     ra,(t12),12003aa9c <__func+0x1ec>

And in the noreturn case:

     The instruction stream from an object file:

  54:   00 40 5b 6b     jsr     ra,(t12),58 <__func+0x58>
  58:   00 00 ba 27     ldah    gp,0(ra)
  5c:   00 00 bd 23     lda     gp,0(gp)
  60:   00 00 7d a7     ldq     t12,0(gp)
  64:   00 40 5b 6b     jsr     ra,(t12),68 <__func+0x68>

     was converted in the final link pass to:

   fdb24:       a0 03 40 d3     bsr     ra,fe9a8 <_called_func+0x8>
   fdb28:       00 00 fe 2f     unop
   fdb2c:       00 00 fe 2f     unop
   fdb30:       30 82 7d a7     ldq     t12,-32208(gp)
   fdb34:       00 40 5b 6b     jsr     ra,(t12),fdb38 <__func+0x68>

     GP load instructions were wrongly cleared by the linker relaxation
     pass.  This workaround prevents removal of GP loads by inserting
     an unop instruction between a sibcall or noreturn function call and
     exception handler prologue.  */

  if (current_function_has_exception_handlers ())
    alpha_pad_function_end ();

  /* CALL_PAL that implements trap insn, updates program counter to point
     after the insn.  In case trap is the last insn in the function,
     emit NOP to guarantee that PC remains inside function boundaries.
     This workaround is needed to get reliable backtraces.  */

  rtx_insn *insn = prev_active_insn (get_last_insn ());

  if (insn && NONJUMP_INSN_P (insn))
    {
      rtx pat = PATTERN (insn);
      if (GET_CODE (pat) == PARALLEL)
        {
          rtx vec = XVECEXP (pat, 0, 0);
          if (GET_CODE (vec) == TRAP_IF
              && XEXP (vec, 0) == const1_rtx)
            emit_insn_after (gen_unop (), insn);
        }
    }
}

static void
alpha_file_start (void)
{
  default_file_start ();

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (TARGET_ABI_OSF)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
	arch = "ev6";
      else if (TARGET_MAX)
	arch = "pca56";
      else if (TARGET_BWX)
	arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
	arch = "ev5";
      else
	arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
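
/* For illustration, on a BWX-capable (ev56) OSF target the file
   prologue emitted above would read roughly:

	.set noreorder
	.set volatile
	.set noat
	.set nomacro	(only with explicit relocations)
	.arch ev56  */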

/* Since we don't have a .dynbss section, we should not allow global
   relocations in the .rodata section.  */

static int
alpha_elf_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 2;
}

/* Return a section for X.  The only special thing we do here is to
   honor small data.  */

static section *
alpha_elf_select_rtx_section (machine_mode mode, rtx x,
			      unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

static unsigned int
alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags (decl, name, reloc);
  return flags;
}
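
/* SECTION_SMALL marks .sdata/.sbss as small-data: objects placed there
   are expected to be reachable from $gp with a 16-bit displacement,
   which is what lets -msmall-data references use a single memory
   instruction.  */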

/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum reloc_kind
{
  KIND_LINKAGE,
  KIND_CODEADDR
};

struct GTY(()) alpha_links
{
  rtx func;
  rtx linkage;
  enum reloc_kind rkind;
};

#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (machine_mode mode)
{
  switch (mode)
    {
    case E_SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case E_DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
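
/* For illustration: the AI register packs the argument count into the
   low 8 bits and a 3-bit type code for each of the first six arguments
   above that, so a call with two arguments of types T0 and T1 yields
   2 | (T0 << 8) | (T1 << 11).  */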

/* Return a SYMBOL_REF representing the reference to the .linkage entry
   of function FUNC built for calls made from CFUNDECL.  LFLAG is 1 if
   this is the reference to the linkage pointer value, 0 if this is the
   reference to the function entry value.  RFLAG is 1 if this is a
   reduced reference (code address only), 0 if this is a full reference.  */

rtx
alpha_use_linkage (rtx func, bool lflag, bool rflag)
{
  struct alpha_links *al = NULL;
  const char *name = XSTR (func, 0);

  if (cfun->machine->links)
    {
      /* Is this name already defined?  */
      alpha_links **slot = cfun->machine->links->get (name);
      if (slot)
	al = *slot;
    }
  else
    cfun->machine->links
      = hash_map<nofree_string_hash, alpha_links *>::create_ggc (64);

  if (al == NULL)
    {
      size_t buf_len;
      char *linksym;
      tree id;

      if (name[0] == '*')
	name++;

      /* Follow transparent alias, as this is used for CRTL translations.  */
      id = maybe_get_identifier (name);
      if (id)
	{
	  while (IDENTIFIER_TRANSPARENT_ALIAS (id))
	    id = TREE_CHAIN (id);
	  name = IDENTIFIER_POINTER (id);
	}

      buf_len = strlen (name) + 8 + 9;
      linksym = (char *) alloca (buf_len);
      snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);

      al = ggc_alloc<alpha_links> ();
      al->func = func;
      al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));

      cfun->machine->links->put (ggc_strdup (name), al);
    }

  al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
  else
    return al->linkage;
}
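
/* The linkage symbol created above has the form "$N..name..lk", where
   N is the per-function funcdef number.  A full (LFLAG) reference
   addresses the second quadword of the linkage pair, hence the
   constant offset of 8.  */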

static void
alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
{
  ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
  if (link->rkind == KIND_CODEADDR)
    {
      /* External and used, request code address.  */
      fprintf (stream, "\t.code_address ");
    }
  else
    {
      if (!SYMBOL_REF_EXTERNAL_P (link->func)
	  && SYMBOL_REF_LOCAL_P (link->func))
	{
	  /* Locally defined, build linkage pair.  */
	  fprintf (stream, "\t.quad %s..en\n", name);
	  fprintf (stream, "\t.quad ");
	}
      else
	{
	  /* External, request linkage pair.  */
	  fprintf (stream, "\t.linkage ");
	}
    }
  assemble_name (stream, name);
  fputs ("\n", stream);
}

void
alpha_write_linkage (FILE *stream, const char *funname)
{
  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

#ifdef TARGET_VMS_CRASH_DEBUG
  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
#endif

  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
	   alpha_procedure_type == PT_STACK ? "stack"
	   : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (cfun->machine->links)
    {
      hash_map<nofree_string_hash, alpha_links *>::iterator iter
	= cfun->machine->links->begin ();
      for (; iter != cfun->machine->links->end (); ++iter)
	alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
    }
}

/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
		       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}

/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

#else

rtx
alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
		   bool lflag ATTRIBUTE_UNUSED,
		   bool rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}

/* On the Alpha, we use this to disable the floating-point registers
   when they don't exist.  */

static void
alpha_conditional_register_usage (void)
{
  int i;
  if (! TARGET_FPREGS)
    for (i = 32; i < 63; i++)
      fixed_regs[i] = call_used_regs[i] = 1;
}
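
/* Registers 32..62 are the floating-point registers $f0..$f30; $f31 is
   hardwired to zero and is already fixed, so it is not touched here.  */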

/* Canonicalize a comparison from one we don't have to one we do have.  */

static void
alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			       bool op0_preserve_value)
{
  if (!op0_preserve_value
      && (*code == GE || *code == GT || *code == GEU || *code == GTU)
      && (REG_P (*op1) || *op1 == const0_rtx))
    {
      std::swap (*op0, *op1);
      *code = (int) swap_condition ((enum rtx_code) *code);
    }

  if ((*code == LT || *code == LTU)
      && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
    {
      *code = *code == LT ? LE : LEU;
      *op1 = GEN_INT (255);
    }
}
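
/* The second transformation works because Alpha operate-format
   instructions encode an unsigned 8-bit literal operand: 255 is
   encodable while 256 is not, so e.g. "x < 256" becomes the
   equivalent "x <= 255".  */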

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */

static void
alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);

  tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
  tree new_fenv_var, reload_fenv, restore_fnenv;
  tree update_call, atomic_feraiseexcept, hold_fnclex;

  /* Assume OSF/1 compatible interfaces.  */
  if (!TARGET_ABI_OSF)
    return;

  /* Generate the equivalent of :
       unsigned long fenv_var;
       fenv_var = __ieee_get_fp_control ();

       unsigned long masked_fenv;
       masked_fenv = fenv_var & mask;

       __ieee_set_fp_control (masked_fenv);  */

  fenv_var = create_tmp_var_raw (long_unsigned_type_node);
  get_fpscr
    = build_fn_decl ("__ieee_get_fp_control",
		     build_function_type_list (long_unsigned_type_node, NULL));
  set_fpscr
    = build_fn_decl ("__ieee_set_fp_control",
		     build_function_type_list (void_type_node, NULL));
  mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
  ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
		    fenv_var, build_call_expr (get_fpscr, 0));
  masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
  hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
  *hold = build2 (COMPOUND_EXPR, void_type_node,
		  build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
		  hold_fnclex);

  /* Store the value of masked_fenv to clear the exceptions:
     __ieee_set_fp_control (masked_fenv);  */

  *clear = build_call_expr (set_fpscr, 1, masked_fenv);

  /* Generate the equivalent of :
       unsigned long new_fenv_var;
       new_fenv_var = __ieee_get_fp_control ();

       __ieee_set_fp_control (fenv_var);

       __atomic_feraiseexcept (new_fenv_var);  */

  new_fenv_var = create_tmp_var_raw (long_unsigned_type_node);
  reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
			build_call_expr (get_fpscr, 0));
  restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
  atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  update_call
    = build_call_expr (atomic_feraiseexcept, 1,
		       fold_convert (integer_type_node, new_fenv_var));
  *update = build2 (COMPOUND_EXPR, void_type_node,
		    build2 (COMPOUND_EXPR, void_type_node,
			    reload_fenv, restore_fnenv),
		    update_call);
}
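
/* Roughly, in the compare-and-exchange loop built for an atomic
   compound assignment: *hold runs once beforehand (saving the control
   word and masking the sticky status bits), *clear reruns the masking
   store when the exchange fails and the loop retries, and *update runs
   after success to restore the original control word and re-raise any
   exceptions the operation produced.  */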

/* Implement TARGET_HARD_REGNO_MODE_OK.  On Alpha, the integer registers
   can hold any mode.  The floating-point registers can hold 64-bit
   integers as well, but not smaller values.  */

static bool
alpha_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (IN_RANGE (regno, 32, 62))
    return (mode == SFmode
	    || mode == DFmode
	    || mode == DImode
	    || mode == SCmode
	    || mode == DCmode);
  return true;
}

/* Implement TARGET_MODES_TIEABLE_P.  This asymmetric test is true when
   MODE1 could be put in an FP register but MODE2 could not.  */

static bool
alpha_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return (alpha_hard_regno_mode_ok (32, mode1)
	  ? alpha_hard_regno_mode_ok (32, mode2)
	  : true);
}

/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
alpha_can_change_mode_class (machine_mode from, machine_mode to,
			     reg_class_t rclass)
{
  return (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
	  || !reg_classes_intersect_p (FLOAT_REGS, rclass));
}
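
/* E.g. SFmode and DImode are tieable (both are valid in an FP
   register), while SFmode and SImode are not: SImode cannot live in an
   FP register, so changing the mode of an FP hard register between
   them cannot be a simple rename.  */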

/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL alpha_builtin_decl
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND alpha_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS alpha_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P alpha_print_operand_punct_valid_p

/* Use 16-bit anchors.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST alpha_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST alpha_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE alpha_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE alpha_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P alpha_function_value_regno_p
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED alpha_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE alpha_secondary_memory_needed_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
  alpha_override_options_after_change

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK alpha_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P alpha_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS alpha_can_change_mode_class

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"