1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
47 #include "integrate.h"
50 #include "target-def.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
61 /* Specify which cpu to schedule for. */
62 enum processor_type alpha_tune
;
64 /* Which cpu we're generating code for. */
65 enum processor_type alpha_cpu
;
67 static const char * const alpha_cpu_name
[] =
72 /* Specify how accurate floating-point traps need to be. */
74 enum alpha_trap_precision alpha_tp
;
76 /* Specify the floating-point rounding mode. */
78 enum alpha_fp_rounding_mode alpha_fprm
;
80 /* Specify which things cause traps. */
82 enum alpha_fp_trap_mode alpha_fptm
;
84 /* Save information from a "cmpxx" operation until the branch or scc is
87 struct alpha_compare alpha_compare
;
89 /* Nonzero if inside of a function, because the Alpha asm can't
90 handle .files inside of functions. */
92 static int inside_function
= FALSE
;
94 /* The number of cycles of latency we should assume on memory reads. */
96 int alpha_memory_latency
= 3;
98 /* Whether the function needs the GP. */
100 static int alpha_function_needs_gp
;
102 /* The alias set for prologue/epilogue register save/restore. */
104 static GTY(()) alias_set_type alpha_sr_alias_set
;
106 /* The assembler name of the current function. */
108 static const char *alpha_fnname
;
110 /* The next explicit relocation sequence number. */
111 extern GTY(()) int alpha_next_sequence_number
;
112 int alpha_next_sequence_number
= 1;
114 /* The literal and gpdisp sequence numbers for this insn, as printed
115 by %# and %* respectively. */
116 extern GTY(()) int alpha_this_literal_sequence_number
;
117 extern GTY(()) int alpha_this_gpdisp_sequence_number
;
118 int alpha_this_literal_sequence_number
;
119 int alpha_this_gpdisp_sequence_number
;
121 /* Costs of various operations on the different architectures. */
123 struct alpha_rtx_cost_data
125 unsigned char fp_add
;
126 unsigned char fp_mult
;
127 unsigned char fp_div_sf
;
128 unsigned char fp_div_df
;
129 unsigned char int_mult_si
;
130 unsigned char int_mult_di
;
131 unsigned char int_shift
;
132 unsigned char int_cmov
;
133 unsigned short int_div
;
136 static struct alpha_rtx_cost_data
const alpha_rtx_cost_data
[PROCESSOR_MAX
] =
139 COSTS_N_INSNS (6), /* fp_add */
140 COSTS_N_INSNS (6), /* fp_mult */
141 COSTS_N_INSNS (34), /* fp_div_sf */
142 COSTS_N_INSNS (63), /* fp_div_df */
143 COSTS_N_INSNS (23), /* int_mult_si */
144 COSTS_N_INSNS (23), /* int_mult_di */
145 COSTS_N_INSNS (2), /* int_shift */
146 COSTS_N_INSNS (2), /* int_cmov */
147 COSTS_N_INSNS (97), /* int_div */
150 COSTS_N_INSNS (4), /* fp_add */
151 COSTS_N_INSNS (4), /* fp_mult */
152 COSTS_N_INSNS (15), /* fp_div_sf */
153 COSTS_N_INSNS (22), /* fp_div_df */
154 COSTS_N_INSNS (8), /* int_mult_si */
155 COSTS_N_INSNS (12), /* int_mult_di */
156 COSTS_N_INSNS (1) + 1, /* int_shift */
157 COSTS_N_INSNS (1), /* int_cmov */
158 COSTS_N_INSNS (83), /* int_div */
161 COSTS_N_INSNS (4), /* fp_add */
162 COSTS_N_INSNS (4), /* fp_mult */
163 COSTS_N_INSNS (12), /* fp_div_sf */
164 COSTS_N_INSNS (15), /* fp_div_df */
165 COSTS_N_INSNS (7), /* int_mult_si */
166 COSTS_N_INSNS (7), /* int_mult_di */
167 COSTS_N_INSNS (1), /* int_shift */
168 COSTS_N_INSNS (2), /* int_cmov */
169 COSTS_N_INSNS (86), /* int_div */
173 /* Similar but tuned for code size instead of execution latency. The
174 extra +N is fractional cost tuning based on latency. It's used to
175 encourage use of cheaper insns like shift, but only if there's just
178 static struct alpha_rtx_cost_data
const alpha_rtx_cost_size
=
180 COSTS_N_INSNS (1), /* fp_add */
181 COSTS_N_INSNS (1), /* fp_mult */
182 COSTS_N_INSNS (1), /* fp_div_sf */
183 COSTS_N_INSNS (1) + 1, /* fp_div_df */
184 COSTS_N_INSNS (1) + 1, /* int_mult_si */
185 COSTS_N_INSNS (1) + 2, /* int_mult_di */
186 COSTS_N_INSNS (1), /* int_shift */
187 COSTS_N_INSNS (1), /* int_cmov */
188 COSTS_N_INSNS (6), /* int_div */
191 /* Get the number of args of a function in one of two ways. */
192 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
193 #define NUM_ARGS crtl->args.info.num_args
195 #define NUM_ARGS crtl->args.info
201 /* Declarations of static functions. */
202 static struct machine_function
*alpha_init_machine_status (void);
203 static rtx
alpha_emit_xfloating_compare (enum rtx_code
*, rtx
, rtx
);
205 #if TARGET_ABI_OPEN_VMS
206 static void alpha_write_linkage (FILE *, const char *, tree
);
209 static void unicosmk_output_deferred_case_vectors (FILE *);
210 static void unicosmk_gen_dsib (unsigned long *);
211 static void unicosmk_output_ssib (FILE *, const char *);
212 static int unicosmk_need_dex (rtx
);
214 /* Implement TARGET_HANDLE_OPTION. */
217 alpha_handle_option (size_t code
, const char *arg
, int value
)
223 target_flags
|= MASK_SOFT_FP
;
227 case OPT_mieee_with_inexact
:
228 target_flags
|= MASK_IEEE_CONFORMANT
;
232 if (value
!= 16 && value
!= 32 && value
!= 64)
233 error ("bad value %qs for -mtls-size switch", arg
);
240 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
241 /* Implement TARGET_MANGLE_TYPE. */
244 alpha_mangle_type (const_tree type
)
246 if (TYPE_MAIN_VARIANT (type
) == long_double_type_node
247 && TARGET_LONG_DOUBLE_128
)
250 /* For all other types, use normal C++ mangling. */
255 /* Parse target option strings. */
258 override_options (void)
260 static const struct cpu_table
{
261 const char *const name
;
262 const enum processor_type processor
;
265 { "ev4", PROCESSOR_EV4
, 0 },
266 { "ev45", PROCESSOR_EV4
, 0 },
267 { "21064", PROCESSOR_EV4
, 0 },
268 { "ev5", PROCESSOR_EV5
, 0 },
269 { "21164", PROCESSOR_EV5
, 0 },
270 { "ev56", PROCESSOR_EV5
, MASK_BWX
},
271 { "21164a", PROCESSOR_EV5
, MASK_BWX
},
272 { "pca56", PROCESSOR_EV5
, MASK_BWX
|MASK_MAX
},
273 { "21164PC",PROCESSOR_EV5
, MASK_BWX
|MASK_MAX
},
274 { "21164pc",PROCESSOR_EV5
, MASK_BWX
|MASK_MAX
},
275 { "ev6", PROCESSOR_EV6
, MASK_BWX
|MASK_MAX
|MASK_FIX
},
276 { "21264", PROCESSOR_EV6
, MASK_BWX
|MASK_MAX
|MASK_FIX
},
277 { "ev67", PROCESSOR_EV6
, MASK_BWX
|MASK_MAX
|MASK_FIX
|MASK_CIX
},
278 { "21264a", PROCESSOR_EV6
, MASK_BWX
|MASK_MAX
|MASK_FIX
|MASK_CIX
},
284 /* Unicos/Mk doesn't have shared libraries. */
285 if (TARGET_ABI_UNICOSMK
&& flag_pic
)
287 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
288 (flag_pic
> 1) ? "PIC" : "pic");
292 /* On Unicos/Mk, the native compiler consistently generates /d suffices for
293 floating-point instructions. Make that the default for this target. */
294 if (TARGET_ABI_UNICOSMK
)
295 alpha_fprm
= ALPHA_FPRM_DYN
;
297 alpha_fprm
= ALPHA_FPRM_NORM
;
299 alpha_tp
= ALPHA_TP_PROG
;
300 alpha_fptm
= ALPHA_FPTM_N
;
302 /* We cannot use su and sui qualifiers for conversion instructions on
303 Unicos/Mk. I'm not sure if this is due to assembler or hardware
304 limitations. Right now, we issue a warning if -mieee is specified
305 and then ignore it; eventually, we should either get it right or
306 disable the option altogether. */
310 if (TARGET_ABI_UNICOSMK
)
311 warning (0, "-mieee not supported on Unicos/Mk");
314 alpha_tp
= ALPHA_TP_INSN
;
315 alpha_fptm
= ALPHA_FPTM_SU
;
319 if (TARGET_IEEE_WITH_INEXACT
)
321 if (TARGET_ABI_UNICOSMK
)
322 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
325 alpha_tp
= ALPHA_TP_INSN
;
326 alpha_fptm
= ALPHA_FPTM_SUI
;
332 if (! strcmp (alpha_tp_string
, "p"))
333 alpha_tp
= ALPHA_TP_PROG
;
334 else if (! strcmp (alpha_tp_string
, "f"))
335 alpha_tp
= ALPHA_TP_FUNC
;
336 else if (! strcmp (alpha_tp_string
, "i"))
337 alpha_tp
= ALPHA_TP_INSN
;
339 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string
);
342 if (alpha_fprm_string
)
344 if (! strcmp (alpha_fprm_string
, "n"))
345 alpha_fprm
= ALPHA_FPRM_NORM
;
346 else if (! strcmp (alpha_fprm_string
, "m"))
347 alpha_fprm
= ALPHA_FPRM_MINF
;
348 else if (! strcmp (alpha_fprm_string
, "c"))
349 alpha_fprm
= ALPHA_FPRM_CHOP
;
350 else if (! strcmp (alpha_fprm_string
,"d"))
351 alpha_fprm
= ALPHA_FPRM_DYN
;
353 error ("bad value %qs for -mfp-rounding-mode switch",
357 if (alpha_fptm_string
)
359 if (strcmp (alpha_fptm_string
, "n") == 0)
360 alpha_fptm
= ALPHA_FPTM_N
;
361 else if (strcmp (alpha_fptm_string
, "u") == 0)
362 alpha_fptm
= ALPHA_FPTM_U
;
363 else if (strcmp (alpha_fptm_string
, "su") == 0)
364 alpha_fptm
= ALPHA_FPTM_SU
;
365 else if (strcmp (alpha_fptm_string
, "sui") == 0)
366 alpha_fptm
= ALPHA_FPTM_SUI
;
368 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string
);
371 if (alpha_cpu_string
)
373 for (i
= 0; cpu_table
[i
].name
; i
++)
374 if (! strcmp (alpha_cpu_string
, cpu_table
[i
].name
))
376 alpha_tune
= alpha_cpu
= cpu_table
[i
].processor
;
377 target_flags
&= ~ (MASK_BWX
| MASK_MAX
| MASK_FIX
| MASK_CIX
);
378 target_flags
|= cpu_table
[i
].flags
;
381 if (! cpu_table
[i
].name
)
382 error ("bad value %qs for -mcpu switch", alpha_cpu_string
);
385 if (alpha_tune_string
)
387 for (i
= 0; cpu_table
[i
].name
; i
++)
388 if (! strcmp (alpha_tune_string
, cpu_table
[i
].name
))
390 alpha_tune
= cpu_table
[i
].processor
;
393 if (! cpu_table
[i
].name
)
394 error ("bad value %qs for -mcpu switch", alpha_tune_string
);
397 /* Do some sanity checks on the above options. */
399 if (TARGET_ABI_UNICOSMK
&& alpha_fptm
!= ALPHA_FPTM_N
)
401 warning (0, "trap mode not supported on Unicos/Mk");
402 alpha_fptm
= ALPHA_FPTM_N
;
405 if ((alpha_fptm
== ALPHA_FPTM_SU
|| alpha_fptm
== ALPHA_FPTM_SUI
)
406 && alpha_tp
!= ALPHA_TP_INSN
&& alpha_cpu
!= PROCESSOR_EV6
)
408 warning (0, "fp software completion requires -mtrap-precision=i");
409 alpha_tp
= ALPHA_TP_INSN
;
412 if (alpha_cpu
== PROCESSOR_EV6
)
414 /* Except for EV6 pass 1 (not released), we always have precise
415 arithmetic traps. Which means we can do software completion
416 without minding trap shadows. */
417 alpha_tp
= ALPHA_TP_PROG
;
420 if (TARGET_FLOAT_VAX
)
422 if (alpha_fprm
== ALPHA_FPRM_MINF
|| alpha_fprm
== ALPHA_FPRM_DYN
)
424 warning (0, "rounding mode not supported for VAX floats");
425 alpha_fprm
= ALPHA_FPRM_NORM
;
427 if (alpha_fptm
== ALPHA_FPTM_SUI
)
429 warning (0, "trap mode not supported for VAX floats");
430 alpha_fptm
= ALPHA_FPTM_SU
;
432 if (target_flags_explicit
& MASK_LONG_DOUBLE_128
)
433 warning (0, "128-bit long double not supported for VAX floats");
434 target_flags
&= ~MASK_LONG_DOUBLE_128
;
441 if (!alpha_mlat_string
)
442 alpha_mlat_string
= "L1";
444 if (ISDIGIT ((unsigned char)alpha_mlat_string
[0])
445 && (lat
= strtol (alpha_mlat_string
, &end
, 10), *end
== '\0'))
447 else if ((alpha_mlat_string
[0] == 'L' || alpha_mlat_string
[0] == 'l')
448 && ISDIGIT ((unsigned char)alpha_mlat_string
[1])
449 && alpha_mlat_string
[2] == '\0')
451 static int const cache_latency
[][4] =
453 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
454 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
455 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
458 lat
= alpha_mlat_string
[1] - '0';
459 if (lat
<= 0 || lat
> 3 || cache_latency
[alpha_tune
][lat
-1] == -1)
461 warning (0, "L%d cache latency unknown for %s",
462 lat
, alpha_cpu_name
[alpha_tune
]);
466 lat
= cache_latency
[alpha_tune
][lat
-1];
468 else if (! strcmp (alpha_mlat_string
, "main"))
470 /* Most current memories have about 370ns latency. This is
471 a reasonable guess for a fast cpu. */
476 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string
);
480 alpha_memory_latency
= lat
;
483 /* Default the definition of "small data" to 8 bytes. */
487 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
489 target_flags
|= MASK_SMALL_DATA
;
490 else if (flag_pic
== 2)
491 target_flags
&= ~MASK_SMALL_DATA
;
493 /* Align labels and loops for optimal branching. */
494 /* ??? Kludge these by not doing anything if we don't optimize and also if
495 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
496 if (optimize
> 0 && write_symbols
!= SDB_DEBUG
)
498 if (align_loops
<= 0)
500 if (align_jumps
<= 0)
503 if (align_functions
<= 0)
504 align_functions
= 16;
506 /* Acquire a unique set number for our register saves and restores. */
507 alpha_sr_alias_set
= new_alias_set ();
509 /* Register variables and functions with the garbage collector. */
511 /* Set up function hooks. */
512 init_machine_status
= alpha_init_machine_status
;
514 /* Tell the compiler when we're using VAX floating point. */
515 if (TARGET_FLOAT_VAX
)
517 REAL_MODE_FORMAT (SFmode
) = &vax_f_format
;
518 REAL_MODE_FORMAT (DFmode
) = &vax_g_format
;
519 REAL_MODE_FORMAT (TFmode
) = NULL
;
522 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
523 if (!(target_flags_explicit
& MASK_LONG_DOUBLE_128
))
524 target_flags
|= MASK_LONG_DOUBLE_128
;
527 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
528 can be optimized to ap = __builtin_next_arg (0). */
529 if (TARGET_ABI_UNICOSMK
)
530 targetm
.expand_builtin_va_start
= NULL
;
533 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
536 zap_mask (HOST_WIDE_INT value
)
540 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
542 if ((value
& 0xff) != 0 && (value
& 0xff) != 0xff)
548 /* Return true if OP is valid for a particular TLS relocation.
549 We are already guaranteed that OP is a CONST. */
552 tls_symbolic_operand_1 (rtx op
, int size
, int unspec
)
556 if (GET_CODE (op
) != UNSPEC
|| XINT (op
, 1) != unspec
)
558 op
= XVECEXP (op
, 0, 0);
560 if (GET_CODE (op
) != SYMBOL_REF
)
563 switch (SYMBOL_REF_TLS_MODEL (op
))
565 case TLS_MODEL_LOCAL_DYNAMIC
:
566 return unspec
== UNSPEC_DTPREL
&& size
== alpha_tls_size
;
567 case TLS_MODEL_INITIAL_EXEC
:
568 return unspec
== UNSPEC_TPREL
&& size
== 64;
569 case TLS_MODEL_LOCAL_EXEC
:
570 return unspec
== UNSPEC_TPREL
&& size
== alpha_tls_size
;
576 /* Used by aligned_memory_operand and unaligned_memory_operand to
577 resolve what reload is going to do with OP if it's a register. */
580 resolve_reload_operand (rtx op
)
582 if (reload_in_progress
)
585 if (GET_CODE (tmp
) == SUBREG
)
586 tmp
= SUBREG_REG (tmp
);
588 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
590 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
598 /* The scalar modes supported differs from the default check-what-c-supports
599 version in that sometimes TFmode is available even when long double
600 indicates only DFmode. On unicosmk, we have the situation that HImode
601 doesn't map to any C type, but of course we still support that. */
604 alpha_scalar_mode_supported_p (enum machine_mode mode
)
612 case TImode
: /* via optabs.c */
620 return TARGET_HAS_XFLOATING_LIBS
;
627 /* Alpha implements a couple of integer vector mode operations when
628 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
629 which allows the vectorizer to operate on e.g. move instructions,
630 or when expand_vector_operations can do something useful. */
633 alpha_vector_mode_supported_p (enum machine_mode mode
)
635 return mode
== V8QImode
|| mode
== V4HImode
|| mode
== V2SImode
;
638 /* Return 1 if this function can directly return via $26. */
643 return (! TARGET_ABI_OPEN_VMS
&& ! TARGET_ABI_UNICOSMK
645 && alpha_sa_size () == 0
646 && get_frame_size () == 0
647 && crtl
->outgoing_args_size
== 0
648 && crtl
->args
.pretend_args_size
== 0);
651 /* Return the ADDR_VEC associated with a tablejump insn. */
654 alpha_tablejump_addr_vec (rtx insn
)
658 tmp
= JUMP_LABEL (insn
);
661 tmp
= NEXT_INSN (tmp
);
665 && GET_CODE (PATTERN (tmp
)) == ADDR_DIFF_VEC
)
666 return PATTERN (tmp
);
670 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
673 alpha_tablejump_best_label (rtx insn
)
675 rtx jump_table
= alpha_tablejump_addr_vec (insn
);
676 rtx best_label
= NULL_RTX
;
678 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
679 there for edge frequency counts from profile data. */
683 int n_labels
= XVECLEN (jump_table
, 1);
687 for (i
= 0; i
< n_labels
; i
++)
691 for (j
= i
+ 1; j
< n_labels
; j
++)
692 if (XEXP (XVECEXP (jump_table
, 1, i
), 0)
693 == XEXP (XVECEXP (jump_table
, 1, j
), 0))
696 if (count
> best_count
)
697 best_count
= count
, best_label
= XVECEXP (jump_table
, 1, i
);
701 return best_label
? best_label
: const0_rtx
;
704 /* Return the TLS model to use for SYMBOL. */
706 static enum tls_model
707 tls_symbolic_operand_type (rtx symbol
)
709 enum tls_model model
;
711 if (GET_CODE (symbol
) != SYMBOL_REF
)
713 model
= SYMBOL_REF_TLS_MODEL (symbol
);
715 /* Local-exec with a 64-bit size is the same code as initial-exec. */
716 if (model
== TLS_MODEL_LOCAL_EXEC
&& alpha_tls_size
== 64)
717 model
= TLS_MODEL_INITIAL_EXEC
;
722 /* Return true if the function DECL will share the same GP as any
723 function in the current unit of translation. */
726 decl_has_samegp (const_tree decl
)
728 /* Functions that are not local can be overridden, and thus may
729 not share the same gp. */
730 if (!(*targetm
.binds_local_p
) (decl
))
733 /* If -msmall-data is in effect, assume that there is only one GP
734 for the module, and so any local symbol has this property. We
735 need explicit relocations to be able to enforce this for symbols
736 not defined in this unit of translation, however. */
737 if (TARGET_EXPLICIT_RELOCS
&& TARGET_SMALL_DATA
)
740 /* Functions that are not external are defined in this UoT. */
741 /* ??? Irritatingly, static functions not yet emitted are still
742 marked "external". Apply this to non-static functions only. */
743 return !TREE_PUBLIC (decl
) || !DECL_EXTERNAL (decl
);
746 /* Return true if EXP should be placed in the small data section. */
749 alpha_in_small_data_p (const_tree exp
)
751 /* We want to merge strings, so we never consider them small data. */
752 if (TREE_CODE (exp
) == STRING_CST
)
755 /* Functions are never in the small data area. Duh. */
756 if (TREE_CODE (exp
) == FUNCTION_DECL
)
759 if (TREE_CODE (exp
) == VAR_DECL
&& DECL_SECTION_NAME (exp
))
761 const char *section
= TREE_STRING_POINTER (DECL_SECTION_NAME (exp
));
762 if (strcmp (section
, ".sdata") == 0
763 || strcmp (section
, ".sbss") == 0)
768 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (exp
));
770 /* If this is an incomplete type with size 0, then we can't put it
771 in sdata because it might be too big when completed. */
772 if (size
> 0 && (unsigned HOST_WIDE_INT
) size
<= g_switch_value
)
779 #if TARGET_ABI_OPEN_VMS
781 alpha_linkage_symbol_p (const char *symname
)
783 int symlen
= strlen (symname
);
786 return strcmp (&symname
[symlen
- 4], "..lk") == 0;
791 #define LINKAGE_SYMBOL_REF_P(X) \
792 ((GET_CODE (X) == SYMBOL_REF \
793 && alpha_linkage_symbol_p (XSTR (X, 0))) \
794 || (GET_CODE (X) == CONST \
795 && GET_CODE (XEXP (X, 0)) == PLUS \
796 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
797 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
800 /* legitimate_address_p recognizes an RTL expression that is a valid
801 memory address for an instruction. The MODE argument is the
802 machine mode for the MEM expression that wants to use this address.
804 For Alpha, we have either a constant address or the sum of a
805 register and a constant address, or just a register. For DImode,
806 any of those forms can be surrounded with an AND that clear the
807 low-order three bits; this is an "unaligned" access. */
810 alpha_legitimate_address_p (enum machine_mode mode
, rtx x
, int strict
)
812 /* If this is an ldq_u type address, discard the outer AND. */
814 && GET_CODE (x
) == AND
815 && CONST_INT_P (XEXP (x
, 1))
816 && INTVAL (XEXP (x
, 1)) == -8)
819 /* Discard non-paradoxical subregs. */
820 if (GET_CODE (x
) == SUBREG
821 && (GET_MODE_SIZE (GET_MODE (x
))
822 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
825 /* Unadorned general registers are valid. */
828 ? STRICT_REG_OK_FOR_BASE_P (x
)
829 : NONSTRICT_REG_OK_FOR_BASE_P (x
)))
832 /* Constant addresses (i.e. +/- 32k) are valid. */
833 if (CONSTANT_ADDRESS_P (x
))
836 #if TARGET_ABI_OPEN_VMS
837 if (LINKAGE_SYMBOL_REF_P (x
))
841 /* Register plus a small constant offset is valid. */
842 if (GET_CODE (x
) == PLUS
)
844 rtx ofs
= XEXP (x
, 1);
847 /* Discard non-paradoxical subregs. */
848 if (GET_CODE (x
) == SUBREG
849 && (GET_MODE_SIZE (GET_MODE (x
))
850 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
856 && NONSTRICT_REG_OK_FP_BASE_P (x
)
857 && CONST_INT_P (ofs
))
860 ? STRICT_REG_OK_FOR_BASE_P (x
)
861 : NONSTRICT_REG_OK_FOR_BASE_P (x
))
862 && CONSTANT_ADDRESS_P (ofs
))
867 /* If we're managing explicit relocations, LO_SUM is valid, as are small
868 data symbols. Avoid explicit relocations of modes larger than word
869 mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */
870 else if (TARGET_EXPLICIT_RELOCS
871 && GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
873 if (small_symbolic_operand (x
, Pmode
))
876 if (GET_CODE (x
) == LO_SUM
)
878 rtx ofs
= XEXP (x
, 1);
881 /* Discard non-paradoxical subregs. */
882 if (GET_CODE (x
) == SUBREG
883 && (GET_MODE_SIZE (GET_MODE (x
))
884 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
887 /* Must have a valid base register. */
890 ? STRICT_REG_OK_FOR_BASE_P (x
)
891 : NONSTRICT_REG_OK_FOR_BASE_P (x
))))
894 /* The symbol must be local. */
895 if (local_symbolic_operand (ofs
, Pmode
)
896 || dtp32_symbolic_operand (ofs
, Pmode
)
897 || tp32_symbolic_operand (ofs
, Pmode
))
905 /* Build the SYMBOL_REF for __tls_get_addr. */
907 static GTY(()) rtx tls_get_addr_libfunc
;
910 get_tls_get_addr (void)
912 if (!tls_get_addr_libfunc
)
913 tls_get_addr_libfunc
= init_one_libfunc ("__tls_get_addr");
914 return tls_get_addr_libfunc
;
917 /* Try machine-dependent ways of modifying an illegitimate address
918 to be legitimate. If we find one, return the new, valid address. */
921 alpha_legitimize_address (rtx x
, rtx scratch
, enum machine_mode mode
)
923 HOST_WIDE_INT addend
;
925 /* If the address is (plus reg const_int) and the CONST_INT is not a
926 valid offset, compute the high part of the constant and add it to
927 the register. Then our address is (plus temp low-part-const). */
928 if (GET_CODE (x
) == PLUS
929 && REG_P (XEXP (x
, 0))
930 && CONST_INT_P (XEXP (x
, 1))
931 && ! CONSTANT_ADDRESS_P (XEXP (x
, 1)))
933 addend
= INTVAL (XEXP (x
, 1));
938 /* If the address is (const (plus FOO const_int)), find the low-order
939 part of the CONST_INT. Then load FOO plus any high-order part of the
940 CONST_INT into a register. Our address is (plus reg low-part-const).
941 This is done to reduce the number of GOT entries. */
942 if (can_create_pseudo_p ()
943 && GET_CODE (x
) == CONST
944 && GET_CODE (XEXP (x
, 0)) == PLUS
945 && CONST_INT_P (XEXP (XEXP (x
, 0), 1)))
947 addend
= INTVAL (XEXP (XEXP (x
, 0), 1));
948 x
= force_reg (Pmode
, XEXP (XEXP (x
, 0), 0));
952 /* If we have a (plus reg const), emit the load as in (2), then add
953 the two registers, and finally generate (plus reg low-part-const) as
955 if (can_create_pseudo_p ()
956 && GET_CODE (x
) == PLUS
957 && REG_P (XEXP (x
, 0))
958 && GET_CODE (XEXP (x
, 1)) == CONST
959 && GET_CODE (XEXP (XEXP (x
, 1), 0)) == PLUS
960 && CONST_INT_P (XEXP (XEXP (XEXP (x
, 1), 0), 1)))
962 addend
= INTVAL (XEXP (XEXP (XEXP (x
, 1), 0), 1));
963 x
= expand_simple_binop (Pmode
, PLUS
, XEXP (x
, 0),
964 XEXP (XEXP (XEXP (x
, 1), 0), 0),
965 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
969 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
970 Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
971 around +/- 32k offset. */
972 if (TARGET_EXPLICIT_RELOCS
973 && GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
974 && symbolic_operand (x
, Pmode
))
976 rtx r0
, r16
, eqv
, tga
, tp
, insn
, dest
, seq
;
978 switch (tls_symbolic_operand_type (x
))
983 case TLS_MODEL_GLOBAL_DYNAMIC
:
986 r0
= gen_rtx_REG (Pmode
, 0);
987 r16
= gen_rtx_REG (Pmode
, 16);
988 tga
= get_tls_get_addr ();
989 dest
= gen_reg_rtx (Pmode
);
990 seq
= GEN_INT (alpha_next_sequence_number
++);
992 emit_insn (gen_movdi_er_tlsgd (r16
, pic_offset_table_rtx
, x
, seq
));
993 insn
= gen_call_value_osf_tlsgd (r0
, tga
, seq
);
994 insn
= emit_call_insn (insn
);
995 RTL_CONST_CALL_P (insn
) = 1;
996 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), r16
);
1001 emit_libcall_block (insn
, dest
, r0
, x
);
1004 case TLS_MODEL_LOCAL_DYNAMIC
:
1007 r0
= gen_rtx_REG (Pmode
, 0);
1008 r16
= gen_rtx_REG (Pmode
, 16);
1009 tga
= get_tls_get_addr ();
1010 scratch
= gen_reg_rtx (Pmode
);
1011 seq
= GEN_INT (alpha_next_sequence_number
++);
1013 emit_insn (gen_movdi_er_tlsldm (r16
, pic_offset_table_rtx
, seq
));
1014 insn
= gen_call_value_osf_tlsldm (r0
, tga
, seq
);
1015 insn
= emit_call_insn (insn
);
1016 RTL_CONST_CALL_P (insn
) = 1;
1017 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), r16
);
1019 insn
= get_insns ();
1022 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
1023 UNSPEC_TLSLDM_CALL
);
1024 emit_libcall_block (insn
, scratch
, r0
, eqv
);
1026 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_DTPREL
);
1027 eqv
= gen_rtx_CONST (Pmode
, eqv
);
1029 if (alpha_tls_size
== 64)
1031 dest
= gen_reg_rtx (Pmode
);
1032 emit_insn (gen_rtx_SET (VOIDmode
, dest
, eqv
));
1033 emit_insn (gen_adddi3 (dest
, dest
, scratch
));
1036 if (alpha_tls_size
== 32)
1038 insn
= gen_rtx_HIGH (Pmode
, eqv
);
1039 insn
= gen_rtx_PLUS (Pmode
, scratch
, insn
);
1040 scratch
= gen_reg_rtx (Pmode
);
1041 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, insn
));
1043 return gen_rtx_LO_SUM (Pmode
, scratch
, eqv
);
1045 case TLS_MODEL_INITIAL_EXEC
:
1046 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_TPREL
);
1047 eqv
= gen_rtx_CONST (Pmode
, eqv
);
1048 tp
= gen_reg_rtx (Pmode
);
1049 scratch
= gen_reg_rtx (Pmode
);
1050 dest
= gen_reg_rtx (Pmode
);
1052 emit_insn (gen_load_tp (tp
));
1053 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, eqv
));
1054 emit_insn (gen_adddi3 (dest
, tp
, scratch
));
1057 case TLS_MODEL_LOCAL_EXEC
:
1058 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_TPREL
);
1059 eqv
= gen_rtx_CONST (Pmode
, eqv
);
1060 tp
= gen_reg_rtx (Pmode
);
1062 emit_insn (gen_load_tp (tp
));
1063 if (alpha_tls_size
== 32)
1065 insn
= gen_rtx_HIGH (Pmode
, eqv
);
1066 insn
= gen_rtx_PLUS (Pmode
, tp
, insn
);
1067 tp
= gen_reg_rtx (Pmode
);
1068 emit_insn (gen_rtx_SET (VOIDmode
, tp
, insn
));
1070 return gen_rtx_LO_SUM (Pmode
, tp
, eqv
);
1076 if (local_symbolic_operand (x
, Pmode
))
1078 if (small_symbolic_operand (x
, Pmode
))
1082 if (can_create_pseudo_p ())
1083 scratch
= gen_reg_rtx (Pmode
);
1084 emit_insn (gen_rtx_SET (VOIDmode
, scratch
,
1085 gen_rtx_HIGH (Pmode
, x
)));
1086 return gen_rtx_LO_SUM (Pmode
, scratch
, x
);
1095 HOST_WIDE_INT low
, high
;
1097 low
= ((addend
& 0xffff) ^ 0x8000) - 0x8000;
1099 high
= ((addend
& 0xffffffff) ^ 0x80000000) - 0x80000000;
1103 x
= expand_simple_binop (Pmode
, PLUS
, x
, GEN_INT (addend
),
1104 (!can_create_pseudo_p () ? scratch
: NULL_RTX
),
1105 1, OPTAB_LIB_WIDEN
);
1107 x
= expand_simple_binop (Pmode
, PLUS
, x
, GEN_INT (high
),
1108 (!can_create_pseudo_p () ? scratch
: NULL_RTX
),
1109 1, OPTAB_LIB_WIDEN
);
1111 return plus_constant (x
, low
);
1115 /* Primarily this is required for TLS symbols, but given that our move
1116 patterns *ought* to be able to handle any symbol at any time, we
1117 should never be spilling symbolic operands to the constant pool, ever. */
1120 alpha_cannot_force_const_mem (rtx x
)
1122 enum rtx_code code
= GET_CODE (x
);
1123 return code
== SYMBOL_REF
|| code
== LABEL_REF
|| code
== CONST
;
1126 /* We do not allow indirect calls to be optimized into sibling calls, nor
1127 can we allow a call to a function with a different GP to be optimized
1131 alpha_function_ok_for_sibcall (tree decl
, tree exp ATTRIBUTE_UNUSED
)
1133 /* Can't do indirect tail calls, since we don't know if the target
1134 uses the same GP. */
1138 /* Otherwise, we can make a tail call if the target function shares
1140 return decl_has_samegp (decl
);
1144 some_small_symbolic_operand_int (rtx
*px
, void *data ATTRIBUTE_UNUSED
)
1148 /* Don't re-split. */
1149 if (GET_CODE (x
) == LO_SUM
)
1152 return small_symbolic_operand (x
, Pmode
) != 0;
1156 split_small_symbolic_operand_1 (rtx
*px
, void *data ATTRIBUTE_UNUSED
)
1160 /* Don't re-split. */
1161 if (GET_CODE (x
) == LO_SUM
)
1164 if (small_symbolic_operand (x
, Pmode
))
1166 x
= gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
, x
);
1175 split_small_symbolic_operand (rtx x
)
1178 for_each_rtx (&x
, split_small_symbolic_operand_1
, NULL
);
1182 /* Indicate that INSN cannot be duplicated. This is true for any insn
1183 that we've marked with gpdisp relocs, since those have to stay in
1184 1-1 correspondence with one another.
1186 Technically we could copy them if we could set up a mapping from one
1187 sequence number to another, across the set of insns to be duplicated.
1188 This seems overly complicated and error-prone since interblock motion
1189 from sched-ebb could move one of the pair of insns to a different block.
1191 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1192 then they'll be in a different block from their ldgp. Which could lead
1193 the bb reorder code to think that it would be ok to copy just the block
1194 containing the call and branch to the block containing the ldgp. */
1197 alpha_cannot_copy_insn_p (rtx insn
)
1199 if (!reload_completed
|| !TARGET_EXPLICIT_RELOCS
)
1201 if (recog_memoized (insn
) >= 0)
1202 return get_attr_cannot_copy (insn
);
1208 /* Try a machine-dependent way of reloading an illegitimate address
1209 operand. If we find one, push the reload and return the new rtx. */
1212 alpha_legitimize_reload_address (rtx x
,
1213 enum machine_mode mode ATTRIBUTE_UNUSED
,
1214 int opnum
, int type
,
1215 int ind_levels ATTRIBUTE_UNUSED
)
1217 /* We must recognize output that we have already generated ourselves. */
1218 if (GET_CODE (x
) == PLUS
1219 && GET_CODE (XEXP (x
, 0)) == PLUS
1220 && REG_P (XEXP (XEXP (x
, 0), 0))
1221 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
1222 && CONST_INT_P (XEXP (x
, 1)))
1224 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
1225 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
1230 /* We wish to handle large displacements off a base register by
1231 splitting the addend across an ldah and the mem insn. This
1232 cuts number of extra insns needed from 3 to 1. */
1233 if (GET_CODE (x
) == PLUS
1234 && REG_P (XEXP (x
, 0))
1235 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
1236 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x
, 0)))
1237 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1239 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
1240 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
1242 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1244 /* Check for 32-bit overflow. */
1245 if (high
+ low
!= val
)
1248 /* Reload the high part into a base reg; leave the low part
1249 in the mem directly. */
1250 x
= gen_rtx_PLUS (GET_MODE (x
),
1251 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
1255 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
1256 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
1264 /* Compute a (partial) cost for rtx X. Return true if the complete
1265 cost has been computed, and false if subexpressions should be
1266 scanned. In either case, *TOTAL contains the cost result. */
1269 alpha_rtx_costs (rtx x
, int code
, int outer_code
, int *total
,
1272 enum machine_mode mode
= GET_MODE (x
);
1273 bool float_mode_p
= FLOAT_MODE_P (mode
);
1274 const struct alpha_rtx_cost_data
*cost_data
;
1277 cost_data
= &alpha_rtx_cost_size
;
1279 cost_data
= &alpha_rtx_cost_data
[alpha_tune
];
1284 /* If this is an 8-bit constant, return zero since it can be used
1285 nearly anywhere with no cost. If it is a valid operand for an
1286 ADD or AND, likewise return 0 if we know it will be used in that
1287 context. Otherwise, return 2 since it might be used there later.
1288 All other constants take at least two insns. */
1289 if (INTVAL (x
) >= 0 && INTVAL (x
) < 256)
1297 if (x
== CONST0_RTX (mode
))
1299 else if ((outer_code
== PLUS
&& add_operand (x
, VOIDmode
))
1300 || (outer_code
== AND
&& and_operand (x
, VOIDmode
)))
1302 else if (add_operand (x
, VOIDmode
) || and_operand (x
, VOIDmode
))
1305 *total
= COSTS_N_INSNS (2);
1311 if (TARGET_EXPLICIT_RELOCS
&& small_symbolic_operand (x
, VOIDmode
))
1312 *total
= COSTS_N_INSNS (outer_code
!= MEM
);
1313 else if (TARGET_EXPLICIT_RELOCS
&& local_symbolic_operand (x
, VOIDmode
))
1314 *total
= COSTS_N_INSNS (1 + (outer_code
!= MEM
));
1315 else if (tls_symbolic_operand_type (x
))
1316 /* Estimate of cost for call_pal rduniq. */
1317 /* ??? How many insns do we emit here? More than one... */
1318 *total
= COSTS_N_INSNS (15);
1320 /* Otherwise we do a load from the GOT. */
1321 *total
= COSTS_N_INSNS (!speed
? 1 : alpha_memory_latency
);
1325 /* This is effectively an add_operand. */
1332 *total
= cost_data
->fp_add
;
1333 else if (GET_CODE (XEXP (x
, 0)) == MULT
1334 && const48_operand (XEXP (XEXP (x
, 0), 1), VOIDmode
))
1336 *total
= (rtx_cost (XEXP (XEXP (x
, 0), 0), outer_code
, speed
)
1337 + rtx_cost (XEXP (x
, 1), outer_code
, speed
) + COSTS_N_INSNS (1));
1344 *total
= cost_data
->fp_mult
;
1345 else if (mode
== DImode
)
1346 *total
= cost_data
->int_mult_di
;
1348 *total
= cost_data
->int_mult_si
;
1352 if (CONST_INT_P (XEXP (x
, 1))
1353 && INTVAL (XEXP (x
, 1)) <= 3)
1355 *total
= COSTS_N_INSNS (1);
1362 *total
= cost_data
->int_shift
;
1367 *total
= cost_data
->fp_add
;
1369 *total
= cost_data
->int_cmov
;
1377 *total
= cost_data
->int_div
;
1378 else if (mode
== SFmode
)
1379 *total
= cost_data
->fp_div_sf
;
1381 *total
= cost_data
->fp_div_df
;
1385 *total
= COSTS_N_INSNS (!speed
? 1 : alpha_memory_latency
);
1391 *total
= COSTS_N_INSNS (1);
1399 *total
= COSTS_N_INSNS (1) + cost_data
->int_cmov
;
1405 case UNSIGNED_FLOAT
:
1408 case FLOAT_TRUNCATE
:
1409 *total
= cost_data
->fp_add
;
1413 if (MEM_P (XEXP (x
, 0)))
1416 *total
= cost_data
->fp_add
;
1424 /* REF is an alignable memory location. Place an aligned SImode
1425 reference into *PALIGNED_MEM and the number of bits to shift into
1426 *PBITNUM. SCRATCH is a free register for use in reloading out
1427 of range stack slots. */
1430 get_aligned_mem (rtx ref
, rtx
*paligned_mem
, rtx
*pbitnum
)
1433 HOST_WIDE_INT disp
, offset
;
1435 gcc_assert (MEM_P (ref
));
1437 if (reload_in_progress
1438 && ! memory_address_p (GET_MODE (ref
), XEXP (ref
, 0)))
1440 base
= find_replacement (&XEXP (ref
, 0));
1441 gcc_assert (memory_address_p (GET_MODE (ref
), base
));
1444 base
= XEXP (ref
, 0);
1446 if (GET_CODE (base
) == PLUS
)
1447 disp
= INTVAL (XEXP (base
, 1)), base
= XEXP (base
, 0);
1451 /* Find the byte offset within an aligned word. If the memory itself is
1452 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1453 will have examined the base register and determined it is aligned, and
1454 thus displacements from it are naturally alignable. */
1455 if (MEM_ALIGN (ref
) >= 32)
1460 /* Access the entire aligned word. */
1461 *paligned_mem
= widen_memory_access (ref
, SImode
, -offset
);
1463 /* Convert the byte offset within the word to a bit offset. */
1464 if (WORDS_BIG_ENDIAN
)
1465 offset
= 32 - (GET_MODE_BITSIZE (GET_MODE (ref
)) + offset
* 8);
1468 *pbitnum
= GEN_INT (offset
);
1471 /* Similar, but just get the address. Handle the two reload cases.
1472 Add EXTRA_OFFSET to the address we return. */
1475 get_unaligned_address (rtx ref
)
1478 HOST_WIDE_INT offset
= 0;
1480 gcc_assert (MEM_P (ref
));
1482 if (reload_in_progress
1483 && ! memory_address_p (GET_MODE (ref
), XEXP (ref
, 0)))
1485 base
= find_replacement (&XEXP (ref
, 0));
1487 gcc_assert (memory_address_p (GET_MODE (ref
), base
));
1490 base
= XEXP (ref
, 0);
1492 if (GET_CODE (base
) == PLUS
)
1493 offset
+= INTVAL (XEXP (base
, 1)), base
= XEXP (base
, 0);
1495 return plus_constant (base
, offset
);
1498 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1499 X is always returned in a register. */
1502 get_unaligned_offset (rtx addr
, HOST_WIDE_INT ofs
)
1504 if (GET_CODE (addr
) == PLUS
)
1506 ofs
+= INTVAL (XEXP (addr
, 1));
1507 addr
= XEXP (addr
, 0);
1510 return expand_simple_binop (Pmode
, PLUS
, addr
, GEN_INT (ofs
& 7),
1511 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
1514 /* On the Alpha, all (non-symbolic) constants except zero go into
1515 a floating-point register via memory. Note that we cannot
1516 return anything that is not a subset of RCLASS, and that some
1517 symbolic constants cannot be dropped to memory. */
1520 alpha_preferred_reload_class(rtx x
, enum reg_class rclass
)
1522 /* Zero is present in any register class. */
1523 if (x
== CONST0_RTX (GET_MODE (x
)))
1526 /* These sorts of constants we can easily drop to memory. */
1528 || GET_CODE (x
) == CONST_DOUBLE
1529 || GET_CODE (x
) == CONST_VECTOR
)
1531 if (rclass
== FLOAT_REGS
)
1533 if (rclass
== ALL_REGS
)
1534 return GENERAL_REGS
;
1538 /* All other kinds of constants should not (and in the case of HIGH
1539 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1540 secondary reload. */
1542 return (rclass
== ALL_REGS
? GENERAL_REGS
: rclass
);
1547 /* Inform reload about cases where moving X with a mode MODE to a register in
1548 RCLASS requires an extra scratch or immediate register. Return the class
1549 needed for the immediate register. */
1551 static enum reg_class
1552 alpha_secondary_reload (bool in_p
, rtx x
, enum reg_class rclass
,
1553 enum machine_mode mode
, secondary_reload_info
*sri
)
1555 /* Loading and storing HImode or QImode values to and from memory
1556 usually requires a scratch register. */
1557 if (!TARGET_BWX
&& (mode
== QImode
|| mode
== HImode
|| mode
== CQImode
))
1559 if (any_memory_operand (x
, mode
))
1563 if (!aligned_memory_operand (x
, mode
))
1564 sri
->icode
= reload_in_optab
[mode
];
1567 sri
->icode
= reload_out_optab
[mode
];
1572 /* We also cannot do integral arithmetic into FP regs, as might result
1573 from register elimination into a DImode fp register. */
1574 if (rclass
== FLOAT_REGS
)
1576 if (MEM_P (x
) && GET_CODE (XEXP (x
, 0)) == AND
)
1577 return GENERAL_REGS
;
1578 if (in_p
&& INTEGRAL_MODE_P (mode
)
1579 && !MEM_P (x
) && !REG_P (x
) && !CONST_INT_P (x
))
1580 return GENERAL_REGS
;
1586 /* Subfunction of the following function. Update the flags of any MEM
1587 found in part of X. */
1590 alpha_set_memflags_1 (rtx
*xp
, void *data
)
1592 rtx x
= *xp
, orig
= (rtx
) data
;
1597 MEM_VOLATILE_P (x
) = MEM_VOLATILE_P (orig
);
1598 MEM_IN_STRUCT_P (x
) = MEM_IN_STRUCT_P (orig
);
1599 MEM_SCALAR_P (x
) = MEM_SCALAR_P (orig
);
1600 MEM_NOTRAP_P (x
) = MEM_NOTRAP_P (orig
);
1601 MEM_READONLY_P (x
) = MEM_READONLY_P (orig
);
1603 /* Sadly, we cannot use alias sets because the extra aliasing
1604 produced by the AND interferes. Given that two-byte quantities
1605 are the only thing we would be able to differentiate anyway,
1606 there does not seem to be any point in convoluting the early
1607 out of the alias check. */
1612 /* Given SEQ, which is an INSN list, look for any MEMs in either
1613 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1614 volatile flags from REF into each of the MEMs found. If REF is not
1615 a MEM, don't do anything. */
1618 alpha_set_memflags (rtx seq
, rtx ref
)
1625 /* This is only called from alpha.md, after having had something
1626 generated from one of the insn patterns. So if everything is
1627 zero, the pattern is already up-to-date. */
1628 if (!MEM_VOLATILE_P (ref
)
1629 && !MEM_IN_STRUCT_P (ref
)
1630 && !MEM_SCALAR_P (ref
)
1631 && !MEM_NOTRAP_P (ref
)
1632 && !MEM_READONLY_P (ref
))
1635 for (insn
= seq
; insn
; insn
= NEXT_INSN (insn
))
1637 for_each_rtx (&PATTERN (insn
), alpha_set_memflags_1
, (void *) ref
);
1642 static rtx
alpha_emit_set_const (rtx
, enum machine_mode
, HOST_WIDE_INT
,
1645 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1646 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1647 and return pc_rtx if successful. */
1650 alpha_emit_set_const_1 (rtx target
, enum machine_mode mode
,
1651 HOST_WIDE_INT c
, int n
, bool no_output
)
1653 HOST_WIDE_INT new_const
;
1655 /* Use a pseudo if highly optimizing and still generating RTL. */
1657 = (flag_expensive_optimizations
&& can_create_pseudo_p () ? 0 : target
);
1660 /* If this is a sign-extended 32-bit constant, we can do this in at most
1661 three insns, so do it if we have enough insns left. We always have
1662 a sign-extended 32-bit constant when compiling on a narrow machine. */
1664 if (HOST_BITS_PER_WIDE_INT
!= 64
1665 || c
>> 31 == -1 || c
>> 31 == 0)
1667 HOST_WIDE_INT low
= ((c
& 0xffff) ^ 0x8000) - 0x8000;
1668 HOST_WIDE_INT tmp1
= c
- low
;
1669 HOST_WIDE_INT high
= (((tmp1
>> 16) & 0xffff) ^ 0x8000) - 0x8000;
1670 HOST_WIDE_INT extra
= 0;
1672 /* If HIGH will be interpreted as negative but the constant is
1673 positive, we must adjust it to do two ldha insns. */
1675 if ((high
& 0x8000) != 0 && c
>= 0)
1679 high
= ((tmp1
>> 16) & 0xffff) - 2 * ((tmp1
>> 16) & 0x8000);
1682 if (c
== low
|| (low
== 0 && extra
== 0))
1684 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1685 but that meant that we can't handle INT_MIN on 32-bit machines
1686 (like NT/Alpha), because we recurse indefinitely through
1687 emit_move_insn to gen_movdi. So instead, since we know exactly
1688 what we want, create it explicitly. */
1693 target
= gen_reg_rtx (mode
);
1694 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (c
)));
1697 else if (n
>= 2 + (extra
!= 0))
1701 if (!can_create_pseudo_p ())
1703 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (high
<< 16)));
1707 temp
= copy_to_suggested_reg (GEN_INT (high
<< 16),
1710 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1711 This means that if we go through expand_binop, we'll try to
1712 generate extensions, etc, which will require new pseudos, which
1713 will fail during some split phases. The SImode add patterns
1714 still exist, but are not named. So build the insns by hand. */
1719 subtarget
= gen_reg_rtx (mode
);
1720 insn
= gen_rtx_PLUS (mode
, temp
, GEN_INT (extra
<< 16));
1721 insn
= gen_rtx_SET (VOIDmode
, subtarget
, insn
);
1727 target
= gen_reg_rtx (mode
);
1728 insn
= gen_rtx_PLUS (mode
, temp
, GEN_INT (low
));
1729 insn
= gen_rtx_SET (VOIDmode
, target
, insn
);
1735 /* If we couldn't do it that way, try some other methods. But if we have
1736 no instructions left, don't bother. Likewise, if this is SImode and
1737 we can't make pseudos, we can't do anything since the expand_binop
1738 and expand_unop calls will widen and try to make pseudos. */
1740 if (n
== 1 || (mode
== SImode
&& !can_create_pseudo_p ()))
1743 /* Next, see if we can load a related constant and then shift and possibly
1744 negate it to get the constant we want. Try this once each increasing
1745 numbers of insns. */
1747 for (i
= 1; i
< n
; i
++)
1749 /* First, see if minus some low bits, we've an easy load of
1752 new_const
= ((c
& 0xffff) ^ 0x8000) - 0x8000;
1755 temp
= alpha_emit_set_const (subtarget
, mode
, c
- new_const
, i
, no_output
);
1760 return expand_binop (mode
, add_optab
, temp
, GEN_INT (new_const
),
1761 target
, 0, OPTAB_WIDEN
);
1765 /* Next try complementing. */
1766 temp
= alpha_emit_set_const (subtarget
, mode
, ~c
, i
, no_output
);
1771 return expand_unop (mode
, one_cmpl_optab
, temp
, target
, 0);
1774 /* Next try to form a constant and do a left shift. We can do this
1775 if some low-order bits are zero; the exact_log2 call below tells
1776 us that information. The bits we are shifting out could be any
1777 value, but here we'll just try the 0- and sign-extended forms of
1778 the constant. To try to increase the chance of having the same
1779 constant in more than one insn, start at the highest number of
1780 bits to shift, but try all possibilities in case a ZAPNOT will
1783 bits
= exact_log2 (c
& -c
);
1785 for (; bits
> 0; bits
--)
1787 new_const
= c
>> bits
;
1788 temp
= alpha_emit_set_const (subtarget
, mode
, new_const
, i
, no_output
);
1791 new_const
= (unsigned HOST_WIDE_INT
)c
>> bits
;
1792 temp
= alpha_emit_set_const (subtarget
, mode
, new_const
,
1799 return expand_binop (mode
, ashl_optab
, temp
, GEN_INT (bits
),
1800 target
, 0, OPTAB_WIDEN
);
1804 /* Now try high-order zero bits. Here we try the shifted-in bits as
1805 all zero and all ones. Be careful to avoid shifting outside the
1806 mode and to avoid shifting outside the host wide int size. */
1807 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1808 confuse the recursive call and set all of the high 32 bits. */
1810 bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
1811 - floor_log2 (c
) - 1 - (HOST_BITS_PER_WIDE_INT
< 64));
1813 for (; bits
> 0; bits
--)
1815 new_const
= c
<< bits
;
1816 temp
= alpha_emit_set_const (subtarget
, mode
, new_const
, i
, no_output
);
1819 new_const
= (c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1);
1820 temp
= alpha_emit_set_const (subtarget
, mode
, new_const
,
1827 return expand_binop (mode
, lshr_optab
, temp
, GEN_INT (bits
),
1828 target
, 1, OPTAB_WIDEN
);
1832 /* Now try high-order 1 bits. We get that with a sign-extension.
1833 But one bit isn't enough here. Be careful to avoid shifting outside
1834 the mode and to avoid shifting outside the host wide int size. */
1836 bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
1837 - floor_log2 (~ c
) - 2);
1839 for (; bits
> 0; bits
--)
1841 new_const
= c
<< bits
;
1842 temp
= alpha_emit_set_const (subtarget
, mode
, new_const
, i
, no_output
);
1845 new_const
= (c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1);
1846 temp
= alpha_emit_set_const (subtarget
, mode
, new_const
,
1853 return expand_binop (mode
, ashr_optab
, temp
, GEN_INT (bits
),
1854 target
, 0, OPTAB_WIDEN
);
1859 #if HOST_BITS_PER_WIDE_INT == 64
1860 /* Finally, see if can load a value into the target that is the same as the
1861 constant except that all bytes that are 0 are changed to be 0xff. If we
1862 can, then we can do a ZAPNOT to obtain the desired constant. */
1865 for (i
= 0; i
< 64; i
+= 8)
1866 if ((new_const
& ((HOST_WIDE_INT
) 0xff << i
)) == 0)
1867 new_const
|= (HOST_WIDE_INT
) 0xff << i
;
1869 /* We are only called for SImode and DImode. If this is SImode, ensure that
1870 we are sign extended to a full word. */
1873 new_const
= ((new_const
& 0xffffffff) ^ 0x80000000) - 0x80000000;
1877 temp
= alpha_emit_set_const (subtarget
, mode
, new_const
, n
- 1, no_output
);
1882 return expand_binop (mode
, and_optab
, temp
, GEN_INT (c
| ~ new_const
),
1883 target
, 0, OPTAB_WIDEN
);
1891 /* Try to output insns to set TARGET equal to the constant C if it can be
1892 done in less than N insns. Do all computations in MODE. Returns the place
1893 where the output has been placed if it can be done and the insns have been
1894 emitted. If it would take more than N insns, zero is returned and no
1895 insns and emitted. */
1898 alpha_emit_set_const (rtx target
, enum machine_mode mode
,
1899 HOST_WIDE_INT c
, int n
, bool no_output
)
1901 enum machine_mode orig_mode
= mode
;
1902 rtx orig_target
= target
;
1906 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1907 can't load this constant in one insn, do this in DImode. */
1908 if (!can_create_pseudo_p () && mode
== SImode
1909 && REG_P (target
) && REGNO (target
) < FIRST_PSEUDO_REGISTER
)
1911 result
= alpha_emit_set_const_1 (target
, mode
, c
, 1, no_output
);
1915 target
= no_output
? NULL
: gen_lowpart (DImode
, target
);
1918 else if (mode
== V8QImode
|| mode
== V4HImode
|| mode
== V2SImode
)
1920 target
= no_output
? NULL
: gen_lowpart (DImode
, target
);
1924 /* Try 1 insn, then 2, then up to N. */
1925 for (i
= 1; i
<= n
; i
++)
1927 result
= alpha_emit_set_const_1 (target
, mode
, c
, i
, no_output
);
1935 insn
= get_last_insn ();
1936 set
= single_set (insn
);
1937 if (! CONSTANT_P (SET_SRC (set
)))
1938 set_unique_reg_note (get_last_insn (), REG_EQUAL
, GEN_INT (c
));
1943 /* Allow for the case where we changed the mode of TARGET. */
1946 if (result
== target
)
1947 result
= orig_target
;
1948 else if (mode
!= orig_mode
)
1949 result
= gen_lowpart (orig_mode
, result
);
1955 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1956 fall back to a straight forward decomposition. We do this to avoid
1957 exponential run times encountered when looking for longer sequences
1958 with alpha_emit_set_const. */
1961 alpha_emit_set_long_const (rtx target
, HOST_WIDE_INT c1
, HOST_WIDE_INT c2
)
1963 HOST_WIDE_INT d1
, d2
, d3
, d4
;
1965 /* Decompose the entire word */
1966 #if HOST_BITS_PER_WIDE_INT >= 64
1967 gcc_assert (c2
== -(c1
< 0));
1968 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
1970 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
1971 c1
= (c1
- d2
) >> 32;
1972 d3
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
1974 d4
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
1975 gcc_assert (c1
== d4
);
1977 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
1979 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
1980 gcc_assert (c1
== d2
);
1982 d3
= ((c2
& 0xffff) ^ 0x8000) - 0x8000;
1984 d4
= ((c2
& 0xffffffff) ^ 0x80000000) - 0x80000000;
1985 gcc_assert (c2
== d4
);
1988 /* Construct the high word */
1991 emit_move_insn (target
, GEN_INT (d4
));
1993 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d3
)));
1996 emit_move_insn (target
, GEN_INT (d3
));
1998 /* Shift it into place */
1999 emit_move_insn (target
, gen_rtx_ASHIFT (DImode
, target
, GEN_INT (32)));
2001 /* Add in the low bits. */
2003 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d2
)));
2005 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d1
)));
2010 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2014 alpha_extract_integer (rtx x
, HOST_WIDE_INT
*p0
, HOST_WIDE_INT
*p1
)
2016 HOST_WIDE_INT i0
, i1
;
2018 if (GET_CODE (x
) == CONST_VECTOR
)
2019 x
= simplify_subreg (DImode
, x
, GET_MODE (x
), 0);
2022 if (CONST_INT_P (x
))
2027 else if (HOST_BITS_PER_WIDE_INT
>= 64)
2029 i0
= CONST_DOUBLE_LOW (x
);
2034 i0
= CONST_DOUBLE_LOW (x
);
2035 i1
= CONST_DOUBLE_HIGH (x
);
2042 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2043 are willing to load the value into a register via a move pattern.
2044 Normally this is all symbolic constants, integral constants that
2045 take three or fewer instructions, and floating-point zero. */
2048 alpha_legitimate_constant_p (rtx x
)
2050 enum machine_mode mode
= GET_MODE (x
);
2051 HOST_WIDE_INT i0
, i1
;
2053 switch (GET_CODE (x
))
2061 /* TLS symbols are never valid. */
2062 return SYMBOL_REF_TLS_MODEL (x
) == 0;
2065 if (x
== CONST0_RTX (mode
))
2067 if (FLOAT_MODE_P (mode
))
2072 if (x
== CONST0_RTX (mode
))
2074 if (GET_MODE_CLASS (mode
) != MODE_VECTOR_INT
)
2076 if (GET_MODE_SIZE (mode
) != 8)
2082 if (TARGET_BUILD_CONSTANTS
)
2084 alpha_extract_integer (x
, &i0
, &i1
);
2085 if (HOST_BITS_PER_WIDE_INT
>= 64 || i1
== (-i0
< 0))
2086 return alpha_emit_set_const_1 (x
, mode
, i0
, 3, true) != NULL
;
2094 /* Operand 1 is known to be a constant, and should require more than one
2095 instruction to load. Emit that multi-part load. */
2098 alpha_split_const_mov (enum machine_mode mode
, rtx
*operands
)
2100 HOST_WIDE_INT i0
, i1
;
2101 rtx temp
= NULL_RTX
;
2103 alpha_extract_integer (operands
[1], &i0
, &i1
);
2105 if (HOST_BITS_PER_WIDE_INT
>= 64 || i1
== -(i0
< 0))
2106 temp
= alpha_emit_set_const (operands
[0], mode
, i0
, 3, false);
2108 if (!temp
&& TARGET_BUILD_CONSTANTS
)
2109 temp
= alpha_emit_set_long_const (operands
[0], i0
, i1
);
2113 if (!rtx_equal_p (operands
[0], temp
))
2114 emit_move_insn (operands
[0], temp
);
2121 /* Expand a move instruction; return true if all work is done.
2122 We don't handle non-bwx subword loads here. */
2125 alpha_expand_mov (enum machine_mode mode
, rtx
*operands
)
2129 /* If the output is not a register, the input must be. */
2130 if (MEM_P (operands
[0])
2131 && ! reg_or_0_operand (operands
[1], mode
))
2132 operands
[1] = force_reg (mode
, operands
[1]);
2134 /* Allow legitimize_address to perform some simplifications. */
2135 if (mode
== Pmode
&& symbolic_operand (operands
[1], mode
))
2137 tmp
= alpha_legitimize_address (operands
[1], operands
[0], mode
);
2140 if (tmp
== operands
[0])
2147 /* Early out for non-constants and valid constants. */
2148 if (! CONSTANT_P (operands
[1]) || input_operand (operands
[1], mode
))
2151 /* Split large integers. */
2152 if (CONST_INT_P (operands
[1])
2153 || GET_CODE (operands
[1]) == CONST_DOUBLE
2154 || GET_CODE (operands
[1]) == CONST_VECTOR
)
2156 if (alpha_split_const_mov (mode
, operands
))
2160 /* Otherwise we've nothing left but to drop the thing to memory. */
2161 tmp
= force_const_mem (mode
, operands
[1]);
2163 if (tmp
== NULL_RTX
)
2166 if (reload_in_progress
)
2168 emit_move_insn (operands
[0], XEXP (tmp
, 0));
2169 operands
[1] = replace_equiv_address (tmp
, operands
[0]);
2172 operands
[1] = validize_mem (tmp
);
2176 /* Expand a non-bwx QImode or HImode move instruction;
2177 return true if all work is done. */
2180 alpha_expand_mov_nobwx (enum machine_mode mode
, rtx
*operands
)
2184 /* If the output is not a register, the input must be. */
2185 if (MEM_P (operands
[0]))
2186 operands
[1] = force_reg (mode
, operands
[1]);
2188 /* Handle four memory cases, unaligned and aligned for either the input
2189 or the output. The only case where we can be called during reload is
2190 for aligned loads; all other cases require temporaries. */
2192 if (any_memory_operand (operands
[1], mode
))
2194 if (aligned_memory_operand (operands
[1], mode
))
2196 if (reload_in_progress
)
2199 seq
= gen_reload_inqi_aligned (operands
[0], operands
[1]);
2201 seq
= gen_reload_inhi_aligned (operands
[0], operands
[1]);
2206 rtx aligned_mem
, bitnum
;
2207 rtx scratch
= gen_reg_rtx (SImode
);
2211 get_aligned_mem (operands
[1], &aligned_mem
, &bitnum
);
2213 subtarget
= operands
[0];
2214 if (REG_P (subtarget
))
2215 subtarget
= gen_lowpart (DImode
, subtarget
), copyout
= false;
2217 subtarget
= gen_reg_rtx (DImode
), copyout
= true;
2220 seq
= gen_aligned_loadqi (subtarget
, aligned_mem
,
2223 seq
= gen_aligned_loadhi (subtarget
, aligned_mem
,
2228 emit_move_insn (operands
[0], gen_lowpart (mode
, subtarget
));
2233 /* Don't pass these as parameters since that makes the generated
2234 code depend on parameter evaluation order which will cause
2235 bootstrap failures. */
2237 rtx temp1
, temp2
, subtarget
, ua
;
2240 temp1
= gen_reg_rtx (DImode
);
2241 temp2
= gen_reg_rtx (DImode
);
2243 subtarget
= operands
[0];
2244 if (REG_P (subtarget
))
2245 subtarget
= gen_lowpart (DImode
, subtarget
), copyout
= false;
2247 subtarget
= gen_reg_rtx (DImode
), copyout
= true;
2249 ua
= get_unaligned_address (operands
[1]);
2251 seq
= gen_unaligned_loadqi (subtarget
, ua
, temp1
, temp2
);
2253 seq
= gen_unaligned_loadhi (subtarget
, ua
, temp1
, temp2
);
2255 alpha_set_memflags (seq
, operands
[1]);
2259 emit_move_insn (operands
[0], gen_lowpart (mode
, subtarget
));
2264 if (any_memory_operand (operands
[0], mode
))
2266 if (aligned_memory_operand (operands
[0], mode
))
2268 rtx aligned_mem
, bitnum
;
2269 rtx temp1
= gen_reg_rtx (SImode
);
2270 rtx temp2
= gen_reg_rtx (SImode
);
2272 get_aligned_mem (operands
[0], &aligned_mem
, &bitnum
);
2274 emit_insn (gen_aligned_store (aligned_mem
, operands
[1], bitnum
,
2279 rtx temp1
= gen_reg_rtx (DImode
);
2280 rtx temp2
= gen_reg_rtx (DImode
);
2281 rtx temp3
= gen_reg_rtx (DImode
);
2282 rtx ua
= get_unaligned_address (operands
[0]);
2285 seq
= gen_unaligned_storeqi (ua
, operands
[1], temp1
, temp2
, temp3
);
2287 seq
= gen_unaligned_storehi (ua
, operands
[1], temp1
, temp2
, temp3
);
2289 alpha_set_memflags (seq
, operands
[0]);
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
        tmp = operands[0];
      else
        tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
        emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
        operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else going on, because I wound up with
   the exact same single-bit error.  There is a branch-less form of
   this same code:

        srl     $16,1,$1
        and     $16,1,$2
        cmplt   $16,0,$3
        or      $1,$2,$2
        cmovge  $16,$16,$2
        itoft   $3,$f10
        itoft   $2,$f11
        cvtqs   $f11,$f11
        adds    $f11,$f11,$f0
        fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

   LC0: .long 0,0x5f800000

        itoft   $16,$f11
        lda     $2,LC0
        cmplt   $16,0,$1
        cpyse   $f11,$f31,$f10
        cpyse   $f31,$f11,$f11
        s4addq  $1,$2,$1
        lds     $f12,0($1)
        cvtqt   $f10,$f10
        cvtqt   $f11,$f11
        addt    $f12,$f10,$f0
        addt    $f0,$f11,$f0

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */
void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
/* Generate the comparison for a conditional branch.  */

rtx
alpha_emit_conditional_branch (enum rtx_code code)
{
  enum rtx_code cmp_code, branch_code;
  enum machine_mode cmp_mode, branch_mode = VOIDmode;
  rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
  rtx tem;

  if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      alpha_compare.fp_p = 0;
    }

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares: */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (alpha_compare.fp_p)
        {
          cmp_code = swap_condition (code);
          branch_code = NE;
          tem = op0, op0 = op1, op1 = tem;
        }
      else
        {
          cmp_code = reverse_condition (code);
          branch_code = EQ;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (alpha_compare.fp_p)
    {
      cmp_mode = DFmode;
      if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
        {
          /* When we are not as concerned about non-finite values, and we
             are comparing against zero, we can branch directly.  */
          if (op1 == CONST0_RTX (DFmode))
            cmp_code = UNKNOWN, branch_code = code;
          else if (op0 == CONST0_RTX (DFmode))
            {
              /* Undo the swap we probably did just above.  */
              tem = op0, op0 = op1, op1 = tem;
              branch_code = swap_condition (cmp_code);
              cmp_code = UNKNOWN;
            }
        }
      else
        {
          /* ??? We mark the branch mode to be CCmode to prevent the
             compare and branch from being combined, since the compare
             insn follows IEEE rules that the branch does not.  */
          branch_mode = CCmode;
        }
    }
  else
    {
      cmp_mode = DImode;

      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
        {
          /* Whee.  Compare and branch against 0 directly.  */
          if (op1 == const0_rtx)
            cmp_code = UNKNOWN, branch_code = code;

          /* If the constant doesn't fit into an immediate, but can
             be generated by lda/ldah, we adjust the argument and
             compare against zero, so we can use beq/bne directly.  */
          /* ??? Don't do this when comparing against symbols, otherwise
             we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
             be declared false out of hand (at least for non-weak).  */
          else if (CONST_INT_P (op1)
                   && (code == EQ || code == NE)
                   && !(symbolic_operand (op0, VOIDmode)
                        || (REG_P (op0) && REG_POINTER (op0))))
            {
              rtx n_op1 = GEN_INT (-INTVAL (op1));

              if (! satisfies_constraint_I (op1)
                  && (satisfies_constraint_K (n_op1)
                      || satisfies_constraint_L (n_op1)))
                cmp_code = PLUS, branch_code = code, op1 = n_op1;
            }
        }

      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != UNKNOWN)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  /* Return the branch comparison.  */
  return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
}
/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return the final comparison, or NULL if we can't work.  */

rtx
alpha_emit_setcc (enum rtx_code code)
{
  enum rtx_code cmp_code;
  rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
  int fp_p = alpha_compare.fp_p;
  rtx tmp;

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  if (fp_p && GET_MODE (op0) == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      fp_p = 0;
    }

  if (fp_p && !TARGET_FIX)
    return NULL_RTX;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

  cmp_code = UNKNOWN;
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      if (fp_p)
        cmp_code = code, code = NE;
      break;

    case NE:
      if (!fp_p && op1 == const0_rtx)
        break;
      /* FALLTHRU */

    case ORDERED:
      cmp_code = reverse_condition (code);
      code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
         special patterns that recognize swapped operands.  */
      if (!fp_p && op1 == const0_rtx)
        break;
      code = swap_condition (code);
      if (fp_p)
        cmp_code = code, code = NE;
      tmp = op0, op0 = op1, op1 = tmp;
      break;

    default:
      gcc_unreachable ();
    }

  if (!fp_p)
    {
      if (!register_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != UNKNOWN)
    {
      enum machine_mode mode = fp_p ? DFmode : DImode;

      tmp = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, tmp,
                              gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));

      op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
      op1 = const0_rtx;
    }

  /* Return the setcc comparison.  */
  return gen_rtx_fmt_ee (code, DImode, op0, op1);
}
/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are nonzero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = alpha_compare.op0;
  rtx op1 = alpha_compare.op1;
  int fp_p = alpha_compare.fp_p;
  enum machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
  enum machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;
  rtx tem;

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  if (fp_p != FLOAT_MODE_P (mode))
    {
      enum rtx_code cmp_code;

      if (! TARGET_FIX)
        return 0;

      /* If we have fp<->int register move instructions, do a cmov by
         performing the comparison in fp registers, and move the
         zero/nonzero value to integer registers, where we can then
         use a normal cmov, or vice-versa.  */

      switch (code)
        {
        case EQ: case LE: case LT: case LEU: case LTU:
          /* We have these compares.  */
          cmp_code = code, code = NE;
          break;

        case NE:
          /* This must be reversed.  */
          cmp_code = EQ, code = EQ;
          break;

        case GE: case GT: case GEU: case GTU:
          /* These normally need swapping, but for integer zero we have
             special patterns that recognize swapped operands.  */
          if (!fp_p && op1 == const0_rtx)
            cmp_code = code, code = NE;
          else
            {
              cmp_code = swap_condition (code);
              code = NE;
              tem = op0, op0 = op1, op1 = tem;
            }
          break;

        default:
          gcc_unreachable ();
        }

      tem = gen_reg_rtx (cmp_op_mode);
      emit_insn (gen_rtx_SET (VOIDmode, tem,
                              gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
                                              op0, op1)));

      cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
      op0 = gen_lowpart (cmp_op_mode, tem);
      op1 = CONST0_RTX (cmp_op_mode);
      fp_p = !fp_p;
      local_fast_math = 1;
    }

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (!fp_p || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

  if (!can_create_pseudo_p ())
    return NULL_RTX;

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */
      break;

    case NE:
      /* This must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These must be swapped.  */
      if (op1 != CONST0_RTX (cmp_mode))
        {
          code = swap_condition (code);
          tem = op0, op0 = op1, op1 = tem;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (!fp_p)
    {
      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (fp_p && !local_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_op_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
}
/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */

int
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
                              rtx t_rtx, rtx f_rtx)
{
  HOST_WIDE_INT t, f, diff;
  enum machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);
  t = INTVAL (t_rtx);
  f = INTVAL (f_rtx);
  diff = t - f;

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))
    {
      code = reverse_condition (code);
      diff = t, t = f, f = diff;
      diff = t - f;
    }

  subtarget = target = dest;
  if (mode != DImode)
    {
      target = gen_lowpart (DImode, dest);
      if (can_create_pseudo_p ())
        subtarget = gen_reg_rtx (DImode);
      else
        subtarget = target;
    }
  /* Below, we must be careful to use copy_rtx on target and subtarget
     in intermediate insns, as they may be a subreg rtx, which may not
     be shared.  */

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
         viable over a longer latency cmove.  On EV5, the E0 slot is a
         scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
                            GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else if (f == 0 && t == -1)
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
    }
  else if (diff == 1 || diff == 4 || diff == 8)
    {
      rtx add_op;

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      if (diff == 1)
        emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
      else
        {
          add_op = GEN_INT (f);
          if (sext_add_operand (add_op, mode))
            {
              tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
                                  GEN_INT (diff));
              tmp = gen_rtx_PLUS (DImode, tmp, add_op);
              emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
            }
          else
            return 0;
        }
    }
  else
    return 0;

  return 1;
}
/* Look up the X_floating library function name for the given
   operation.  */

struct GTY(()) xfloating_op
{
  const enum rtx_code code;
  const char *const GTY((skip)) osf_func;
  const char *const GTY((skip)) vms_func;
  rtx libcall;
};

static GTY(()) struct xfloating_op xfloating_ops[] =
{
  { PLUS,               "_OtsAddX", "OTS$ADD_X", 0 },
  { MINUS,              "_OtsSubX", "OTS$SUB_X", 0 },
  { MULT,               "_OtsMulX", "OTS$MUL_X", 0 },
  { DIV,                "_OtsDivX", "OTS$DIV_X", 0 },
  { EQ,                 "_OtsEqlX", "OTS$EQL_X", 0 },
  { NE,                 "_OtsNeqX", "OTS$NEQ_X", 0 },
  { LT,                 "_OtsLssX", "OTS$LSS_X", 0 },
  { LE,                 "_OtsLeqX", "OTS$LEQ_X", 0 },
  { GT,                 "_OtsGtrX", "OTS$GTR_X", 0 },
  { GE,                 "_OtsGeqX", "OTS$GEQ_X", 0 },
  { FIX,                "_OtsCvtXQ", "OTS$CVTXQ", 0 },
  { FLOAT,              "_OtsCvtQX", "OTS$CVTQX", 0 },
  { UNSIGNED_FLOAT,     "_OtsCvtQUX", "OTS$CVTQUX", 0 },
  { FLOAT_EXTEND,       "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
};

static GTY(()) struct xfloating_op vax_cvt_ops[] =
{
  { FLOAT_EXTEND,       "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  { FLOAT_TRUNCATE,     "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
};

static rtx
alpha_lookup_xfloating_lib_func (enum rtx_code code)
{
  struct xfloating_op *ops = xfloating_ops;
  long n = ARRAY_SIZE (xfloating_ops);
  long i;

  gcc_assert (TARGET_HAS_XFLOATING_LIBS);

  /* How irritating.  Nothing to key off for the main table.  */
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
    {
      ops = vax_cvt_ops;
      n = ARRAY_SIZE (vax_cvt_ops);
    }

  for (i = 0; i < n; ++i, ++ops)
    if (ops->code == code)
      {
        rtx func = ops->libcall;
        if (!func)
          {
            func = init_one_libfunc (TARGET_ABI_OPEN_VMS
                                     ? ops->vms_func : ops->osf_func);
            ops->libcall = func;
          }
        return func;
      }

  gcc_unreachable ();
}
/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

static int
alpha_compute_xfloating_mode_arg (enum rtx_code code,
                                  enum alpha_fp_rounding_mode round)
{
  int mode;

  switch (round)
    {
    case ALPHA_FPRM_NORM:
      mode = 2;
      break;
    case ALPHA_FPRM_MINF:
      mode = 1;
      break;
    case ALPHA_FPRM_CHOP:
      mode = 0;
      break;
    case ALPHA_FPRM_DYN:
      mode = 4;
      break;
    default:
      gcc_unreachable ();

    /* XXX For reference, round to +inf is mode = 3.  */
    }

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
    mode |= 0x10000;

  return mode;
}
/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.
*/

static void
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
                              int noperands, rtx equiv)
{
  rtx usage = NULL_RTX, tmp, reg;
  int regno = 16, i;

  start_sequence ();

  for (i = 0; i < noperands; ++i)
    {
      switch (GET_MODE (operands[i]))
        {
        case TFmode:
          reg = gen_rtx_REG (TFmode, regno);
          regno += 2;
          break;

        case DFmode:
          reg = gen_rtx_REG (DFmode, regno + 32);
          regno += 1;
          break;

        case VOIDmode:
          gcc_assert (CONST_INT_P (operands[i]));
          /* FALLTHRU */
        case DImode:
          reg = gen_rtx_REG (DImode, regno);
          regno += 1;
          break;

        default:
          gcc_unreachable ();
        }

      emit_move_insn (reg, operands[i]);
      usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
    }

  switch (GET_MODE (target))
    {
    case TFmode:
      reg = gen_rtx_REG (TFmode, 16);
      break;
    case DFmode:
      reg = gen_rtx_REG (DFmode, 32);
      break;
    case DImode:
      reg = gen_rtx_REG (DImode, 0);
      break;
    default:
      gcc_unreachable ();
    }

  tmp = gen_rtx_MEM (QImode, func);
  tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
                                        const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  RTL_CONST_CALL_P (tmp) = 1;

  tmp = get_insns ();
  end_sequence ();

  emit_libcall_block (tmp, target, reg, equiv);
}
/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

void
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
{
  rtx func;
  int mode;
  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
                                gen_rtx_fmt_ee (code, TFmode, operands[1],
                                                operands[2]));
}
/* Emit an X_floating library function call for a comparison.  */

static rtx
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
{
  enum rtx_code cmp_code, res_code;
  rtx func, out, operands[2], note;

  /* X_floating library comparison functions return
       -1  unordered
        0  false
        1  true
     Convert the compare against the raw return value.  */

  cmp_code = *pcode;
  switch (cmp_code)
    {
    case UNORDERED:
      cmp_code = EQ;
      res_code = LT;
      break;
    case ORDERED:
      cmp_code = EQ;
      res_code = GE;
      break;
    case NE:
      res_code = NE;
      break;
    case EQ:
    case LT:
    case GT:
    case LE:
    case GE:
      res_code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  *pcode = res_code;

  func = alpha_lookup_xfloating_lib_func (cmp_code);

  operands[0] = op0;
  operands[1] = op1;
  out = gen_reg_rtx (DImode);

  /* What's actually returned is -1,0,1, not a proper boolean value,
     so use an EXPR_LIST as with a generic libcall instead of a
     comparison type expression.  */
  note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
  note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
  note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
  alpha_emit_xfloating_libcall (func, out, operands, 2, note);

  return out;
}
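
/* For example, an EQ test of two TFmode values becomes a call to
   _OtsEqlX (OTS$EQL_X on VMS) followed by an ordinary integer
   comparison of the DImode result against zero.  */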
/* Emit an X_floating library function call for a conversion.  */

void
alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
{
  int noperands = 1, mode;
  rtx out_operands[2];
  rtx func;
  enum rtx_code code = orig_code;

  if (code == UNSIGNED_FIX)
    code = FIX;

  func = alpha_lookup_xfloating_lib_func (code);

  out_operands[0] = operands[1];

  switch (code)
    {
    case FIX:
      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    case FLOAT_TRUNCATE:
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    default:
      break;
    }

  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
                                gen_rtx_fmt_e (orig_code,
                                               GET_MODE (operands[0]),
                                               operands[1]));
}
/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
   DImode moves from OP[2,3] to OP[0,1].  If FIXUP_OVERLAP is true,
   guarantee that the sequence
     set (OP[0] OP[2])
     set (OP[1] OP[3])
   is valid.  Naturally, output operand ordering is little-endian.
   This is used by *movtf_internal and *movti_internal.  */

void
alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
                        bool fixup_overlap)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      gcc_assert (operands[1] == CONST0_RTX (mode));
      operands[2] = operands[3] = const0_rtx;
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);
      break;

    default:
      gcc_unreachable ();
    }

  if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
    {
      rtx tmp;
      tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
      tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
    }
}
/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */

void
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
{
  rtx high_bit = operands[2];
  rtx scratch;
  int move;

  alpha_split_tmode_pair (operands, TFmode, false);

  /* Detect three flavors of operand overlap.  */
  move = 1;
  if (rtx_equal_p (operands[0], operands[2]))
    move = 0;
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (rtx_equal_p (operands[0], high_bit))
        move = 2;
      else
        move = -1;
    }

  if (move < 0)
    emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

  if (move > 0)
    {
      emit_move_insn (operands[0], operands[2]);
      if (move > 1)
        emit_move_insn (operands[1], scratch);
    }
}
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

           unsigned:                    signed:
   word:   ldq_u  r1,X(r11)             ldq_u  r1,X(r11)
           ldq_u  r2,X+1(r11)           ldq_u  r2,X+1(r11)
           lda    r3,X(r11)             lda    r3,X+2(r11)
           extwl  r1,r3,r1              extql  r1,r3,r1
           extwh  r2,r3,r2              extqh  r2,r3,r2
           or     r1,r2,r1              or     r1,r2,r1
                                        sra    r1,48,r1

   long:   ldq_u  r1,X(r11)             ldq_u  r1,X(r11)
           ldq_u  r2,X+3(r11)           ldq_u  r2,X+3(r11)
           lda    r3,X(r11)             lda    r3,X(r11)
           extll  r1,r3,r1              extll  r1,r3,r1
           extlh  r2,r3,r2              extlh  r2,r3,r2
           or     r1,r2,r1              addl   r1,r2,r1

   quad:   ldq_u  r1,X(r11)
           ldq_u  r2,X+7(r11)
           lda    r3,X(r11)
           extql  r1,r3,r1
           extqh  r2,r3,r2
           or     r1,r2,r1
*/
void
alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
                             HOST_WIDE_INT ofs, int sign)
{
  rtx meml, memh, addr, extl, exth, tmp, mema;
  enum machine_mode mode;

  if (TARGET_BWX && size == 2)
    {
      meml = adjust_address (mem, QImode, ofs);
      memh = adjust_address (mem, QImode, ofs+1);
      if (BYTES_BIG_ENDIAN)
        tmp = meml, meml = memh, memh = tmp;
      extl = gen_reg_rtx (DImode);
      exth = gen_reg_rtx (DImode);
      emit_insn (gen_zero_extendqidi2 (extl, meml));
      emit_insn (gen_zero_extendqidi2 (exth, memh));
      exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
                                  NULL, 1, OPTAB_LIB_WIDEN);
      addr = expand_simple_binop (DImode, IOR, extl, exth,
                                  NULL, 1, OPTAB_LIB_WIDEN);

      if (sign && GET_MODE (tgt) != HImode)
        {
          addr = gen_lowpart (HImode, addr);
          emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
        }
      else
        {
          if (GET_MODE (tgt) != DImode)
            addr = gen_lowpart (GET_MODE (tgt), addr);
          emit_move_insn (tgt, addr);
        }
      return;
    }

  meml = gen_reg_rtx (DImode);
  memh = gen_reg_rtx (DImode);
  addr = gen_reg_rtx (DImode);
  extl = gen_reg_rtx (DImode);
  exth = gen_reg_rtx (DImode);

  mema = XEXP (mem, 0);
  if (GET_CODE (mema) == LO_SUM)
    mema = force_reg (Pmode, mema);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  tmp = change_address (mem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (mema, ofs),
                                     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (meml, tmp);

  tmp = change_address (mem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (mema, ofs + size - 1),
                                     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (memh, tmp);

  if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
    {
      emit_move_insn (addr, plus_constant (mema, -1));

      emit_insn (gen_extqh_be (extl, meml, addr));
      emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));

      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
                           addr, 1, OPTAB_WIDEN);
    }
  else if (sign && size == 2)
    {
      emit_move_insn (addr, plus_constant (mema, ofs+2));

      emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
      emit_insn (gen_extqh_le (exth, memh, addr));

      /* We must use tgt here for the target.  Alpha-vms port fails if we use
         addr for the target, because addr is marked as a pointer and combine
         knows that pointers are always sign-extended 32-bit values.  */
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
                           addr, 1, OPTAB_WIDEN);
    }
  else
    {
      if (WORDS_BIG_ENDIAN)
        {
          emit_move_insn (addr, plus_constant (mema, ofs+size-1));
          switch ((int) size)
            {
            case 2:
              emit_insn (gen_extwh_be (extl, meml, addr));
              mode = HImode;
              break;

            case 4:
              emit_insn (gen_extlh_be (extl, meml, addr));
              mode = SImode;
              break;

            case 8:
              emit_insn (gen_extqh_be (extl, meml, addr));
              mode = DImode;
              break;

            default:
              gcc_unreachable ();
            }
          emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
        }
      else
        {
          emit_move_insn (addr, plus_constant (mema, ofs));
          emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
          switch ((int) size)
            {
            case 2:
              emit_insn (gen_extwh_le (exth, memh, addr));
              mode = HImode;
              break;

            case 4:
              emit_insn (gen_extlh_le (exth, memh, addr));
              mode = SImode;
              break;

            case 8:
              emit_insn (gen_extqh_le (exth, memh, addr));
              mode = DImode;
              break;

            default:
              gcc_unreachable ();
            }
        }

      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
                           gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
                           sign, OPTAB_WIDEN);
    }

  if (addr != tgt)
    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
}
/* Similarly, use ins and msk instructions to perform unaligned stores.  */

void
alpha_expand_unaligned_store (rtx dst, rtx src,
                              HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;

  if (TARGET_BWX && size == 2)
    {
      if (src != const0_rtx)
        {
          dstl = gen_lowpart (QImode, src);
          dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
                                      NULL, 1, OPTAB_LIB_WIDEN);
          dsth = gen_lowpart (QImode, dsth);
        }
      else
        dstl = dsth = const0_rtx;

      meml = adjust_address (dst, QImode, ofs);
      memh = adjust_address (dst, QImode, ofs+1);
      if (BYTES_BIG_ENDIAN)
        addr = meml, meml = memh, memh = addr;

      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
      return;
    }

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  dsta = XEXP (dst, 0);
  if (GET_CODE (dsta) == LO_SUM)
    dsta = force_reg (Pmode, dsta);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  meml = change_address (dst, DImode,
                         gen_rtx_AND (DImode,
                                      plus_constant (dsta, ofs),
                                      GEN_INT (-8)));
  set_mem_alias_set (meml, 0);

  memh = change_address (dst, DImode,
                         gen_rtx_AND (DImode,
                                      plus_constant (dsta, ofs + size - 1),
                                      GEN_INT (-8)));
  set_mem_alias_set (memh, 0);

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);
  if (WORDS_BIG_ENDIAN)
    {
      addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));

      if (src != const0_rtx)
        {
          switch ((int) size)
            {
            case 2:
              emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
              break;
            case 4:
              emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
              break;
            case 8:
              emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
              break;
            }
          emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
                                GEN_INT (size*8), addr));
        }

      switch ((int) size)
        {
        case 2:
          emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
          break;
        case 4:
          {
            rtx msk = immed_double_const (0xffffffff, 0, DImode);
            emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
            break;
          }
        case 8:
          emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
          break;
        }

      emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
    }
  else
    {
      addr = copy_addr_to_reg (plus_constant (dsta, ofs));

      if (src != CONST0_RTX (GET_MODE (src)))
        {
          emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
                                GEN_INT (size*8), addr));

          switch ((int) size)
            {
            case 2:
              emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
              break;
            case 4:
              emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
              break;
            case 8:
              emit_insn (gen_insql_le (insl, src, addr));
              break;
            }
        }

      emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

      switch ((int) size)
        {
        case 2:
          emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
          break;
        case 4:
          {
            rtx msk = immed_double_const (0xffffffff, 0, DImode);
            emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
            break;
          }
        case 8:
          emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
          break;
        }
    }

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
    }

  if (WORDS_BIG_ENDIAN)
    {
      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
    }
  else
    {
      /* Must store high before low for degenerate case of aligned.  */
      emit_move_insn (memh, dsth);
      emit_move_insn (meml, dstl);
    }
}
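
/* For reference, the longword case above expands to roughly the
   Architecture Handbook store sequence (little-endian; r4 holds the
   source data and r3 the unaligned address):

        ldq_u   r1,X(r11)
        ldq_u   r2,X+3(r11)
        lda     r3,X(r11)
        insll   r4,r3,r5
        inslh   r4,r3,r6
        mskll   r1,r3,r1
        msklh   r2,r3,r2
        or      r1,r5,r1
        or      r2,r6,r2
        stq_u   r2,X+3(r11)
        stq_u   r1,X(r11)
*/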
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning, that this speeds copying to/from aligned and unaligned
   buffers, and that it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS	8

/* Load an integral number of consecutive unaligned quadwords.  */

static void
alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
                                   HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  rtx sreg, areg, tmp, smema;
  HOST_WIDE_INT i;

  smema = XEXP (smem, 0);
  if (GET_CODE (smema) == LO_SUM)
    smema = force_reg (Pmode, smema);

  /* Generate all the tmp registers we need.  */
  for (i = 0; i < words; ++i)
    {
      data_regs[i] = out_regs[i];
      ext_tmps[i] = gen_reg_rtx (DImode);
    }
  data_regs[words] = gen_reg_rtx (DImode);

  if (ofs != 0)
    smem = adjust_address (smem, GET_MODE (smem), ofs);

  /* Load up all of the source data.  */
  for (i = 0; i < words; ++i)
    {
      tmp = change_address (smem, DImode,
                            gen_rtx_AND (DImode,
                                         plus_constant (smema, 8*i),
                                         im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (data_regs[i], tmp);
    }

  tmp = change_address (smem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (smema, 8*words - 1),
                                     im8));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (data_regs[words], tmp);

  /* Extract the half-word fragments.  Unfortunately DEC decided to make
     extxh with offset zero a noop instead of zeroing the register, so
     we must take care of that edge condition ourselves with cmov.  */

  sreg = copy_addr_to_reg (smema);
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
                       1, OPTAB_WIDEN);
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (sreg, plus_constant (sreg, 7));
  for (i = 0; i < words; ++i)
    {
      if (WORDS_BIG_ENDIAN)
        {
          emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
          emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
        }
      else
        {
          emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
          emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
        }
      emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
                              gen_rtx_IF_THEN_ELSE (DImode,
                                                    gen_rtx_EQ (DImode, areg,
                                                                const0_rtx),
                                                    const0_rtx, ext_tmps[i])));
    }

  /* Merge the half-words into whole words.  */
  for (i = 0; i < words; ++i)
    {
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
                                  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
    }
}
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

static void
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
                                    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2, dmema;
  HOST_WIDE_INT i;

  dmema = XEXP (dmem, 0);
  if (GET_CODE (dmema) == LO_SUM)
    dmema = force_reg (Pmode, dmema);

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx (DImode);
  st_tmp_1 = gen_reg_rtx (DImode);
  st_tmp_2 = gen_reg_rtx (DImode);

  if (ofs != 0)
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);

  st_addr_2 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode,
                                           plus_constant (dmema, words*8 - 1),
                                           im8));
  set_mem_alias_set (st_addr_2, 0);

  st_addr_1 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode, dmema, im8));
  set_mem_alias_set (st_addr_1, 0);

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (dmema);
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (dreg, plus_constant (dreg, 7));
  if (data_regs != NULL)
    {
      for (i = words-1; i >= 0; --i)
        {
          if (WORDS_BIG_ENDIAN)
            {
              emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
              emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
            }
          else
            {
              emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
              emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
            }
        }
      for (i = words-1; i > 0; --i)
        {
          ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
                                        ins_tmps[i-1], ins_tmps[i-1], 1,
                                        OPTAB_WIDEN);
        }
    }

  /* Split and merge the ends with the destination data.  */
  if (WORDS_BIG_ENDIAN)
    {
      emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
      emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
    }
  else
    {
      emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
      emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
    }

  if (data_regs != NULL)
    {
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
                               st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
                               st_tmp_1, 1, OPTAB_WIDEN);
    }

  /* Store it all.  */
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (st_addr_1, st_tmp_1);
  else
    emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)
    {
      rtx tmp = change_address (dmem, DImode,
                                gen_rtx_AND (DImode,
                                             plus_constant (dmema,
                                             WORDS_BIG_ENDIAN ? i*8-1 : i*8),
                                             im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
    }
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (st_addr_2, st_tmp_2);
  else
    emit_move_insn (st_addr_1, st_tmp_1);
}
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

int
alpha_expand_block_move (rtx operands[])
{
  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  rtx tmp;
  unsigned int i, words, ofs, nregs = 0;

  if (orig_bytes <= 0)
    return 1;
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (REG_P (tmp))
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && REG_P (XEXP (tmp, 0))
           && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > src_align)
        {
          if (a >= 64 && c % 8 == 0)
            src_align = 64;
          else if (a >= 32 && c % 4 == 0)
            src_align = 32;
          else if (a >= 16 && c % 2 == 0)
            src_align = 16;
        }
    }

  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && REG_P (XEXP (tmp, 0))
           && CONST_INT_P (XEXP (tmp, 1)))
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > dst_align)
        {
          if (a >= 64 && c % 8 == 0)
            dst_align = 64;
          else if (a >= 32 && c % 4 == 0)
            dst_align = 32;
          else if (a >= 16 && c % 2 == 0)
            dst_align = 16;
        }
    }

  ofs = 0;
  if (src_align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
        data_regs[nregs + i] = gen_reg_rtx (DImode);

      for (i = 0; i < words; ++i)
        emit_move_insn (data_regs[nregs + i],
                        adjust_address (orig_src, DImode, ofs + i * 8));

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (src_align >= 32 && bytes >= 4)
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
        data_regs[nregs + i] = gen_reg_rtx (SImode);

      for (i = 0; i < words; ++i)
        emit_move_insn (data_regs[nregs + i],
                        adjust_address (orig_src, SImode, ofs + i * 4));

      nregs += words;
      bytes -= words * 4;
      ofs += words * 4;
    }

  if (bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words+1; ++i)
        data_regs[nregs + i] = gen_reg_rtx (DImode);

      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
                                         words, ofs);

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (! TARGET_BWX && bytes >= 4)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (src_align >= 16)
        {
          do {
            data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
            emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
            bytes -= 2;
            ofs += 2;
          } while (bytes >= 2);
        }
      else if (! TARGET_BWX)
        {
          data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
          alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
          bytes -= 2;
          ofs += 2;
        }
    }

  while (bytes > 0)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
      bytes -= 1;
      ofs += 1;
    }

  gcc_assert (nregs <= ARRAY_SIZE (data_regs));

  /* Now save it back out again.  */

  i = 0, ofs = 0;

  /* Write out the data in whatever chunks reading the source allowed.  */
  if (dst_align >= 64)
    {
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
        {
          emit_move_insn (adjust_address (orig_dst, DImode, ofs),
                          data_regs[i]);
          ofs += 8;
          i++;
        }
    }

  if (dst_align >= 32)
    {
      /* If the source has remaining DImode regs, write them out in
         two pieces.  */
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
        {
          tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
                          gen_lowpart (SImode, data_regs[i]));
          emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
                          gen_lowpart (SImode, tmp));
          ofs += 8;
          i++;
        }

      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
        {
          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
                          data_regs[i]);
          ofs += 4;
          i++;
        }
    }

  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
    {
      /* Write out a remaining block of words using unaligned methods.  */

      for (words = 1; i + words < nregs; words++)
        if (GET_MODE (data_regs[i + words]) != DImode)
          break;

      if (words == 1)
        alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
      else
        alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
                                            words, ofs);

      i += words;
      ofs += words * 8;
    }

  /* Due to the above, this won't be aligned.  */
  /* ??? If we have more than one of these, consider constructing full
     words in registers and using alpha_expand_unaligned_store_words.  */
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
    {
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
      ofs += 4;
      i++;
    }

  if (dst_align >= 16)
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
        emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
        i++;
        ofs += 2;
      }
  else
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
        alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
        i++;
        ofs += 2;
      }

  /* The remainder must be byte copies.  */
  while (i < nregs)
    {
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
      i++;
      ofs += 1;
    }

  return 1;
}
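
/* E.g., a 12-byte copy with both pointers known to be 8-byte aligned
   is emitted as one DImode load/store pair followed by one SImode
   load/store pair; none of the unaligned machinery is needed.  */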
int
alpha_expand_block_clear (rtx operands[])
{
  rtx bytes_rtx = operands[1];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT alignofs = 0;
  rtx orig_dst = operands[0];
  rtx tmp;
  int i, words, ofs = 0;

  if (orig_bytes <= 0)
    return 1;
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for stricter alignment.  */
  tmp = XEXP (orig_dst, 0);
  if (REG_P (tmp))
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && REG_P (XEXP (tmp, 0))
           && CONST_INT_P (XEXP (tmp, 1)))
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > align)
        {
          if (a >= 64)
            align = a, alignofs = 8 - c % 8;
          else if (a >= 32)
            align = a, alignofs = 4 - c % 4;
          else if (a >= 16)
            align = a, alignofs = 2 - c % 2;
        }
    }

  /* Handle an unaligned prefix first.  */

  if (alignofs > 0)
    {
#if HOST_BITS_PER_WIDE_INT >= 64
      /* Given that alignofs is bounded by align, the only time BWX could
         generate three stores is for a 7 byte fill.  Prefer two individual
         stores over a load/mask/store sequence.  */
      if ((!TARGET_BWX || alignofs == 7)
          && align >= 32
          && !(alignofs == 4 && bytes >= 4))
        {
          enum machine_mode mode = (align >= 64 ? DImode : SImode);
          int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
          rtx mem, tmp;
          HOST_WIDE_INT mask;

          mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
          set_mem_alias_set (mem, 0);

          mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
          if (bytes < alignofs)
            {
              mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
              ofs += bytes;
              bytes = 0;
            }
          else
            {
              bytes -= alignofs;
              ofs += alignofs;
            }
          alignofs = 0;

          tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (mem, tmp);
        }
#endif

      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
        {
          emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
          bytes -= 1;
          ofs += 1;
          alignofs -= 1;
        }
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
        {
          emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
          bytes -= 2;
          ofs += 2;
          alignofs -= 2;
        }
      if (alignofs == 4 && bytes >= 4)
        {
          emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
          bytes -= 4;
          ofs += 4;
          alignofs = 0;
        }

      /* If we've not used the extra lead alignment information by now,
         we won't be able to.  Downgrade align to match what's left over.  */
      if (alignofs > 0)
        {
          alignofs = alignofs & -alignofs;
          align = MIN (align, alignofs * BITS_PER_UNIT);
        }
    }

  /* Handle a block of contiguous long-words.  */

  if (align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
        emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
                        const0_rtx);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* If the block is large and appropriately aligned, emit a single
     store followed by a sequence of stq_u insns.  */

  if (align >= 32 && bytes > 16)
    {
      rtx orig_dsta;

      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
      bytes -= 4;
      ofs += 4;

      orig_dsta = XEXP (orig_dst, 0);
      if (GET_CODE (orig_dsta) == LO_SUM)
        orig_dsta = force_reg (Pmode, orig_dsta);

      words = bytes / 8;
      for (i = 0; i < words; ++i)
        {
          rtx mem
            = change_address (orig_dst, DImode,
                              gen_rtx_AND (DImode,
                                           plus_constant (orig_dsta, ofs + i*8),
                                           GEN_INT (-8)));
          set_mem_alias_set (mem, 0);
          emit_move_insn (mem, const0_rtx);
        }

      /* Depending on the alignment, the first stq_u may have overlapped
         with the initial stl, which means that the last stq_u didn't
         write as much as it would appear.  Leave those questionable bytes
         unaccounted for.  */
      bytes -= words * 8 - 4;
      ofs += words * 8 - 4;
    }

  /* Handle a smaller block of aligned words.  */

  if ((align >= 64 && bytes == 4)
      || (align == 32 && bytes >= 4))
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
        emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
                        const0_rtx);

      bytes -= words * 4;
      ofs += words * 4;
    }

  /* An unaligned block uses stq_u stores for as many as possible.  */

  if (bytes >= 8)
    {
      words = bytes / 8;

      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* Next clean up any trailing pieces.  */

#if HOST_BITS_PER_WIDE_INT >= 64
  /* Count the number of bits in BYTES for which aligned stores could
     be emitted.  */
  words = 0;
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)
    if (bytes & i)
      words += 1;

  /* If we have appropriate alignment (and it wouldn't take too many
     instructions otherwise), mask out the bytes we need.  */
  if (TARGET_BWX ? words > 2 : bytes > 0)
    {
      if (align >= 64)
        {
          rtx mem, tmp;
          HOST_WIDE_INT mask;

          mem = adjust_address (orig_dst, DImode, ofs);
          set_mem_alias_set (mem, 0);

          mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

          tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (mem, tmp);
          return 1;
        }
      else if (align >= 32 && bytes < 4)
        {
          rtx mem, tmp;
          HOST_WIDE_INT mask;

          mem = adjust_address (orig_dst, SImode, ofs);
          set_mem_alias_set (mem, 0);

          mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

          tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (mem, tmp);
          return 1;
        }
    }
#endif

  if (!TARGET_BWX && bytes >= 4)
    {
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (align >= 16)
        {
          do {
            emit_move_insn (adjust_address (orig_dst, HImode, ofs),
                            const0_rtx);
            bytes -= 2;
            ofs += 2;
          } while (bytes >= 2);
        }
      else if (! TARGET_BWX)
        {
          alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
          bytes -= 2;
          ofs += 2;
        }
    }

  while (bytes > 0)
    {
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
      bytes -= 1;
      ofs += 1;
    }

  return 1;
}
/* Returns a mask so that zap(x, value) == x & mask.  */

rtx
alpha_expand_zap_mask (HOST_WIDE_INT value)
{
  rtx result;
  int i;

  if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      HOST_WIDE_INT mask = 0;

      for (i = 7; i >= 0; --i)
        {
          mask <<= 8;
          if (!((value >> i) & 1))
            mask |= 0xff;
        }

      result = gen_int_mode (mask, DImode);
    }
  else
    {
      HOST_WIDE_INT mask_lo = 0, mask_hi = 0;

      gcc_assert (HOST_BITS_PER_WIDE_INT == 32);

      for (i = 7; i >= 4; --i)
        {
          mask_hi <<= 8;
          if (!((value >> i) & 1))
            mask_hi |= 0xff;
        }

      for (i = 3; i >= 0; --i)
        {
          mask_lo <<= 8;
          if (!((value >> i) & 1))
            mask_lo |= 0xff;
        }

      result = immed_double_const (mask_lo, mask_hi, DImode);
    }

  return result;
}
void
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
                                   enum machine_mode mode,
                                   rtx op0, rtx op1, rtx op2)
{
  op0 = gen_lowpart (mode, op0);

  if (op1 == const0_rtx)
    op1 = CONST0_RTX (mode);
  else
    op1 = gen_lowpart (mode, op1);

  if (op2 == const0_rtx)
    op2 = CONST0_RTX (mode);
  else
    op2 = gen_lowpart (mode, op2);

  emit_insn ((*gen) (op0, op1, op2));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_load_locked_si;
  else if (mode == DImode)
    fn = gen_load_locked_di;
  emit_insn (fn (reg, mem));
}
4411 instruction in MODE. */
4414 emit_store_conditional (enum machine_mode mode
, rtx res
, rtx mem
, rtx val
)
4416 rtx (*fn
) (rtx
, rtx
, rtx
) = NULL
;
4418 fn
= gen_store_conditional_si
;
4419 else if (mode
== DImode
)
4420 fn
= gen_store_conditional_di
;
4421 emit_insn (fn (res
, mem
, val
));
/* A subroutine of the atomic operation splitters.  Emit an insxl
   instruction in MODE.  */

static rtx
emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
{
  rtx ret = gen_reg_rtx (DImode);
  rtx (*fn) (rtx, rtx, rtx);

  if (WORDS_BIG_ENDIAN)
    {
      if (mode == QImode)
        fn = gen_insbl_be;
      else
        fn = gen_inswl_be;
    }
  else
    {
      if (mode == QImode)
        fn = gen_insbl_le;
      else
        fn = gen_inswl_le;
    }
  /* The insbl and inswl patterns require a register operand.  */
  op1 = force_reg (mode, op1);
  emit_insn (fn (ret, op1, op2));

  return ret;
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  SCRATCH is
   a scratch register.  */

void
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
                       rtx before, rtx after, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));

  emit_insn (gen_memory_barrier ());

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (DImode, label);

  if (before == NULL)
    before = scratch;
  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = gen_rtx_AND (mode, before, val);
      emit_insn (gen_rtx_SET (VOIDmode, val, x));

      x = gen_rtx_NOT (mode, val);
    }
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);
  if (after)
    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_memory_barrier ());
}
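
/* Roughly, for a DImode fetch-and-add the loop emitted above is:

        mb
     1: ldq_l   before,0(mem)
        addq    before,val,scratch
        stq_c   scratch,0(mem)
        beq     scratch,1b
        mb

   where the store-conditional failure branch is marked unlikely.  */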
/* Expand a compare and swap operation.  */

void
alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
                              rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);

  emit_insn (gen_memory_barrier ());

  label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (mode, retval, mem);

  x = gen_lowpart (DImode, retval);
  if (oldval == const0_rtx)
    x = gen_rtx_NE (DImode, x, const0_rtx);
  else
    {
      x = gen_rtx_EQ (DImode, x, oldval);
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_move_insn (scratch, newval);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_memory_barrier ());
  emit_label (XEXP (label2, 0));
}
void
alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx addr, align, wdst;
  rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);

  addr = force_reg (DImode, XEXP (mem, 0));
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
                               NULL_RTX, 1, OPTAB_DIRECT);

  oldval = convert_modes (DImode, mode, oldval, 1);
  newval = emit_insxl (mode, newval, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    fn5 = gen_sync_compare_and_swapqi_1;
  else
    fn5 = gen_sync_compare_and_swaphi_1;
  emit_insn (fn5 (wdst, addr, oldval, newval, align));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
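
/* The narrow compare-and-swap operates on the containing aligned
   quadword: ADDR & -8 locates it, emit_insxl shifts NEWVAL into the
   proper byte lanes, and the *_1 splitter below extracts, compares,
   and merges the QImode or HImode field within that quadword.  */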
void
alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
                                 rtx oldval, rtx newval, rtx align,
                                 rtx scratch, rtx cond)
{
  rtx label1, label2, mem, width, mask, x;

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = 1;

  emit_insn (gen_memory_barrier ());
  label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  if (WORDS_BIG_ENDIAN)
    emit_insn (gen_extxl_be (dest, scratch, width, addr));
  else
    emit_insn (gen_extxl_le (dest, scratch, width, addr));

  if (oldval == const0_rtx)
    x = gen_rtx_NE (DImode, dest, const0_rtx);
  else
    {
      x = gen_rtx_EQ (DImode, dest, oldval);
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  if (WORDS_BIG_ENDIAN)
    emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
  else
    emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
  emit_insn (gen_iordi3 (scratch, scratch, newval));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_memory_barrier ());
  emit_label (XEXP (label2, 0));
}
/* Expand an atomic exchange operation.  */

void
alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_lowpart (DImode, scratch);

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);
  emit_move_insn (scratch, val);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_memory_barrier ());
}
void
alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx addr, align, wdst;
  rtx (*fn4) (rtx, rtx, rtx, rtx);

  /* Force the address into a register.  */
  addr = force_reg (DImode, XEXP (mem, 0));

  /* Align it to a multiple of 8.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
                               NULL_RTX, 1, OPTAB_DIRECT);

  /* Insert val into the correct byte location within the word.  */
  val = emit_insxl (mode, val, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    fn4 = gen_sync_lock_test_and_setqi_1;
  else
    fn4 = gen_sync_lock_test_and_sethi_1;
  emit_insn (fn4 (wdst, addr, val, align));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
                                  rtx val, rtx align, rtx scratch)
{
  rtx label, mem, width, mask, x;

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = 1;

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  if (WORDS_BIG_ENDIAN)
    {
      emit_insn (gen_extxl_be (dest, scratch, width, addr));
      emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
    }
  else
    {
      emit_insn (gen_extxl_le (dest, scratch, width, addr));
      emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
    }
  emit_insn (gen_iordi3 (scratch, scratch, val));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_memory_barrier ());
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type, dep_insn_type;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */
  if (REG_NOTE_KIND (link) != 0)
    return cost;

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

  /* Bring in the user-defined memory latency.  */
  if (dep_insn_type == TYPE_ILD
      || dep_insn_type == TYPE_FLD
      || dep_insn_type == TYPE_LDSYM)
    cost += alpha_memory_latency-1;

  /* Everything else handled in DFA bypasses now.  */

  return cost;
}
/* The number of instructions that can be issued per cycle.  */

static int
alpha_issue_rate (void)
{
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
}

/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.

   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
   alternative schedules.  For EV5, we can choose between E0/E1 and
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */

static int
alpha_multipass_dfa_lookahead (void)
{
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
}
/* Machine-specific function data.  */

struct GTY(()) machine_function
  /* List of call information words for calls from this function.  */
  struct rtx_def *first_ciw;
  struct rtx_def *last_ciw;

  /* List of deferred case vectors.  */
  struct rtx_def *addr_list;

  const char *some_ld_name;

  /* For TARGET_LD_BUGGY_LDGP.  */
  struct rtx_def *gp_save_rtx;

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
alpha_init_machine_status (void)
  return ((struct machine_function *)
          ggc_alloc_cleared (sizeof (struct machine_function)));
/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
  return get_hard_reg_initial_val (Pmode, REG_RA);

/* Return or create a memory slot containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

alpha_gp_save_rtx (void)
  rtx seq, m = cfun->machine->gp_save_rtx;

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      /* We used to simply emit the sequence after entry_of_function.
         However this breaks the CFG if the first instruction in the
         first block is not the NOTE_INSN_BASIC_BLOCK, for example a
         label.  Emit the sequence properly on the edge.  We are only
         invoked from dw2_build_landing_pads and finish_eh_generation
         will call commit_edge_insertions thanks to a kludge.  */
      insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));

      cfun->machine->gp_save_rtx = m;

alpha_ra_ever_killed (void)
  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return (int)df_regs_ever_live_p (REG_RA);

  push_topmost_sequence ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
/* Return the trap mode suffix applicable to the current
   instruction, or NULL.  */

get_trap_mode_suffix (void)
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);

    case TRAP_SUFFIX_NONE:

    case TRAP_SUFFIX_SU:
      if (alpha_fptm >= ALPHA_FPTM_SU)

    case TRAP_SUFFIX_SUI:
      if (alpha_fptm >= ALPHA_FPTM_SUI)

    case TRAP_SUFFIX_V_SV:
        case ALPHA_FPTM_SUI:

    case TRAP_SUFFIX_V_SV_SVI:
        case ALPHA_FPTM_SUI:

    case TRAP_SUFFIX_U_SU_SUI:
        case ALPHA_FPTM_SUI:

/* Return the rounding mode suffix applicable to the current
   instruction, or NULL.  */

get_round_mode_suffix (void)
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);

    case ROUND_SUFFIX_NONE:

    case ROUND_SUFFIX_NORMAL:
        case ALPHA_FPRM_NORM:
        case ALPHA_FPRM_MINF:
        case ALPHA_FPRM_CHOP:
        case ALPHA_FPRM_DYN:

    case ROUND_SUFFIX_C:
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some movdi_er_tlsldm pattern.  */

get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
  if (GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
      cfun->machine->some_ld_name = XSTR (x, 0);

get_some_local_dynamic_name (void)
  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;
/* Print an operand.  Recognize special options, documented below.  */

print_operand (FILE *file, rtx x, int code)
      /* Print the assembler name of the current function.  */
      assemble_name (file, alpha_fnname);

      assemble_name (file, get_some_local_dynamic_name ());

        const char *trap = get_trap_mode_suffix ();
        const char *round = get_round_mode_suffix ();

          fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
                   (trap ? trap : ""), (round ? round : ""));

      /* Generates single precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);

      /* Generates double precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);

      if (alpha_this_literal_sequence_number == 0)
        alpha_this_literal_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_literal_sequence_number);

      if (alpha_this_gpdisp_sequence_number == 0)
        alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
      if (GET_CODE (x) == HIGH)
        output_addr_const (file, XEXP (x, 0));
        output_operand_lossage ("invalid %%H value");

        if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsgd";
        else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsldm";
        else if (CONST_INT_P (x))
          lituse = "lituse_jsr";
            output_operand_lossage ("invalid %%J value");

        if (x != const0_rtx)
          fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));

#ifdef HAVE_AS_JSRDIRECT_RELOCS
        lituse = "lituse_jsrdirect";
#else
        lituse = "lituse_jsr";
#endif

        gcc_assert (INTVAL (x) != 0);
        fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      /* If this operand is the constant zero, write it as "$31".  */
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
        output_operand_lossage ("invalid %%r value");

      /* Similar, but for floating-point.  */
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
        output_operand_lossage ("invalid %%R value");

      /* Write the 1's complement of a constant.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));

      /* Write 1 << C, for a constant C.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));

      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);

      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (!CONST_INT_P (x))
        output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
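
      /* Worked example (editorial): for x = 0x9234 this prints
         (0x9234 & 0xffff) - 2 * (0x9234 & 0x8000) = 0x9234 - 0x10000
         = -28108, i.e. the low 16 bits interpreted as a signed value.  */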
      /* Write mask for ZAP insn.  */
      if (GET_CODE (x) == CONST_DOUBLE)
          HOST_WIDE_INT mask = 0;
          HOST_WIDE_INT value;

          value = CONST_DOUBLE_LOW (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;

          value = CONST_DOUBLE_HIGH (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
              mask |= (1 << (i + sizeof (int)));

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);

      else if (CONST_INT_P (x))
          HOST_WIDE_INT mask = 0, value = INTVAL (x);

          for (i = 0; i < 8; i++, value >>= 8)

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);

        output_operand_lossage ("invalid %%m value");
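
      /* Worked example (editorial): for the CONST_INT 0x00000000ffffffff
         the loop sets mask bits 0-3 (one per nonzero byte), so %m prints
         15 (0x0f), the ZAP byte-mask selecting the low four bytes.  */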
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
      if (!CONST_INT_P (x)
          || (INTVAL (x) != 8 && INTVAL (x) != 16
              && INTVAL (x) != 32 && INTVAL (x) != 64))
        output_operand_lossage ("invalid %%M value");

      fprintf (file, "%s",
               (INTVAL (x) == 8 ? "b"
                : INTVAL (x) == 16 ? "w"
                : INTVAL (x) == 32 ? "l"
                : "q"));
      /* Similar, except do it from the mask.  */
      if (CONST_INT_P (x))
          HOST_WIDE_INT value = INTVAL (x);

          if (value == 0xffff)

          if (value == 0xffffffff)

      else if (HOST_BITS_PER_WIDE_INT == 32
               && GET_CODE (x) == CONST_DOUBLE
               && CONST_DOUBLE_LOW (x) == 0xffffffff
               && CONST_DOUBLE_HIGH (x) == 0)

        output_operand_lossage ("invalid %%U value");
      /* Write the constant value divided by 8 for little-endian mode or
         (56 - value) / 8 for big-endian mode.  */

      if (!CONST_INT_P (x)
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               ? (56 - INTVAL (x)) / 8

      /* Same, except compute (64 - c) / 8 */

      if (!CONST_INT_P (x)
          && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          && (INTVAL (x) & 7) != 8)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);

      /* On Unicos/Mk systems: use a DEX expression if the symbol
         clashes with a register name.  */
          int dex = unicosmk_need_dex (x);
            fprintf (file, "DEX(%d)", dex);
            output_addr_const (file, x);
    case 'C': case 'D': case 'c': case 'd':
      /* Write out comparison name.  */
        enum rtx_code c = GET_CODE (x);

        if (!COMPARISON_P (x))
          output_operand_lossage ("invalid %%C value");

        else if (code == 'D')
          c = reverse_condition (c);
        else if (code == 'c')
          c = swap_condition (c);
        else if (code == 'd')
          c = swap_condition (reverse_condition (c));

          fprintf (file, "ule");
          fprintf (file, "ult");
        else if (c == UNORDERED)
          fprintf (file, "un");
          fprintf (file, "%s", GET_RTX_NAME (c));

      /* Write the divide or modulus operator.  */
      switch (GET_CODE (x))
          fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
          fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
          fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
          fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
          output_operand_lossage ("invalid %%E value");
      /* Write "_u" for unaligned access.  */
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
        fprintf (file, "_u");

        fprintf (file, "%s", reg_names[REGNO (x)]);
        output_address (XEXP (x, 0));
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
          switch (XINT (XEXP (x, 0), 1))
              output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
              output_operand_lossage ("unknown relocation unspec");
        output_addr_const (file, x);

      output_operand_lossage ("invalid %%xn code");
print_operand_address (FILE *file, rtx addr)
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (addr) == AND)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS
      && CONST_INT_P (XEXP (addr, 1)))
      offset = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);

  if (GET_CODE (addr) == LO_SUM)
      const char *reloc16, *reloclo;
      rtx op1 = XEXP (addr, 1);

      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
          op1 = XEXP (op1, 0);
          switch (XINT (op1, 1))
              reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
              reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
              output_operand_lossage ("unknown relocation unspec");

          output_addr_const (file, XVECEXP (op1, 0, 0));

          reloclo = "gprellow";
          output_addr_const (file, op1);

        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);

      addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
          basereg = REGNO (addr);

          basereg = subreg_regno (addr);

      fprintf (file, "($%d)\t\t!%s", basereg,
               (basereg == 29 ? reloc16 : reloclo));

  switch (GET_CODE (addr))
      basereg = REGNO (addr);

      basereg = subreg_regno (addr);

      offset = INTVAL (addr);

#if TARGET_ABI_OPEN_VMS
      fprintf (file, "%s", XSTR (addr, 0));

      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
               XSTR (XEXP (XEXP (addr, 0), 0), 0),
               INTVAL (XEXP (XEXP (addr, 0), 1)));

  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
/* Emit RTL insns to initialize the variable parts of a trampoline at
   TRAMP.  FNADDR is an RTX for the address of the function's pure
   code.  CXT is an RTX for the static chain value for the function.

   The three offset parameters are for the individual template's
   layout.  A JMPOFS < 0 indicates that the trampoline does not
   contain instructions at all.

   We assume here that a function will be called many more times than
   its address is taken (e.g., it might be passed to qsort), so we
   take the trouble to initialize the "hint" field in the JMP insn.
   Note that the hint field is PC (new) + 4 * bits 13:0.  */

alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
                             int fnofs, int cxtofs, int jmpofs)
  /* VMS really uses DImode pointers in memory at this point.  */
  enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (mode, fnaddr);
  cxt = convert_memory_address (mode, cxt);
#endif

  /* Store function address and CXT.  */
  addr = memory_address (mode, plus_constant (tramp, fnofs));
  emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
  addr = memory_address (mode, plus_constant (tramp, cxtofs));
  emit_move_insn (gen_rtx_MEM (mode, addr), cxt);

#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (init_one_libfunc ("__enable_execute_stack"),
                     0, VOIDmode, 1, tramp, Pmode);
#endif

    emit_insn (gen_imb ());
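
/* Editorial sketch, not GCC code: the hint arithmetic described above,
   for a 4-byte JMP at TRAMP+JMPOFS targeting FNADDR.  The hint is the
   word displacement from the updated PC, truncated to 14 bits.  */
#if 0
static long
jmp_hint_model (long fnaddr, long tramp, long jmpofs)
{
  return ((fnaddr - (tramp + jmpofs + 4)) >> 2) & 0x3fff;
}
#endif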
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */
function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
              int named ATTRIBUTE_UNUSED)

  /* Don't get confused and pass small structures in FP registers.  */
  if (type && AGGREGATE_TYPE_P (type))

#ifdef ENABLE_CHECKING
  /* With alpha_split_complex_arg, we shouldn't see any raw complex
     values here.  */
  gcc_assert (!COMPLEX_MODE_P (mode));
#endif

  /* Set up defaults for FP operands passed in FP registers, and
     integral operands passed in integer registers.  */
  if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)

  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
     the three platforms, so we can't avoid conditional compilation.  */
#if TARGET_ABI_OPEN_VMS
    if (mode == VOIDmode)
      return alpha_arg_info_reg_val (cum);

    num_args = cum.num_args;
        || targetm.calls.must_pass_in_stack (mode, type))
#elif TARGET_ABI_UNICOSMK

    /* If this is the last argument, generate the call info word (CIW).  */
    /* ??? We don't include the caller's line number in the CIW because
       I don't know how to determine it if debug infos are turned off.  */
    if (mode == VOIDmode)
        for (i = 0; i < cum.num_reg_words && i < 5; i++)
          if (cum.reg_args_type[i])
            lo |= (1 << (7 - i));

        if (cum.num_reg_words == 6 && cum.reg_args_type[5])

        lo |= cum.num_reg_words;

#if HOST_BITS_PER_WIDE_INT == 32
        hi = (cum.num_args << 20) | cum.num_arg_words;

        lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
           | ((HOST_WIDE_INT) cum.num_arg_words << 32);

        ciw = immed_double_const (lo, hi, DImode);

        return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
                               UNSPEC_UMK_LOAD_CIW);
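
        /* Worked example (editorial): on a 64-bit host, a call with
           num_args == 2 and num_arg_words == 3 packs the CIW as
           lo | (2 << 52) | (3 << 32), mirroring the separate hi/lo
           words used when HOST_WIDE_INT is only 32 bits wide.  */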
    size = ALPHA_ARG_SIZE (mode, type, named);
    num_args = cum.num_reg_words;
        || cum.num_reg_words + size > 6
        || targetm.calls.must_pass_in_stack (mode, type))
    else if (type && TYPE_MODE (type) == BLKmode)

        reg1 = gen_rtx_REG (DImode, num_args + 16);
        reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);

        /* The argument fits in two registers.  Note that we still need to
           reserve a register for empty structures.  */
          return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));

            reg2 = gen_rtx_REG (DImode, num_args + 17);
            reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
            return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));

#elif TARGET_ABI_OSF

      /* VOID is passed as a special flag for "last argument".  */
      if (type == void_type_node)
      else if (targetm.calls.must_pass_in_stack (mode, type))
#else
#error Unhandled ABI
#endif

  return gen_rtx_REG (mode, num_args + basereg);
alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                         enum machine_mode mode ATTRIBUTE_UNUSED,
                         tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)

#if TARGET_ABI_OPEN_VMS
  if (cum->num_args < 6
      && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - cum->num_args;
#elif TARGET_ABI_UNICOSMK
  /* Never any split arguments.  */
#elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - *cum;
#else
#error Unhandled ABI
#endif

  return words * UNITS_PER_WORD;
/* Return true if TYPE must be returned in memory, instead of in registers.  */

alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
  enum machine_mode mode = VOIDmode;

      mode = TYPE_MODE (type);

      /* All aggregates are returned in memory.  */
      if (AGGREGATE_TYPE_P (type))

  size = GET_MODE_SIZE (mode);
  switch (GET_MODE_CLASS (mode))
    case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */

    case MODE_COMPLEX_FLOAT:
      /* We judge complex floats on the size of their element,
         not the size of the whole type.  */
      size = GET_MODE_UNIT_SIZE (mode);

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:

      /* ??? We get called on all sorts of random stuff from
         aggregate_value_p.  We must return something, but it's not
         clear what's safe to return.  Pretend it's a struct I
         know nothing about.  */

  /* Otherwise types must fit in one register.  */
  return size > UNITS_PER_WORD;
/* Return true if TYPE should be passed by invisible reference.  */

alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
                         enum machine_mode mode,
                         const_tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
  return mode == TFmode || mode == TCmode;

/* Define how to find the value returned by a function.  VALTYPE is the
   data type of the value (as a tree).  If the precise function being
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
   MODE is set instead of VALTYPE for libcalls.

   On Alpha the value is found in $0 for integer functions and
   $f0 for floating-point functions.  */

function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
                enum machine_mode mode)
  unsigned int regnum, dummy;
  enum mode_class mclass;

  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));

    mode = TYPE_MODE (valtype);

  mclass = GET_MODE_CLASS (mode);
      PROMOTE_MODE (mode, dummy, valtype);

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:

    case MODE_COMPLEX_FLOAT:
        enum machine_mode cmode = GET_MODE_INNER (mode);

        return gen_rtx_PARALLEL
                 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
                 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
                                    GEN_INT (GET_MODE_SIZE (cmode)))));

  return gen_rtx_REG (mode, regnum);

/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

alpha_split_complex_arg (const_tree type)
  return TYPE_MODE (type) != TCmode;
alpha_build_builtin_va_list (void)
  tree base, ofs, space, record, type_decl;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++?  SET_IS_AGGR_TYPE (record, 1); */

  /* Dummy field to prevent alignment warnings.  */
  space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
  DECL_FIELD_CONTEXT (space) = record;
  DECL_ARTIFICIAL (space) = 1;
  DECL_IGNORED_P (space) = 1;

  ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
                    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;
  TREE_CHAIN (ofs) = space;

  base = build_decl (FIELD_DECL, get_identifier ("__base"),
                     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  TREE_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  va_list_gpr_counter_field = ofs;
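
/* Editorial sketch, not GCC code: the record laid out above corresponds
   to the C declaration below; the unnamed integer field only prevents
   alignment warnings.  */
#if 0
struct __va_list_tag
{
  char *__base;    /* start of the argument save area */
  int __offset;    /* bytes of arguments already consumed */
};
#endif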
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
   and constant additions.  */

va_list_skip_additions (tree lhs)
      enum tree_code code;

      stmt = SSA_NAME_DEF_STMT (lhs);

      if (gimple_code (stmt) == GIMPLE_PHI)

      if (!is_gimple_assign (stmt)
          || gimple_assign_lhs (stmt) != lhs)

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)

      code = gimple_assign_rhs_code (stmt);
      if (!CONVERT_EXPR_CODE_P (code)
          && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
              || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
              || !host_integerp (gimple_assign_rhs2 (stmt), 1)))

      lhs = gimple_assign_rhs1 (stmt);

/* Check if LHS = RHS statement is
   LHS = *(ap.__base + ap.__offset + cst)
   or
   LHS = *(ap.__base
           + ((ap.__offset + cst <= 47)
              ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
   If the former, indicate that GPR registers are needed,
   if the latter, indicate that FPR registers are needed.

   Also look for LHS = (*ptr).field, where ptr is one of the forms
   listed above.

   On alpha, cfun->va_list_gpr_size is used as size of the needed
   regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
   registers are needed and bit 1 set if FPR registers are needed.
   Return true if va_list references should not be scanned for the
   current statement.  */
alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
  tree base, offset, rhs;

  if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
      != GIMPLE_SINGLE_RHS)

  rhs = gimple_assign_rhs1 (stmt);
  while (handled_component_p (rhs))
    rhs = TREE_OPERAND (rhs, 0);
  if (TREE_CODE (rhs) != INDIRECT_REF
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)

  stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
      || !is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)

  base = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (base) == SSA_NAME)
      base_stmt = va_list_skip_additions (base);
          && is_gimple_assign (base_stmt)
          && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
        base = gimple_assign_rhs1 (base_stmt);

  if (TREE_CODE (base) != COMPONENT_REF
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
      base = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (base) == SSA_NAME)
          base_stmt = va_list_skip_additions (base);
              && is_gimple_assign (base_stmt)
              && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
            base = gimple_assign_rhs1 (base_stmt);

      if (TREE_CODE (base) != COMPONENT_REF
          || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))

  base = get_base_address (base);
  if (TREE_CODE (base) != VAR_DECL
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))

  offset = gimple_op (stmt, 1 + offset_arg);
  if (TREE_CODE (offset) == SSA_NAME)
      gimple offset_stmt = va_list_skip_additions (offset);

          && gimple_code (offset_stmt) == GIMPLE_PHI)
          gimple arg1_stmt, arg2_stmt;

          enum tree_code code1, code2;

          if (gimple_phi_num_args (offset_stmt) != 2)

          arg1_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
          arg2_stmt
            = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
          if (arg1_stmt == NULL
              || !is_gimple_assign (arg1_stmt)
              || arg2_stmt == NULL
              || !is_gimple_assign (arg2_stmt))

          code1 = gimple_assign_rhs_code (arg1_stmt);
          code2 = gimple_assign_rhs_code (arg2_stmt);
          if (code1 == COMPONENT_REF
              && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
          else if (code2 == COMPONENT_REF
                   && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
              gimple tem = arg1_stmt;

              arg1_stmt = arg2_stmt;

          if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))

          sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
          if (code2 == MINUS_EXPR)
          if (sub < -48 || sub > -32)

          arg1 = gimple_assign_rhs1 (arg1_stmt);
          arg2 = gimple_assign_rhs1 (arg2_stmt);
          if (TREE_CODE (arg2) == SSA_NAME)
              arg2_stmt = va_list_skip_additions (arg2);
              if (arg2_stmt == NULL
                  || !is_gimple_assign (arg2_stmt)
                  || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
              arg2 = gimple_assign_rhs1 (arg2_stmt);

          if (TREE_CODE (arg1) != COMPONENT_REF
              || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
              || get_base_address (arg1) != base)

          /* Need floating point regs.  */
          cfun->va_list_fpr_size |= 2;

               && is_gimple_assign (offset_stmt)
               && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
        offset = gimple_assign_rhs1 (offset_stmt);

  if (TREE_CODE (offset) != COMPONENT_REF
      || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
      || get_base_address (offset) != base)

  /* Need general regs.  */
  cfun->va_list_fpr_size |= 1;

  si->va_list_escapes = true;
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.  */

alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                              tree type, int *pretend_size, int no_rtl)
  CUMULATIVE_ARGS cum = *pcum;

  /* Skip the current argument.  */
  FUNCTION_ARG_ADVANCE (cum, mode, type, 1);

#if TARGET_ABI_UNICOSMK
  /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
     arguments on the stack.  Unfortunately, it doesn't always store the first
     one (i.e. the one that arrives in $16 or $f16).  This is not a problem
     with stdargs as we always have at least one named argument there.  */
  if (cum.num_reg_words < 6)
      emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
      emit_insn (gen_arg_home_umk ());

#elif TARGET_ABI_OPEN_VMS
  /* For VMS, we allocate space for all 6 arg registers plus a count.

     However, if NO registers need to be saved, don't allocate any space.
     This is not only because we won't need the space, but because AP
     includes the current_pretend_args_size and we don't want to mess up
     any ap-relative addresses already made.  */
  if (cum.num_args < 6)
          emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
          emit_insn (gen_arg_home ());
      *pretend_size = 7 * UNITS_PER_WORD;

  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
     only push those that are remaining.  However, if NO registers need to
     be saved, don't allocate any space.  This is not only because we won't
     need the space, but because AP includes the current_pretend_args_size
     and we don't want to mess up any ap-relative addresses already made.

     If we are not to use the floating-point registers, save the integer
     registers where we would put the floating-point registers.  This is
     not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this rare
     case.  */

      alias_set_type set = get_varargs_alias_set ();

  count = cfun->va_list_gpr_size / UNITS_PER_WORD;
  if (count > 6 - cum)

      /* Detect whether integer registers or floating-point registers
         are needed by the detected va_arg statements.  See above for
         how these values are computed.  Note that the "escape" value
         is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
         these bits set.  */
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);

      if (cfun->va_list_fpr_size & 1)
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (virtual_incoming_args_rtx,
                                            (cum + 6) * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum, tmp, count);

      if (cfun->va_list_fpr_size & 2)
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (virtual_incoming_args_rtx,
                                            cum * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);

  *pretend_size = 12 * UNITS_PER_WORD;
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
  HOST_WIDE_INT offset;
  tree t, offset_field, base_field;

  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)

  if (TARGET_ABI_UNICOSMK)
    std_expand_builtin_va_start (valist, nextarg);

  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
     up by 48, storing fp arg registers in the first 48 bytes, and the
     integer arg registers in the next 48 bytes.  This is only done,
     however, if any integer registers need to be stored.

     If no integer registers need be stored, then we must subtract 48
     in order to account for the integer arg registers which are counted
     in argsize above, but which are not actually stored on the stack.
     Must further be careful here about structures straddling the last
     integer argument register; that futzes with pretend_args_size,
     which changes the meaning of AP.  */

    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;

    offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;

  if (TARGET_ABI_OPEN_VMS)
      nextarg = plus_constant (nextarg, offset);
      nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
                  make_tree (ptr_type_node, nextarg));
      TREE_SIDE_EFFECTS (t) = 1;

      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      base_field = TYPE_FIELDS (TREE_TYPE (valist));
      offset_field = TREE_CHAIN (base_field);

      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                           valist, base_field, NULL_TREE);
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                             valist, offset_field, NULL_TREE);

      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
                  size_int (offset));
      t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
                         gimple_seq *pre_p)
  tree type_size, ptr_type, addend, t, addr;
  gimple_seq internal_post;

  /* If the type could not be passed in registers, skip the block
     reserved for the registers.  */
  if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
      t = build_int_cst (TREE_TYPE (offset), 6*8);
      gimplify_assign (offset,
                       build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
                       pre_p);

  ptr_type = build_pointer_type (type);

  if (TREE_CODE (type) == COMPLEX_TYPE)
      tree real_part, imag_part, real_temp;

      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      /* Copy the value into a new temporary, lest the formal temporary
         be reused out from under us.  */
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);

  else if (TREE_CODE (type) == REAL_TYPE)
      tree fpaddend, cond, fourtyeight;

      fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
                              addend, fourtyeight);
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
                            fpaddend, addend);

  /* Build the final address and force that value into a temporary.  */
  addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
                 fold_convert (sizetype, addend));
  internal_post = NULL;
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  gimple_seq_add_seq (pre_p, internal_post);

  /* Update the offset field.  */
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));

  t = fold_convert (TREE_TYPE (offset), t);
  gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
                   pre_p);

  return build_va_arg_indirect_ref (addr);
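
/* Editorial summary of the addressing scheme above: integer arguments
   are fetched from __base + __offset; floating-point arguments whose
   offset is still inside the 48-byte integer-register block are
   redirected to __base + __offset - 48, where the FP argument
   registers were saved.  */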
alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                       gimple_seq *post_p)
  tree offset_field, base_field, offset, base, t, r;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  base_field = TYPE_FIELDS (va_list_type_node);
  offset_field = TREE_CHAIN (base_field);
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                       valist, base_field, NULL_TREE);
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                         valist, offset_field, NULL_TREE);

  /* Pull the fields of the structure out into temporaries.  Since we never
     modify the base field, we can use a formal temporary.  Sign-extend the
     offset field so that it's the proper width for pointer arithmetic.  */
  base = get_formal_tmp_var (base_field, pre_p);

  t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
  offset = get_initialized_tmp_var (t, pre_p, NULL);

  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type (type);

  /* Find the value.  Note that this will be a stable indirection, or
     a composite of stable indirections in the case of complex.  */
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);

  /* Stuff the offset temporary back into its field.  */
  gimplify_assign (unshare_expr (offset_field),
                   fold_convert (TREE_TYPE (offset_field), offset), pre_p);

  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
  ALPHA_BUILTIN_CMPBGE,
  ALPHA_BUILTIN_EXTBL,
  ALPHA_BUILTIN_EXTWL,
  ALPHA_BUILTIN_EXTLL,
  ALPHA_BUILTIN_EXTQL,
  ALPHA_BUILTIN_EXTWH,
  ALPHA_BUILTIN_EXTLH,
  ALPHA_BUILTIN_EXTQH,
  ALPHA_BUILTIN_INSBL,
  ALPHA_BUILTIN_INSWL,
  ALPHA_BUILTIN_INSLL,
  ALPHA_BUILTIN_INSQL,
  ALPHA_BUILTIN_INSWH,
  ALPHA_BUILTIN_INSLH,
  ALPHA_BUILTIN_INSQH,
  ALPHA_BUILTIN_MSKBL,
  ALPHA_BUILTIN_MSKWL,
  ALPHA_BUILTIN_MSKLL,
  ALPHA_BUILTIN_MSKQL,
  ALPHA_BUILTIN_MSKWH,
  ALPHA_BUILTIN_MSKLH,
  ALPHA_BUILTIN_MSKQH,
  ALPHA_BUILTIN_UMULH,

  ALPHA_BUILTIN_ZAPNOT,
  ALPHA_BUILTIN_AMASK,
  ALPHA_BUILTIN_IMPLVER,

  ALPHA_BUILTIN_THREAD_POINTER,
  ALPHA_BUILTIN_SET_THREAD_POINTER,

  ALPHA_BUILTIN_MINUB8,
  ALPHA_BUILTIN_MINSB8,
  ALPHA_BUILTIN_MINUW4,
  ALPHA_BUILTIN_MINSW4,
  ALPHA_BUILTIN_MAXUB8,
  ALPHA_BUILTIN_MAXSB8,
  ALPHA_BUILTIN_MAXUW4,
  ALPHA_BUILTIN_MAXSW4,

  ALPHA_BUILTIN_UNPKBL,
  ALPHA_BUILTIN_UNPKBW,

  ALPHA_BUILTIN_CTPOP,
static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
  CODE_FOR_builtin_cmpbge,
  CODE_FOR_builtin_extbl,
  CODE_FOR_builtin_extwl,
  CODE_FOR_builtin_extll,
  CODE_FOR_builtin_extql,
  CODE_FOR_builtin_extwh,
  CODE_FOR_builtin_extlh,
  CODE_FOR_builtin_extqh,
  CODE_FOR_builtin_insbl,
  CODE_FOR_builtin_inswl,
  CODE_FOR_builtin_insll,
  CODE_FOR_builtin_insql,
  CODE_FOR_builtin_inswh,
  CODE_FOR_builtin_inslh,
  CODE_FOR_builtin_insqh,
  CODE_FOR_builtin_mskbl,
  CODE_FOR_builtin_mskwl,
  CODE_FOR_builtin_mskll,
  CODE_FOR_builtin_mskql,
  CODE_FOR_builtin_mskwh,
  CODE_FOR_builtin_msklh,
  CODE_FOR_builtin_mskqh,
  CODE_FOR_umuldi3_highpart,
  CODE_FOR_builtin_zap,
  CODE_FOR_builtin_zapnot,
  CODE_FOR_builtin_amask,
  CODE_FOR_builtin_implver,
  CODE_FOR_builtin_rpcc,

  CODE_FOR_builtin_minub8,
  CODE_FOR_builtin_minsb8,
  CODE_FOR_builtin_minuw4,
  CODE_FOR_builtin_minsw4,
  CODE_FOR_builtin_maxub8,
  CODE_FOR_builtin_maxsb8,
  CODE_FOR_builtin_maxuw4,
  CODE_FOR_builtin_maxsw4,
  CODE_FOR_builtin_perr,
  CODE_FOR_builtin_pklb,
  CODE_FOR_builtin_pkwb,
  CODE_FOR_builtin_unpkbl,
  CODE_FOR_builtin_unpkbw,

  CODE_FOR_popcountdi2
};
struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};

static struct alpha_builtin_def const zero_arg_builtins[] = {
  { "__builtin_alpha_implver",  ALPHA_BUILTIN_IMPLVER,  0, true },
  { "__builtin_alpha_rpcc",     ALPHA_BUILTIN_RPCC,     0, false }
};

static struct alpha_builtin_def const one_arg_builtins[] = {
  { "__builtin_alpha_amask",    ALPHA_BUILTIN_AMASK,    0, true },
  { "__builtin_alpha_pklb",     ALPHA_BUILTIN_PKLB,     MASK_MAX, true },
  { "__builtin_alpha_pkwb",     ALPHA_BUILTIN_PKWB,     MASK_MAX, true },
  { "__builtin_alpha_unpkbl",   ALPHA_BUILTIN_UNPKBL,   MASK_MAX, true },
  { "__builtin_alpha_unpkbw",   ALPHA_BUILTIN_UNPKBW,   MASK_MAX, true },
  { "__builtin_alpha_cttz",     ALPHA_BUILTIN_CTTZ,     MASK_CIX, true },
  { "__builtin_alpha_ctlz",     ALPHA_BUILTIN_CTLZ,     MASK_CIX, true },
  { "__builtin_alpha_ctpop",    ALPHA_BUILTIN_CTPOP,    MASK_CIX, true }
};

static struct alpha_builtin_def const two_arg_builtins[] = {
  { "__builtin_alpha_cmpbge",   ALPHA_BUILTIN_CMPBGE,   0, true },
  { "__builtin_alpha_extbl",    ALPHA_BUILTIN_EXTBL,    0, true },
  { "__builtin_alpha_extwl",    ALPHA_BUILTIN_EXTWL,    0, true },
  { "__builtin_alpha_extll",    ALPHA_BUILTIN_EXTLL,    0, true },
  { "__builtin_alpha_extql",    ALPHA_BUILTIN_EXTQL,    0, true },
  { "__builtin_alpha_extwh",    ALPHA_BUILTIN_EXTWH,    0, true },
  { "__builtin_alpha_extlh",    ALPHA_BUILTIN_EXTLH,    0, true },
  { "__builtin_alpha_extqh",    ALPHA_BUILTIN_EXTQH,    0, true },
  { "__builtin_alpha_insbl",    ALPHA_BUILTIN_INSBL,    0, true },
  { "__builtin_alpha_inswl",    ALPHA_BUILTIN_INSWL,    0, true },
  { "__builtin_alpha_insll",    ALPHA_BUILTIN_INSLL,    0, true },
  { "__builtin_alpha_insql",    ALPHA_BUILTIN_INSQL,    0, true },
  { "__builtin_alpha_inswh",    ALPHA_BUILTIN_INSWH,    0, true },
  { "__builtin_alpha_inslh",    ALPHA_BUILTIN_INSLH,    0, true },
  { "__builtin_alpha_insqh",    ALPHA_BUILTIN_INSQH,    0, true },
  { "__builtin_alpha_mskbl",    ALPHA_BUILTIN_MSKBL,    0, true },
  { "__builtin_alpha_mskwl",    ALPHA_BUILTIN_MSKWL,    0, true },
  { "__builtin_alpha_mskll",    ALPHA_BUILTIN_MSKLL,    0, true },
  { "__builtin_alpha_mskql",    ALPHA_BUILTIN_MSKQL,    0, true },
  { "__builtin_alpha_mskwh",    ALPHA_BUILTIN_MSKWH,    0, true },
  { "__builtin_alpha_msklh",    ALPHA_BUILTIN_MSKLH,    0, true },
  { "__builtin_alpha_mskqh",    ALPHA_BUILTIN_MSKQH,    0, true },
  { "__builtin_alpha_umulh",    ALPHA_BUILTIN_UMULH,    0, true },
  { "__builtin_alpha_zap",      ALPHA_BUILTIN_ZAP,      0, true },
  { "__builtin_alpha_zapnot",   ALPHA_BUILTIN_ZAPNOT,   0, true },
  { "__builtin_alpha_minub8",   ALPHA_BUILTIN_MINUB8,   MASK_MAX, true },
  { "__builtin_alpha_minsb8",   ALPHA_BUILTIN_MINSB8,   MASK_MAX, true },
  { "__builtin_alpha_minuw4",   ALPHA_BUILTIN_MINUW4,   MASK_MAX, true },
  { "__builtin_alpha_minsw4",   ALPHA_BUILTIN_MINSW4,   MASK_MAX, true },
  { "__builtin_alpha_maxub8",   ALPHA_BUILTIN_MAXUB8,   MASK_MAX, true },
  { "__builtin_alpha_maxsb8",   ALPHA_BUILTIN_MAXSB8,   MASK_MAX, true },
  { "__builtin_alpha_maxuw4",   ALPHA_BUILTIN_MAXUW4,   MASK_MAX, true },
  { "__builtin_alpha_maxsw4",   ALPHA_BUILTIN_MAXSW4,   MASK_MAX, true },
  { "__builtin_alpha_perr",     ALPHA_BUILTIN_PERR,     MASK_MAX, true }
};

static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;
/* Helper function of alpha_init_builtins.  Add the COUNT built-in
   functions pointed to by P, with function type FTYPE.  */

alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
                    tree ftype)
  for (i = 0; i < count; ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
        decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
                                     NULL, NULL);
          TREE_READONLY (decl) = 1;
        TREE_NOTHROW (decl) = 1;

alpha_init_builtins (void)
  tree dimode_integer_type_node;

  dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);

  ftype = build_function_type (dimode_integer_type_node, void_list_node);
  alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
                      ftype);

  ftype = build_function_type_list (dimode_integer_type_node,
                                    dimode_integer_type_node, NULL_TREE);
  alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
                      ftype);

  ftype = build_function_type_list (dimode_integer_type_node,
                                    dimode_integer_type_node,
                                    dimode_integer_type_node, NULL_TREE);
  alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
                      ftype);

  ftype = build_function_type (ptr_type_node, void_list_node);
  decl = add_builtin_function ("__builtin_thread_pointer", ftype,
                               ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
                               NULL, NULL);
  TREE_NOTHROW (decl) = 1;

  ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
  decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
                               ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
                               NULL, NULL);
  TREE_NOTHROW (decl) = 1;

  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

alpha_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      enum machine_mode mode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  call_expr_arg_iterator iter;
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;

  if (fcode >= ALPHA_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
    internal_error ("bad builtin fcode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
      const struct insn_operand_data *insn_op;

      if (arg == error_mark_node)
      if (arity > MAX_ARGS)

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
        op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);

      enum machine_mode tmode = insn_data[icode].operand[0].mode;
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);

      pat = GEN_FCN (icode) (target, op[0]);
        pat = GEN_FCN (icode) (op[0]);

      pat = GEN_FCN (icode) (target, op[0], op[1]);
/* Several bits below assume HWI >= 64 bits.  This should be enforced
   by config.gcc.  */
#if HOST_BITS_PER_WIDE_INT < 64
# error "HOST_WIDE_INT too small"
#endif

/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
   with an 8-bit output vector.  OPINT contains the integer operands; bit N
   of OP_CONST is set if OPINT[N] is valid.  */

alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
      for (i = 0, val = 0; i < 8; ++i)
          unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
          unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;

      return build_int_cst (long_integer_type_node, val);

  else if (op_const == 2 && opint[1] == 0)
    return build_int_cst (long_integer_type_node, 0xff);
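
/* Editorial sketch, not GCC code: host-side semantics of CMPBGE,
   mirroring the folder above.  Bit i of the result is set when byte i
   of A is unsigned-greater-or-equal to byte i of B.  */
#if 0
static unsigned int
cmpbge_model (unsigned long a, unsigned long b)
{
  unsigned int val = 0, i;
  for (i = 0; i < 8; ++i)
    {
      unsigned int c0 = (a >> (i * 8)) & 0xff;
      unsigned int c1 = (b >> (i * 8)) & 0xff;
      if (c0 >= c1)
        val |= 1u << i;
    }
  return val;
}
#endif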
/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
   specialized form of an AND operation.  Other byte manipulation instructions
   are defined in terms of this instruction, so this is also used as a
   subroutine for other builtins.

   OP contains the tree operands; OPINT contains the extracted integer values.
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
   OPINT may be considered.  */

alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
      unsigned HOST_WIDE_INT mask = 0;

      for (i = 0; i < 8; ++i)
        if ((opint[1] >> i) & 1)
          mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);

        return build_int_cst (long_integer_type_node, opint[0] & mask);

        return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
                            build_int_cst (long_integer_type_node, mask));

  else if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (long_integer_type_node, 0);
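
/* Worked example (editorial): a byte selector of 0x0f expands to the
   64-bit mask 0x00000000ffffffff, so zapnot (x, 0x0f) folds to
   x & 0xffffffff; zap is the same operation with the selector
   complemented.  */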
/* Fold the builtins for the EXT family of instructions.  */

alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
  tree *zap_op = NULL;

      unsigned HOST_WIDE_INT loc;

      if (BYTES_BIG_ENDIAN)

          unsigned HOST_WIDE_INT temp = opint[0];

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);

/* Fold the builtins for the INS family of instructions.  */

alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
  if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (long_integer_type_node, 0);

      unsigned HOST_WIDE_INT temp, loc, byteloc;
      tree *zap_op = NULL;

      if (BYTES_BIG_ENDIAN)
          byteloc = (64 - (loc * 8)) & 0x3f;

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, op_const);

alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
      unsigned HOST_WIDE_INT loc;

      if (BYTES_BIG_ENDIAN)

  opint[1] = bytemask ^ 0xff;
  return alpha_fold_builtin_zapnot (op, opint, op_const);
alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
      unsigned HOST_WIDE_INT l;

      mul_double (opint[0], 0, opint[1], 0, &l, &h);

#if HOST_BITS_PER_WIDE_INT > 64

      return build_int_cst (long_integer_type_node, h);

    opint[1] = opint[0];

  /* Note that (X*1) >> 64 == 0.  */
  if (opint[1] == 0 || opint[1] == 1)
    return build_int_cst (long_integer_type_node, 0);
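
/* Editorial sketch, not GCC code: UMULH's semantics expressed with the
   GCC __int128 extension, i.e. the high 64 bits of the unsigned 128-bit
   product.  The folder above instead uses mul_double so that it also
   works on 32-bit hosts.  */
#if 0
static unsigned long
umulh_model (unsigned long a, unsigned long b)
{
  return (unsigned long) (((unsigned __int128) a * b) >> 64);
}
#endif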
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
  tree op0 = fold_convert (vtype, op[0]);
  tree op1 = fold_convert (vtype, op[1]);
  tree val = fold_build2 (code, vtype, op0, op1);
  return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);

alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
  unsigned HOST_WIDE_INT temp = 0;

  for (i = 0; i < 8; ++i)
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;

  return build_int_cst (long_integer_type_node, temp);

alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
  unsigned HOST_WIDE_INT temp;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 24) & 0xff00;

  return build_int_cst (long_integer_type_node, temp);

alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
  unsigned HOST_WIDE_INT temp;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 8) & 0xff00;
  temp |= (opint[0] >> 16) & 0xff0000;
  temp |= (opint[0] >> 24) & 0xff000000;

  return build_int_cst (long_integer_type_node, temp);

alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
  unsigned HOST_WIDE_INT temp;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0xff00) << 24;

  return build_int_cst (long_integer_type_node, temp);

alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
  unsigned HOST_WIDE_INT temp;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0x0000ff00) << 8;
  temp |= (opint[0] & 0x00ff0000) << 16;
  temp |= (opint[0] & 0xff000000) << 24;

  return build_int_cst (long_integer_type_node, temp);

alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
  unsigned HOST_WIDE_INT temp;

    temp = exact_log2 (opint[0] & -opint[0]);

  return build_int_cst (long_integer_type_node, temp);

alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
  unsigned HOST_WIDE_INT temp;

    temp = 64 - floor_log2 (opint[0]) - 1;

  return build_int_cst (long_integer_type_node, temp);

alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
  unsigned HOST_WIDE_INT temp, op;

      temp++, op &= op - 1;

  return build_int_cst (long_integer_type_node, temp);
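
/* Editorial note: the loop above is the classic "clear the lowest set
   bit" population count: op &= op - 1 removes one set bit per
   iteration, so e.g. op = 0xb (binary 1011) takes three iterations.  */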
/* Fold one of our builtin functions.  */

alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
  tree op[MAX_ARGS], t;
  unsigned HOST_WIDE_INT opint[MAX_ARGS];
  long op_const = 0, arity = 0;

  for (t = arglist; t; t = TREE_CHAIN (t), ++arity)
      tree arg = TREE_VALUE (t);
      if (arg == error_mark_node)
      if (arity >= MAX_ARGS)

      if (TREE_CODE (arg) == INTEGER_CST)
          op_const |= 1L << arity;
          opint[arity] = int_cst_value (arg);

  switch (DECL_FUNCTION_CODE (fndecl))
    case ALPHA_BUILTIN_CMPBGE:
      return alpha_fold_builtin_cmpbge (opint, op_const);

    case ALPHA_BUILTIN_EXTBL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_EXTWL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_EXTLL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_EXTQL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_EXTWH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_EXTLH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_EXTQH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_INSBL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_INSWL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_INSLL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_INSQL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_INSWH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_INSLH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_INSQH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_MSKBL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_MSKWL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_MSKLL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_MSKQL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_MSKWH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_MSKLH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_MSKQH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_UMULH:
      return alpha_fold_builtin_umulh (opint, op_const);

    case ALPHA_BUILTIN_ZAP:

    case ALPHA_BUILTIN_ZAPNOT:
      return alpha_fold_builtin_zapnot (op, opint, op_const);

    case ALPHA_BUILTIN_MINUB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MINSB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MINUW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MINSW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
    case ALPHA_BUILTIN_MAXUB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MAXSB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MAXUW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MAXSW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);

    case ALPHA_BUILTIN_PERR:
      return alpha_fold_builtin_perr (opint, op_const);
    case ALPHA_BUILTIN_PKLB:
      return alpha_fold_builtin_pklb (opint, op_const);
    case ALPHA_BUILTIN_PKWB:
      return alpha_fold_builtin_pkwb (opint, op_const);
    case ALPHA_BUILTIN_UNPKBL:
      return alpha_fold_builtin_unpkbl (opint, op_const);
    case ALPHA_BUILTIN_UNPKBW:
      return alpha_fold_builtin_unpkbw (opint, op_const);

    case ALPHA_BUILTIN_CTTZ:
      return alpha_fold_builtin_cttz (opint, op_const);
    case ALPHA_BUILTIN_CTLZ:
      return alpha_fold_builtin_ctlz (opint, op_const);
    case ALPHA_BUILTIN_CTPOP:
      return alpha_fold_builtin_ctpop (opint, op_const);

    case ALPHA_BUILTIN_AMASK:
    case ALPHA_BUILTIN_IMPLVER:
    case ALPHA_BUILTIN_RPCC:
    case ALPHA_BUILTIN_THREAD_POINTER:
    case ALPHA_BUILTIN_SET_THREAD_POINTER:
      /* None of these are foldable at compile-time.  */
/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */

/* Nonzero if we need a stack procedure.  */
enum alpha_procedure_types
  {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
static enum alpha_procedure_types alpha_procedure_type;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;
/* Compute register masks for saved registers.  */

static void
alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  unsigned int i;

  /* When outputting a thunk, we don't have valid register life info,
     but assemble_start_function wants to output .frame and .mask
     directives.  */
  if (cfun->is_thunk)
    {
      *imaskP = 0;
      *fmaskP = 0;
      return;
    }

  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
    imask |= (1UL << HARD_FRAME_POINTER_REGNUM);

  /* One for every register we have to save.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (! fixed_regs[i] && ! call_used_regs[i]
	&& df_regs_ever_live_p (i) && i != REG_RA
	&& (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
      {
	if (i < 32)
	  imask |= (1UL << i);
	else
	  fmask |= (1UL << (i - 32));
      }

  /* We need to restore these for the handler.  */
  if (crtl->calls_eh_return)
    {
      for (i = 0; ; ++i)
	{
	  unsigned regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;
	  imask |= 1UL << regno;
	}
    }

  /* If any register spilled, then spill the return address also.  */
  /* ??? This is required by the Digital stack unwind specification
     and isn't needed if we're doing Dwarf2 unwinding.  */
  if (imask || fmask || alpha_ra_ever_killed ())
    imask |= (1UL << REG_RA);

  *imaskP = imask;
  *fmaskP = fmask;
}
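/* Illustrative example (not from the original source): a function that
   must save $9, $10 and $f2 gets imask bits 9 and 10 and fmask bit 2;
   since something spilled, REG_RA (bit 26) is then added as well, so
   imask ends up 0x04000600 and fmask 0x00000004.  */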
static int
alpha_sa_size (void)
{
  unsigned long mask[2];
  int sa_size = 0;
  int i, j;

  alpha_sa_mask (&mask[0], &mask[1]);

  if (TARGET_ABI_UNICOSMK)
    {
      if (mask[0] || mask[1])
	sa_size = 14;
    }
  else
    {
      for (j = 0; j < 2; ++j)
	for (i = 0; i < 32; ++i)
	  if ((mask[j] >> i) & 1)
	    sa_size++;
    }

  if (TARGET_ABI_UNICOSMK)
    {
      /* We might not need to generate a frame if we don't make any calls
	 (including calls to __T3E_MISMATCH if this is a vararg function),
	 don't have any local variables which require stack slots, don't
	 use alloca and have not determined that we need a frame for other
	 reasons.  */

      alpha_procedure_type
	= (sa_size || get_frame_size() != 0
	   || crtl->outgoing_args_size
	   || cfun->stdarg || cfun->calls_alloca
	   || frame_pointer_needed)
	  ? PT_STACK : PT_REGISTER;

      /* Always reserve space for saving callee-saved registers if we
	 need a frame as required by the calling convention.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size = 14;
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Start by assuming we can use a register procedure if we don't
	 make any calls (REG_RA not used) or need to save any
	 registers and a stack procedure if we do.  */
      if ((mask[0] >> REG_RA) & 1)
	alpha_procedure_type = PT_STACK;
      else if (get_frame_size() != 0)
	alpha_procedure_type = PT_REGISTER;
      else
	alpha_procedure_type = PT_NULL;

      /* Don't reserve space for saving FP & RA yet.  Do that later after we've
	 made the final decision on stack procedure vs register procedure.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size -= 2;

      /* Decide whether to refer to objects off our PV via FP or PV.
	 If we need FP for something else or if we receive a nonlocal
	 goto (which expects PV to contain the value), we must use PV.
	 Otherwise, start by assuming we can use FP.  */

      vms_base_regno
	= (frame_pointer_needed
	   || cfun->has_nonlocal_label
	   || alpha_procedure_type == PT_STACK
	   || crtl->outgoing_args_size)
	  ? REG_PV : HARD_FRAME_POINTER_REGNUM;

      /* If we want to copy PV into FP, we need to find some register
	 in which to save FP.  */

      vms_save_fp_regno = -1;
      if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
	for (i = 0; i < 32; i++)
	  if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
	    vms_save_fp_regno = i;

      if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
	vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
      else if (alpha_procedure_type == PT_NULL)
	vms_base_regno = REG_PV;

      /* Stack unwinding should be done via FP unless we use it for PV.  */
      vms_unwind_regno = (vms_base_regno == REG_PV
			  ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

      /* If this is a stack procedure, allow space for saving FP and RA.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size += 2;
    }
  else
    {
      /* Our size must be even (multiple of 16 bytes).  */
      if (sa_size & 1)
	sa_size++;
    }

  return sa_size * 8;
}
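/* Illustrative example (not from the original source): under the OSF
   ABI a function saving $9, $10, $f2 and RA has four save slots; the
   count is already even, so alpha_sa_size returns 4 * 8 == 32 bytes.
   An odd count would gain one pad slot to keep the save area a
   multiple of 16 bytes.  */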
/* Define the offset between two registers, one to be eliminated,
   and the other its replacement, at the start of a routine.  */

HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
				  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = alpha_sa_size ();
  ret += ALPHA_ROUND (crtl->outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
			   + crtl->args.pretend_args_size)
	      - crtl->args.pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

int
alpha_pv_save_size (void)
{
  alpha_sa_size ();
  return alpha_procedure_type == PT_STACK ? 8 : 0;
}

int
alpha_using_fp (void)
{
  alpha_sa_size ();
  return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
}
#if TARGET_ABI_OPEN_VMS

const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "overlaid",   0, 0, true,  false, false, NULL },
  { "global",     0, 0, true,  false, false, NULL },
  { "initialize", 0, 0, true,  false, false, NULL },
  { NULL,         0, 0, false, false, false, NULL }
};

#endif
static int
find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
}

int
alpha_find_lo_sum_using_gp (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
}

static int
alpha_does_function_need_gp (void)
{
  rtx insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return 0;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    return 1;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (cfun->is_thunk)
    return 1;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (crtl->has_nonlocal_goto)
    return 1;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& ! JUMP_TABLE_DATA_P (insn)
	&& GET_CODE (PATTERN (insn)) != USE
	&& GET_CODE (PATTERN (insn)) != CLOBBER
	&& get_attr_usegp (insn))
      return 1;

  return 0;
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p (void)
{
  rtx seq = get_insns ();
  rtx insn;

  end_sequence ();

  if (!seq)
    return NULL_RTX;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = NEXT_INSN (insn);
	}
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
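/* A minimal usage sketch for FRP, mirroring its uses later in this
   file (illustration only):

       FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

   expands to start_sequence (), the emit itself, and then
   set_frame_related_p (), which re-emits the collected sequence with
   RTX_FRAME_RELATED_P set on every insn so dwarf2out describes it.  */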
/* Generates a store with the proper unwind info attached.  VALUE is
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
   contains SP+FRAME_BIAS, and that is the unwind info that should be
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */

static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
		    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem, insn;

  addr = plus_constant (base_reg, base_ofs);
  mem = gen_rtx_MEM (DImode, addr);
  set_mem_alias_set (mem, alpha_sr_alias_set);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
	{
	  addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
	  mem = gen_rtx_MEM (DImode, addr);
	}

      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, mem, frame_reg),
			     REG_NOTES (insn));
    }
}

static void
emit_frame_store (unsigned int regno, rtx base_reg,
		  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}
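/* Illustrative call (hypothetical values): emit_frame_store (26,
   stack_pointer_rtx, 0, 16) saves $26 at sp+16 and marks the move
   frame-related; with a nonzero FRAME_BIAS the REG_FRAME_RELATED_EXPR
   note added above instead describes the store relative to SP, so the
   unwinder is not confused by the biased base register.  */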
/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
	these are 'normal' functions with local vars and which are
	calling other functions
   - register frame (PROC_REGISTER)
	keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor)
   This is done with the '.pdesc' command.

   On not-vms, we don't really differentiate between the two, as we can
   simply allocate stack without saving registers.  */

void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + crtl->args.pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    /* We have to allocate space for the DSIB if we generate a frame.  */
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ crtl->outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + crtl->args.pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
	emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
    emit_insn (gen_prologue_mcount ());

  if (TARGET_ABI_UNICOSMK)
    unicosmk_gen_dsib (&imask);

  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instruction.

     Note that we are only allowed to adjust sp once in the prologue.  */
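  /* Worked example (illustrative): for frame_size == 20000 and
     sa_size == 0, the code below probes at sp-4096 and sp-12288
     (PROBED ends at 20480), and since 20000 > 20480 - 4096 one more
     probe is emitted at sp-20000 before the single sp adjustment.  */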
  if (frame_size <= 32768)
    {
      if (frame_size > 4096)
	{
	  int probed;

	  for (probed = 4096; probed < frame_size; probed += 8192)
	    emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
						 ? -probed + 64
						 : -probed)));

	  /* We only have to do this probe if we aren't saving registers.  */
	  if (sa_size == 0 && frame_size > probed - 4096)
	    emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
	}

      if (frame_size != 0)
	FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (TARGET_ABI_UNICOSMK
					     ? -frame_size + 64
					     : -frame_size))));
    }
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
	 number of 8192 byte blocks to probe.  We then probe each block
	 in the loop and then set SP to the proper location.  If the
	 amount remaining is > 4096, we have to do one more probe if we
	 are not saving any registers.  */

      HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
      HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
			     GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));

      /* Because of the difficulty in emitting a new basic block this
	 late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if (leftover > 4096 && sa_size == 0)
	{
	  rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
	  MEM_VOLATILE_P (last) = 1;
	  emit_move_insn (last, const0_rtx);
	}

      if (TARGET_ABI_WINDOWS_NT)
	{
	  /* For NT stack unwind (done by 'reverse execution'), it's
	     not OK to take the result of a loop, even though the value
	     is already in ptr, so we reload it via a single operation
	     and subtract it from sp.

	     Yes, that's correct -- we have to reload the whole constant
	     into a temporary via ldah+lda then subtract from sp.  */

	  HOST_WIDE_INT lo, hi;
	  lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
	  hi = frame_size - lo;

	  emit_move_insn (ptr, GEN_INT (hi));
	  emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
	  seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
				       ptr));
	}
      else
	{
	  seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
				       GEN_INT (-leftover)));
	}

      /* This alternative is special, because the DWARF code cannot
	 possibly intuit through the loop above.  So we invent this
	 note it looks at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      REG_NOTES (seq)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			       gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					     GEN_INT (TARGET_ABI_UNICOSMK
						      ? -frame_size + 64
						      : -frame_size))),
			     REG_NOTES (seq));
    }

  if (!TARGET_ABI_UNICOSMK)
    {
      HOST_WIDE_INT sa_bias = 0;

      /* Cope with very large offsets to the register save area.  */
      sa_reg = stack_pointer_rtx;
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  rtx sa_bias_rtx;

	  if (low + sa_size <= 0x8000)
	    sa_bias = reg_offset - low, reg_offset = low;
	  else
	    sa_bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 24);
	  sa_bias_rtx = GEN_INT (sa_bias);

	  if (add_operand (sa_bias_rtx, DImode))
	    emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
	  else
	    {
	      emit_move_insn (sa_reg, sa_bias_rtx);
	      emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
	    }
	}

      /* Save regs in stack order.  Beginning with VMS PV.  */
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
	emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

      /* Save register RA next.  */
      if (imask & (1UL << REG_RA))
	{
	  emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
	  imask &= ~(1UL << REG_RA);
	  reg_offset += 8;
	}

      /* Now save any other registers required to be saved.  */
      for (i = 0; i < 31; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* The standard frame on the T3E includes space for saving registers.
	 We just have to use it.  We don't have to save the return address and
	 the old frame pointer here - they are saved in the DSIB.  */

      reg_offset = -56;
      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
    }

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_REGISTER)
	/* Register frame procedures save the fp.
	   ?? Ought to have a dwarf2 save for this.  */
	emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
			hard_frame_pointer_rtx);

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
	emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
				    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
	  && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (crtl->outgoing_args_size != 0)
	{
	  rtx seq
	    = emit_move_insn (stack_pointer_rtx,
			      plus_constant
			      (hard_frame_pointer_rtx,
			       - (ALPHA_ROUND
				  (crtl->outgoing_args_size))));

	  /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
	     if ! frame_pointer_needed. Setting the bit will change the CFA
	     computation rule to use sp again, which would be wrong if we had
	     frame_pointer_needed, as this means sp might move unpredictably
	     later on.

	     Also, note that
	       frame_pointer_needed
	       => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
	     and
	       crtl->outgoing_args_size != 0
	       => alpha_procedure_type != PT_NULL,

	     so when we are not setting the bit here, we are guaranteed to
	     have emitted an FRP frame pointer update just before.  */
	  RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
	}
    }
  else if (!TARGET_ABI_UNICOSMK)
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
	{
	  if (TARGET_CAN_FAULT_IN_PROLOGUE)
	    FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
	  else
	    /* This must always be the last instruction in the
	       prologue, thus we emit a special move + clobber.  */
	    FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
					 stack_pointer_rtx, sa_reg)));
	}
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Output the textual info surrounding the prologue.  */

void
alpha_start_function (FILE *file, const char *fnname,
		      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  unsigned HOST_WIDE_INT frame_size;
  /* The maximum debuggable frame size (512 Kbytes using Tru64 as).  */
  unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
					  ? 524288
					  : 1UL << 31;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  int i;

  /* Don't emit an extern directive for functions defined in the same file.  */
  if (TARGET_ABI_UNICOSMK)
    {
      tree name_tree;
      name_tree = get_identifier (fnname);
      TREE_ASM_WRITTEN (name_tree) = 1;
    }

  alpha_fnname = fnname;
  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + crtl->args.pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ crtl->outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + crtl->args.pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Ecoff can handle multiple .file directives, so put out file and lineno.
     We have to do that before the .ent directive as we cannot switch
     files within procedures with native ecoff because line numbers are
     linked to procedure descriptors.
     Outputting the lineno helps debugging of one line functions as they
     would otherwise get no line number at all. Please note that we would
     like to put out last_linenum from final.c, but it is not accessible.  */

  if (write_symbols == SDB_DEBUG)
    {
#ifdef ASM_OUTPUT_SOURCE_FILENAME
      ASM_OUTPUT_SOURCE_FILENAME (file,
				  DECL_SOURCE_FILE (current_function_decl));
#endif
#ifdef SDB_OUTPUT_SOURCE_LINE
      if (debug_info_level != DINFO_LEVEL_TERSE)
	SDB_OUTPUT_SOURCE_LINE (file,
				DECL_SOURCE_LINE (current_function_decl));
#endif
    }

  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS
      || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
	 Otherwise, do it here.  */
      if (TARGET_ABI_OSF
	  && ! alpha_function_needs_gp
	  && ! cfun->is_thunk)
	{
	  putc ('$', file);
	  assemble_name (file, fnname);
	  fputs ("..ng:\n", file);
	}
    }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  /* For public functions, the label must be globalized by appending an
     additional colon.  */
  if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
    strcat (entry_label, ":");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;

  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
	 math-library routines.  The value we set it to is PDSC_EXC_IEEE
	 (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
  alpha_arg_offset = -frame_size + 48;

  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
	     HOST_WIDE_INT_PRINT_DEC "\n",
	     vms_unwind_regno,
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
	     (frame_pointer_needed
	      ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
	     frame_size >= max_frame_size ? 0 : frame_size,
	     crtl->args.pretend_args_size);

  /* Describe which registers were spilled.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
	/* ??? Does VMS care if mask contains ra?  The old code didn't
	   set it, so I don't here.  */
	fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
	fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
	fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
	{
	  fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
		   frame_size >= max_frame_size ? 0 : reg_offset - frame_size);

	  for (i = 0; i < 32; ++i)
	    if (imask & (1UL << i))
	      reg_offset += 8;
	}

      if (fmask)
	fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
		 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
    }

#if TARGET_ABI_OPEN_VMS
  /* Ifdef'ed because link_section is only available then.  */
  switch_to_section (readonly_data_section);
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  alpha_need_linkage (fnname, 1);
  switch_to_section (text_section);
#endif
}
/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (TARGET_ABI_WINDOWS_NT)
    fputs ("\t.prologue 0\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
	     alpha_function_needs_gp || cfun->is_thunk);
}
/* Write function epilogue.  */

/* ??? At some point we will want to support full unwind, and so will
   need to mark the epilogue as well.  At the moment, we just confuse
   dwarf2out.  */
#undef FRP
#define FRP(exp) exp

void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem;
  rtx eh_ofs;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + crtl->args.pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ crtl->outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + crtl->args.pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
	reg_offset = 8;
      else
	reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
       || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (crtl->calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;

  if (!TARGET_ABI_UNICOSMK && sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if ((TARGET_ABI_OPEN_VMS
	   && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	  || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
	FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  HOST_WIDE_INT bias;

	  if (low + sa_size <= 0x8000)
	    bias = reg_offset - low, reg_offset = low;
	  else
	    bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 22);
	  sa_reg_exp = plus_constant (stack_pointer_rtx, bias);

	  FRP (emit_move_insn (sa_reg, sa_reg_exp));
	}

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
      if (! eh_ofs)
	set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
	if (imask & (1UL << i))
	  {
	    if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
	      fp_offset = reg_offset;
	    else
	      {
		mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
		set_mem_alias_set (mem, alpha_sr_alias_set);
		FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	      }
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; ++i)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* Restore callee-saved general-purpose registers.  */

      reg_offset = -56;

      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	    reg_offset -= 8;
	  }

      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset -= 8;
	  }

      /* Restore the return address from the DSIB.  */

      mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
    }

  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  emit_move_insn (sp_adj1,
			  gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
	}

      /* If the stack size is large, begin computation into a temporary
	 register so as not to interfere with a potential fp restore,
	 which must be consecutive with an SP restore.  */
      if (frame_size < 32768
	  && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
	sp_adj2 = GEN_INT (frame_size);
      else if (TARGET_ABI_UNICOSMK)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
	  sp_adj2 = const0_rtx;
	}
      else if (frame_size < 0x40007fffL)
	{
	  int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

	  sp_adj2 = plus_constant (sp_adj1, frame_size - low);
	  if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
	    sp_adj1 = sa_reg;
	  else
	    {
	      sp_adj1 = gen_rtx_REG (DImode, 23);
	      FRP (emit_move_insn (sp_adj1, sp_adj2));
	    }
	  sp_adj2 = GEN_INT (low);
	}
      else
	{
	  rtx tmp = gen_rtx_REG (DImode, 23);
	  FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
					       3, false));
	  if (!sp_adj2)
	    {
	      /* We can't drop new things to memory this late, afaik,
		 so build it up by pieces.  */
	      FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
							-(frame_size < 0)));
	      gcc_assert (sp_adj2);
	    }
	}

      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (TARGET_ABI_UNICOSMK)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode,
			     plus_constant (hard_frame_pointer_rtx, -16));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (fp_is_frame_pointer)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (TARGET_ABI_OPEN_VMS)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
	FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
      else
	FRP (emit_move_insn (stack_pointer_rtx,
			     gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
    }
  else
    {
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}
      else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
	{
	  /* Decrement the frame pointer if the function does not have a
	     frame.  */

	  emit_insn (gen_blockage ());
	  FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				      hard_frame_pointer_rtx, constm1_rtx)));
	}
    }
}
/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
  rtx insn;

  /* We output a nop after noreturn calls at the very end of the function to
     ensure that the return address always remains in the caller's code range,
     as not doing so might confuse unwinding engines.  */
  insn = get_last_insn ();
  if (!INSN_P (insn))
    insn = prev_active_insn (insn);
  if (insn && CALL_P (insn))
    output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);

  free_after_compilation (cfun);

#if TARGET_ABI_OPEN_VMS
  alpha_write_linkage (file, fnname, decl);
#endif

  /* End the function.  */
  if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;

  /* Output jump tables and the static subroutine information block.  */
  if (TARGET_ABI_UNICOSMK)
    {
      unicosmk_output_ssib (file, fnname);
      unicosmk_output_deferred_case_vectors (file);
    }
}
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

#if TARGET_ABI_OSF
static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			   tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this_rtx, insn, funexp;

  gcc_assert (cfun->is_thunk);

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 17);
  else
    this_rtx = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (hi + lo == delta)
    {
      if (hi)
	emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
      if (lo)
	emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
					   delta, -(delta < 0));
      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }
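  /* Worked example (illustrative): for delta == 0x12345 we get
     lo == 0x2345 and hi == 0x10000, hi + lo == delta, so the
     adjustment becomes "ldah $16,1($16)" plus "lda $16,9029($16)"
     instead of a full 64-bit constant load.  */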
  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
	{
	  if (hi)
	    emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
	}
      else
	{
	  tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
					    vcall_offset, -(vcall_offset < 0));
	  emit_insn (gen_adddi3 (tmp, tmp, tmp2));
	  lo = 0;
	}
      if (lo)
	tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
	tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */
/* Debugging support.  */

/* Count the number of sdb related labels that are generated (to find
   block start and end boundaries).  */

int sdb_label_count = 0;

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;

/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
      if (!TARGET_GAS && write_symbols == DBX_DEBUG)
	fprintf (stream, "\t#@stabs\n");
    }

  else if (write_symbols == DBX_DEBUG)
    /* dbxout.c will emit an appropriate .stabs directive.  */
    ;

  else if (name != current_function_file
	   && strcmp (name, current_function_file) != 0)
    {
      if (inside_function && ! TARGET_GAS)
	fprintf (stream, "\t#.file\t%d ", num_source_filenames);
      else
	{
	  ++num_source_filenames;
	  current_function_file = name;
	  fprintf (stream, "\t.file\t%d ", num_source_filenames);
	}

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}
/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};

/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
	 ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
	int regno = REGNO (x);
	unsigned long mask = ((unsigned long) 1) << (regno % 32);

	if (regno == 31 || regno == 63)
	  break;

	if (set)
	  {
	    if (regno < 32)
	      sum->defd.i |= mask;
	    else
	      sum->defd.fp |= mask;
	  }
	else
	  {
	    if (regno < 32)
	      sum->used.i  |= mask;
	    else
	      sum->used.fp |= mask;
	  }
      }
      break;

    case MEM:
      if (set)
	sum->defd.mem = 1;
      else
	sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	switch (format_ptr[i])
	  {
	  case 'e':
	    summarize_insn (XEXP (x, i), sum, 0);
	    break;

	  case 'E':
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      summarize_insn (XVECEXP (x, i, j), sum, 0);
	    break;

	  case 'i':
	    break;

	  default:
	    gcc_unreachable ();
	  }
    }
}
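/* Illustrative example (not from the original source): for the insn
   (set (reg:DI 1) (plus:DI (reg:DI 2) (reg:DI 3))), summarize_insn
   sets bit 1 of sum->defd.i and bits 2 and 3 of sum->used.i; a store
   through memory would set sum->defd.mem instead.  */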
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */

static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;

  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (NOTE_P (i))
	{
	  switch (NOTE_KIND (i))
	    {
	    case NOTE_INSN_EH_REGION_BEG:
	      exception_nesting++;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EH_REGION_END:
	      exception_nesting--;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	      if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
		goto close_shadow;
	      break;
	    }
	}
      else if (trap_pending)
	{
	  if (alpha_tp == ALPHA_TP_FUNC)
	    {
	      if (JUMP_P (i)
		  && GET_CODE (PATTERN (i)) == RETURN)
		goto close_shadow;
	    }
	  else if (alpha_tp == ALPHA_TP_INSN)
	    {
	      if (optimize > 0)
		{
		  struct shadow_summary sum;

		  sum.used.i = 0;
		  sum.used.fp = 0;
		  sum.used.mem = 0;
		  sum.defd = sum.used;

		  switch (GET_CODE (i))
		    {
		    case INSN:
		      /* Annoyingly, get_attr_trap will die on these.  */
		      if (GET_CODE (PATTERN (i)) == USE
			  || GET_CODE (PATTERN (i)) == CLOBBER)
			break;

		      summarize_insn (PATTERN (i), &sum, 0);

		      if ((sum.defd.i & shadow.defd.i)
			  || (sum.defd.fp & shadow.defd.fp))
			{
			  /* (c) would be violated */
			  goto close_shadow;
			}

		      /* Combine shadow with summary of current insn: */
		      shadow.used.i   |= sum.used.i;
		      shadow.used.fp  |= sum.used.fp;
		      shadow.used.mem |= sum.used.mem;
		      shadow.defd.i   |= sum.defd.i;
		      shadow.defd.fp  |= sum.defd.fp;
		      shadow.defd.mem |= sum.defd.mem;

		      if ((sum.defd.i & shadow.used.i)
			  || (sum.defd.fp & shadow.used.fp)
			  || (sum.defd.mem & shadow.used.mem))
			{
			  /* (a) would be violated (also takes care of (b)) */
			  gcc_assert (get_attr_trap (i) != TRAP_YES
				      || (!(sum.defd.i & sum.used.i)
					  && !(sum.defd.fp & sum.used.fp)));

			  goto close_shadow;
			}
		      break;

		    case JUMP_INSN:
		    case CALL_INSN:
		    case CODE_LABEL:
		      goto close_shadow;

		    default:
		      gcc_unreachable ();
		    }
		}
	      else
		{
		close_shadow:
		  n = emit_insn_before (gen_trapb (), i);
		  PUT_MODE (n, TImode);
		  PUT_MODE (i, TImode);
		  trap_pending = 0;
		  shadow.used.i = 0;
		  shadow.used.fp = 0;
		  shadow.used.mem = 0;
		  shadow.defd = shadow.used;
		}
	    }
	}

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
	  && NONJUMP_INSN_P (i)
	  && GET_CODE (PATTERN (i)) != USE
	  && GET_CODE (PATTERN (i)) != CLOBBER
	  && get_attr_trap (i) == TRAP_YES)
	{
	  if (optimize && !trap_pending)
	    summarize_insn (PATTERN (i), &shadow, 0);
	  trap_pending = 1;
	}
    }
}
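/* An illustrative trap shadow (hypothetical assembly):

       addt/su  $f1,$f2,$f3    # potentially trapping FP add
       stq      $4,8($30)      # still inside the shadow
       trapb                   # closes the shadow

   By rule (c) no other instruction in the shadow may also write $f3,
   and by rule (d) no branch may appear before the trapb.  */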
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};

static enum alphaev4_pipe
alphaev4_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:		/* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:		/* fake */
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}

static enum alphaev5_pipe
alphaev5_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LD_L:
    case TYPE_LDSYM:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:		/* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
	{
	case EV4_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	case EV4_IBX:
	  if (in_use & EV4_IB0)
	    {
	      if (in_use & EV4_IB1)
		goto done;
	      in_use |= EV4_IB1;
	    }
	  else
	    in_use |= EV4_IB0 | EV4_IBX;
	  break;

	case EV4_IB0:
	  if (in_use & EV4_IB0)
	    {
	      if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
		goto done;
	      in_use |= EV4_IB1;
	    }
	  in_use |= EV4_IB0;
	  break;

	case EV4_IB1:
	  if (in_use & EV4_IB1)
	    goto done;
	  in_use |= EV4_IB1;
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (JUMP_P (insn))
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev5_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev5_pipe pipe;

      pipe = alphaev5_insn_pipe (insn);
      switch (pipe)
	{
	case EV5_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	/* ??? Most of the places below, we would like to assert never
	   happen, as it would indicate an error either in Haifa, or
	   in the scheduling description.  Unfortunately, Haifa never
	   schedules the last instruction of the BB, so we don't have
	   an accurate TI bit to go off.  */
	case EV5_E01:
	  if (in_use & EV5_E0)
	    {
	      if (in_use & EV5_E1)
		goto done;
	      in_use |= EV5_E1;
	    }
	  else
	    in_use |= EV5_E0 | EV5_E01;
	  break;

	case EV5_E0:
	  if (in_use & EV5_E0)
	    {
	      if (!(in_use & EV5_E01) || (in_use & EV5_E1))
		goto done;
	      in_use |= EV5_E1;
	    }
	  in_use |= EV5_E0;
	  break;

	case EV5_E1:
	  if (in_use & EV5_E1)
	    goto done;
	  in_use |= EV5_E1;
	  break;

	case EV5_FAM:
	  if (in_use & EV5_FA)
	    {
	      if (in_use & EV5_FM)
		goto done;
	      in_use |= EV5_FM;
	    }
	  else
	    in_use |= EV5_FA | EV5_FAM;
	  break;

	case EV5_FA:
	  if (in_use & EV5_FA)
	    goto done;
	  in_use |= EV5_FA;
	  break;

	case EV5_FM:
	  if (in_use & EV5_FM)
	    goto done;
	  in_use |= EV5_FM;
	  break;

	case EV5_NONE:
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      /* ??? If this is predicted not-taken, slotting continues, except
	 that no more IBR, FBR, or JSR insns may be slotted.  */
      if (JUMP_P (insn))
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}
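/* Illustrative example (not from the original source): on EV5, if the
   previous group left E1 free, alphaev5_next_nop fills it with a nop;
   once E1 is taken it tries fnop for the FA and then FM pipes when FP
   insns are enabled, and finally falls back to unop.  */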
/* The instruction group alignment main loop.  */

static void
alpha_align_insns (unsigned int max_align,
		   rtx (*next_group) (rtx, int *, int *),
		   rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx i, next;

  /* Let shorten branches care for assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (NOTE_P (i))
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (LABEL_P (i))
	{
	  unsigned int new_align = 1 << label_to_alignment (i);

	  if (new_align >= align)
	    {
	      align = new_align < max_align ? new_align : max_align;
	      ofs = 0;
	    }

	  else if (ofs & (new_align-1))
	    ofs = (ofs | (new_align-1)) + 1;
	  gcc_assert (!len);
	}

      /* Handle complex instructions specially.  */
      else if (in_use == 0)
	{
	  /* Asms will have length < 0.  This is a signal that we have
	     lost alignment knowledge.  Assume, however, that the asm
	     will not mis-align instructions.  */
	  if (len < 0)
	    {
	      ofs = 0;
	      align = 4;
	      len = 0;
	    }
	}

      /* If the known alignment is smaller than the recognized insn group,
	 realign the output.  */
      else if ((int) align < len)
	{
	  unsigned int new_log_align = len > 8 ? 4 : 3;
	  rtx prev, where;

	  where = prev = prev_nonnote_insn (i);
	  if (!where || !LABEL_P (where))
	    where = i;

	  /* Can't realign between a call and its gp reload.  */
	  if (! (TARGET_EXPLICIT_RELOCS
		 && prev && CALL_P (prev)))
	    {
	      emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
	      align = 1 << new_log_align;
	      ofs = 0;
	    }
	}

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
	ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
	 we need to add padding to keep the group together.  Rather
	 than simply leaving the insn filling to the assembler, we
	 can make use of the knowledge of what sorts of instructions
	 were issued in the previous group to make sure that all of
	 the added nops are really free.  */
      else if (ofs + len > (int) align)
	{
	  int nop_count = (align - ofs) / 4;
	  rtx where;

	  /* Insert nops before labels, branches, and calls to truly merge
	     the execution of the nops with the previous instruction group.  */
	  where = prev_nonnote_insn (i);
	  if (where)
	    {
	      if (LABEL_P (where))
		{
		  rtx where2 = prev_nonnote_insn (where);
		  if (where2 && JUMP_P (where2))
		    where = where2;
		}
	      else if (NONJUMP_INSN_P (where))
		where = i;
	    }
	  else
	    where = i;

	  do
	    emit_insn_before ((*next_nop)(&prev_in_use), where);
	  while (--nop_count);
	  ofs = 0;
	}

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}
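/* Worked example (illustrative): with align == 16, ofs == 12 and an
   8-byte group next, ofs + len > align, so (16 - 12) / 4 == 1 nop is
   emitted -- chosen by next_nop to land in a free slot of the previous
   group -- and the new group then starts on a fresh boundary.  */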
/* Insert an unop between a noreturn function call and GP load.  */

static void
alpha_pad_noreturn (void)
{
  rtx insn, next;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (!CALL_P (insn)
	  || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
	continue;

      next = next_active_insn (insn);

      if (next)
	{
	  rtx pat = PATTERN (next);

	  if (GET_CODE (pat) == SET
	      && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
	      && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
	    emit_insn_after (gen_unop (), insn);
	}
    }
}
/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  /* Workaround for a linker error that triggers when an
     exception handler immediately follows a noreturn function.

     The instruction stream from an object file:

  54:   00 40 5b 6b     jsr     ra,(t12),58 <__func+0x58>
  58:   00 00 ba 27     ldah    gp,0(ra)
  5c:   00 00 bd 23     lda     gp,0(gp)
  60:   00 00 7d a7     ldq     t12,0(gp)
  64:   00 40 5b 6b     jsr     ra,(t12),68 <__func+0x68>

     was converted in the final link pass to:

   fdb24:       a0 03 40 d3     bsr     ra,fe9a8 <_called_func+0x8>
   fdb28:       00 00 fe 2f     unop
   fdb2c:       00 00 fe 2f     unop
   fdb30:       30 82 7d a7     ldq     t12,-32208(gp)
   fdb34:       00 40 5b 6b     jsr     ra,(t12),fdb38 <__func+0x68>

     GP load instructions were wrongly cleared by the linker relaxation
     pass.  This workaround prevents removal of GP loads by inserting
     an unop instruction between a noreturn function call and
     exception handler prologue.  */

  if (current_function_has_exception_handlers ())
    alpha_pad_noreturn ();

  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows ();

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_tune == PROCESSOR_EV4)
	alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_tune == PROCESSOR_EV5)
	alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
    }
}
#if !TARGET_ABI_UNICOSMK

static void
alpha_file_start (void)
{
#ifdef OBJECT_FORMAT_ELF
  /* If emitting dwarf2 debug information, we cannot generate a .file
     directive to start the file, as it will conflict with dwarf2out
     file numbers.  So it's only useful when emitting mdebug output.  */
  targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
#endif

  default_file_start ();
#ifdef MS_STAMP
  fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (!TARGET_ABI_OPEN_VMS)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
	arch = "ev6";
      else if (TARGET_MAX)
	arch = "pca56";
      else if (TARGET_BWX)
	arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
	arch = "ev5";
      else
	arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
#endif
#ifdef OBJECT_FORMAT_ELF
/* Since we don't have a .dynbss section, we should not allow global
   relocations in the .rodata section.  */

static int
alpha_elf_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 2;
}

/* Return a section for X.  The only special thing we do here is to
   honor small data.  */

static section *
alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
			      unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

static unsigned int
alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags (decl, name, reloc);
  return flags;
}
#endif /* OBJECT_FORMAT_ELF */
/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};

struct GTY(()) alpha_links
{
  int num;
  rtx linkage;
  enum links_kind lkind;
  enum reloc_kind rkind;
};

struct GTY(()) alpha_funcs
{
  int num;
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
    links;
};

static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
  splay_tree alpha_links_tree;
static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
  splay_tree alpha_funcs_tree;

static GTY(()) int alpha_funcs_num;
#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
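/* Illustration of the AI register value built above, derived from the
   shifts in the loop: bits 0-7 hold the argument count and each of the
   first six arguments gets a 3-bit type field starting at bit 8.
   Assuming the avms_arg_type enumerators give I64 == 0 and FT == 5,
   a call f (int, double) compiled without -mfloat-vax would yield
   2 | (I64 << 8) | (FT << 11).  */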
/* Make (or fake) .linkage entry for function call.

   IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.

   Return an SYMBOL_REF rtx for the linkage.  */

rtx
alpha_need_linkage (const char *name, int is_local)
{
  splay_tree_node node;
  struct alpha_links *al;

  if (name[0] == '*')
    name++;

  if (is_local)
    {
      struct alpha_funcs *cfaf;

      if (!alpha_funcs_tree)
        alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
                                               splay_tree_compare_pointers);

      cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));

      cfaf->links = 0;
      cfaf->num = ++alpha_funcs_num;

      splay_tree_insert (alpha_funcs_tree,
                         (splay_tree_key) current_function_decl,
                         (splay_tree_value) cfaf);
    }

  if (alpha_links_tree)
    {
      /* Is this name already defined?  */

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
        {
          al = (struct alpha_links *) node->value;
          if (is_local)
            {
              /* Defined here but external assumed.  */
              if (al->lkind == KIND_EXTERN)
                al->lkind = KIND_LOCAL;
            }
          else
            {
              /* Used here but unused assumed.  */
              if (al->lkind == KIND_UNUSED)
                al->lkind = KIND_LOCAL;
            }
          return al->linkage;
        }
    }
  else
    alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
  name = ggc_strdup (name);

  /* Assume external if no definition.  */
  al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);

  /* Ensure we have an IDENTIFIER so assemble_name can mark it used.  */
  get_identifier (name);

  /* Construct a SYMBOL_REF for us to call.  */
  {
    size_t name_len = strlen (name);
    char *linksym = XALLOCAVEC (char, name_len + 6);
    linksym[0] = '$';
    memcpy (linksym + 1, name, name_len);
    memcpy (linksym + 1 + name_len, "..lk", 5);
    al->linkage = gen_rtx_SYMBOL_REF (Pmode,
                                      ggc_alloc_string (linksym, name_len + 5));
  }

  splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
                     (splay_tree_value) al);

  return al->linkage;
}
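/* Naming scheme, summarizing the code above and in alpha_use_linkage
   below: the per-file linkage symbol is "$NAME..lk", and each
   referencing function additionally gets "$N..NAME..lk", where N is
   that function's sequence number.  */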
rtx
alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
{
  splay_tree_node cfunnode;
  struct alpha_funcs *cfaf;
  struct alpha_links *al;
  const char *name = XSTR (linkage, 0);

  cfaf = (struct alpha_funcs *) 0;
  al = (struct alpha_links *) 0;

  cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
  cfaf = (struct alpha_funcs *) cfunnode->value;

  if (cfaf->links)
    {
      splay_tree_node lnode;

      /* Is this name already defined?  */

      lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
      if (lnode)
        al = (struct alpha_links *) lnode->value;
    }
  else
    cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  if (!al)
    {
      size_t name_len;
      size_t buflen;
      char buf[512];
      char *linksym;
      splay_tree_node node = 0;
      struct alpha_links *anl;

      if (name[0] == '*')
        name++;

      name_len = strlen (name);

      al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
      al->num = cfaf->num;

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
        {
          anl = (struct alpha_links *) node->value;
          al->lkind = anl->lkind;
        }

      sprintf (buf, "$%d..%s..lk", cfaf->num, name);
      buflen = strlen (buf);
      linksym = XALLOCAVEC (char, buflen + 1);
      memcpy (linksym, buf, buflen + 1);

      al->linkage = gen_rtx_SYMBOL_REF
        (Pmode, ggc_alloc_string (linksym, buflen + 1));

      splay_tree_insert (cfaf->links, (splay_tree_key) name,
                         (splay_tree_value) al);
    }

  if (rflag)
    al->rkind = KIND_CODEADDR;
  else
    al->rkind = KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
  else
    return al->linkage;
}
static int
alpha_write_one_linkage (splay_tree_node node, void *data)
{
  const char *const name = (const char *) node->key;
  struct alpha_links *link = (struct alpha_links *) node->value;
  FILE *stream = (FILE *) data;

  fprintf (stream, "$%d..%s..lk:\n", link->num, name);
  if (link->rkind == KIND_CODEADDR)
    {
      if (link->lkind == KIND_LOCAL)
        {
          /* Local and used.  */
          fprintf (stream, "\t.quad %s..en\n", name);
        }
      else
        {
          /* External and used, request code address.  */
          fprintf (stream, "\t.code_address %s\n", name);
        }
    }
  else
    {
      if (link->lkind == KIND_LOCAL)
        {
          /* Local and used, build linkage pair.  */
          fprintf (stream, "\t.quad %s..en\n", name);
          fprintf (stream, "\t.quad %s\n", name);
        }
      else
        {
          /* External and used, request linkage pair.  */
          fprintf (stream, "\t.linkage %s\n", name);
        }
    }

  return 0;
}
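/* Illustrative output for a local symbol "foo" referenced by function
   number 1:

	$1..foo..lk:
		.quad foo..en		(entry point)
		.quad foo		(procedure descriptor label)

   For external symbols the assembler is instead asked to fill in the
   slot via .code_address or .linkage.  */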
static void
alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
{
  splay_tree_node node;
  struct alpha_funcs *func;

  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

  node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
  func = (struct alpha_funcs *) node->value;

  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
           alpha_procedure_type == PT_STACK ? "stack"
           : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (func->links)
    {
      splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
      /* splay_tree_delete (func->links); */
    }
}
/* Given a decl, a section name, and whether the decl initializer
   has relocs, choose attributes for the section.  */

#define SECTION_VMS_OVERLAY SECTION_FORGET
#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)

static unsigned int
vms_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_OVERLAY;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_GLOBAL;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_INITIALIZE;

  return flags;
}
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
                       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_VMS_OVERLAY)
    fprintf (asm_out_file, ",OVR");
  if (flags & SECTION_VMS_GLOBAL)
    fprintf (asm_out_file, ",GBL");
  if (flags & SECTION_VMS_INITIALIZE)
    fprintf (asm_out_file, ",NOMOD");
  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
                    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
                   tree cfundecl ATTRIBUTE_UNUSED,
                   int lflag ATTRIBUTE_UNUSED,
                   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */
#if TARGET_ABI_UNICOSMK

/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in two
   registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}
/* Define the offset between two registers, one to be eliminated, and the
   other its replacement, at the start of a routine.  */

int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (crtl->outgoing_args_size)
            + ALPHA_ROUND (get_frame_size()));
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (fixed_size)
            + ALPHA_ROUND (get_frame_size()
                           + crtl->outgoing_args_size));
  else
    gcc_unreachable ();
}
/* Output the module name for .ident and .end directives.  We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */

static void
unicosmk_output_module_name (FILE *file)
{
  const char *name = lbasename (main_input_filename);
  unsigned len = strlen (name);
  char *clean_name = alloca (len + 2);
  char *ptr = clean_name;

  /* CAM only accepts module names that start with a letter or '$'.  We
     prefix the module name with a '$' if necessary.  */

  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
  clean_symbol_name (clean_name);
  fputs (clean_name, file);
}
/* Output the definition of a common variable.  */

void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
  printf ("T3E__: common %s\n", name);

  in_section = NULL;
  fputs("\t.endp\n\n\t.psect ", file);
  assemble_name(file, name);
  fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf(file, "\t.byte\t0:%d\n", size);

  /* Mark the symbol as defined in this module.  */
  name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}
#define SECTION_PUBLIC SECTION_MACH_DEP
#define SECTION_MAIN (SECTION_PUBLIC << 1)
static int current_section_align;

/* A get_unnamed_section callback for switching to the text section.  */

static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 0;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}

/* A get_unnamed_section callback for switching to the data section.  */

static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 1;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}
/* Implement TARGET_ASM_INIT_SECTIONS.

   The Cray assembler is really weird with respect to sections.  It has only
   named sections and you can't reopen a section once it has been closed.
   This means that we have to generate unique names whenever we want to
   reenter the text or the data section.  */

static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
                                      unicosmk_output_text_section_asm_op,
                                      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
                                      unicosmk_output_data_section_asm_op,
                                      NULL);
  readonly_data_section = data_section;
}
static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
                             int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
        current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
        flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);

  if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}
/* Generate a section name for decl and associate it with the
   declaration.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = default_strip_name_encoding (name);
  len = strlen (name);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;

      /* It is essential that we prefix the section name here because
         otherwise the section names generated for constructors and
         destructors confuse collect2.  */

      string = alloca (len + 6);
      sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
  else if (TREE_PUBLIC (decl))
    DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string;

      string = alloca (len + 6);
      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
                            tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;

  /* Close the previous section.  */

  fputs ("\t.endp\n\n", asm_out_file);

  /* Find out what kind of section we are opening.  */

  if (flags & SECTION_MAIN)
    fputs ("\t.start\tmain\n", asm_out_file);

  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";

  if (current_section_align != 0)
    fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
             current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}
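/* For example, a public data section aligned to 2**3 bytes comes out as
   "\t.psect\tname,3,common" (illustrative; see the format strings
   above).  */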
static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}

/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */

void
unicosmk_output_align (FILE *file, int align)
{
  if (inside_function)
    fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}
/* Add a case vector to the current function's list of deferred case
   vectors.  Case vectors have to be put into a separate section because CAM
   does not allow data definitions in code sections.  */

void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
  struct machine_function *machine = cfun->machine;

  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
                                          machine->addr_list);
}
/* Output a case vector.  */

static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
  rtx lab  = XEXP (vec, 0);
  rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;

  (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
        (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
}
/* Output current function's deferred case vectors.  */

static void
unicosmk_output_deferred_case_vectors (FILE *file)
{
  struct machine_function *machine = cfun->machine;
  rtx t;

  if (machine->addr_list == NULL_RTX)
    return;

  switch_to_section (data_section);
  for (t = machine->addr_list; t; t = XEXP (t, 1))
    unicosmk_output_addr_vec (file, XEXP (t, 0));
}
/* Generate the name of the SSIB section for the current function.  */

#define SSIB_PREFIX "__SSIB_"
#define SSIB_PREFIX_LEN 7

static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than that
     anyway.  */

  static char name [256];

  rtx x;
  const char *fnname;
  int len;

  x = DECL_RTL (cfun->decl);
  gcc_assert (MEM_P (x));
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  fnname = XSTR (x, 0);

  len = strlen (fnname);
  if (len + SSIB_PREFIX_LEN > 255)
    len = 255 - SSIB_PREFIX_LEN;

  strcpy (name, SSIB_PREFIX);
  strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name [len + SSIB_PREFIX_LEN] = 0;

  return name;
}
/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame.  If the
   subroutine doesn't have a frame, simply increment $15.  */

static void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */

      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                  GEN_INT (-64))));
      emit_insn (gen_blockage ());

      /* Save the return address.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
      (*imaskP) &= ~(1UL << REG_RA);

      /* Save the old frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
      (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);

      emit_insn (gen_blockage ());

      /* Store the SSIB pointer.  */

      ssib_name = ggc_strdup (unicosmk_ssib_name ());
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
      set_mem_alias_set (mem, alpha_sr_alias_set);

      FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
                           gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));

      /* Save the CIW index.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));

      emit_insn (gen_blockage ());

      /* Set the new frame pointer.  */

      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
         have a frame.  */

      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  hard_frame_pointer_rtx, const1_rtx)));
    }
}
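/* Resulting DSIB layout relative to the new stack pointer, derived from
   the offsets used above:

	sp+56	return address ($26)
	sp+48	old frame pointer ($15)
	sp+32	pointer to this function's SSIB
	sp+24	CIW index (stored from $25)

   The new frame pointer is set to sp+64, just above the block.  */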
/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  int len;
  int i;
  rtx x;
  rtx ciw;
  struct machine_function *machine = cfun->machine;

  in_section = NULL;
  fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
           unicosmk_ssib_name ());

  /* Some required stuff and the function name length.  */

  len = strlen (fnname);
  fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);

  /* Saved registers
     ??? We don't do that yet.  */

  fputs ("\t.quad\t0\n", file);

  /* Function address.  */

  fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
  fputs ("\t.quad\t0\n", file);

  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */

  for (i = 0; i < len; i++)
    fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
  if ((len % 8) == 0)
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);

  /* All call information words used in the function.  */

  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      ciw = XEXP (x, 0);
#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
               CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}
/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */

rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
  struct machine_function *machine = cfun->machine;

  node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;

  machine->last_ciw = node;
  ++machine->ciw_count;

  return GEN_INT (machine->ciw_count
                  + strlen (current_function_name ())/8 + 5);
}
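/* The returned index counts quadwords from the start of the SSIB:
   unicosmk_output_ssib emits five fixed quadwords followed by the
   function name padded to a quadword boundary (strlen (name) / 8 + 1
   quadwords), so CIW number ciw_count lands at slot
   ciw_count + strlen (name) / 8 + 5.  */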
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of file.  */

/* List of identifiers for which an extern declaration might have to be
   emitted.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;

/* Output extern declarations which are required for every asm file.  */

static void
unicosmk_output_default_externs (FILE *file)
{
  static const char *const externs[] =
    { "__T3E_MISMATCH" };

  int i;
  int n;

  n = ARRAY_SIZE (externs);

  for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}
/* Output extern declarations for global symbols which have been
   referenced but not defined.  */

static void
unicosmk_output_externs (FILE *file)
{
  struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
      /* We have to strip the encoding and possibly remove user_label_prefix
         from the identifier in order to handle -fleading-underscore and
         explicit asm names correctly (cf. gcc.dg/asm-names-1.c).  */
      real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
          && !memcmp (real_name, user_label_prefix, len))
        real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
        {
          TREE_ASM_WRITTEN (name_tree) = 1;
          fputs ("\t.extern\t", file);
          assemble_name (file, p->name);
          putc ('\n', file);
        }
    }
}
/* Record an extern.  */

void
unicosmk_add_extern (const char *name)
{
  struct unicosmk_extern_list *p;

  p = (struct unicosmk_extern_list *)
       xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}
/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands.  We have
   to replace such identifiers with DEX expressions.  */

/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions.  The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;
/* Check if NAME must be replaced by a DEX expression.  */

static int
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    ++name;

  if (name[0] == '$')
    ++name;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return 0;

  switch (name[1])
    {
    case '1':  case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));

    case '3':
      return (name[2] == '\0'
               || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));

    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}
/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */

static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x, 0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
        return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;

  ++unicosmk_dex_count;
  return unicosmk_dex_count;
}
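/* DEX numbers are assigned in order of first use, starting at 1.  New
   entries are pushed onto the front of unicosmk_dex_list, so the loop
   above recovers the number of an already-known symbol by counting down
   from unicosmk_dex_count while walking the list.  */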
/* Output the DEX definitions for this file.  */

static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;

  if (unicosmk_dex_list == NULL)
    return;

  fprintf (file, "\t.dexstart\n");

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }

  fprintf (file, "\t.dexend\n");
}
/* Output text to appear at the beginning of an assembler file.  */

static void
unicosmk_file_start (void)
{
  int i;

  fputs ("\t.ident\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  fputs ("\n\n", asm_out_file);

  /* The Unicos/Mk assembler uses different register names.  Instead of trying
     to support them, we simply use micro definitions.  */

  /* CAM has different register names: rN for the integer register N and fN
     for the floating-point register N.  Instead of trying to use these in
     alpha.md, we define the symbols $N and $fN to refer to the appropriate
     register.  */

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$%d <- r%d\n", i, i);

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$f%d <- f%d\n", i, i);

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes, which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
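  /* The filler instruction "bis r31,r31,r31" ORs the always-zero register
     $31 into itself, i.e. the canonical Alpha nop, and the .repeat count
     is divided by 4 because every Alpha instruction is four bytes.  */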
  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section.  We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that.  I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}

/* Output text to appear at the end of an assembler file.  This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */

  unicosmk_output_externs (asm_out_file);

  /* Output dex definitions used for functions whose names conflict with
     register names.  */

  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}

#else
static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
                      const char * fnname ATTRIBUTE_UNUSED)
{}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
         for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
         remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
# undef TARGET_ASM_FUNCTION_RODATA_SECTION
# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"