1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992-2013 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "output.h"
33 #include "insn-attr.h"
34 #include "flags.h"
35 #include "recog.h"
36 #include "expr.h"
37 #include "optabs.h"
38 #include "reload.h"
39 #include "obstack.h"
40 #include "except.h"
41 #include "function.h"
42 #include "diagnostic-core.h"
43 #include "ggc.h"
44 #include "tm_p.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "common/common-target.h"
48 #include "debug.h"
49 #include "langhooks.h"
50 #include "splay-tree.h"
51 #include "gimple.h"
52 #include "tree-flow.h"
53 #include "tree-ssanames.h"
54 #include "tree-stdarg.h"
55 #include "tm-constrs.h"
56 #include "df.h"
57 #include "libfuncs.h"
58 #include "opts.h"
59 #include "params.h"
61 /* Specify which cpu to schedule for. */
62 enum processor_type alpha_tune;
64 /* Which cpu we're generating code for. */
65 enum processor_type alpha_cpu;
67 static const char * const alpha_cpu_name[] =
69 "ev4", "ev5", "ev6"
72 /* Specify how accurate floating-point traps need to be. */
74 enum alpha_trap_precision alpha_tp;
76 /* Specify the floating-point rounding mode. */
78 enum alpha_fp_rounding_mode alpha_fprm;
80 /* Specify which things cause traps. */
82 enum alpha_fp_trap_mode alpha_fptm;
84 /* Nonzero if inside of a function, because the Alpha asm can't
85 handle .files inside of functions. */
87 static int inside_function = FALSE;
89 /* The number of cycles of latency we should assume on memory reads. */
91 int alpha_memory_latency = 3;
93 /* Whether the function needs the GP. */
95 static int alpha_function_needs_gp;
97 /* The assembler name of the current function. */
99 static const char *alpha_fnname;
101 /* The next explicit relocation sequence number. */
102 extern GTY(()) int alpha_next_sequence_number;
103 int alpha_next_sequence_number = 1;
105 /* The literal and gpdisp sequence numbers for this insn, as printed
106 by %# and %* respectively. */
107 extern GTY(()) int alpha_this_literal_sequence_number;
108 extern GTY(()) int alpha_this_gpdisp_sequence_number;
109 int alpha_this_literal_sequence_number;
110 int alpha_this_gpdisp_sequence_number;
112 /* Costs of various operations on the different architectures. */
114 struct alpha_rtx_cost_data
116 unsigned char fp_add;
117 unsigned char fp_mult;
118 unsigned char fp_div_sf;
119 unsigned char fp_div_df;
120 unsigned char int_mult_si;
121 unsigned char int_mult_di;
122 unsigned char int_shift;
123 unsigned char int_cmov;
124 unsigned short int_div;
127 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
129 { /* EV4 */
130 COSTS_N_INSNS (6), /* fp_add */
131 COSTS_N_INSNS (6), /* fp_mult */
132 COSTS_N_INSNS (34), /* fp_div_sf */
133 COSTS_N_INSNS (63), /* fp_div_df */
134 COSTS_N_INSNS (23), /* int_mult_si */
135 COSTS_N_INSNS (23), /* int_mult_di */
136 COSTS_N_INSNS (2), /* int_shift */
137 COSTS_N_INSNS (2), /* int_cmov */
138 COSTS_N_INSNS (97), /* int_div */
140 { /* EV5 */
141 COSTS_N_INSNS (4), /* fp_add */
142 COSTS_N_INSNS (4), /* fp_mult */
143 COSTS_N_INSNS (15), /* fp_div_sf */
144 COSTS_N_INSNS (22), /* fp_div_df */
145 COSTS_N_INSNS (8), /* int_mult_si */
146 COSTS_N_INSNS (12), /* int_mult_di */
147 COSTS_N_INSNS (1) + 1, /* int_shift */
148 COSTS_N_INSNS (1), /* int_cmov */
149 COSTS_N_INSNS (83), /* int_div */
151 { /* EV6 */
152 COSTS_N_INSNS (4), /* fp_add */
153 COSTS_N_INSNS (4), /* fp_mult */
154 COSTS_N_INSNS (12), /* fp_div_sf */
155 COSTS_N_INSNS (15), /* fp_div_df */
156 COSTS_N_INSNS (7), /* int_mult_si */
157 COSTS_N_INSNS (7), /* int_mult_di */
158 COSTS_N_INSNS (1), /* int_shift */
159 COSTS_N_INSNS (2), /* int_cmov */
160 COSTS_N_INSNS (86), /* int_div */
164 /* Similar but tuned for code size instead of execution latency. The
165 extra +N is fractional cost tuning based on latency. It's used to
166 encourage use of cheaper insns like shift, but only if there's just
167 one of them. */
169 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
171 COSTS_N_INSNS (1), /* fp_add */
172 COSTS_N_INSNS (1), /* fp_mult */
173 COSTS_N_INSNS (1), /* fp_div_sf */
174 COSTS_N_INSNS (1) + 1, /* fp_div_df */
175 COSTS_N_INSNS (1) + 1, /* int_mult_si */
176 COSTS_N_INSNS (1) + 2, /* int_mult_di */
177 COSTS_N_INSNS (1), /* int_shift */
178 COSTS_N_INSNS (1), /* int_cmov */
179 COSTS_N_INSNS (6), /* int_div */
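   /* Worked illustration of the "+N" tuning above (assuming the usual
      COSTS_N_INSNS (N) == N * 4 scaling): a single shift costs 4, which is
      still below int_mult_si's 5, so mul-by-power-of-2 is strength-reduced
      even at -Os, but a two-insn shift+add sequence costs 8 and loses.  */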
182 /* Get the number of args of a function in one of two ways. */
183 #if TARGET_ABI_OPEN_VMS
184 #define NUM_ARGS crtl->args.info.num_args
185 #else
186 #define NUM_ARGS crtl->args.info
187 #endif
189 #define REG_PV 27
190 #define REG_RA 26
192 /* Declarations of static functions. */
193 static struct machine_function *alpha_init_machine_status (void);
194 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
196 #if TARGET_ABI_OPEN_VMS
197 static void alpha_write_linkage (FILE *, const char *);
198 static bool vms_valid_pointer_mode (enum machine_mode);
199 #else
200 #define vms_patch_builtins() gcc_unreachable()
201 #endif
203 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
204 /* Implement TARGET_MANGLE_TYPE. */
206 static const char *
207 alpha_mangle_type (const_tree type)
209 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
210 && TARGET_LONG_DOUBLE_128)
211 return "g";
213 /* For all other types, use normal C++ mangling. */
214 return NULL;
216 #endif
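/* Illustrative example (mangled names from memory, not from this file): with
   -mlong-double-128 in effect, "void f (long double)" should mangle as
   _Z1fg (the "g" returned above) instead of the default _Z1fe.  */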
218 /* Parse target option strings. */
220 static void
221 alpha_option_override (void)
223 static const struct cpu_table {
224 const char *const name;
225 const enum processor_type processor;
226 const int flags;
227 const unsigned short line_size; /* in bytes */
228 const unsigned short l1_size; /* in kb. */
229 const unsigned short l2_size; /* in kb. */
230 } cpu_table[] = {
231 /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
232 EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45
233 had 64k to 8M 8-byte direct Bcache. */
234 { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
235 { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
236 { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 },
238 /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
239 and 1M to 16M 64 byte L3 (not modeled).
240 PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
241 PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache. */
242 { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 },
243 { "21164", PROCESSOR_EV5, 0, 32, 8, 96 },
244 { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
245 { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
246 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
247 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
248 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
250 /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */
251 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
252 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
253 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
254 64, 64, 16*1024 },
255 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
256 64, 64, 16*1024 }
259 int const ct_size = ARRAY_SIZE (cpu_table);
260 int line_size = 0, l1_size = 0, l2_size = 0;
261 int i;
263 #ifdef SUBTARGET_OVERRIDE_OPTIONS
264 SUBTARGET_OVERRIDE_OPTIONS;
265 #endif
267 /* Default to full IEEE compliance mode for Go language. */
268 if (strcmp (lang_hooks.name, "GNU Go") == 0
269 && !(target_flags_explicit & MASK_IEEE))
270 target_flags |= MASK_IEEE;
272 alpha_fprm = ALPHA_FPRM_NORM;
273 alpha_tp = ALPHA_TP_PROG;
274 alpha_fptm = ALPHA_FPTM_N;
276 if (TARGET_IEEE)
278 alpha_tp = ALPHA_TP_INSN;
279 alpha_fptm = ALPHA_FPTM_SU;
281 if (TARGET_IEEE_WITH_INEXACT)
283 alpha_tp = ALPHA_TP_INSN;
284 alpha_fptm = ALPHA_FPTM_SUI;
287 if (alpha_tp_string)
289 if (! strcmp (alpha_tp_string, "p"))
290 alpha_tp = ALPHA_TP_PROG;
291 else if (! strcmp (alpha_tp_string, "f"))
292 alpha_tp = ALPHA_TP_FUNC;
293 else if (! strcmp (alpha_tp_string, "i"))
294 alpha_tp = ALPHA_TP_INSN;
295 else
296 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
299 if (alpha_fprm_string)
301 if (! strcmp (alpha_fprm_string, "n"))
302 alpha_fprm = ALPHA_FPRM_NORM;
303 else if (! strcmp (alpha_fprm_string, "m"))
304 alpha_fprm = ALPHA_FPRM_MINF;
305 else if (! strcmp (alpha_fprm_string, "c"))
306 alpha_fprm = ALPHA_FPRM_CHOP;
307 else if (! strcmp (alpha_fprm_string,"d"))
308 alpha_fprm = ALPHA_FPRM_DYN;
309 else
310 error ("bad value %qs for -mfp-rounding-mode switch",
311 alpha_fprm_string);
314 if (alpha_fptm_string)
316 if (strcmp (alpha_fptm_string, "n") == 0)
317 alpha_fptm = ALPHA_FPTM_N;
318 else if (strcmp (alpha_fptm_string, "u") == 0)
319 alpha_fptm = ALPHA_FPTM_U;
320 else if (strcmp (alpha_fptm_string, "su") == 0)
321 alpha_fptm = ALPHA_FPTM_SU;
322 else if (strcmp (alpha_fptm_string, "sui") == 0)
323 alpha_fptm = ALPHA_FPTM_SUI;
324 else
325 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
328 if (alpha_cpu_string)
330 for (i = 0; i < ct_size; i++)
331 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
333 alpha_tune = alpha_cpu = cpu_table[i].processor;
334 line_size = cpu_table[i].line_size;
335 l1_size = cpu_table[i].l1_size;
336 l2_size = cpu_table[i].l2_size;
337 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
338 target_flags |= cpu_table[i].flags;
339 break;
341 if (i == ct_size)
342 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
345 if (alpha_tune_string)
347 for (i = 0; i < ct_size; i++)
348 if (! strcmp (alpha_tune_string, cpu_table [i].name))
350 alpha_tune = cpu_table[i].processor;
351 line_size = cpu_table[i].line_size;
352 l1_size = cpu_table[i].l1_size;
353 l2_size = cpu_table[i].l2_size;
354 break;
356 if (i == ct_size)
357 error ("bad value %qs for -mtune switch", alpha_tune_string);
360 if (line_size)
361 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
362 global_options.x_param_values,
363 global_options_set.x_param_values);
364 if (l1_size)
365 maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
366 global_options.x_param_values,
367 global_options_set.x_param_values);
368 if (l2_size)
369 maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
370 global_options.x_param_values,
371 global_options_set.x_param_values);
373 /* Do some sanity checks on the above options. */
375 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
376 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
378 warning (0, "fp software completion requires -mtrap-precision=i");
379 alpha_tp = ALPHA_TP_INSN;
382 if (alpha_cpu == PROCESSOR_EV6)
384 /* Except for EV6 pass 1 (not released), we always have precise
385 arithmetic traps. Which means we can do software completion
386 without minding trap shadows. */
387 alpha_tp = ALPHA_TP_PROG;
390 if (TARGET_FLOAT_VAX)
392 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
394 warning (0, "rounding mode not supported for VAX floats");
395 alpha_fprm = ALPHA_FPRM_NORM;
397 if (alpha_fptm == ALPHA_FPTM_SUI)
399 warning (0, "trap mode not supported for VAX floats");
400 alpha_fptm = ALPHA_FPTM_SU;
402 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
403 warning (0, "128-bit long double not supported for VAX floats");
404 target_flags &= ~MASK_LONG_DOUBLE_128;
408 char *end;
409 int lat;
411 if (!alpha_mlat_string)
412 alpha_mlat_string = "L1";
414 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
415 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
417 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
418 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
419 && alpha_mlat_string[2] == '\0')
421 static int const cache_latency[][4] =
423 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
424 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
425 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
428 lat = alpha_mlat_string[1] - '0';
429 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
431 warning (0, "L%d cache latency unknown for %s",
432 lat, alpha_cpu_name[alpha_tune]);
433 lat = 3;
435 else
436 lat = cache_latency[alpha_tune][lat-1];
438 else if (! strcmp (alpha_mlat_string, "main"))
440 /* Most current memories have about 370ns latency. This is
441 a reasonable guess for a fast cpu. */
442 lat = 150;
444 else
446 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
447 lat = 3;
450 alpha_memory_latency = lat;
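      /* Example of the parsing above: -mtune=ev6 -mmemory-latency=L2 selects
	 cache_latency[EV6][1] == 12 cycles, -mmemory-latency=main uses the
	 fixed guess of 150, and the default "L1" picks the first column of
	 the table.  */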
453 /* Default the definition of "small data" to 8 bytes. */
454 if (!global_options_set.x_g_switch_value)
455 g_switch_value = 8;
457 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
458 if (flag_pic == 1)
459 target_flags |= MASK_SMALL_DATA;
460 else if (flag_pic == 2)
461 target_flags &= ~MASK_SMALL_DATA;
463 /* Align labels and loops for optimal branching. */
464 /* ??? Kludge these by not doing anything if we don't optimize. */
465 if (optimize > 0)
467 if (align_loops <= 0)
468 align_loops = 16;
469 if (align_jumps <= 0)
470 align_jumps = 16;
472 if (align_functions <= 0)
473 align_functions = 16;
475 /* Register variables and functions with the garbage collector. */
477 /* Set up function hooks. */
478 init_machine_status = alpha_init_machine_status;
480 /* Tell the compiler when we're using VAX floating point. */
481 if (TARGET_FLOAT_VAX)
483 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
484 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
485 REAL_MODE_FORMAT (TFmode) = NULL;
488 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
489 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
490 target_flags |= MASK_LONG_DOUBLE_128;
491 #endif
494 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
497 zap_mask (HOST_WIDE_INT value)
499 int i;
501 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
502 i++, value >>= 8)
503 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
504 return 0;
506 return 1;
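   /* For illustration: 0xffff0000 and 0x00ff00ff00ff00ff are zap masks
      (every byte is 0x00 or 0xff), while 0x0000ff01 is not, because its low
      byte is neither all-zero nor all-one.  */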
509 /* Return true if OP is valid for a particular TLS relocation.
510 We are already guaranteed that OP is a CONST. */
513 tls_symbolic_operand_1 (rtx op, int size, int unspec)
515 op = XEXP (op, 0);
517 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
518 return 0;
519 op = XVECEXP (op, 0, 0);
521 if (GET_CODE (op) != SYMBOL_REF)
522 return 0;
524 switch (SYMBOL_REF_TLS_MODEL (op))
526 case TLS_MODEL_LOCAL_DYNAMIC:
527 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
528 case TLS_MODEL_INITIAL_EXEC:
529 return unspec == UNSPEC_TPREL && size == 64;
530 case TLS_MODEL_LOCAL_EXEC:
531 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
532 default:
533 gcc_unreachable ();
537 /* Used by aligned_memory_operand and unaligned_memory_operand to
538 resolve what reload is going to do with OP if it's a register. */
541 resolve_reload_operand (rtx op)
543 if (reload_in_progress)
545 rtx tmp = op;
546 if (GET_CODE (tmp) == SUBREG)
547 tmp = SUBREG_REG (tmp);
548 if (REG_P (tmp)
549 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
551 op = reg_equiv_memory_loc (REGNO (tmp));
552 if (op == 0)
553 return 0;
556 return op;
 559 /* The set of scalar modes supported differs from the default check-what-c-supports
560 version in that sometimes TFmode is available even when long double
561 indicates only DFmode. */
563 static bool
564 alpha_scalar_mode_supported_p (enum machine_mode mode)
566 switch (mode)
568 case QImode:
569 case HImode:
570 case SImode:
571 case DImode:
572 case TImode: /* via optabs.c */
573 return true;
575 case SFmode:
576 case DFmode:
577 return true;
579 case TFmode:
580 return TARGET_HAS_XFLOATING_LIBS;
582 default:
583 return false;
587 /* Alpha implements a couple of integer vector mode operations when
588 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
589 which allows the vectorizer to operate on e.g. move instructions,
590 or when expand_vector_operations can do something useful. */
592 static bool
593 alpha_vector_mode_supported_p (enum machine_mode mode)
595 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
598 /* Return 1 if this function can directly return via $26. */
601 direct_return (void)
603 return (TARGET_ABI_OSF
604 && reload_completed
605 && alpha_sa_size () == 0
606 && get_frame_size () == 0
607 && crtl->outgoing_args_size == 0
608 && crtl->args.pretend_args_size == 0);
611 /* Return the TLS model to use for SYMBOL. */
613 static enum tls_model
614 tls_symbolic_operand_type (rtx symbol)
616 enum tls_model model;
618 if (GET_CODE (symbol) != SYMBOL_REF)
619 return TLS_MODEL_NONE;
620 model = SYMBOL_REF_TLS_MODEL (symbol);
622 /* Local-exec with a 64-bit size is the same code as initial-exec. */
623 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
624 model = TLS_MODEL_INITIAL_EXEC;
626 return model;
629 /* Return true if the function DECL will share the same GP as any
630 function in the current unit of translation. */
632 static bool
633 decl_has_samegp (const_tree decl)
635 /* Functions that are not local can be overridden, and thus may
636 not share the same gp. */
637 if (!(*targetm.binds_local_p) (decl))
638 return false;
640 /* If -msmall-data is in effect, assume that there is only one GP
641 for the module, and so any local symbol has this property. We
642 need explicit relocations to be able to enforce this for symbols
643 not defined in this unit of translation, however. */
644 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
645 return true;
647 /* Functions that are not external are defined in this UoT. */
648 /* ??? Irritatingly, static functions not yet emitted are still
649 marked "external". Apply this to non-static functions only. */
650 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
653 /* Return true if EXP should be placed in the small data section. */
655 static bool
656 alpha_in_small_data_p (const_tree exp)
658 /* We want to merge strings, so we never consider them small data. */
659 if (TREE_CODE (exp) == STRING_CST)
660 return false;
662 /* Functions are never in the small data area. Duh. */
663 if (TREE_CODE (exp) == FUNCTION_DECL)
664 return false;
666 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
668 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
669 if (strcmp (section, ".sdata") == 0
670 || strcmp (section, ".sbss") == 0)
671 return true;
673 else
675 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
677 /* If this is an incomplete type with size 0, then we can't put it
678 in sdata because it might be too big when completed. */
679 if (size > 0 && size <= g_switch_value)
680 return true;
683 return false;
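  /* For illustration: with the default -G 8 set up in alpha_option_override,
     a global "int x[2]" (8 bytes) lands in small data, "int y[3]" (12 bytes)
     does not, and anything explicitly placed in ".sdata" or ".sbss"
     qualifies regardless of size.  */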
686 #if TARGET_ABI_OPEN_VMS
687 static bool
688 vms_valid_pointer_mode (enum machine_mode mode)
690 return (mode == SImode || mode == DImode);
693 static bool
694 alpha_linkage_symbol_p (const char *symname)
696 int symlen = strlen (symname);
698 if (symlen > 4)
699 return strcmp (&symname [symlen - 4], "..lk") == 0;
701 return false;
704 #define LINKAGE_SYMBOL_REF_P(X) \
705 ((GET_CODE (X) == SYMBOL_REF \
706 && alpha_linkage_symbol_p (XSTR (X, 0))) \
707 || (GET_CODE (X) == CONST \
708 && GET_CODE (XEXP (X, 0)) == PLUS \
709 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
710 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
711 #endif
713 /* legitimate_address_p recognizes an RTL expression that is a valid
714 memory address for an instruction. The MODE argument is the
715 machine mode for the MEM expression that wants to use this address.
717 For Alpha, we have either a constant address or the sum of a
718 register and a constant address, or just a register. For DImode,
 719 any of those forms can be surrounded with an AND that clears the
720 low-order three bits; this is an "unaligned" access. */
722 static bool
723 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
725 /* If this is an ldq_u type address, discard the outer AND. */
726 if (mode == DImode
727 && GET_CODE (x) == AND
728 && CONST_INT_P (XEXP (x, 1))
729 && INTVAL (XEXP (x, 1)) == -8)
730 x = XEXP (x, 0);
732 /* Discard non-paradoxical subregs. */
733 if (GET_CODE (x) == SUBREG
734 && (GET_MODE_SIZE (GET_MODE (x))
735 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
736 x = SUBREG_REG (x);
738 /* Unadorned general registers are valid. */
739 if (REG_P (x)
740 && (strict
741 ? STRICT_REG_OK_FOR_BASE_P (x)
742 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
743 return true;
745 /* Constant addresses (i.e. +/- 32k) are valid. */
746 if (CONSTANT_ADDRESS_P (x))
747 return true;
749 #if TARGET_ABI_OPEN_VMS
750 if (LINKAGE_SYMBOL_REF_P (x))
751 return true;
752 #endif
754 /* Register plus a small constant offset is valid. */
755 if (GET_CODE (x) == PLUS)
757 rtx ofs = XEXP (x, 1);
758 x = XEXP (x, 0);
760 /* Discard non-paradoxical subregs. */
761 if (GET_CODE (x) == SUBREG
762 && (GET_MODE_SIZE (GET_MODE (x))
763 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
764 x = SUBREG_REG (x);
766 if (REG_P (x))
768 if (! strict
769 && NONSTRICT_REG_OK_FP_BASE_P (x)
770 && CONST_INT_P (ofs))
771 return true;
772 if ((strict
773 ? STRICT_REG_OK_FOR_BASE_P (x)
774 : NONSTRICT_REG_OK_FOR_BASE_P (x))
775 && CONSTANT_ADDRESS_P (ofs))
776 return true;
780 /* If we're managing explicit relocations, LO_SUM is valid, as are small
781 data symbols. Avoid explicit relocations of modes larger than word
 782 mode since e.g. $LC0+8($1) can fold around +/- 32k offset. */
783 else if (TARGET_EXPLICIT_RELOCS
784 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
786 if (small_symbolic_operand (x, Pmode))
787 return true;
789 if (GET_CODE (x) == LO_SUM)
791 rtx ofs = XEXP (x, 1);
792 x = XEXP (x, 0);
794 /* Discard non-paradoxical subregs. */
795 if (GET_CODE (x) == SUBREG
796 && (GET_MODE_SIZE (GET_MODE (x))
797 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
798 x = SUBREG_REG (x);
800 /* Must have a valid base register. */
801 if (! (REG_P (x)
802 && (strict
803 ? STRICT_REG_OK_FOR_BASE_P (x)
804 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
805 return false;
807 /* The symbol must be local. */
808 if (local_symbolic_operand (ofs, Pmode)
809 || dtp32_symbolic_operand (ofs, Pmode)
810 || tp32_symbolic_operand (ofs, Pmode))
811 return true;
815 return false;
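/* A sketch of addresses the routine above accepts (the last form assumes
   -mexplicit-relocs):

     (reg $16)                                  plain base register
     (plus (reg $16) (const_int 32760))         base + 16-bit displacement
     (and (plus (reg $16) (const_int 5))
          (const_int -8))                       DImode ldq_u-style access
     (lo_sum (reg $29) (symbol_ref "local"))    explicit-reloc low part

   A displacement outside +/- 32K is rejected and must go through
   alpha_legitimize_address.  */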
818 /* Build the SYMBOL_REF for __tls_get_addr. */
820 static GTY(()) rtx tls_get_addr_libfunc;
822 static rtx
823 get_tls_get_addr (void)
825 if (!tls_get_addr_libfunc)
826 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
827 return tls_get_addr_libfunc;
830 /* Try machine-dependent ways of modifying an illegitimate address
831 to be legitimate. If we find one, return the new, valid address. */
833 static rtx
834 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
836 HOST_WIDE_INT addend;
838 /* If the address is (plus reg const_int) and the CONST_INT is not a
839 valid offset, compute the high part of the constant and add it to
840 the register. Then our address is (plus temp low-part-const). */
841 if (GET_CODE (x) == PLUS
842 && REG_P (XEXP (x, 0))
843 && CONST_INT_P (XEXP (x, 1))
844 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
846 addend = INTVAL (XEXP (x, 1));
847 x = XEXP (x, 0);
848 goto split_addend;
851 /* If the address is (const (plus FOO const_int)), find the low-order
852 part of the CONST_INT. Then load FOO plus any high-order part of the
853 CONST_INT into a register. Our address is (plus reg low-part-const).
854 This is done to reduce the number of GOT entries. */
855 if (can_create_pseudo_p ()
856 && GET_CODE (x) == CONST
857 && GET_CODE (XEXP (x, 0)) == PLUS
858 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
860 addend = INTVAL (XEXP (XEXP (x, 0), 1));
861 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
862 goto split_addend;
865 /* If we have a (plus reg const), emit the load as in (2), then add
866 the two registers, and finally generate (plus reg low-part-const) as
867 our address. */
868 if (can_create_pseudo_p ()
869 && GET_CODE (x) == PLUS
870 && REG_P (XEXP (x, 0))
871 && GET_CODE (XEXP (x, 1)) == CONST
872 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
873 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
875 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
876 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
877 XEXP (XEXP (XEXP (x, 1), 0), 0),
878 NULL_RTX, 1, OPTAB_LIB_WIDEN);
879 goto split_addend;
882 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
 883 Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
884 around +/- 32k offset. */
885 if (TARGET_EXPLICIT_RELOCS
886 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
887 && symbolic_operand (x, Pmode))
889 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
891 switch (tls_symbolic_operand_type (x))
893 case TLS_MODEL_NONE:
894 break;
896 case TLS_MODEL_GLOBAL_DYNAMIC:
897 start_sequence ();
899 r0 = gen_rtx_REG (Pmode, 0);
900 r16 = gen_rtx_REG (Pmode, 16);
901 tga = get_tls_get_addr ();
902 dest = gen_reg_rtx (Pmode);
903 seq = GEN_INT (alpha_next_sequence_number++);
905 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
906 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
907 insn = emit_call_insn (insn);
908 RTL_CONST_CALL_P (insn) = 1;
909 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
911 insn = get_insns ();
912 end_sequence ();
914 emit_libcall_block (insn, dest, r0, x);
915 return dest;
917 case TLS_MODEL_LOCAL_DYNAMIC:
918 start_sequence ();
920 r0 = gen_rtx_REG (Pmode, 0);
921 r16 = gen_rtx_REG (Pmode, 16);
922 tga = get_tls_get_addr ();
923 scratch = gen_reg_rtx (Pmode);
924 seq = GEN_INT (alpha_next_sequence_number++);
926 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
927 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
928 insn = emit_call_insn (insn);
929 RTL_CONST_CALL_P (insn) = 1;
930 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
932 insn = get_insns ();
933 end_sequence ();
935 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
936 UNSPEC_TLSLDM_CALL);
937 emit_libcall_block (insn, scratch, r0, eqv);
939 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
940 eqv = gen_rtx_CONST (Pmode, eqv);
942 if (alpha_tls_size == 64)
944 dest = gen_reg_rtx (Pmode);
945 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
946 emit_insn (gen_adddi3 (dest, dest, scratch));
947 return dest;
949 if (alpha_tls_size == 32)
951 insn = gen_rtx_HIGH (Pmode, eqv);
952 insn = gen_rtx_PLUS (Pmode, scratch, insn);
953 scratch = gen_reg_rtx (Pmode);
954 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
956 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
958 case TLS_MODEL_INITIAL_EXEC:
959 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
960 eqv = gen_rtx_CONST (Pmode, eqv);
961 tp = gen_reg_rtx (Pmode);
962 scratch = gen_reg_rtx (Pmode);
963 dest = gen_reg_rtx (Pmode);
965 emit_insn (gen_get_thread_pointerdi (tp));
966 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
967 emit_insn (gen_adddi3 (dest, tp, scratch));
968 return dest;
970 case TLS_MODEL_LOCAL_EXEC:
971 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
972 eqv = gen_rtx_CONST (Pmode, eqv);
973 tp = gen_reg_rtx (Pmode);
975 emit_insn (gen_get_thread_pointerdi (tp));
976 if (alpha_tls_size == 32)
978 insn = gen_rtx_HIGH (Pmode, eqv);
979 insn = gen_rtx_PLUS (Pmode, tp, insn);
980 tp = gen_reg_rtx (Pmode);
981 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
983 return gen_rtx_LO_SUM (Pmode, tp, eqv);
985 default:
986 gcc_unreachable ();
989 if (local_symbolic_operand (x, Pmode))
991 if (small_symbolic_operand (x, Pmode))
992 return x;
993 else
995 if (can_create_pseudo_p ())
996 scratch = gen_reg_rtx (Pmode);
997 emit_insn (gen_rtx_SET (VOIDmode, scratch,
998 gen_rtx_HIGH (Pmode, x)));
999 return gen_rtx_LO_SUM (Pmode, scratch, x);
1004 return NULL;
1006 split_addend:
1008 HOST_WIDE_INT low, high;
1010 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1011 addend -= low;
1012 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1013 addend -= high;
1015 if (addend)
1016 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1017 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1018 1, OPTAB_LIB_WIDEN);
1019 if (high)
1020 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1021 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1022 1, OPTAB_LIB_WIDEN);
1024 return plus_constant (Pmode, x, low);
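  /* Worked example of the split above: for addend 0x1234abcd we get
     low = -0x5433 and high = 0x12350000, so one ldah-style add of
     0x1235 << 16 is applied to X and the result is (plus X' -0x5433),
     which fits the 16-bit displacement of a normal memory reference.  */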
1029 /* Try machine-dependent ways of modifying an illegitimate address
1030 to be legitimate. Return X or the new, valid address. */
1032 static rtx
1033 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1034 enum machine_mode mode)
1036 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1037 return new_x ? new_x : x;
1040 /* Return true if ADDR has an effect that depends on the machine mode it
1041 is used for. On the Alpha this is true only for the unaligned modes.
1042 We can simplify the test since we know that the address must be valid. */
1044 static bool
1045 alpha_mode_dependent_address_p (const_rtx addr,
1046 addr_space_t as ATTRIBUTE_UNUSED)
1048 return GET_CODE (addr) == AND;
1051 /* Primarily this is required for TLS symbols, but given that our move
1052 patterns *ought* to be able to handle any symbol at any time, we
1053 should never be spilling symbolic operands to the constant pool, ever. */
1055 static bool
1056 alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1058 enum rtx_code code = GET_CODE (x);
1059 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1062 /* We do not allow indirect calls to be optimized into sibling calls, nor
1063 can we allow a call to a function with a different GP to be optimized
1064 into a sibcall. */
1066 static bool
1067 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1069 /* Can't do indirect tail calls, since we don't know if the target
1070 uses the same GP. */
1071 if (!decl)
1072 return false;
1074 /* Otherwise, we can make a tail call if the target function shares
1075 the same GP. */
1076 return decl_has_samegp (decl);
1080 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1082 rtx x = *px;
1084 /* Don't re-split. */
1085 if (GET_CODE (x) == LO_SUM)
1086 return -1;
1088 return small_symbolic_operand (x, Pmode) != 0;
1091 static int
1092 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1094 rtx x = *px;
1096 /* Don't re-split. */
1097 if (GET_CODE (x) == LO_SUM)
1098 return -1;
1100 if (small_symbolic_operand (x, Pmode))
1102 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1103 *px = x;
1104 return -1;
1107 return 0;
1111 split_small_symbolic_operand (rtx x)
1113 x = copy_insn (x);
1114 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1115 return x;
1118 /* Indicate that INSN cannot be duplicated. This is true for any insn
1119 that we've marked with gpdisp relocs, since those have to stay in
1120 1-1 correspondence with one another.
1122 Technically we could copy them if we could set up a mapping from one
1123 sequence number to another, across the set of insns to be duplicated.
1124 This seems overly complicated and error-prone since interblock motion
1125 from sched-ebb could move one of the pair of insns to a different block.
1127 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1128 then they'll be in a different block from their ldgp. Which could lead
1129 the bb reorder code to think that it would be ok to copy just the block
1130 containing the call and branch to the block containing the ldgp. */
1132 static bool
1133 alpha_cannot_copy_insn_p (rtx insn)
1135 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1136 return false;
1137 if (recog_memoized (insn) >= 0)
1138 return get_attr_cannot_copy (insn);
1139 else
1140 return false;
1144 /* Try a machine-dependent way of reloading an illegitimate address
1145 operand. If we find one, push the reload and return the new rtx. */
1148 alpha_legitimize_reload_address (rtx x,
1149 enum machine_mode mode ATTRIBUTE_UNUSED,
1150 int opnum, int type,
1151 int ind_levels ATTRIBUTE_UNUSED)
1153 /* We must recognize output that we have already generated ourselves. */
1154 if (GET_CODE (x) == PLUS
1155 && GET_CODE (XEXP (x, 0)) == PLUS
1156 && REG_P (XEXP (XEXP (x, 0), 0))
1157 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1158 && CONST_INT_P (XEXP (x, 1)))
1160 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1161 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1162 opnum, (enum reload_type) type);
1163 return x;
1166 /* We wish to handle large displacements off a base register by
1167 splitting the addend across an ldah and the mem insn. This
 1168 cuts the number of extra insns needed from 3 to 1. */
1169 if (GET_CODE (x) == PLUS
1170 && REG_P (XEXP (x, 0))
1171 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1172 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1173 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1175 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1176 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1177 HOST_WIDE_INT high
1178 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1180 /* Check for 32-bit overflow. */
1181 if (high + low != val)
1182 return NULL_RTX;
1184 /* Reload the high part into a base reg; leave the low part
1185 in the mem directly. */
1186 x = gen_rtx_PLUS (GET_MODE (x),
1187 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1188 GEN_INT (high)),
1189 GEN_INT (low));
1191 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1192 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1193 opnum, (enum reload_type) type);
1194 return x;
1197 return NULL_RTX;
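/* Illustration of the displacement split above: reloading
   (plus (reg $9) (const_int 0x12340)) gives low = 0x2340 and high = 0x10000;
   the inner (plus (reg $9) (const_int 0x10000)) is reloaded into a base
   register with a single ldah, and the in-range 0x2340 stays in the mem.  */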
1200 /* Compute a (partial) cost for rtx X. Return true if the complete
1201 cost has been computed, and false if subexpressions should be
1202 scanned. In either case, *TOTAL contains the cost result. */
1204 static bool
1205 alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
1206 bool speed)
1208 enum machine_mode mode = GET_MODE (x);
1209 bool float_mode_p = FLOAT_MODE_P (mode);
1210 const struct alpha_rtx_cost_data *cost_data;
1212 if (!speed)
1213 cost_data = &alpha_rtx_cost_size;
1214 else
1215 cost_data = &alpha_rtx_cost_data[alpha_tune];
1217 switch (code)
1219 case CONST_INT:
1220 /* If this is an 8-bit constant, return zero since it can be used
1221 nearly anywhere with no cost. If it is a valid operand for an
1222 ADD or AND, likewise return 0 if we know it will be used in that
1223 context. Otherwise, return 2 since it might be used there later.
1224 All other constants take at least two insns. */
1225 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1227 *total = 0;
1228 return true;
1230 /* FALLTHRU */
1232 case CONST_DOUBLE:
1233 if (x == CONST0_RTX (mode))
1234 *total = 0;
1235 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1236 || (outer_code == AND && and_operand (x, VOIDmode)))
1237 *total = 0;
1238 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1239 *total = 2;
1240 else
1241 *total = COSTS_N_INSNS (2);
1242 return true;
1244 case CONST:
1245 case SYMBOL_REF:
1246 case LABEL_REF:
1247 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1248 *total = COSTS_N_INSNS (outer_code != MEM);
1249 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1250 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1251 else if (tls_symbolic_operand_type (x))
1252 /* Estimate of cost for call_pal rduniq. */
1253 /* ??? How many insns do we emit here? More than one... */
1254 *total = COSTS_N_INSNS (15);
1255 else
1256 /* Otherwise we do a load from the GOT. */
1257 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1258 return true;
1260 case HIGH:
1261 /* This is effectively an add_operand. */
1262 *total = 2;
1263 return true;
1265 case PLUS:
1266 case MINUS:
1267 if (float_mode_p)
1268 *total = cost_data->fp_add;
1269 else if (GET_CODE (XEXP (x, 0)) == MULT
1270 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1272 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1273 (enum rtx_code) outer_code, opno, speed)
1274 + rtx_cost (XEXP (x, 1),
1275 (enum rtx_code) outer_code, opno, speed)
1276 + COSTS_N_INSNS (1));
1277 return true;
1279 return false;
1281 case MULT:
1282 if (float_mode_p)
1283 *total = cost_data->fp_mult;
1284 else if (mode == DImode)
1285 *total = cost_data->int_mult_di;
1286 else
1287 *total = cost_data->int_mult_si;
1288 return false;
1290 case ASHIFT:
1291 if (CONST_INT_P (XEXP (x, 1))
1292 && INTVAL (XEXP (x, 1)) <= 3)
1294 *total = COSTS_N_INSNS (1);
1295 return false;
1297 /* FALLTHRU */
1299 case ASHIFTRT:
1300 case LSHIFTRT:
1301 *total = cost_data->int_shift;
1302 return false;
1304 case IF_THEN_ELSE:
1305 if (float_mode_p)
1306 *total = cost_data->fp_add;
1307 else
1308 *total = cost_data->int_cmov;
1309 return false;
1311 case DIV:
1312 case UDIV:
1313 case MOD:
1314 case UMOD:
1315 if (!float_mode_p)
1316 *total = cost_data->int_div;
1317 else if (mode == SFmode)
1318 *total = cost_data->fp_div_sf;
1319 else
1320 *total = cost_data->fp_div_df;
1321 return false;
1323 case MEM:
1324 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1325 return true;
1327 case NEG:
1328 if (! float_mode_p)
1330 *total = COSTS_N_INSNS (1);
1331 return false;
1333 /* FALLTHRU */
1335 case ABS:
1336 if (! float_mode_p)
1338 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1339 return false;
1341 /* FALLTHRU */
1343 case FLOAT:
1344 case UNSIGNED_FLOAT:
1345 case FIX:
1346 case UNSIGNED_FIX:
1347 case FLOAT_TRUNCATE:
1348 *total = cost_data->fp_add;
1349 return false;
1351 case FLOAT_EXTEND:
1352 if (MEM_P (XEXP (x, 0)))
1353 *total = 0;
1354 else
1355 *total = cost_data->fp_add;
1356 return false;
1358 default:
1359 return false;
1363 /* REF is an alignable memory location. Place an aligned SImode
1364 reference into *PALIGNED_MEM and the number of bits to shift into
1365 *PBITNUM. SCRATCH is a free register for use in reloading out
1366 of range stack slots. */
1368 void
1369 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1371 rtx base;
1372 HOST_WIDE_INT disp, offset;
1374 gcc_assert (MEM_P (ref));
1376 if (reload_in_progress
1377 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1379 base = find_replacement (&XEXP (ref, 0));
1380 gcc_assert (memory_address_p (GET_MODE (ref), base));
1382 else
1383 base = XEXP (ref, 0);
1385 if (GET_CODE (base) == PLUS)
1386 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1387 else
1388 disp = 0;
1390 /* Find the byte offset within an aligned word. If the memory itself is
1391 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1392 will have examined the base register and determined it is aligned, and
1393 thus displacements from it are naturally alignable. */
1394 if (MEM_ALIGN (ref) >= 32)
1395 offset = 0;
1396 else
1397 offset = disp & 3;
1399 /* The location should not cross aligned word boundary. */
1400 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1401 <= GET_MODE_SIZE (SImode));
1403 /* Access the entire aligned word. */
1404 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1406 /* Convert the byte offset within the word to a bit offset. */
1407 offset *= BITS_PER_UNIT;
1408 *pbitnum = GEN_INT (offset);
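  /* For illustration: an HImode reference at (plus (reg $9) (const_int 6))
     with no alignment guarantee gives disp = 6 and offset = 2, so
     *PALIGNED_MEM becomes the SImode word at displacement 4 and *PBITNUM
     is 16 (the halfword sits in bits 16..31 of that word).  */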
 1411 /* Similar, but just get the address. Handle the two reload cases. */
1415 get_unaligned_address (rtx ref)
1417 rtx base;
1418 HOST_WIDE_INT offset = 0;
1420 gcc_assert (MEM_P (ref));
1422 if (reload_in_progress
1423 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1425 base = find_replacement (&XEXP (ref, 0));
1427 gcc_assert (memory_address_p (GET_MODE (ref), base));
1429 else
1430 base = XEXP (ref, 0);
1432 if (GET_CODE (base) == PLUS)
1433 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1435 return plus_constant (Pmode, base, offset);
1438 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1439 X is always returned in a register. */
1442 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1444 if (GET_CODE (addr) == PLUS)
1446 ofs += INTVAL (XEXP (addr, 1));
1447 addr = XEXP (addr, 0);
1450 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1451 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1454 /* On the Alpha, all (non-symbolic) constants except zero go into
1455 a floating-point register via memory. Note that we cannot
1456 return anything that is not a subset of RCLASS, and that some
1457 symbolic constants cannot be dropped to memory. */
1459 enum reg_class
1460 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1462 /* Zero is present in any register class. */
1463 if (x == CONST0_RTX (GET_MODE (x)))
1464 return rclass;
1466 /* These sorts of constants we can easily drop to memory. */
1467 if (CONST_INT_P (x)
1468 || GET_CODE (x) == CONST_DOUBLE
1469 || GET_CODE (x) == CONST_VECTOR)
1471 if (rclass == FLOAT_REGS)
1472 return NO_REGS;
1473 if (rclass == ALL_REGS)
1474 return GENERAL_REGS;
1475 return rclass;
1478 /* All other kinds of constants should not (and in the case of HIGH
1479 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1480 secondary reload. */
1481 if (CONSTANT_P (x))
1482 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1484 return rclass;
1487 /* Inform reload about cases where moving X with a mode MODE to a register in
1488 RCLASS requires an extra scratch or immediate register. Return the class
1489 needed for the immediate register. */
1491 static reg_class_t
1492 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1493 enum machine_mode mode, secondary_reload_info *sri)
1495 enum reg_class rclass = (enum reg_class) rclass_i;
1497 /* Loading and storing HImode or QImode values to and from memory
1498 usually requires a scratch register. */
1499 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1501 if (any_memory_operand (x, mode))
1503 if (in_p)
1505 if (!aligned_memory_operand (x, mode))
1506 sri->icode = direct_optab_handler (reload_in_optab, mode);
1508 else
1509 sri->icode = direct_optab_handler (reload_out_optab, mode);
1510 return NO_REGS;
1514 /* We also cannot do integral arithmetic into FP regs, as might result
1515 from register elimination into a DImode fp register. */
1516 if (rclass == FLOAT_REGS)
1518 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1519 return GENERAL_REGS;
1520 if (in_p && INTEGRAL_MODE_P (mode)
1521 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1522 return GENERAL_REGS;
1525 return NO_REGS;
1528 /* Subfunction of the following function. Update the flags of any MEM
1529 found in part of X. */
1531 static int
1532 alpha_set_memflags_1 (rtx *xp, void *data)
1534 rtx x = *xp, orig = (rtx) data;
1536 if (!MEM_P (x))
1537 return 0;
1539 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1540 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1541 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1543 /* Sadly, we cannot use alias sets because the extra aliasing
1544 produced by the AND interferes. Given that two-byte quantities
1545 are the only thing we would be able to differentiate anyway,
1546 there does not seem to be any point in convoluting the early
1547 out of the alias check. */
1549 return -1;
1552 /* Given SEQ, which is an INSN list, look for any MEMs in either
1553 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1554 volatile flags from REF into each of the MEMs found. If REF is not
1555 a MEM, don't do anything. */
1557 void
1558 alpha_set_memflags (rtx seq, rtx ref)
1560 rtx insn;
1562 if (!MEM_P (ref))
1563 return;
1565 /* This is only called from alpha.md, after having had something
1566 generated from one of the insn patterns. So if everything is
1567 zero, the pattern is already up-to-date. */
1568 if (!MEM_VOLATILE_P (ref)
1569 && !MEM_NOTRAP_P (ref)
1570 && !MEM_READONLY_P (ref))
1571 return;
1573 for (insn = seq; insn; insn = NEXT_INSN (insn))
1574 if (INSN_P (insn))
1575 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1576 else
1577 gcc_unreachable ();
1580 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1581 int, bool);
1583 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1584 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1585 and return pc_rtx if successful. */
1587 static rtx
1588 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1589 HOST_WIDE_INT c, int n, bool no_output)
1591 HOST_WIDE_INT new_const;
1592 int i, bits;
1593 /* Use a pseudo if highly optimizing and still generating RTL. */
1594 rtx subtarget
1595 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1596 rtx temp, insn;
1598 /* If this is a sign-extended 32-bit constant, we can do this in at most
1599 three insns, so do it if we have enough insns left. We always have
1600 a sign-extended 32-bit constant when compiling on a narrow machine. */
1602 if (HOST_BITS_PER_WIDE_INT != 64
1603 || c >> 31 == -1 || c >> 31 == 0)
1605 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1606 HOST_WIDE_INT tmp1 = c - low;
1607 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1608 HOST_WIDE_INT extra = 0;
1610 /* If HIGH will be interpreted as negative but the constant is
 1611 positive, we must adjust it to do two ldah insns. */
1613 if ((high & 0x8000) != 0 && c >= 0)
1615 extra = 0x4000;
1616 tmp1 -= 0x40000000;
1617 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1620 if (c == low || (low == 0 && extra == 0))
1622 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1623 but that meant that we can't handle INT_MIN on 32-bit machines
1624 (like NT/Alpha), because we recurse indefinitely through
1625 emit_move_insn to gen_movdi. So instead, since we know exactly
1626 what we want, create it explicitly. */
1628 if (no_output)
1629 return pc_rtx;
1630 if (target == NULL)
1631 target = gen_reg_rtx (mode);
1632 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1633 return target;
1635 else if (n >= 2 + (extra != 0))
1637 if (no_output)
1638 return pc_rtx;
1639 if (!can_create_pseudo_p ())
1641 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1642 temp = target;
1644 else
1645 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1646 subtarget, mode);
1648 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1649 This means that if we go through expand_binop, we'll try to
1650 generate extensions, etc, which will require new pseudos, which
1651 will fail during some split phases. The SImode add patterns
1652 still exist, but are not named. So build the insns by hand. */
1654 if (extra != 0)
1656 if (! subtarget)
1657 subtarget = gen_reg_rtx (mode);
1658 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1659 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1660 emit_insn (insn);
1661 temp = subtarget;
1664 if (target == NULL)
1665 target = gen_reg_rtx (mode);
1666 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1667 insn = gen_rtx_SET (VOIDmode, target, insn);
1668 emit_insn (insn);
1669 return target;
1673 /* If we couldn't do it that way, try some other methods. But if we have
1674 no instructions left, don't bother. Likewise, if this is SImode and
1675 we can't make pseudos, we can't do anything since the expand_binop
1676 and expand_unop calls will widen and try to make pseudos. */
1678 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1679 return 0;
1681 /* Next, see if we can load a related constant and then shift and possibly
1682 negate it to get the constant we want. Try this once each increasing
1683 numbers of insns. */
1685 for (i = 1; i < n; i++)
1687 /* First, see if minus some low bits, we've an easy load of
1688 high bits. */
1690 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1691 if (new_const != 0)
1693 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1694 if (temp)
1696 if (no_output)
1697 return temp;
1698 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1699 target, 0, OPTAB_WIDEN);
1703 /* Next try complementing. */
1704 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1705 if (temp)
1707 if (no_output)
1708 return temp;
1709 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1712 /* Next try to form a constant and do a left shift. We can do this
1713 if some low-order bits are zero; the exact_log2 call below tells
1714 us that information. The bits we are shifting out could be any
1715 value, but here we'll just try the 0- and sign-extended forms of
1716 the constant. To try to increase the chance of having the same
1717 constant in more than one insn, start at the highest number of
1718 bits to shift, but try all possibilities in case a ZAPNOT will
1719 be useful. */
1721 bits = exact_log2 (c & -c);
1722 if (bits > 0)
1723 for (; bits > 0; bits--)
1725 new_const = c >> bits;
1726 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1727 if (!temp && c < 0)
1729 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1730 temp = alpha_emit_set_const (subtarget, mode, new_const,
1731 i, no_output);
1733 if (temp)
1735 if (no_output)
1736 return temp;
1737 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1738 target, 0, OPTAB_WIDEN);
1742 /* Now try high-order zero bits. Here we try the shifted-in bits as
1743 all zero and all ones. Be careful to avoid shifting outside the
1744 mode and to avoid shifting outside the host wide int size. */
1745 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1746 confuse the recursive call and set all of the high 32 bits. */
1748 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1749 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1750 if (bits > 0)
1751 for (; bits > 0; bits--)
1753 new_const = c << bits;
1754 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1755 if (!temp)
1757 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1758 temp = alpha_emit_set_const (subtarget, mode, new_const,
1759 i, no_output);
1761 if (temp)
1763 if (no_output)
1764 return temp;
1765 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1766 target, 1, OPTAB_WIDEN);
1770 /* Now try high-order 1 bits. We get that with a sign-extension.
1771 But one bit isn't enough here. Be careful to avoid shifting outside
1772 the mode and to avoid shifting outside the host wide int size. */
1774 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1775 - floor_log2 (~ c) - 2);
1776 if (bits > 0)
1777 for (; bits > 0; bits--)
1779 new_const = c << bits;
1780 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1781 if (!temp)
1783 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1784 temp = alpha_emit_set_const (subtarget, mode, new_const,
1785 i, no_output);
1787 if (temp)
1789 if (no_output)
1790 return temp;
1791 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1792 target, 0, OPTAB_WIDEN);
1797 #if HOST_BITS_PER_WIDE_INT == 64
1798 /* Finally, see if can load a value into the target that is the same as the
1799 constant except that all bytes that are 0 are changed to be 0xff. If we
1800 can, then we can do a ZAPNOT to obtain the desired constant. */
1802 new_const = c;
1803 for (i = 0; i < 64; i += 8)
1804 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1805 new_const |= (HOST_WIDE_INT) 0xff << i;
1807 /* We are only called for SImode and DImode. If this is SImode, ensure that
1808 we are sign extended to a full word. */
1810 if (mode == SImode)
1811 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1813 if (new_const != c)
1815 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1816 if (temp)
1818 if (no_output)
1819 return temp;
1820 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1821 target, 0, OPTAB_WIDEN);
1824 #endif
1826 return 0;
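  /* Worked example of the 32-bit path above: c = 0x12345678 splits into
     high = 0x1234 and low = 0x5678 with no "extra" adjustment, giving the
     two-insn sequence "ldah $r,0x1234($31); lda $r,0x5678($r)".  A value
     such as 0x7fff8000 needs the extra 0x4000 correction and takes three
     insns.  */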
1829 /* Try to output insns to set TARGET equal to the constant C if it can be
1830 done in less than N insns. Do all computations in MODE. Returns the place
1831 where the output has been placed if it can be done and the insns have been
1832 emitted. If it would take more than N insns, zero is returned and no
 1833 insns are emitted. */
1835 static rtx
1836 alpha_emit_set_const (rtx target, enum machine_mode mode,
1837 HOST_WIDE_INT c, int n, bool no_output)
1839 enum machine_mode orig_mode = mode;
1840 rtx orig_target = target;
1841 rtx result = 0;
1842 int i;
1844 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1845 can't load this constant in one insn, do this in DImode. */
1846 if (!can_create_pseudo_p () && mode == SImode
1847 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1849 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1850 if (result)
1851 return result;
1853 target = no_output ? NULL : gen_lowpart (DImode, target);
1854 mode = DImode;
1856 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1858 target = no_output ? NULL : gen_lowpart (DImode, target);
1859 mode = DImode;
1862 /* Try 1 insn, then 2, then up to N. */
1863 for (i = 1; i <= n; i++)
1865 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1866 if (result)
1868 rtx insn, set;
1870 if (no_output)
1871 return result;
1873 insn = get_last_insn ();
1874 set = single_set (insn);
1875 if (! CONSTANT_P (SET_SRC (set)))
1876 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1877 break;
1881 /* Allow for the case where we changed the mode of TARGET. */
1882 if (result)
1884 if (result == target)
1885 result = orig_target;
1886 else if (mode != orig_mode)
1887 result = gen_lowpart (orig_mode, result);
1890 return result;
1893 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
 1894 fall back to a straightforward decomposition. We do this to avoid
1895 exponential run times encountered when looking for longer sequences
1896 with alpha_emit_set_const. */
1898 static rtx
1899 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1901 HOST_WIDE_INT d1, d2, d3, d4;
1903 /* Decompose the entire word */
1904 #if HOST_BITS_PER_WIDE_INT >= 64
1905 gcc_assert (c2 == -(c1 < 0));
1906 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1907 c1 -= d1;
1908 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1909 c1 = (c1 - d2) >> 32;
1910 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1911 c1 -= d3;
1912 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1913 gcc_assert (c1 == d4);
1914 #else
1915 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1916 c1 -= d1;
1917 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1918 gcc_assert (c1 == d2);
1919 c2 += (d2 < 0);
1920 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1921 c2 -= d3;
1922 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1923 gcc_assert (c2 == d4);
1924 #endif
1926 /* Construct the high word */
1927 if (d4)
1929 emit_move_insn (target, GEN_INT (d4));
1930 if (d3)
1931 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1933 else
1934 emit_move_insn (target, GEN_INT (d3));
1936 /* Shift it into place */
1937 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1939 /* Add in the low bits. */
1940 if (d2)
1941 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1942 if (d1)
1943 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1945 return target;
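  /* Worked example: c = 0x123456789abcdef0 decomposes as d1 = -0x2110,
     d2 = -0x65430000, d3 = 0x5679, d4 = 0x12340000.  The code builds the
     high word 0x12345679 with an ldah+lda pair, shifts it left by 32, then
     folds in d2 and d1 with another ldah+lda pair -- five insns in all.  */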
1948 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
1949 the low 64 bits. */
1951 static void
1952 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
1954 HOST_WIDE_INT i0, i1;
1956 if (GET_CODE (x) == CONST_VECTOR)
1957 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
1960 if (CONST_INT_P (x))
1962 i0 = INTVAL (x);
1963 i1 = -(i0 < 0);
1965 else if (HOST_BITS_PER_WIDE_INT >= 64)
1967 i0 = CONST_DOUBLE_LOW (x);
1968 i1 = -(i0 < 0);
1970 else
1972 i0 = CONST_DOUBLE_LOW (x);
1973 i1 = CONST_DOUBLE_HIGH (x);
1976 *p0 = i0;
1977 *p1 = i1;
1980 /* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
1981 we are willing to load the value into a register via a move pattern.
1982 Normally this is all symbolic constants, integral constants that
1983 take three or fewer instructions, and floating-point zero. */
1985 bool
1986 alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
1988 HOST_WIDE_INT i0, i1;
1990 switch (GET_CODE (x))
1992 case LABEL_REF:
1993 case HIGH:
1994 return true;
1996 case CONST:
1997 if (GET_CODE (XEXP (x, 0)) == PLUS
1998 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1999 x = XEXP (XEXP (x, 0), 0);
2000 else
2001 return true;
2003 if (GET_CODE (x) != SYMBOL_REF)
2004 return true;
2006 /* FALLTHRU */
2008 case SYMBOL_REF:
2009 /* TLS symbols are never valid. */
2010 return SYMBOL_REF_TLS_MODEL (x) == 0;
2012 case CONST_DOUBLE:
2013 if (x == CONST0_RTX (mode))
2014 return true;
2015 if (FLOAT_MODE_P (mode))
2016 return false;
2017 goto do_integer;
2019 case CONST_VECTOR:
2020 if (x == CONST0_RTX (mode))
2021 return true;
2022 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2023 return false;
2024 if (GET_MODE_SIZE (mode) != 8)
2025 return false;
2026 goto do_integer;
2028 case CONST_INT:
2029 do_integer:
2030 if (TARGET_BUILD_CONSTANTS)
2031 return true;
2032 alpha_extract_integer (x, &i0, &i1);
2033 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2034 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2035 return false;
2037 default:
2038 return false;
2042 /* Operand 1 is known to be a constant, and should require more than one
2043 instruction to load. Emit that multi-part load. */
2045 bool
2046 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2048 HOST_WIDE_INT i0, i1;
2049 rtx temp = NULL_RTX;
2051 alpha_extract_integer (operands[1], &i0, &i1);
2053 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2054 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2056 if (!temp && TARGET_BUILD_CONSTANTS)
2057 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2059 if (temp)
2061 if (!rtx_equal_p (operands[0], temp))
2062 emit_move_insn (operands[0], temp);
2063 return true;
2066 return false;
2069 /* Expand a move instruction; return true if all work is done.
2070 We don't handle non-bwx subword loads here. */
2072 bool
2073 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2075 rtx tmp;
2077 /* If the output is not a register, the input must be. */
2078 if (MEM_P (operands[0])
2079 && ! reg_or_0_operand (operands[1], mode))
2080 operands[1] = force_reg (mode, operands[1]);
2082 /* Allow legitimize_address to perform some simplifications. */
2083 if (mode == Pmode && symbolic_operand (operands[1], mode))
2085 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2086 if (tmp)
2088 if (tmp == operands[0])
2089 return true;
2090 operands[1] = tmp;
2091 return false;
2095 /* Early out for non-constants and valid constants. */
2096 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2097 return false;
2099 /* Split large integers. */
2100 if (CONST_INT_P (operands[1])
2101 || GET_CODE (operands[1]) == CONST_DOUBLE
2102 || GET_CODE (operands[1]) == CONST_VECTOR)
2104 if (alpha_split_const_mov (mode, operands))
2105 return true;
2108 /* Otherwise we've nothing left but to drop the thing to memory. */
2109 tmp = force_const_mem (mode, operands[1]);
2111 if (tmp == NULL_RTX)
2112 return false;
2114 if (reload_in_progress)
2116 emit_move_insn (operands[0], XEXP (tmp, 0));
2117 operands[1] = replace_equiv_address (tmp, operands[0]);
2119 else
2120 operands[1] = validize_mem (tmp);
2121 return false;
2124 /* Expand a non-bwx QImode or HImode move instruction;
2125 return true if all work is done. */
2127 bool
2128 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2130 rtx seq;
2132 /* If the output is not a register, the input must be. */
2133 if (MEM_P (operands[0]))
2134 operands[1] = force_reg (mode, operands[1]);
2136 /* Handle four memory cases, unaligned and aligned for either the input
2137 or the output. The only case where we can be called during reload is
2138 for aligned loads; all other cases require temporaries. */
2140 if (any_memory_operand (operands[1], mode))
2142 if (aligned_memory_operand (operands[1], mode))
2144 if (reload_in_progress)
2146 if (mode == QImode)
2147 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2148 else
2149 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2150 emit_insn (seq);
2152 else
2154 rtx aligned_mem, bitnum;
2155 rtx scratch = gen_reg_rtx (SImode);
2156 rtx subtarget;
2157 bool copyout;
2159 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2161 subtarget = operands[0];
2162 if (REG_P (subtarget))
2163 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2164 else
2165 subtarget = gen_reg_rtx (DImode), copyout = true;
2167 if (mode == QImode)
2168 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2169 bitnum, scratch);
2170 else
2171 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2172 bitnum, scratch);
2173 emit_insn (seq);
2175 if (copyout)
2176 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2179 else
2181 /* Don't pass these as parameters since that makes the generated
2182 code depend on parameter evaluation order which will cause
2183 bootstrap failures. */
2185 rtx temp1, temp2, subtarget, ua;
2186 bool copyout;
2188 temp1 = gen_reg_rtx (DImode);
2189 temp2 = gen_reg_rtx (DImode);
2191 subtarget = operands[0];
2192 if (REG_P (subtarget))
2193 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2194 else
2195 subtarget = gen_reg_rtx (DImode), copyout = true;
2197 ua = get_unaligned_address (operands[1]);
2198 if (mode == QImode)
2199 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2200 else
2201 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2203 alpha_set_memflags (seq, operands[1]);
2204 emit_insn (seq);
2206 if (copyout)
2207 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2209 return true;
2212 if (any_memory_operand (operands[0], mode))
2214 if (aligned_memory_operand (operands[0], mode))
2216 rtx aligned_mem, bitnum;
2217 rtx temp1 = gen_reg_rtx (SImode);
2218 rtx temp2 = gen_reg_rtx (SImode);
2220 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2222 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2223 temp1, temp2));
2225 else
2227 rtx temp1 = gen_reg_rtx (DImode);
2228 rtx temp2 = gen_reg_rtx (DImode);
2229 rtx temp3 = gen_reg_rtx (DImode);
2230 rtx ua = get_unaligned_address (operands[0]);
2232 if (mode == QImode)
2233 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2234 else
2235 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2237 alpha_set_memflags (seq, operands[0]);
2238 emit_insn (seq);
2240 return true;
2243 return false;
2246 /* Implement the movmisalign patterns. One of the operands is a memory
2247 that is not naturally aligned. Emit instructions to load it. */
2249 void
2250 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2252 /* Honor misaligned loads, for those we promised to do so. */
2253 if (MEM_P (operands[1]))
2255 rtx tmp;
2257 if (register_operand (operands[0], mode))
2258 tmp = operands[0];
2259 else
2260 tmp = gen_reg_rtx (mode);
2262 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2263 if (tmp != operands[0])
2264 emit_move_insn (operands[0], tmp);
2266 else if (MEM_P (operands[0]))
2268 if (!reg_or_0_operand (operands[1], mode))
2269 operands[1] = force_reg (mode, operands[1]);
2270 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2272 else
2273 gcc_unreachable ();
2276 /* Generate an unsigned DImode to FP conversion. This is the same code
2277 optabs would emit if we didn't have TFmode patterns.
2279 For SFmode, this is the only construction I've found that can pass
2280 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2281 intermediates will work, because you'll get intermediate rounding
2282 that ruins the end result. Some of this could be fixed by turning
2283 on round-to-positive-infinity, but that requires diddling the fpsr,
2284 which kills performance. I tried turning this around and converting
2285 to a negative number, so that I could turn on /m, but either I did
2286 it wrong or there's something else, because I wound up with the exact
2287 same single-bit error. There is a branch-less form of this same code:
2289 srl $16,1,$1
2290 and $16,1,$2
2291 cmplt $16,0,$3
2292 or $1,$2,$2
2293 cmovge $16,$16,$2
2294 itoft $3,$f10
2295 itoft $2,$f11
2296 cvtqs $f11,$f11
2297 adds $f11,$f11,$f0
2298 fcmoveq $f10,$f11,$f0
2300 I'm not using it because it's the same number of instructions as
2301 this branch-full form, and it has more serialized long latency
2302 instructions on the critical path.
2304 For DFmode, we can avoid rounding errors by breaking up the word
2305 into two pieces, converting them separately, and adding them back:
2307 LC0: .long 0,0x5f800000
2309 itoft $16,$f11
2310 lda $2,LC0
2311 cmplt $16,0,$1
2312 cpyse $f11,$f31,$f10
2313 cpyse $f31,$f11,$f11
2314 s4addq $1,$2,$1
2315 lds $f12,0($1)
2316 cvtqt $f10,$f10
2317 cvtqt $f11,$f11
2318 addt $f12,$f10,$f0
2319 addt $f0,$f11,$f0
2321 This doesn't seem to be a clear-cut win over the optabs form.
2322 It probably all depends on the distribution of numbers being
2323 converted -- in the optabs form, all but high-bit-set has a
2324 much lower minimum execution time. */
2326 void
2327 alpha_emit_floatuns (rtx operands[2])
2329 rtx neglab, donelab, i0, i1, f0, in, out;
2330 enum machine_mode mode;
2332 out = operands[0];
2333 in = force_reg (DImode, operands[1]);
2334 mode = GET_MODE (out);
2335 neglab = gen_label_rtx ();
2336 donelab = gen_label_rtx ();
2337 i0 = gen_reg_rtx (DImode);
2338 i1 = gen_reg_rtx (DImode);
2339 f0 = gen_reg_rtx (mode);
2341 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2343 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2344 emit_jump_insn (gen_jump (donelab));
2345 emit_barrier ();
2347 emit_label (neglab);
2349 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2350 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2351 emit_insn (gen_iordi3 (i0, i0, i1));
2352 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2353 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2355 emit_label (donelab);
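/* Illustrative sketch, not part of GCC: the negative-input path above uses
   the standard unsigned-to-float trick of converting (x >> 1) | (x & 1) as
   a signed value and doubling the result; OR-ing the low bit back in keeps
   the round-to-nearest decision the same as for the full value.  A
   hypothetical stand-alone rendering, with the example_* name assumed:  */
static double
example_floatuns (unsigned long long x)
{
  unsigned long long half;
  double d;

  if ((long long) x >= 0)
    /* High bit clear: a plain signed conversion handles it.  */
    return (double) (long long) x;

  /* Halve, keeping the shifted-out bit sticky, convert, then double.  */
  half = (x >> 1) | (x & 1);
  d = (double) (long long) half;
  return d + d;
}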
2358 /* Generate the comparison for a conditional branch. */
2360 void
2361 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2363 enum rtx_code cmp_code, branch_code;
2364 enum machine_mode branch_mode = VOIDmode;
2365 enum rtx_code code = GET_CODE (operands[0]);
2366 rtx op0 = operands[1], op1 = operands[2];
2367 rtx tem;
2369 if (cmp_mode == TFmode)
2371 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2372 op1 = const0_rtx;
2373 cmp_mode = DImode;
2376 /* The general case: fold the comparison code to the types of compares
2377 that we have, choosing the branch as necessary. */
2378 switch (code)
2380 case EQ: case LE: case LT: case LEU: case LTU:
2381 case UNORDERED:
2382 /* We have these compares. */
2383 cmp_code = code, branch_code = NE;
2384 break;
2386 case NE:
2387 case ORDERED:
2388 /* These must be reversed. */
2389 cmp_code = reverse_condition (code), branch_code = EQ;
2390 break;
2392 case GE: case GT: case GEU: case GTU:
2393 /* For FP, we swap them, for INT, we reverse them. */
2394 if (cmp_mode == DFmode)
2396 cmp_code = swap_condition (code);
2397 branch_code = NE;
2398 tem = op0, op0 = op1, op1 = tem;
2400 else
2402 cmp_code = reverse_condition (code);
2403 branch_code = EQ;
2405 break;
2407 default:
2408 gcc_unreachable ();
2411 if (cmp_mode == DFmode)
2413 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2415 /* When we are not as concerned about non-finite values, and we
2416 are comparing against zero, we can branch directly. */
2417 if (op1 == CONST0_RTX (DFmode))
2418 cmp_code = UNKNOWN, branch_code = code;
2419 else if (op0 == CONST0_RTX (DFmode))
2421 /* Undo the swap we probably did just above. */
2422 tem = op0, op0 = op1, op1 = tem;
2423 branch_code = swap_condition (cmp_code);
2424 cmp_code = UNKNOWN;
2427 else
2429 /* ??? We mark the branch mode to be CCmode to prevent the
2430 compare and branch from being combined, since the compare
2431 insn follows IEEE rules that the branch does not. */
2432 branch_mode = CCmode;
2435 else
2437 /* The following optimizations are only for signed compares. */
2438 if (code != LEU && code != LTU && code != GEU && code != GTU)
2440 /* Whee. Compare and branch against 0 directly. */
2441 if (op1 == const0_rtx)
2442 cmp_code = UNKNOWN, branch_code = code;
2444 /* If the constant doesn't fit into an immediate, but can
2445 be generated by lda/ldah, we adjust the argument and
2446 compare against zero, so we can use beq/bne directly. */
2447 /* ??? Don't do this when comparing against symbols, otherwise
2448 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2449 be declared false out of hand (at least for non-weak). */
2450 else if (CONST_INT_P (op1)
2451 && (code == EQ || code == NE)
2452 && !(symbolic_operand (op0, VOIDmode)
2453 || (REG_P (op0) && REG_POINTER (op0))))
2455 rtx n_op1 = GEN_INT (-INTVAL (op1));
2457 if (! satisfies_constraint_I (op1)
2458 && (satisfies_constraint_K (n_op1)
2459 || satisfies_constraint_L (n_op1)))
2460 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2464 if (!reg_or_0_operand (op0, DImode))
2465 op0 = force_reg (DImode, op0);
2466 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2467 op1 = force_reg (DImode, op1);
2470 /* Emit an initial compare instruction, if necessary. */
2471 tem = op0;
2472 if (cmp_code != UNKNOWN)
2474 tem = gen_reg_rtx (cmp_mode);
2475 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2478 /* Emit the branch instruction. */
2479 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2480 gen_rtx_IF_THEN_ELSE (VOIDmode,
2481 gen_rtx_fmt_ee (branch_code,
2482 branch_mode, tem,
2483 CONST0_RTX (cmp_mode)),
2484 gen_rtx_LABEL_REF (VOIDmode,
2485 operands[3]),
2486 pc_rtx));
2487 emit_jump_insn (tem);
2490 /* Certain simplifications can be done to make invalid setcc operations
2491 valid. Return true if we emitted the setcc, false if we can't. */
2493 bool
2494 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2496 enum rtx_code cmp_code;
2497 enum rtx_code code = GET_CODE (operands[1]);
2498 rtx op0 = operands[2], op1 = operands[3];
2499 rtx tmp;
2501 if (cmp_mode == TFmode)
2503 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2504 op1 = const0_rtx;
2505 cmp_mode = DImode;
2508 if (cmp_mode == DFmode && !TARGET_FIX)
2509 return 0;
2511 /* The general case: fold the comparison code to the types of compares
2512 that we have, choosing the branch as necessary. */
2514 cmp_code = UNKNOWN;
2515 switch (code)
2517 case EQ: case LE: case LT: case LEU: case LTU:
2518 case UNORDERED:
2519 /* We have these compares. */
2520 if (cmp_mode == DFmode)
2521 cmp_code = code, code = NE;
2522 break;
2524 case NE:
2525 if (cmp_mode == DImode && op1 == const0_rtx)
2526 break;
2527 /* FALLTHRU */
2529 case ORDERED:
2530 cmp_code = reverse_condition (code);
2531 code = EQ;
2532 break;
2534 case GE: case GT: case GEU: case GTU:
2535 /* These normally need swapping, but for integer zero we have
2536 special patterns that recognize swapped operands. */
2537 if (cmp_mode == DImode && op1 == const0_rtx)
2538 break;
2539 code = swap_condition (code);
2540 if (cmp_mode == DFmode)
2541 cmp_code = code, code = NE;
2542 tmp = op0, op0 = op1, op1 = tmp;
2543 break;
2545 default:
2546 gcc_unreachable ();
2549 if (cmp_mode == DImode)
2551 if (!register_operand (op0, DImode))
2552 op0 = force_reg (DImode, op0);
2553 if (!reg_or_8bit_operand (op1, DImode))
2554 op1 = force_reg (DImode, op1);
2557 /* Emit an initial compare instruction, if necessary. */
2558 if (cmp_code != UNKNOWN)
2560 tmp = gen_reg_rtx (cmp_mode);
2561 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2562 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2564 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2565 op1 = const0_rtx;
2568 /* Emit the setcc instruction. */
2569 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2570 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2571 return true;
2575 /* Rewrite a comparison against zero CMP of the form
2576 (CODE (cc0) (const_int 0)) so it can be written validly in
2577 a conditional move (if_then_else CMP ...).
2578 If both of the operands that set cc0 are nonzero we must emit
2579 an insn to perform the compare (it can't be done within
2580 the conditional move). */
2583 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2585 enum rtx_code code = GET_CODE (cmp);
2586 enum rtx_code cmov_code = NE;
2587 rtx op0 = XEXP (cmp, 0);
2588 rtx op1 = XEXP (cmp, 1);
2589 enum machine_mode cmp_mode
2590 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2591 enum machine_mode cmov_mode = VOIDmode;
2592 int local_fast_math = flag_unsafe_math_optimizations;
2593 rtx tem;
2595 if (cmp_mode == TFmode)
2597 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2598 op1 = const0_rtx;
2599 cmp_mode = DImode;
2602 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2604 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2606 enum rtx_code cmp_code;
2608 if (! TARGET_FIX)
2609 return 0;
2611 /* If we have fp<->int register move instructions, do a cmov by
2612 performing the comparison in fp registers, and move the
2613 zero/nonzero value to integer registers, where we can then
2614 use a normal cmov, or vice-versa. */
2616 switch (code)
2618 case EQ: case LE: case LT: case LEU: case LTU:
2619 case UNORDERED:
2620 /* We have these compares. */
2621 cmp_code = code, code = NE;
2622 break;
2624 case NE:
2625 case ORDERED:
2626 /* These must be reversed. */
2627 cmp_code = reverse_condition (code), code = EQ;
2628 break;
2630 case GE: case GT: case GEU: case GTU:
2631 /* These normally need swapping, but for integer zero we have
2632 special patterns that recognize swapped operands. */
2633 if (cmp_mode == DImode && op1 == const0_rtx)
2634 cmp_code = code, code = NE;
2635 else
2637 cmp_code = swap_condition (code);
2638 code = NE;
2639 tem = op0, op0 = op1, op1 = tem;
2641 break;
2643 default:
2644 gcc_unreachable ();
2647 if (cmp_mode == DImode)
2649 if (!reg_or_0_operand (op0, DImode))
2650 op0 = force_reg (DImode, op0);
2651 if (!reg_or_8bit_operand (op1, DImode))
2652 op1 = force_reg (DImode, op1);
2655 tem = gen_reg_rtx (cmp_mode);
2656 emit_insn (gen_rtx_SET (VOIDmode, tem,
2657 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2658 op0, op1)));
2660 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2661 op0 = gen_lowpart (cmp_mode, tem);
2662 op1 = CONST0_RTX (cmp_mode);
2663 cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2664 local_fast_math = 1;
2667 if (cmp_mode == DImode)
2669 if (!reg_or_0_operand (op0, DImode))
2670 op0 = force_reg (DImode, op0);
2671 if (!reg_or_8bit_operand (op1, DImode))
2672 op1 = force_reg (DImode, op1);
2675 /* We may be able to use a conditional move directly.
2676 This avoids emitting spurious compares. */
2677 if (signed_comparison_operator (cmp, VOIDmode)
2678 && (cmp_mode == DImode || local_fast_math)
2679 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2680 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2682 /* We can't put the comparison inside the conditional move;
2683 emit a compare instruction and put that inside the
2684 conditional move. Make sure we emit only comparisons we have;
2685 swap or reverse as necessary. */
2687 if (!can_create_pseudo_p ())
2688 return NULL_RTX;
2690 switch (code)
2692 case EQ: case LE: case LT: case LEU: case LTU:
2693 case UNORDERED:
2694 /* We have these compares: */
2695 break;
2697 case NE:
2698 case ORDERED:
2699 /* These must be reversed. */
2700 code = reverse_condition (code);
2701 cmov_code = EQ;
2702 break;
2704 case GE: case GT: case GEU: case GTU:
2705 /* These normally need swapping, but for integer zero we have
2706 special patterns that recognize swapped operands. */
2707 if (cmp_mode == DImode && op1 == const0_rtx)
2708 break;
2709 code = swap_condition (code);
2710 tem = op0, op0 = op1, op1 = tem;
2711 break;
2713 default:
2714 gcc_unreachable ();
2717 if (cmp_mode == DImode)
2719 if (!reg_or_0_operand (op0, DImode))
2720 op0 = force_reg (DImode, op0);
2721 if (!reg_or_8bit_operand (op1, DImode))
2722 op1 = force_reg (DImode, op1);
2725 /* ??? We mark the branch mode to be CCmode to prevent the compare
2726 and cmov from being combined, since the compare insn follows IEEE
2727 rules that the cmov does not. */
2728 if (cmp_mode == DFmode && !local_fast_math)
2729 cmov_mode = CCmode;
2731 tem = gen_reg_rtx (cmp_mode);
2732 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2733 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2736 /* Simplify a conditional move of two constants into a setcc with
2737 arithmetic. This is done with a splitter since combine would
2738 just undo the work if done during code generation. It also catches
2739 cases we wouldn't have before cse. */
2742 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2743 rtx t_rtx, rtx f_rtx)
2745 HOST_WIDE_INT t, f, diff;
2746 enum machine_mode mode;
2747 rtx target, subtarget, tmp;
2749 mode = GET_MODE (dest);
2750 t = INTVAL (t_rtx);
2751 f = INTVAL (f_rtx);
2752 diff = t - f;
2754 if (((code == NE || code == EQ) && diff < 0)
2755 || (code == GE || code == GT))
2757 code = reverse_condition (code);
2758 diff = t, t = f, f = diff;
2759 diff = t - f;
2762 subtarget = target = dest;
2763 if (mode != DImode)
2765 target = gen_lowpart (DImode, dest);
2766 if (can_create_pseudo_p ())
2767 subtarget = gen_reg_rtx (DImode);
2768 else
2769 subtarget = target;
2771 /* Below, we must be careful to use copy_rtx on target and subtarget
2772 in intermediate insns, as they may be a subreg rtx, which may not
2773 be shared. */
2775 if (f == 0 && exact_log2 (diff) > 0
2776 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2777 viable over a longer latency cmove. On EV5, the E0 slot is a
2778 scarce resource, and on EV4 shift has the same latency as a cmove. */
2779 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2781 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2782 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2784 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2785 GEN_INT (exact_log2 (t)));
2786 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2788 else if (f == 0 && t == -1)
2790 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2791 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2793 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2795 else if (diff == 1 || diff == 4 || diff == 8)
2797 rtx add_op;
2799 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2800 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2802 if (diff == 1)
2803 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2804 else
2806 add_op = GEN_INT (f);
2807 if (sext_add_operand (add_op, mode))
2809 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2810 GEN_INT (diff));
2811 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2812 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2814 else
2815 return 0;
2818 else
2819 return 0;
2821 return 1;
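/* Illustrative sketch, not part of GCC: the splitter above replaces a
   conditional move of two constants with arithmetic on the 0/1 result of
   the comparison: a shift when F == 0 and T is a power of two, a negation
   when F == 0 and T == -1, and an add or scaled add (the s4addq/s8addq
   forms) when T - F is 1, 4 or 8.  A hypothetical helper showing those
   identities on plain integers (example_* name assumed):  */
static long long
example_cmov_constants (int cond, long long t, long long f)
{
  long long p = (cond != 0);    /* the 0/1 setcc result */
  long long diff = t - f;

  if (f == 0 && t == 8)
    return p << 3;              /* shift form, here for t == 8: p << exact_log2 (t) */
  if (f == 0 && t == -1)
    return -p;                  /* negation form */
  if (diff == 1 || diff == 4 || diff == 8)
    return p * diff + f;        /* add or scaled-add form */
  return cond ? t : f;          /* otherwise keep the conditional move */
}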
2824 /* Look up the function X_floating library function name for the
2825 given operation. */
2827 struct GTY(()) xfloating_op
2829 const enum rtx_code code;
2830 const char *const GTY((skip)) osf_func;
2831 const char *const GTY((skip)) vms_func;
2832 rtx libcall;
2835 static GTY(()) struct xfloating_op xfloating_ops[] =
2837 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2838 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2839 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2840 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2841 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2842 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2843 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2844 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2845 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2846 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2847 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2848 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2849 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2850 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2851 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2854 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2856 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2857 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2860 static rtx
2861 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2863 struct xfloating_op *ops = xfloating_ops;
2864 long n = ARRAY_SIZE (xfloating_ops);
2865 long i;
2867 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2869 /* How irritating. Nothing to key off for the main table. */
2870 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2872 ops = vax_cvt_ops;
2873 n = ARRAY_SIZE (vax_cvt_ops);
2876 for (i = 0; i < n; ++i, ++ops)
2877 if (ops->code == code)
2879 rtx func = ops->libcall;
2880 if (!func)
2882 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2883 ? ops->vms_func : ops->osf_func);
2884 ops->libcall = func;
2886 return func;
2889 gcc_unreachable ();
2892 /* Most X_floating operations take the rounding mode as an argument.
2893 Compute that here. */
2895 static int
2896 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2897 enum alpha_fp_rounding_mode round)
2899 int mode;
2901 switch (round)
2903 case ALPHA_FPRM_NORM:
2904 mode = 2;
2905 break;
2906 case ALPHA_FPRM_MINF:
2907 mode = 1;
2908 break;
2909 case ALPHA_FPRM_CHOP:
2910 mode = 0;
2911 break;
2912 case ALPHA_FPRM_DYN:
2913 mode = 4;
2914 break;
2915 default:
2916 gcc_unreachable ();
2918 /* XXX For reference, round to +inf is mode = 3. */
2921 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2922 mode |= 0x10000;
2924 return mode;
2927 /* Emit an X_floating library function call.
2929 Note that these functions do not follow normal calling conventions:
2930 TFmode arguments are passed in two integer registers (as opposed to
2931 indirect); TFmode return values appear in R16+R17.
2933 FUNC is the function to call.
2934 TARGET is where the output belongs.
2935 OPERANDS are the inputs.
2936 NOPERANDS is the count of inputs.
2937 EQUIV is the expression equivalent for the function.
2940 static void
2941 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2942 int noperands, rtx equiv)
2944 rtx usage = NULL_RTX, tmp, reg;
2945 int regno = 16, i;
2947 start_sequence ();
2949 for (i = 0; i < noperands; ++i)
2951 switch (GET_MODE (operands[i]))
2953 case TFmode:
2954 reg = gen_rtx_REG (TFmode, regno);
2955 regno += 2;
2956 break;
2958 case DFmode:
2959 reg = gen_rtx_REG (DFmode, regno + 32);
2960 regno += 1;
2961 break;
2963 case VOIDmode:
2964 gcc_assert (CONST_INT_P (operands[i]));
2965 /* FALLTHRU */
2966 case DImode:
2967 reg = gen_rtx_REG (DImode, regno);
2968 regno += 1;
2969 break;
2971 default:
2972 gcc_unreachable ();
2975 emit_move_insn (reg, operands[i]);
2976 use_reg (&usage, reg);
2979 switch (GET_MODE (target))
2981 case TFmode:
2982 reg = gen_rtx_REG (TFmode, 16);
2983 break;
2984 case DFmode:
2985 reg = gen_rtx_REG (DFmode, 32);
2986 break;
2987 case DImode:
2988 reg = gen_rtx_REG (DImode, 0);
2989 break;
2990 default:
2991 gcc_unreachable ();
2994 tmp = gen_rtx_MEM (QImode, func);
2995 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
2996 const0_rtx, const0_rtx));
2997 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
2998 RTL_CONST_CALL_P (tmp) = 1;
3000 tmp = get_insns ();
3001 end_sequence ();
3003 emit_libcall_block (tmp, target, reg, equiv);
3006 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3008 void
3009 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3011 rtx func;
3012 int mode;
3013 rtx out_operands[3];
3015 func = alpha_lookup_xfloating_lib_func (code);
3016 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3018 out_operands[0] = operands[1];
3019 out_operands[1] = operands[2];
3020 out_operands[2] = GEN_INT (mode);
3021 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3022 gen_rtx_fmt_ee (code, TFmode, operands[1],
3023 operands[2]));
3026 /* Emit an X_floating library function call for a comparison. */
3028 static rtx
3029 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3031 enum rtx_code cmp_code, res_code;
3032 rtx func, out, operands[2], note;
3034 /* X_floating library comparison functions return
3035 -1 unordered
3036 0 false
3037 1 true
3038 Convert the compare against the raw return value. */
3040 cmp_code = *pcode;
3041 switch (cmp_code)
3043 case UNORDERED:
3044 cmp_code = EQ;
3045 res_code = LT;
3046 break;
3047 case ORDERED:
3048 cmp_code = EQ;
3049 res_code = GE;
3050 break;
3051 case NE:
3052 res_code = NE;
3053 break;
3054 case EQ:
3055 case LT:
3056 case GT:
3057 case LE:
3058 case GE:
3059 res_code = GT;
3060 break;
3061 default:
3062 gcc_unreachable ();
3064 *pcode = res_code;
3066 func = alpha_lookup_xfloating_lib_func (cmp_code);
3068 operands[0] = op0;
3069 operands[1] = op1;
3070 out = gen_reg_rtx (DImode);
3072 /* What's actually returned is -1,0,1, not a proper boolean value. */
3073 note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
3074 note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
3075 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3077 return out;
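/* Illustrative sketch, not part of GCC: because the library routines
   return -1 (unordered), 0 (false) or 1 (true), the requested predicate is
   rewritten above as a sign test on that raw value: UNORDERED becomes
   "the call returned a negative value", ORDERED becomes "non-negative",
   NE stays a nonzero test, and the ordered predicates become "> 0" tests.
   A hypothetical comparator following the same convention (example_*
   names assumed):  */
static int
example_lss (double a, double b)
{
  if (a != a || b != b)
    return -1;                  /* unordered */
  return a < b ? 1 : 0;
}

static int
example_unordered (double a, double b)
{
  /* UNORDERED is tested as "the comparison call reported -1".  */
  return example_lss (a, b) < 0;
}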
3080 /* Emit an X_floating library function call for a conversion. */
3082 void
3083 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3085 int noperands = 1, mode;
3086 rtx out_operands[2];
3087 rtx func;
3088 enum rtx_code code = orig_code;
3090 if (code == UNSIGNED_FIX)
3091 code = FIX;
3093 func = alpha_lookup_xfloating_lib_func (code);
3095 out_operands[0] = operands[1];
3097 switch (code)
3099 case FIX:
3100 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3101 out_operands[1] = GEN_INT (mode);
3102 noperands = 2;
3103 break;
3104 case FLOAT_TRUNCATE:
3105 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3106 out_operands[1] = GEN_INT (mode);
3107 noperands = 2;
3108 break;
3109 default:
3110 break;
3113 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3114 gen_rtx_fmt_e (orig_code,
3115 GET_MODE (operands[0]),
3116 operands[1]));
3119 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3120 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3121 guarantee that the sequence
3122 set (OP[0] OP[2])
3123 set (OP[1] OP[3])
3124 is valid. Naturally, output operand ordering is little-endian.
3125 This is used by *movtf_internal and *movti_internal. */
3127 void
3128 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3129 bool fixup_overlap)
3131 switch (GET_CODE (operands[1]))
3133 case REG:
3134 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3135 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3136 break;
3138 case MEM:
3139 operands[3] = adjust_address (operands[1], DImode, 8);
3140 operands[2] = adjust_address (operands[1], DImode, 0);
3141 break;
3143 case CONST_INT:
3144 case CONST_DOUBLE:
3145 gcc_assert (operands[1] == CONST0_RTX (mode));
3146 operands[2] = operands[3] = const0_rtx;
3147 break;
3149 default:
3150 gcc_unreachable ();
3153 switch (GET_CODE (operands[0]))
3155 case REG:
3156 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3157 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3158 break;
3160 case MEM:
3161 operands[1] = adjust_address (operands[0], DImode, 8);
3162 operands[0] = adjust_address (operands[0], DImode, 0);
3163 break;
3165 default:
3166 gcc_unreachable ();
3169 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3171 rtx tmp;
3172 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3173 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3177 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3178 op2 is a register containing the sign bit, operation is the
3179 logical operation to be performed. */
3181 void
3182 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3184 rtx high_bit = operands[2];
3185 rtx scratch;
3186 int move;
3188 alpha_split_tmode_pair (operands, TFmode, false);
3190 /* Detect three flavors of operand overlap. */
3191 move = 1;
3192 if (rtx_equal_p (operands[0], operands[2]))
3193 move = 0;
3194 else if (rtx_equal_p (operands[1], operands[2]))
3196 if (rtx_equal_p (operands[0], high_bit))
3197 move = 2;
3198 else
3199 move = -1;
3202 if (move < 0)
3203 emit_move_insn (operands[0], operands[2]);
3205 /* ??? If the destination overlaps both source tf and high_bit, then
3206 assume source tf is dead in its entirety and use the other half
3207 for a scratch register. Otherwise "scratch" is just the proper
3208 destination register. */
3209 scratch = operands[move < 2 ? 1 : 3];
3211 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3213 if (move > 0)
3215 emit_move_insn (operands[0], operands[2]);
3216 if (move > 1)
3217 emit_move_insn (operands[1], scratch);
3221 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3222 unaligned data:
3224 unsigned: signed:
3225 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3226 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3227 lda r3,X(r11) lda r3,X+2(r11)
3228 extwl r1,r3,r1 extql r1,r3,r1
3229 extwh r2,r3,r2 extqh r2,r3,r2
3230 or r1,r2,r1 or r1,r2,r1
3231 sra r1,48,r1
3233 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3234 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3235 lda r3,X(r11) lda r3,X(r11)
3236 extll r1,r3,r1 extll r1,r3,r1
3237 extlh r2,r3,r2 extlh r2,r3,r2
3238 or r1,r2,r1 addl r1,r2,r1
3240 quad: ldq_u r1,X(r11)
3241 ldq_u r2,X+7(r11)
3242 lda r3,X(r11)
3243 extql r1,r3,r1
3244 extqh r2,r3,r2
3245 or r1,r2,r1
3248 void
3249 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3250 HOST_WIDE_INT ofs, int sign)
3252 rtx meml, memh, addr, extl, exth, tmp, mema;
3253 enum machine_mode mode;
3255 if (TARGET_BWX && size == 2)
3257 meml = adjust_address (mem, QImode, ofs);
3258 memh = adjust_address (mem, QImode, ofs+1);
3259 extl = gen_reg_rtx (DImode);
3260 exth = gen_reg_rtx (DImode);
3261 emit_insn (gen_zero_extendqidi2 (extl, meml));
3262 emit_insn (gen_zero_extendqidi2 (exth, memh));
3263 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3264 NULL, 1, OPTAB_LIB_WIDEN);
3265 addr = expand_simple_binop (DImode, IOR, extl, exth,
3266 NULL, 1, OPTAB_LIB_WIDEN);
3268 if (sign && GET_MODE (tgt) != HImode)
3270 addr = gen_lowpart (HImode, addr);
3271 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3273 else
3275 if (GET_MODE (tgt) != DImode)
3276 addr = gen_lowpart (GET_MODE (tgt), addr);
3277 emit_move_insn (tgt, addr);
3279 return;
3282 meml = gen_reg_rtx (DImode);
3283 memh = gen_reg_rtx (DImode);
3284 addr = gen_reg_rtx (DImode);
3285 extl = gen_reg_rtx (DImode);
3286 exth = gen_reg_rtx (DImode);
3288 mema = XEXP (mem, 0);
3289 if (GET_CODE (mema) == LO_SUM)
3290 mema = force_reg (Pmode, mema);
3292 /* AND addresses cannot be in any alias set, since they may implicitly
3293 alias surrounding code. Ideally we'd have some alias set that
3294 covered all types except those with alignment 8 or higher. */
3296 tmp = change_address (mem, DImode,
3297 gen_rtx_AND (DImode,
3298 plus_constant (DImode, mema, ofs),
3299 GEN_INT (-8)));
3300 set_mem_alias_set (tmp, 0);
3301 emit_move_insn (meml, tmp);
3303 tmp = change_address (mem, DImode,
3304 gen_rtx_AND (DImode,
3305 plus_constant (DImode, mema,
3306 ofs + size - 1),
3307 GEN_INT (-8)));
3308 set_mem_alias_set (tmp, 0);
3309 emit_move_insn (memh, tmp);
3311 if (sign && size == 2)
3313 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
3315 emit_insn (gen_extql (extl, meml, addr));
3316 emit_insn (gen_extqh (exth, memh, addr));
3318 /* We must use tgt here for the target. Alpha-vms port fails if we use
3319 addr for the target, because addr is marked as a pointer and combine
3320 knows that pointers are always sign-extended 32-bit values. */
3321 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3322 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3323 addr, 1, OPTAB_WIDEN);
3325 else
3327 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
3328 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3329 switch ((int) size)
3331 case 2:
3332 emit_insn (gen_extwh (exth, memh, addr));
3333 mode = HImode;
3334 break;
3335 case 4:
3336 emit_insn (gen_extlh (exth, memh, addr));
3337 mode = SImode;
3338 break;
3339 case 8:
3340 emit_insn (gen_extqh (exth, memh, addr));
3341 mode = DImode;
3342 break;
3343 default:
3344 gcc_unreachable ();
3347 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3348 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3349 sign, OPTAB_WIDEN);
3352 if (addr != tgt)
3353 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
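/* Illustrative sketch, not part of GCC: the expander above implements the
   Architecture Handbook recipe of loading the two aligned quadwords that
   enclose the datum and merging them with ext*l/ext*h.  A hypothetical
   stand-alone quadword version in plain C, assuming a little-endian host,
   that ALIGNED_BASE points at the enclosing aligned quadword, and that
   BYTE_OFS is addr & 7 (example_* name assumed):  */
static unsigned long long
example_unaligned_load_quad (const unsigned long long *aligned_base,
                             unsigned int byte_ofs)
{
  unsigned long long lo, hi;

  lo = aligned_base[0];                 /* ldq_u r1,X(r11)   */
  if (byte_ofs == 0)
    return lo;                          /* already aligned   */

  hi = aligned_base[1];                 /* ldq_u r2,X+7(r11) */
  lo >>= byte_ofs * 8;                  /* extql r1,r3,r1    */
  hi <<= (8 - byte_ofs) * 8;            /* extqh r2,r3,r2    */
  return lo | hi;                       /* or   r1,r2,r1     */
}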
3356 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3358 void
3359 alpha_expand_unaligned_store (rtx dst, rtx src,
3360 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3362 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3364 if (TARGET_BWX && size == 2)
3366 if (src != const0_rtx)
3368 dstl = gen_lowpart (QImode, src);
3369 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3370 NULL, 1, OPTAB_LIB_WIDEN);
3371 dsth = gen_lowpart (QImode, dsth);
3373 else
3374 dstl = dsth = const0_rtx;
3376 meml = adjust_address (dst, QImode, ofs);
3377 memh = adjust_address (dst, QImode, ofs+1);
3379 emit_move_insn (meml, dstl);
3380 emit_move_insn (memh, dsth);
3381 return;
3384 dstl = gen_reg_rtx (DImode);
3385 dsth = gen_reg_rtx (DImode);
3386 insl = gen_reg_rtx (DImode);
3387 insh = gen_reg_rtx (DImode);
3389 dsta = XEXP (dst, 0);
3390 if (GET_CODE (dsta) == LO_SUM)
3391 dsta = force_reg (Pmode, dsta);
3393 /* AND addresses cannot be in any alias set, since they may implicitly
3394 alias surrounding code. Ideally we'd have some alias set that
3395 covered all types except those with alignment 8 or higher. */
3397 meml = change_address (dst, DImode,
3398 gen_rtx_AND (DImode,
3399 plus_constant (DImode, dsta, ofs),
3400 GEN_INT (-8)));
3401 set_mem_alias_set (meml, 0);
3403 memh = change_address (dst, DImode,
3404 gen_rtx_AND (DImode,
3405 plus_constant (DImode, dsta,
3406 ofs + size - 1),
3407 GEN_INT (-8)));
3408 set_mem_alias_set (memh, 0);
3410 emit_move_insn (dsth, memh);
3411 emit_move_insn (dstl, meml);
3413 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
3415 if (src != CONST0_RTX (GET_MODE (src)))
3417 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3418 GEN_INT (size*8), addr));
3420 switch ((int) size)
3422 case 2:
3423 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3424 break;
3425 case 4:
3426 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3427 break;
3428 case 8:
3429 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3430 break;
3431 default:
3432 gcc_unreachable ();
3436 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3438 switch ((int) size)
3440 case 2:
3441 emit_insn (gen_mskwl (dstl, dstl, addr));
3442 break;
3443 case 4:
3444 emit_insn (gen_mskll (dstl, dstl, addr));
3445 break;
3446 case 8:
3447 emit_insn (gen_mskql (dstl, dstl, addr));
3448 break;
3449 default:
3450 gcc_unreachable ();
3453 if (src != CONST0_RTX (GET_MODE (src)))
3455 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3456 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3459 /* Must store high before low for the degenerate aligned case. */
3460 emit_move_insn (memh, dsth);
3461 emit_move_insn (meml, dstl);
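/* Illustrative sketch, not part of GCC: the store expander above does a
   read-modify-write of the two enclosing aligned quadwords, inserting the
   shifted source (ins*l/ins*h), masking out the bytes being replaced
   (msk*l/msk*h), OR-ing, and storing high before low.  A hypothetical
   quadword version in plain C under the same assumptions as the load
   sketch above (little-endian host, BYTE_OFS == addr & 7):  */
static void
example_unaligned_store_quad (unsigned long long *aligned_base,
                              unsigned int byte_ofs,
                              unsigned long long value)
{
  unsigned long long lo, hi, insl, insh;

  if (byte_ofs == 0)
    {
      aligned_base[0] = value;          /* already aligned: plain stq */
      return;
    }

  lo = aligned_base[0];                 /* ldq_u of the low quadword  */
  hi = aligned_base[1];                 /* ldq_u of the high quadword */

  insl = value << (byte_ofs * 8);                   /* insql */
  insh = value >> ((8 - byte_ofs) * 8);             /* insqh */

  lo = (lo & ~(~0ULL << (byte_ofs * 8))) | insl;    /* mskql + or */
  hi = (hi & (~0ULL << (byte_ofs * 8))) | insh;     /* mskqh + or */

  aligned_base[1] = hi;                 /* store high before low */
  aligned_base[0] = lo;
}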
3464 /* The block move code tries to maximize speed by separating loads and
3465 stores at the expense of register pressure: we load all of the data
3466 before we store it back out. There are two secondary effects worth
3467 mentioning, that this speeds copying to/from aligned and unaligned
3468 buffers, and that it makes the code significantly easier to write. */
3470 #define MAX_MOVE_WORDS 8
3472 /* Load an integral number of consecutive unaligned quadwords. */
3474 static void
3475 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3476 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3478 rtx const im8 = GEN_INT (-8);
3479 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3480 rtx sreg, areg, tmp, smema;
3481 HOST_WIDE_INT i;
3483 smema = XEXP (smem, 0);
3484 if (GET_CODE (smema) == LO_SUM)
3485 smema = force_reg (Pmode, smema);
3487 /* Generate all the tmp registers we need. */
3488 for (i = 0; i < words; ++i)
3490 data_regs[i] = out_regs[i];
3491 ext_tmps[i] = gen_reg_rtx (DImode);
3493 data_regs[words] = gen_reg_rtx (DImode);
3495 if (ofs != 0)
3496 smem = adjust_address (smem, GET_MODE (smem), ofs);
3498 /* Load up all of the source data. */
3499 for (i = 0; i < words; ++i)
3501 tmp = change_address (smem, DImode,
3502 gen_rtx_AND (DImode,
3503 plus_constant (DImode, smema, 8*i),
3504 im8));
3505 set_mem_alias_set (tmp, 0);
3506 emit_move_insn (data_regs[i], tmp);
3509 tmp = change_address (smem, DImode,
3510 gen_rtx_AND (DImode,
3511 plus_constant (DImode, smema,
3512 8*words - 1),
3513 im8));
3514 set_mem_alias_set (tmp, 0);
3515 emit_move_insn (data_regs[words], tmp);
3517 /* Extract the half-word fragments. Unfortunately DEC decided to make
3518 extxh with offset zero a noop instead of zeroing the register, so
3519 we must take care of that edge condition ourselves with cmov. */
3521 sreg = copy_addr_to_reg (smema);
3522 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3523 1, OPTAB_WIDEN);
3524 for (i = 0; i < words; ++i)
3526 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3527 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3528 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3529 gen_rtx_IF_THEN_ELSE (DImode,
3530 gen_rtx_EQ (DImode, areg,
3531 const0_rtx),
3532 const0_rtx, ext_tmps[i])));
3535 /* Merge the half-words into whole words. */
3536 for (i = 0; i < words; ++i)
3538 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3539 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3543 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3544 may be NULL to store zeros. */
3546 static void
3547 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3548 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3550 rtx const im8 = GEN_INT (-8);
3551 rtx ins_tmps[MAX_MOVE_WORDS];
3552 rtx st_tmp_1, st_tmp_2, dreg;
3553 rtx st_addr_1, st_addr_2, dmema;
3554 HOST_WIDE_INT i;
3556 dmema = XEXP (dmem, 0);
3557 if (GET_CODE (dmema) == LO_SUM)
3558 dmema = force_reg (Pmode, dmema);
3560 /* Generate all the tmp registers we need. */
3561 if (data_regs != NULL)
3562 for (i = 0; i < words; ++i)
3563 ins_tmps[i] = gen_reg_rtx(DImode);
3564 st_tmp_1 = gen_reg_rtx(DImode);
3565 st_tmp_2 = gen_reg_rtx(DImode);
3567 if (ofs != 0)
3568 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3570 st_addr_2 = change_address (dmem, DImode,
3571 gen_rtx_AND (DImode,
3572 plus_constant (DImode, dmema,
3573 words*8 - 1),
3574 im8));
3575 set_mem_alias_set (st_addr_2, 0);
3577 st_addr_1 = change_address (dmem, DImode,
3578 gen_rtx_AND (DImode, dmema, im8));
3579 set_mem_alias_set (st_addr_1, 0);
3581 /* Load up the destination end bits. */
3582 emit_move_insn (st_tmp_2, st_addr_2);
3583 emit_move_insn (st_tmp_1, st_addr_1);
3585 /* Shift the input data into place. */
3586 dreg = copy_addr_to_reg (dmema);
3587 if (data_regs != NULL)
3589 for (i = words-1; i >= 0; --i)
3591 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3592 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3594 for (i = words-1; i > 0; --i)
3596 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3597 ins_tmps[i-1], ins_tmps[i-1], 1,
3598 OPTAB_WIDEN);
3602 /* Split and merge the ends with the destination data. */
3603 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3604 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3606 if (data_regs != NULL)
3608 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3609 st_tmp_2, 1, OPTAB_WIDEN);
3610 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3611 st_tmp_1, 1, OPTAB_WIDEN);
3614 /* Store it all. */
3615 emit_move_insn (st_addr_2, st_tmp_2);
3616 for (i = words-1; i > 0; --i)
3618 rtx tmp = change_address (dmem, DImode,
3619 gen_rtx_AND (DImode,
3620 plus_constant (DImode,
3621 dmema, i*8),
3622 im8));
3623 set_mem_alias_set (tmp, 0);
3624 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3626 emit_move_insn (st_addr_1, st_tmp_1);
3630 /* Expand string/block move operations.
3632 operands[0] is the pointer to the destination.
3633 operands[1] is the pointer to the source.
3634 operands[2] is the number of bytes to move.
3635 operands[3] is the alignment. */
3638 alpha_expand_block_move (rtx operands[])
3640 rtx bytes_rtx = operands[2];
3641 rtx align_rtx = operands[3];
3642 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3643 HOST_WIDE_INT bytes = orig_bytes;
3644 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3645 HOST_WIDE_INT dst_align = src_align;
3646 rtx orig_src = operands[1];
3647 rtx orig_dst = operands[0];
3648 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3649 rtx tmp;
3650 unsigned int i, words, ofs, nregs = 0;
3652 if (orig_bytes <= 0)
3653 return 1;
3654 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3655 return 0;
3657 /* Look for additional alignment information from recorded register info. */
3659 tmp = XEXP (orig_src, 0);
3660 if (REG_P (tmp))
3661 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3662 else if (GET_CODE (tmp) == PLUS
3663 && REG_P (XEXP (tmp, 0))
3664 && CONST_INT_P (XEXP (tmp, 1)))
3666 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3667 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3669 if (a > src_align)
3671 if (a >= 64 && c % 8 == 0)
3672 src_align = 64;
3673 else if (a >= 32 && c % 4 == 0)
3674 src_align = 32;
3675 else if (a >= 16 && c % 2 == 0)
3676 src_align = 16;
3680 tmp = XEXP (orig_dst, 0);
3681 if (REG_P (tmp))
3682 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3683 else if (GET_CODE (tmp) == PLUS
3684 && REG_P (XEXP (tmp, 0))
3685 && CONST_INT_P (XEXP (tmp, 1)))
3687 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3688 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3690 if (a > dst_align)
3692 if (a >= 64 && c % 8 == 0)
3693 dst_align = 64;
3694 else if (a >= 32 && c % 4 == 0)
3695 dst_align = 32;
3696 else if (a >= 16 && c % 2 == 0)
3697 dst_align = 16;
3701 ofs = 0;
3702 if (src_align >= 64 && bytes >= 8)
3704 words = bytes / 8;
3706 for (i = 0; i < words; ++i)
3707 data_regs[nregs + i] = gen_reg_rtx (DImode);
3709 for (i = 0; i < words; ++i)
3710 emit_move_insn (data_regs[nregs + i],
3711 adjust_address (orig_src, DImode, ofs + i * 8));
3713 nregs += words;
3714 bytes -= words * 8;
3715 ofs += words * 8;
3718 if (src_align >= 32 && bytes >= 4)
3720 words = bytes / 4;
3722 for (i = 0; i < words; ++i)
3723 data_regs[nregs + i] = gen_reg_rtx (SImode);
3725 for (i = 0; i < words; ++i)
3726 emit_move_insn (data_regs[nregs + i],
3727 adjust_address (orig_src, SImode, ofs + i * 4));
3729 nregs += words;
3730 bytes -= words * 4;
3731 ofs += words * 4;
3734 if (bytes >= 8)
3736 words = bytes / 8;
3738 for (i = 0; i < words+1; ++i)
3739 data_regs[nregs + i] = gen_reg_rtx (DImode);
3741 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3742 words, ofs);
3744 nregs += words;
3745 bytes -= words * 8;
3746 ofs += words * 8;
3749 if (! TARGET_BWX && bytes >= 4)
3751 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3752 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3753 bytes -= 4;
3754 ofs += 4;
3757 if (bytes >= 2)
3759 if (src_align >= 16)
3761 do {
3762 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3763 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3764 bytes -= 2;
3765 ofs += 2;
3766 } while (bytes >= 2);
3768 else if (! TARGET_BWX)
3770 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3771 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3772 bytes -= 2;
3773 ofs += 2;
3777 while (bytes > 0)
3779 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3780 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3781 bytes -= 1;
3782 ofs += 1;
3785 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3787 /* Now save it back out again. */
3789 i = 0, ofs = 0;
3791 /* Write out the data in whatever chunks reading the source allowed. */
3792 if (dst_align >= 64)
3794 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3796 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3797 data_regs[i]);
3798 ofs += 8;
3799 i++;
3803 if (dst_align >= 32)
3805 /* If the source has remaining DImode regs, write them out in
3806 two pieces. */
3807 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3809 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3810 NULL_RTX, 1, OPTAB_WIDEN);
3812 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3813 gen_lowpart (SImode, data_regs[i]));
3814 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3815 gen_lowpart (SImode, tmp));
3816 ofs += 8;
3817 i++;
3820 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3822 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3823 data_regs[i]);
3824 ofs += 4;
3825 i++;
3829 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3831 /* Write out a remaining block of words using unaligned methods. */
3833 for (words = 1; i + words < nregs; words++)
3834 if (GET_MODE (data_regs[i + words]) != DImode)
3835 break;
3837 if (words == 1)
3838 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3839 else
3840 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3841 words, ofs);
3843 i += words;
3844 ofs += words * 8;
3847 /* Due to the above, this won't be aligned. */
3848 /* ??? If we have more than one of these, consider constructing full
3849 words in registers and using alpha_expand_unaligned_store_words. */
3850 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3852 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3853 ofs += 4;
3854 i++;
3857 if (dst_align >= 16)
3858 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3860 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3861 i++;
3862 ofs += 2;
3864 else
3865 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3867 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3868 i++;
3869 ofs += 2;
3872 /* The remainder must be byte copies. */
3873 while (i < nregs)
3875 gcc_assert (GET_MODE (data_regs[i]) == QImode);
3876 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
3877 i++;
3878 ofs += 1;
3881 return 1;
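/* Illustrative sketch, not part of GCC: the expander above reads the copy
   in the largest chunks the source alignment allows (8, 4, 2, then 1
   bytes), keeps every chunk live in a register, and then writes it out in
   the largest chunks the destination alignment allows.  The hypothetical
   helper below mirrors only the chunk-size selection with plain C types;
   it stores each chunk immediately and assumes ALIGN really is a common
   alignment of both pointers (example_* name assumed):  */
static void
example_block_move (unsigned char *dst, const unsigned char *src,
                    unsigned long bytes, unsigned int align)
{
  while (align >= 8 && bytes >= 8)
    {
      *(unsigned long long *) dst = *(const unsigned long long *) src;
      dst += 8, src += 8, bytes -= 8;
    }
  while (align >= 4 && bytes >= 4)
    {
      *(unsigned int *) dst = *(const unsigned int *) src;
      dst += 4, src += 4, bytes -= 4;
    }
  while (align >= 2 && bytes >= 2)
    {
      *(unsigned short *) dst = *(const unsigned short *) src;
      dst += 2, src += 2, bytes -= 2;
    }
  while (bytes > 0)
    {
      *dst++ = *src++;
      bytes -= 1;
    }
}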
3885 alpha_expand_block_clear (rtx operands[])
3887 rtx bytes_rtx = operands[1];
3888 rtx align_rtx = operands[3];
3889 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3890 HOST_WIDE_INT bytes = orig_bytes;
3891 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3892 HOST_WIDE_INT alignofs = 0;
3893 rtx orig_dst = operands[0];
3894 rtx tmp;
3895 int i, words, ofs = 0;
3897 if (orig_bytes <= 0)
3898 return 1;
3899 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3900 return 0;
3902 /* Look for stricter alignment. */
3903 tmp = XEXP (orig_dst, 0);
3904 if (REG_P (tmp))
3905 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3906 else if (GET_CODE (tmp) == PLUS
3907 && REG_P (XEXP (tmp, 0))
3908 && CONST_INT_P (XEXP (tmp, 1)))
3910 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3911 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3913 if (a > align)
3915 if (a >= 64)
3916 align = a, alignofs = 8 - c % 8;
3917 else if (a >= 32)
3918 align = a, alignofs = 4 - c % 4;
3919 else if (a >= 16)
3920 align = a, alignofs = 2 - c % 2;
3924 /* Handle an unaligned prefix first. */
3926 if (alignofs > 0)
3928 #if HOST_BITS_PER_WIDE_INT >= 64
3929 /* Given that alignofs is bounded by align, the only time BWX could
3930 generate three stores is for a 7 byte fill. Prefer two individual
3931 stores over a load/mask/store sequence. */
3932 if ((!TARGET_BWX || alignofs == 7)
3933 && align >= 32
3934 && !(alignofs == 4 && bytes >= 4))
3936 enum machine_mode mode = (align >= 64 ? DImode : SImode);
3937 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3938 rtx mem, tmp;
3939 HOST_WIDE_INT mask;
3941 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3942 set_mem_alias_set (mem, 0);
3944 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3945 if (bytes < alignofs)
3947 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3948 ofs += bytes;
3949 bytes = 0;
3951 else
3953 bytes -= alignofs;
3954 ofs += alignofs;
3956 alignofs = 0;
3958 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3959 NULL_RTX, 1, OPTAB_WIDEN);
3961 emit_move_insn (mem, tmp);
3963 #endif
3965 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
3967 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
3968 bytes -= 1;
3969 ofs += 1;
3970 alignofs -= 1;
3972 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
3974 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
3975 bytes -= 2;
3976 ofs += 2;
3977 alignofs -= 2;
3979 if (alignofs == 4 && bytes >= 4)
3981 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3982 bytes -= 4;
3983 ofs += 4;
3984 alignofs = 0;
3987 /* If we've not used the extra lead alignment information by now,
3988 we won't be able to. Downgrade align to match what's left over. */
3989 if (alignofs > 0)
3991 alignofs = alignofs & -alignofs;
3992 align = MIN (align, alignofs * BITS_PER_UNIT);
3996 /* Handle a block of contiguous long-words. */
3998 if (align >= 64 && bytes >= 8)
4000 words = bytes / 8;
4002 for (i = 0; i < words; ++i)
4003 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4004 const0_rtx);
4006 bytes -= words * 8;
4007 ofs += words * 8;
4010 /* If the block is large and appropriately aligned, emit a single
4011 store followed by a sequence of stq_u insns. */
4013 if (align >= 32 && bytes > 16)
4015 rtx orig_dsta;
4017 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4018 bytes -= 4;
4019 ofs += 4;
4021 orig_dsta = XEXP (orig_dst, 0);
4022 if (GET_CODE (orig_dsta) == LO_SUM)
4023 orig_dsta = force_reg (Pmode, orig_dsta);
4025 words = bytes / 8;
4026 for (i = 0; i < words; ++i)
4028 rtx mem
4029 = change_address (orig_dst, DImode,
4030 gen_rtx_AND (DImode,
4031 plus_constant (DImode, orig_dsta,
4032 ofs + i*8),
4033 GEN_INT (-8)));
4034 set_mem_alias_set (mem, 0);
4035 emit_move_insn (mem, const0_rtx);
4038 /* Depending on the alignment, the first stq_u may have overlapped
4039 with the initial stl, which means that the last stq_u didn't
4040 write as much as it would appear. Leave those questionable bytes
4041 unaccounted for. */
4042 bytes -= words * 8 - 4;
4043 ofs += words * 8 - 4;
4046 /* Handle a smaller block of aligned words. */
4048 if ((align >= 64 && bytes == 4)
4049 || (align == 32 && bytes >= 4))
4051 words = bytes / 4;
4053 for (i = 0; i < words; ++i)
4054 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4055 const0_rtx);
4057 bytes -= words * 4;
4058 ofs += words * 4;
4061 /* An unaligned block uses stq_u stores for as many as possible. */
4063 if (bytes >= 8)
4065 words = bytes / 8;
4067 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4069 bytes -= words * 8;
4070 ofs += words * 8;
4073 /* Next clean up any trailing pieces. */
4075 #if HOST_BITS_PER_WIDE_INT >= 64
4076 /* Count the number of bits in BYTES for which aligned stores could
4077 be emitted. */
4078 words = 0;
4079 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4080 if (bytes & i)
4081 words += 1;
4083 /* If we have appropriate alignment (and it wouldn't take too many
4084 instructions otherwise), mask out the bytes we need. */
4085 if (TARGET_BWX ? words > 2 : bytes > 0)
4087 if (align >= 64)
4089 rtx mem, tmp;
4090 HOST_WIDE_INT mask;
4092 mem = adjust_address (orig_dst, DImode, ofs);
4093 set_mem_alias_set (mem, 0);
4095 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4097 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4098 NULL_RTX, 1, OPTAB_WIDEN);
4100 emit_move_insn (mem, tmp);
4101 return 1;
4103 else if (align >= 32 && bytes < 4)
4105 rtx mem, tmp;
4106 HOST_WIDE_INT mask;
4108 mem = adjust_address (orig_dst, SImode, ofs);
4109 set_mem_alias_set (mem, 0);
4111 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4113 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4114 NULL_RTX, 1, OPTAB_WIDEN);
4116 emit_move_insn (mem, tmp);
4117 return 1;
4120 #endif
4122 if (!TARGET_BWX && bytes >= 4)
4124 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4125 bytes -= 4;
4126 ofs += 4;
4129 if (bytes >= 2)
4131 if (align >= 16)
4133 do {
4134 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4135 const0_rtx);
4136 bytes -= 2;
4137 ofs += 2;
4138 } while (bytes >= 2);
4140 else if (! TARGET_BWX)
4142 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4143 bytes -= 2;
4144 ofs += 2;
4148 while (bytes > 0)
4150 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4151 bytes -= 1;
4152 ofs += 1;
4155 return 1;
4158 /* Returns a mask so that zap(x, value) == x & mask. */
4161 alpha_expand_zap_mask (HOST_WIDE_INT value)
4163 rtx result;
4164 int i;
4166 if (HOST_BITS_PER_WIDE_INT >= 64)
4168 HOST_WIDE_INT mask = 0;
4170 for (i = 7; i >= 0; --i)
4172 mask <<= 8;
4173 if (!((value >> i) & 1))
4174 mask |= 0xff;
4177 result = gen_int_mode (mask, DImode);
4179 else
4181 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4183 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4185 for (i = 7; i >= 4; --i)
4187 mask_hi <<= 8;
4188 if (!((value >> i) & 1))
4189 mask_hi |= 0xff;
4192 for (i = 3; i >= 0; --i)
4194 mask_lo <<= 8;
4195 if (!((value >> i) & 1))
4196 mask_lo |= 0xff;
4199 result = immed_double_const (mask_lo, mask_hi, DImode);
4202 return result;
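/* Worked example (illustration, not used by the compiler): for
   VALUE == 0x0f the loop above leaves 0xff in the four high byte lanes
   only, i.e. mask == 0xffffffff00000000, so zap (x, 0x0f) == x & mask
   clears the low four bytes of x.  */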
4205 void
4206 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4207 enum machine_mode mode,
4208 rtx op0, rtx op1, rtx op2)
4210 op0 = gen_lowpart (mode, op0);
4212 if (op1 == const0_rtx)
4213 op1 = CONST0_RTX (mode);
4214 else
4215 op1 = gen_lowpart (mode, op1);
4217 if (op2 == const0_rtx)
4218 op2 = CONST0_RTX (mode);
4219 else
4220 op2 = gen_lowpart (mode, op2);
4222 emit_insn ((*gen) (op0, op1, op2));
4225 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4226 COND is true. Mark the jump as unlikely to be taken. */
4228 static void
4229 emit_unlikely_jump (rtx cond, rtx label)
4231 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
4232 rtx x;
4234 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4235 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4236 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
4239 /* A subroutine of the atomic operation splitters. Emit a load-locked
4240 instruction in MODE. */
4242 static void
4243 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4245 rtx (*fn) (rtx, rtx) = NULL;
4246 if (mode == SImode)
4247 fn = gen_load_locked_si;
4248 else if (mode == DImode)
4249 fn = gen_load_locked_di;
4250 emit_insn (fn (reg, mem));
4253 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4254 instruction in MODE. */
4256 static void
4257 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4259 rtx (*fn) (rtx, rtx, rtx) = NULL;
4260 if (mode == SImode)
4261 fn = gen_store_conditional_si;
4262 else if (mode == DImode)
4263 fn = gen_store_conditional_di;
4264 emit_insn (fn (res, mem, val));
4267 /* Subroutines of the atomic operation splitters. Emit barriers
4268 as needed for the memory MODEL. */
4270 static void
4271 alpha_pre_atomic_barrier (enum memmodel model)
4273 if (need_atomic_barrier_p (model, true))
4274 emit_insn (gen_memory_barrier ());
4277 static void
4278 alpha_post_atomic_barrier (enum memmodel model)
4280 if (need_atomic_barrier_p (model, false))
4281 emit_insn (gen_memory_barrier ());
4284 /* A subroutine of the atomic operation splitters. Emit an insxl
4285 instruction in MODE. */
4287 static rtx
4288 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4290 rtx ret = gen_reg_rtx (DImode);
4291 rtx (*fn) (rtx, rtx, rtx);
4293 switch (mode)
4295 case QImode:
4296 fn = gen_insbl;
4297 break;
4298 case HImode:
4299 fn = gen_inswl;
4300 break;
4301 case SImode:
4302 fn = gen_insll;
4303 break;
4304 case DImode:
4305 fn = gen_insql;
4306 break;
4307 default:
4308 gcc_unreachable ();
4311 op1 = force_reg (mode, op1);
4312 emit_insn (fn (ret, op1, op2));
4314 return ret;
4317 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4318 to perform. MEM is the memory on which to operate. VAL is the second
4319 operand of the binary operator. BEFORE and AFTER are optional locations to
4320 return the value of MEM either before or after the operation. SCRATCH is
4321 a scratch register. */
4323 void
4324 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4325 rtx after, rtx scratch, enum memmodel model)
4327 enum machine_mode mode = GET_MODE (mem);
4328 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4330 alpha_pre_atomic_barrier (model);
4332 label = gen_label_rtx ();
4333 emit_label (label);
4334 label = gen_rtx_LABEL_REF (DImode, label);
4336 if (before == NULL)
4337 before = scratch;
4338 emit_load_locked (mode, before, mem);
4340 if (code == NOT)
4342 x = gen_rtx_AND (mode, before, val);
4343 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4345 x = gen_rtx_NOT (mode, val);
4347 else
4348 x = gen_rtx_fmt_ee (code, mode, before, val);
4349 if (after)
4350 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4351 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4353 emit_store_conditional (mode, cond, mem, scratch);
4355 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4356 emit_unlikely_jump (x, label);
4358 alpha_post_atomic_barrier (model);
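/* As a rough sketch of what the split above produces for a SImode
   fetch-and-add (illustrative, not a verbatim listing):

     <pre barrier, if MODEL requires one>
   1:  ldl_l   before,0(mem)
       addl    before,val,scratch
       stl_c   scratch,0(mem)
       beq     scratch,1b          ; store-conditional failed, retry
     <post barrier, if MODEL requires one>  */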
4361 /* Expand a compare and swap operation. */
4363 void
4364 alpha_split_compare_and_swap (rtx operands[])
4366 rtx cond, retval, mem, oldval, newval;
4367 bool is_weak;
4368 enum memmodel mod_s, mod_f;
4369 enum machine_mode mode;
4370 rtx label1, label2, x;
4372 cond = operands[0];
4373 retval = operands[1];
4374 mem = operands[2];
4375 oldval = operands[3];
4376 newval = operands[4];
4377 is_weak = (operands[5] != const0_rtx);
4378 mod_s = (enum memmodel) INTVAL (operands[6]);
4379 mod_f = (enum memmodel) INTVAL (operands[7]);
4380 mode = GET_MODE (mem);
4382 alpha_pre_atomic_barrier (mod_s);
4384 label1 = NULL_RTX;
4385 if (!is_weak)
4387 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4388 emit_label (XEXP (label1, 0));
4390 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4392 emit_load_locked (mode, retval, mem);
4394 x = gen_lowpart (DImode, retval);
4395 if (oldval == const0_rtx)
4397 emit_move_insn (cond, const0_rtx);
4398 x = gen_rtx_NE (DImode, x, const0_rtx);
4400 else
4402 x = gen_rtx_EQ (DImode, x, oldval);
4403 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4404 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4406 emit_unlikely_jump (x, label2);
4408 emit_move_insn (cond, newval);
4409 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4411 if (!is_weak)
4413 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4414 emit_unlikely_jump (x, label1);
4417 if (mod_f != MEMMODEL_RELAXED)
4418 emit_label (XEXP (label2, 0));
4420 alpha_post_atomic_barrier (mod_s);
4422 if (mod_f == MEMMODEL_RELAXED)
4423 emit_label (XEXP (label2, 0));
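/* Illustrative sketch of the strong DImode case handled above (not a
   verbatim listing):

     <pre barrier>
   1:  ldq_l   retval,0(mem)
       cmpeq   retval,oldval,cond
       beq     cond,2f              ; values differ, fail
       mov     newval,cond
       stq_c   cond,0(mem)
       beq     cond,1b              ; lost the lock flag, retry
   2:  <post barrier>  */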
4426 void
4427 alpha_expand_compare_and_swap_12 (rtx operands[])
4429 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4430 enum machine_mode mode;
4431 rtx addr, align, wdst;
4432 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4434 cond = operands[0];
4435 dst = operands[1];
4436 mem = operands[2];
4437 oldval = operands[3];
4438 newval = operands[4];
4439 is_weak = operands[5];
4440 mod_s = operands[6];
4441 mod_f = operands[7];
4442 mode = GET_MODE (mem);
4444 /* We forced the address into a register via mem_noofs_operand. */
4445 addr = XEXP (mem, 0);
4446 gcc_assert (register_operand (addr, DImode));
4448 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4449 NULL_RTX, 1, OPTAB_DIRECT);
4451 oldval = convert_modes (DImode, mode, oldval, 1);
4453 if (newval != const0_rtx)
4454 newval = emit_insxl (mode, newval, addr);
4456 wdst = gen_reg_rtx (DImode);
4457 if (mode == QImode)
4458 gen = gen_atomic_compare_and_swapqi_1;
4459 else
4460 gen = gen_atomic_compare_and_swaphi_1;
4461 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4462 is_weak, mod_s, mod_f));
4464 emit_move_insn (dst, gen_lowpart (mode, wdst));
4467 void
4468 alpha_split_compare_and_swap_12 (rtx operands[])
4470 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4471 enum machine_mode mode;
4472 bool is_weak;
4473 enum memmodel mod_s, mod_f;
4474 rtx label1, label2, mem, addr, width, mask, x;
4476 cond = operands[0];
4477 dest = operands[1];
4478 orig_mem = operands[2];
4479 oldval = operands[3];
4480 newval = operands[4];
4481 align = operands[5];
4482 is_weak = (operands[6] != const0_rtx);
4483 mod_s = (enum memmodel) INTVAL (operands[7]);
4484 mod_f = (enum memmodel) INTVAL (operands[8]);
4485 scratch = operands[9];
4486 mode = GET_MODE (orig_mem);
4487 addr = XEXP (orig_mem, 0);
4489 mem = gen_rtx_MEM (DImode, align);
4490 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4491 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4492 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4494 alpha_pre_atomic_barrier (mod_s);
4496 label1 = NULL_RTX;
4497 if (!is_weak)
4499 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4500 emit_label (XEXP (label1, 0));
4502 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4504 emit_load_locked (DImode, scratch, mem);
4506 width = GEN_INT (GET_MODE_BITSIZE (mode));
4507 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4508 emit_insn (gen_extxl (dest, scratch, width, addr));
4510 if (oldval == const0_rtx)
4512 emit_move_insn (cond, const0_rtx);
4513 x = gen_rtx_NE (DImode, dest, const0_rtx);
4515 else
4517 x = gen_rtx_EQ (DImode, dest, oldval);
4518 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4519 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4521 emit_unlikely_jump (x, label2);
4523 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4525 if (newval != const0_rtx)
4526 emit_insn (gen_iordi3 (cond, cond, newval));
4528 emit_store_conditional (DImode, cond, mem, cond);
4530 if (!is_weak)
4532 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4533 emit_unlikely_jump (x, label1);
4536 if (mod_f != MEMMODEL_RELAXED)
4537 emit_label (XEXP (label2, 0));
4539 alpha_post_atomic_barrier (mod_s);
4541 if (mod_f == MEMMODEL_RELAXED)
4542 emit_label (XEXP (label2, 0));
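/* Sketch of the idea implemented above (illustration only): a QImode
   compare-and-swap is carried out on the aligned quadword containing
   the byte, roughly

   1:  ldq_l   scratch,0(align)
       extbl   scratch,addr,dest     ; extract the old byte
       cmpeq   dest,oldval,cond
       beq     cond,2f
       mskbl   scratch,addr,cond     ; clear that byte lane
       bis     cond,newval,cond      ; merge in the pre-shifted new byte
       stq_c   cond,0(align)
       beq     cond,1b
   2:

   NEWVAL was already shifted into position by emit_insxl in the expander
   above.  */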
4545 /* Expand an atomic exchange operation. */
4547 void
4548 alpha_split_atomic_exchange (rtx operands[])
4550 rtx retval, mem, val, scratch;
4551 enum memmodel model;
4552 enum machine_mode mode;
4553 rtx label, x, cond;
4555 retval = operands[0];
4556 mem = operands[1];
4557 val = operands[2];
4558 model = (enum memmodel) INTVAL (operands[3]);
4559 scratch = operands[4];
4560 mode = GET_MODE (mem);
4561 cond = gen_lowpart (DImode, scratch);
4563 alpha_pre_atomic_barrier (model);
4565 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4566 emit_label (XEXP (label, 0));
4568 emit_load_locked (mode, retval, mem);
4569 emit_move_insn (scratch, val);
4570 emit_store_conditional (mode, cond, mem, scratch);
4572 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4573 emit_unlikely_jump (x, label);
4575 alpha_post_atomic_barrier (model);
4578 void
4579 alpha_expand_atomic_exchange_12 (rtx operands[])
4581 rtx dst, mem, val, model;
4582 enum machine_mode mode;
4583 rtx addr, align, wdst;
4584 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4586 dst = operands[0];
4587 mem = operands[1];
4588 val = operands[2];
4589 model = operands[3];
4590 mode = GET_MODE (mem);
4592 /* We forced the address into a register via mem_noofs_operand. */
4593 addr = XEXP (mem, 0);
4594 gcc_assert (register_operand (addr, DImode));
4596 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4597 NULL_RTX, 1, OPTAB_DIRECT);
4599 /* Insert val into the correct byte location within the word. */
4600 if (val != const0_rtx)
4601 val = emit_insxl (mode, val, addr);
4603 wdst = gen_reg_rtx (DImode);
4604 if (mode == QImode)
4605 gen = gen_atomic_exchangeqi_1;
4606 else
4607 gen = gen_atomic_exchangehi_1;
4608 emit_insn (gen (wdst, mem, val, align, model));
4610 emit_move_insn (dst, gen_lowpart (mode, wdst));
4613 void
4614 alpha_split_atomic_exchange_12 (rtx operands[])
4616 rtx dest, orig_mem, addr, val, align, scratch;
4617 rtx label, mem, width, mask, x;
4618 enum machine_mode mode;
4619 enum memmodel model;
4621 dest = operands[0];
4622 orig_mem = operands[1];
4623 val = operands[2];
4624 align = operands[3];
4625 model = (enum memmodel) INTVAL (operands[4]);
4626 scratch = operands[5];
4627 mode = GET_MODE (orig_mem);
4628 addr = XEXP (orig_mem, 0);
4630 mem = gen_rtx_MEM (DImode, align);
4631 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4632 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4633 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4635 alpha_pre_atomic_barrier (model);
4637 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4638 emit_label (XEXP (label, 0));
4640 emit_load_locked (DImode, scratch, mem);
4642 width = GEN_INT (GET_MODE_BITSIZE (mode));
4643 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4644 emit_insn (gen_extxl (dest, scratch, width, addr));
4645 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4646 if (val != const0_rtx)
4647 emit_insn (gen_iordi3 (scratch, scratch, val));
4649 emit_store_conditional (DImode, scratch, mem, scratch);
4651 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4652 emit_unlikely_jump (x, label);
4654 alpha_post_atomic_barrier (model);
4657 /* Adjust the cost of a scheduling dependency. Return the new cost of
4658 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4660 static int
4661 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4663 enum attr_type dep_insn_type;
4665 /* If the dependence is an anti-dependence, there is no cost. For an
4666 output dependence, there is sometimes a cost, but it doesn't seem
4667 worth handling those few cases. */
4668 if (REG_NOTE_KIND (link) != 0)
4669 return cost;
4671 /* If we can't recognize the insns, we can't really do anything. */
4672 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4673 return cost;
4675 dep_insn_type = get_attr_type (dep_insn);
4677 /* Bring in the user-defined memory latency. */
4678 if (dep_insn_type == TYPE_ILD
4679 || dep_insn_type == TYPE_FLD
4680 || dep_insn_type == TYPE_LDSYM)
4681 cost += alpha_memory_latency-1;
4683 /* Everything else handled in DFA bypasses now. */
4685 return cost;
4688 /* The number of instructions that can be issued per cycle. */
4690 static int
4691 alpha_issue_rate (void)
4693 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4696 /* How many alternative schedules to try. This should be as wide as the
4697 scheduling freedom in the DFA, but no wider. Making this value too
4698 large results in extra work for the scheduler.
4700 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4701 alternative schedules. For EV5, we can choose between E0/E1 and
4702 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4704 static int
4705 alpha_multipass_dfa_lookahead (void)
4707 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4710 /* Machine-specific function data. */
4712 struct GTY(()) alpha_links;
4714 struct GTY(()) machine_function
4716 /* For OSF. */
4717 const char *some_ld_name;
4719 /* For flag_reorder_blocks_and_partition. */
4720 rtx gp_save_rtx;
4722 /* For VMS condition handlers. */
4723 bool uses_condition_handler;
4725 /* Linkage entries. */
4726 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
4727 links;
4730 /* How to allocate a 'struct machine_function'. */
4732 static struct machine_function *
4733 alpha_init_machine_status (void)
4735 return ggc_alloc_cleared_machine_function ();
4738 /* Support for frame based VMS condition handlers. */
4740 /* A VMS condition handler may be established for a function with a call to
4741 __builtin_establish_vms_condition_handler, and cancelled with a call to
4742 __builtin_revert_vms_condition_handler.
4744 The VMS Condition Handling Facility knows about the existence of a handler
4745 from the procedure descriptor .handler field. As the VMS native compilers do,
4746 we store the user-specified handler's address at a fixed location in the
4747 stack frame and point the procedure descriptor at a common wrapper which
4748 fetches the real handler's address and issues an indirect call.
4750 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4752 We force the procedure kind to PT_STACK, and the fixed frame location is
4753 fp+8, just before the register save area. We use the handler_data field in
4754 the procedure descriptor to state the fp offset at which the installed
4755 handler address can be found. */
4757 #define VMS_COND_HANDLER_FP_OFFSET 8
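/* Hedged usage sketch (illustration; the exact prototypes are supplied by
   the front end, and my_handler is a hypothetical user routine):

     old = __builtin_establish_vms_condition_handler (my_handler);
     ...
     __builtin_revert_vms_condition_handler ();

   Both builtins are expanded by the two functions below.  */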
4759 /* Expand code to store the currently installed user VMS condition handler
4760 into TARGET and install HANDLER as the new condition handler. */
4762 void
4763 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4765 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4766 VMS_COND_HANDLER_FP_OFFSET);
4768 rtx handler_slot
4769 = gen_rtx_MEM (DImode, handler_slot_address);
4771 emit_move_insn (target, handler_slot);
4772 emit_move_insn (handler_slot, handler);
4774 /* Notify the start/prologue/epilogue emitters that the condition handler
4775 slot is needed. In addition to reserving the slot space, this will force
4776 the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4777 use above is correct. */
4778 cfun->machine->uses_condition_handler = true;
4781 /* Expand code to store the current VMS condition handler into TARGET and
4782 nullify it. */
4784 void
4785 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4787 /* We implement this by establishing a null condition handler, with the tiny
4788 side effect of setting uses_condition_handler. This is a little bit
4789 pessimistic if no actual builtin_establish call is ever issued, which is
4790 not a real problem and is expected never to happen anyway. */
4792 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4795 /* Functions to save and restore alpha_return_addr_rtx. */
4797 /* Start the ball rolling with RETURN_ADDR_RTX. */
4800 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4802 if (count != 0)
4803 return const0_rtx;
4805 return get_hard_reg_initial_val (Pmode, REG_RA);
4808 /* Return or create a memory slot containing the gp value for the current
4809 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4812 alpha_gp_save_rtx (void)
4814 rtx seq, m = cfun->machine->gp_save_rtx;
4816 if (m == NULL)
4818 start_sequence ();
4820 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4821 m = validize_mem (m);
4822 emit_move_insn (m, pic_offset_table_rtx);
4824 seq = get_insns ();
4825 end_sequence ();
4827 /* We used to simply emit the sequence after entry_of_function.
4828 However this breaks the CFG if the first instruction in the
4829 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4830 label. Emit the sequence properly on the edge. We are only
4831 invoked from dw2_build_landing_pads and finish_eh_generation
4832 will call commit_edge_insertions thanks to a kludge. */
4833 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4835 cfun->machine->gp_save_rtx = m;
4838 return m;
4841 static void
4842 alpha_instantiate_decls (void)
4844 if (cfun->machine->gp_save_rtx != NULL_RTX)
4845 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
4848 static int
4849 alpha_ra_ever_killed (void)
4851 rtx top;
4853 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4854 return (int)df_regs_ever_live_p (REG_RA);
4856 push_topmost_sequence ();
4857 top = get_insns ();
4858 pop_topmost_sequence ();
4860 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4864 /* Return the trap mode suffix applicable to the current
4865 instruction, or NULL. */
4867 static const char *
4868 get_trap_mode_suffix (void)
4870 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4872 switch (s)
4874 case TRAP_SUFFIX_NONE:
4875 return NULL;
4877 case TRAP_SUFFIX_SU:
4878 if (alpha_fptm >= ALPHA_FPTM_SU)
4879 return "su";
4880 return NULL;
4882 case TRAP_SUFFIX_SUI:
4883 if (alpha_fptm >= ALPHA_FPTM_SUI)
4884 return "sui";
4885 return NULL;
4887 case TRAP_SUFFIX_V_SV:
4888 switch (alpha_fptm)
4890 case ALPHA_FPTM_N:
4891 return NULL;
4892 case ALPHA_FPTM_U:
4893 return "v";
4894 case ALPHA_FPTM_SU:
4895 case ALPHA_FPTM_SUI:
4896 return "sv";
4897 default:
4898 gcc_unreachable ();
4901 case TRAP_SUFFIX_V_SV_SVI:
4902 switch (alpha_fptm)
4904 case ALPHA_FPTM_N:
4905 return NULL;
4906 case ALPHA_FPTM_U:
4907 return "v";
4908 case ALPHA_FPTM_SU:
4909 return "sv";
4910 case ALPHA_FPTM_SUI:
4911 return "svi";
4912 default:
4913 gcc_unreachable ();
4915 break;
4917 case TRAP_SUFFIX_U_SU_SUI:
4918 switch (alpha_fptm)
4920 case ALPHA_FPTM_N:
4921 return NULL;
4922 case ALPHA_FPTM_U:
4923 return "u";
4924 case ALPHA_FPTM_SU:
4925 return "su";
4926 case ALPHA_FPTM_SUI:
4927 return "sui";
4928 default:
4929 gcc_unreachable ();
4931 break;
4933 default:
4934 gcc_unreachable ();
4936 gcc_unreachable ();
4939 /* Return the rounding mode suffix applicable to the current
4940 instruction, or NULL. */
4942 static const char *
4943 get_round_mode_suffix (void)
4945 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4947 switch (s)
4949 case ROUND_SUFFIX_NONE:
4950 return NULL;
4951 case ROUND_SUFFIX_NORMAL:
4952 switch (alpha_fprm)
4954 case ALPHA_FPRM_NORM:
4955 return NULL;
4956 case ALPHA_FPRM_MINF:
4957 return "m";
4958 case ALPHA_FPRM_CHOP:
4959 return "c";
4960 case ALPHA_FPRM_DYN:
4961 return "d";
4962 default:
4963 gcc_unreachable ();
4965 break;
4967 case ROUND_SUFFIX_C:
4968 return "c";
4970 default:
4971 gcc_unreachable ();
4973 gcc_unreachable ();
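/* Example (illustration): compiling with -mfp-trap-mode=su and
   -mfp-rounding-mode=d, an addt whose pattern allows the u/su/sui trap
   suffixes is printed by the %/ operand code below as "addt/sud".  */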
4976 /* Locate some local-dynamic symbol still in use by this function
4977 so that we can print its name in some movdi_er_tlsldm pattern. */
4979 static int
4980 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4982 rtx x = *px;
4984 if (GET_CODE (x) == SYMBOL_REF
4985 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4987 cfun->machine->some_ld_name = XSTR (x, 0);
4988 return 1;
4991 return 0;
4994 static const char *
4995 get_some_local_dynamic_name (void)
4997 rtx insn;
4999 if (cfun->machine->some_ld_name)
5000 return cfun->machine->some_ld_name;
5002 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5003 if (INSN_P (insn)
5004 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5005 return cfun->machine->some_ld_name;
5007 gcc_unreachable ();
5010 /* Print an operand. Recognize special options, documented below. */
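/* For instance (illustrative): "%r1" in an output template prints a
   register operand, or "$31" when operand 1 is the constant zero, and
   "%/" appends the trap and rounding suffixes described above.  */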
5012 void
5013 print_operand (FILE *file, rtx x, int code)
5015 int i;
5017 switch (code)
5019 case '~':
5020 /* Print the assembler name of the current function. */
5021 assemble_name (file, alpha_fnname);
5022 break;
5024 case '&':
5025 assemble_name (file, get_some_local_dynamic_name ());
5026 break;
5028 case '/':
5030 const char *trap = get_trap_mode_suffix ();
5031 const char *round = get_round_mode_suffix ();
5033 if (trap || round)
5034 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
5035 break;
5038 case ',':
5039 /* Generates single precision instruction suffix. */
5040 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5041 break;
5043 case '-':
5044 /* Generates double precision instruction suffix. */
5045 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5046 break;
5048 case '#':
5049 if (alpha_this_literal_sequence_number == 0)
5050 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5051 fprintf (file, "%d", alpha_this_literal_sequence_number);
5052 break;
5054 case '*':
5055 if (alpha_this_gpdisp_sequence_number == 0)
5056 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5057 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5058 break;
5060 case 'H':
5061 if (GET_CODE (x) == HIGH)
5062 output_addr_const (file, XEXP (x, 0));
5063 else
5064 output_operand_lossage ("invalid %%H value");
5065 break;
5067 case 'J':
5069 const char *lituse;
5071 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5073 x = XVECEXP (x, 0, 0);
5074 lituse = "lituse_tlsgd";
5076 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5078 x = XVECEXP (x, 0, 0);
5079 lituse = "lituse_tlsldm";
5081 else if (CONST_INT_P (x))
5082 lituse = "lituse_jsr";
5083 else
5085 output_operand_lossage ("invalid %%J value");
5086 break;
5089 if (x != const0_rtx)
5090 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5092 break;
5094 case 'j':
5096 const char *lituse;
5098 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5099 lituse = "lituse_jsrdirect";
5100 #else
5101 lituse = "lituse_jsr";
5102 #endif
5104 gcc_assert (INTVAL (x) != 0);
5105 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5107 break;
5108 case 'r':
5109 /* If this operand is the constant zero, write it as "$31". */
5110 if (REG_P (x))
5111 fprintf (file, "%s", reg_names[REGNO (x)]);
5112 else if (x == CONST0_RTX (GET_MODE (x)))
5113 fprintf (file, "$31");
5114 else
5115 output_operand_lossage ("invalid %%r value");
5116 break;
5118 case 'R':
5119 /* Similar, but for floating-point. */
5120 if (REG_P (x))
5121 fprintf (file, "%s", reg_names[REGNO (x)]);
5122 else if (x == CONST0_RTX (GET_MODE (x)))
5123 fprintf (file, "$f31");
5124 else
5125 output_operand_lossage ("invalid %%R value");
5126 break;
5128 case 'N':
5129 /* Write the 1's complement of a constant. */
5130 if (!CONST_INT_P (x))
5131 output_operand_lossage ("invalid %%N value");
5133 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5134 break;
5136 case 'P':
5137 /* Write 1 << C, for a constant C. */
5138 if (!CONST_INT_P (x))
5139 output_operand_lossage ("invalid %%P value");
5141 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5142 break;
5144 case 'h':
5145 /* Write the high-order 16 bits of a constant, sign-extended. */
5146 if (!CONST_INT_P (x))
5147 output_operand_lossage ("invalid %%h value");
5149 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5150 break;
5152 case 'L':
5153 /* Write the low-order 16 bits of a constant, sign-extended. */
5154 if (!CONST_INT_P (x))
5155 output_operand_lossage ("invalid %%L value");
5157 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5158 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5159 break;
5161 case 'm':
5162 /* Write mask for ZAP insn. */
5163 if (GET_CODE (x) == CONST_DOUBLE)
5165 HOST_WIDE_INT mask = 0;
5166 HOST_WIDE_INT value;
5168 value = CONST_DOUBLE_LOW (x);
5169 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5170 i++, value >>= 8)
5171 if (value & 0xff)
5172 mask |= (1 << i);
5174 value = CONST_DOUBLE_HIGH (x);
5175 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5176 i++, value >>= 8)
5177 if (value & 0xff)
5178 mask |= (1 << (i + sizeof (int)));
5180 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5183 else if (CONST_INT_P (x))
5185 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5187 for (i = 0; i < 8; i++, value >>= 8)
5188 if (value & 0xff)
5189 mask |= (1 << i);
5191 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5193 else
5194 output_operand_lossage ("invalid %%m value");
5195 break;
5197 case 'M':
5198 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5199 if (!CONST_INT_P (x)
5200 || (INTVAL (x) != 8 && INTVAL (x) != 16
5201 && INTVAL (x) != 32 && INTVAL (x) != 64))
5202 output_operand_lossage ("invalid %%M value");
5204 fprintf (file, "%s",
5205 (INTVAL (x) == 8 ? "b"
5206 : INTVAL (x) == 16 ? "w"
5207 : INTVAL (x) == 32 ? "l"
5208 : "q"));
5209 break;
5211 case 'U':
5212 /* Similar, except do it from the mask. */
5213 if (CONST_INT_P (x))
5215 HOST_WIDE_INT value = INTVAL (x);
5217 if (value == 0xff)
5219 fputc ('b', file);
5220 break;
5222 if (value == 0xffff)
5224 fputc ('w', file);
5225 break;
5227 if (value == 0xffffffff)
5229 fputc ('l', file);
5230 break;
5232 if (value == -1)
5234 fputc ('q', file);
5235 break;
5238 else if (HOST_BITS_PER_WIDE_INT == 32
5239 && GET_CODE (x) == CONST_DOUBLE
5240 && CONST_DOUBLE_LOW (x) == 0xffffffff
5241 && CONST_DOUBLE_HIGH (x) == 0)
5243 fputc ('l', file);
5244 break;
5246 output_operand_lossage ("invalid %%U value");
5247 break;
5249 case 's':
5250 /* Write the constant value divided by 8. */
5251 if (!CONST_INT_P (x)
5252 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5253 || (INTVAL (x) & 7) != 0)
5254 output_operand_lossage ("invalid %%s value");
5256 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5257 break;
5259 case 'S':
5260 /* Same, except compute (64 - c) / 8 */
5262 if (!CONST_INT_P (x)
5263 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5264 || (INTVAL (x) & 7) != 0)
5265 output_operand_lossage ("invalid %%S value");
5267 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5268 break;
5270 case 'C': case 'D': case 'c': case 'd':
5271 /* Write out comparison name. */
5273 enum rtx_code c = GET_CODE (x);
5275 if (!COMPARISON_P (x))
5276 output_operand_lossage ("invalid %%C value");
5278 else if (code == 'D')
5279 c = reverse_condition (c);
5280 else if (code == 'c')
5281 c = swap_condition (c);
5282 else if (code == 'd')
5283 c = swap_condition (reverse_condition (c));
5285 if (c == LEU)
5286 fprintf (file, "ule");
5287 else if (c == LTU)
5288 fprintf (file, "ult");
5289 else if (c == UNORDERED)
5290 fprintf (file, "un");
5291 else
5292 fprintf (file, "%s", GET_RTX_NAME (c));
5294 break;
5296 case 'E':
5297 /* Write the divide or modulus operator. */
5298 switch (GET_CODE (x))
5300 case DIV:
5301 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5302 break;
5303 case UDIV:
5304 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5305 break;
5306 case MOD:
5307 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5308 break;
5309 case UMOD:
5310 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5311 break;
5312 default:
5313 output_operand_lossage ("invalid %%E value");
5314 break;
5316 break;
5318 case 'A':
5319 /* Write "_u" for unaligned access. */
5320 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5321 fprintf (file, "_u");
5322 break;
5324 case 0:
5325 if (REG_P (x))
5326 fprintf (file, "%s", reg_names[REGNO (x)]);
5327 else if (MEM_P (x))
5328 output_address (XEXP (x, 0));
5329 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5331 switch (XINT (XEXP (x, 0), 1))
5333 case UNSPEC_DTPREL:
5334 case UNSPEC_TPREL:
5335 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5336 break;
5337 default:
5338 output_operand_lossage ("unknown relocation unspec");
5339 break;
5342 else
5343 output_addr_const (file, x);
5344 break;
5346 default:
5347 output_operand_lossage ("invalid %%xn code");
5351 void
5352 print_operand_address (FILE *file, rtx addr)
5354 int basereg = 31;
5355 HOST_WIDE_INT offset = 0;
5357 if (GET_CODE (addr) == AND)
5358 addr = XEXP (addr, 0);
5360 if (GET_CODE (addr) == PLUS
5361 && CONST_INT_P (XEXP (addr, 1)))
5363 offset = INTVAL (XEXP (addr, 1));
5364 addr = XEXP (addr, 0);
5367 if (GET_CODE (addr) == LO_SUM)
5369 const char *reloc16, *reloclo;
5370 rtx op1 = XEXP (addr, 1);
5372 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5374 op1 = XEXP (op1, 0);
5375 switch (XINT (op1, 1))
5377 case UNSPEC_DTPREL:
5378 reloc16 = NULL;
5379 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5380 break;
5381 case UNSPEC_TPREL:
5382 reloc16 = NULL;
5383 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5384 break;
5385 default:
5386 output_operand_lossage ("unknown relocation unspec");
5387 return;
5390 output_addr_const (file, XVECEXP (op1, 0, 0));
5392 else
5394 reloc16 = "gprel";
5395 reloclo = "gprellow";
5396 output_addr_const (file, op1);
5399 if (offset)
5400 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5402 addr = XEXP (addr, 0);
5403 switch (GET_CODE (addr))
5405 case REG:
5406 basereg = REGNO (addr);
5407 break;
5409 case SUBREG:
5410 basereg = subreg_regno (addr);
5411 break;
5413 default:
5414 gcc_unreachable ();
5417 fprintf (file, "($%d)\t\t!%s", basereg,
5418 (basereg == 29 ? reloc16 : reloclo));
5419 return;
5422 switch (GET_CODE (addr))
5424 case REG:
5425 basereg = REGNO (addr);
5426 break;
5428 case SUBREG:
5429 basereg = subreg_regno (addr);
5430 break;
5432 case CONST_INT:
5433 offset = INTVAL (addr);
5434 break;
5436 #if TARGET_ABI_OPEN_VMS
5437 case SYMBOL_REF:
5438 fprintf (file, "%s", XSTR (addr, 0));
5439 return;
5441 case CONST:
5442 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5443 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5444 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5445 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5446 INTVAL (XEXP (XEXP (addr, 0), 1)));
5447 return;
5449 #endif
5450 default:
5451 gcc_unreachable ();
5454 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5457 /* Emit RTL insns to initialize the variable parts of a trampoline at
5458 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5459 for the static chain value for the function. */
5461 static void
5462 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5464 rtx fnaddr, mem, word1, word2;
5466 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5468 #ifdef POINTERS_EXTEND_UNSIGNED
5469 fnaddr = convert_memory_address (Pmode, fnaddr);
5470 chain_value = convert_memory_address (Pmode, chain_value);
5471 #endif
5473 if (TARGET_ABI_OPEN_VMS)
5475 const char *fnname;
5476 char *trname;
5478 /* Construct the name of the trampoline entry point. */
5479 fnname = XSTR (fnaddr, 0);
5480 trname = (char *) alloca (strlen (fnname) + 5);
5481 strcpy (trname, fnname);
5482 strcat (trname, "..tr");
5483 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5484 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5486 /* Trampoline (or "bounded") procedure descriptor is constructed from
5487 the function's procedure descriptor with certain fields zeroed IAW
5488 the VMS calling standard. This is stored in the first quadword. */
5489 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5490 word1 = expand_and (DImode, word1,
5491 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5492 NULL);
5494 else
5496 /* These 4 instructions are:
5497 ldq $1,24($27)
5498 ldq $27,16($27)
5499 jmp $31,($27),0
5500 nop
5501 We don't bother setting the HINT field of the jump; the nop
5502 is merely there for padding. */
5503 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5504 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
5507 /* Store the first two words, as computed above. */
5508 mem = adjust_address (m_tramp, DImode, 0);
5509 emit_move_insn (mem, word1);
5510 mem = adjust_address (m_tramp, DImode, 8);
5511 emit_move_insn (mem, word2);
5513 /* Store function address and static chain value. */
5514 mem = adjust_address (m_tramp, Pmode, 16);
5515 emit_move_insn (mem, fnaddr);
5516 mem = adjust_address (m_tramp, Pmode, 24);
5517 emit_move_insn (mem, chain_value);
5519 if (TARGET_ABI_OSF)
5521 emit_insn (gen_imb ());
5522 #ifdef HAVE_ENABLE_EXECUTE_STACK
5523 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5524 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5525 #endif
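/* Illustrative summary of the OSF trampoline laid out above (a sketch,
   not an authoritative ABI statement):

     m_tramp +  0:  ldq $1,24($27) ; ldq $27,16($27)   (word1)
     m_tramp +  8:  jmp $31,($27),0 ; nop              (word2)
     m_tramp + 16:  address of the target function
     m_tramp + 24:  static chain value

   Entered with $27 pointing at the trampoline, the code loads the static
   chain into $1, reloads $27 with the real function address and jumps
   to it.  */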
5529 /* Determine where to put an argument to a function.
5530 Value is zero to push the argument on the stack,
5531 or a hard register in which to store the argument.
5533 MODE is the argument's machine mode.
5534 TYPE is the data type of the argument (as a tree).
5535 This is null for libcalls where that information may
5536 not be available.
5537 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5538 the preceding args and about the function being called.
5539 NAMED is nonzero if this argument is a named parameter
5540 (otherwise it is an extra parameter matching an ellipsis).
5542 On Alpha the first 6 words of args are normally in registers
5543 and the rest are pushed. */
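/* Worked example (illustration only): for f (int a, double b, int c)
   under the OSF ABI, slot 0 lands in $16, slot 1 in $f17 (basereg 48
   plus the slot number), and slot 2 in $18; anything beyond the sixth
   slot is passed on the stack.  */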
5545 static rtx
5546 alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
5547 const_tree type, bool named ATTRIBUTE_UNUSED)
5549 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5550 int basereg;
5551 int num_args;
5553 /* Don't get confused and pass small structures in FP registers. */
5554 if (type && AGGREGATE_TYPE_P (type))
5555 basereg = 16;
5556 else
5558 #ifdef ENABLE_CHECKING
5559 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5560 values here. */
5561 gcc_assert (!COMPLEX_MODE_P (mode));
5562 #endif
5564 /* Set up defaults for FP operands passed in FP registers, and
5565 integral operands passed in integer registers. */
5566 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5567 basereg = 32 + 16;
5568 else
5569 basereg = 16;
5572 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5573 the two platforms, so we can't avoid conditional compilation. */
5574 #if TARGET_ABI_OPEN_VMS
5576 if (mode == VOIDmode)
5577 return alpha_arg_info_reg_val (*cum);
5579 num_args = cum->num_args;
5580 if (num_args >= 6
5581 || targetm.calls.must_pass_in_stack (mode, type))
5582 return NULL_RTX;
5584 #elif TARGET_ABI_OSF
5586 if (*cum >= 6)
5587 return NULL_RTX;
5588 num_args = *cum;
5590 /* VOID is passed as a special flag for "last argument". */
5591 if (type == void_type_node)
5592 basereg = 16;
5593 else if (targetm.calls.must_pass_in_stack (mode, type))
5594 return NULL_RTX;
5596 #else
5597 #error Unhandled ABI
5598 #endif
5600 return gen_rtx_REG (mode, num_args + basereg);
5603 /* Update the data in CUM to advance over an argument
5604 of mode MODE and data type TYPE.
5605 (TYPE is null for libcalls where that information may not be available.) */
5607 static void
5608 alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
5609 const_tree type, bool named ATTRIBUTE_UNUSED)
5611 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5612 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5613 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5615 #if TARGET_ABI_OSF
5616 *cum += increment;
5617 #else
5618 if (!onstack && cum->num_args < 6)
5619 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5620 cum->num_args += increment;
5621 #endif
5624 static int
5625 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5626 enum machine_mode mode ATTRIBUTE_UNUSED,
5627 tree type ATTRIBUTE_UNUSED,
5628 bool named ATTRIBUTE_UNUSED)
5630 int words = 0;
5631 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5633 #if TARGET_ABI_OPEN_VMS
5634 if (cum->num_args < 6
5635 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5636 words = 6 - cum->num_args;
5637 #elif TARGET_ABI_OSF
5638 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5639 words = 6 - *cum;
5640 #else
5641 #error Unhandled ABI
5642 #endif
5644 return words * UNITS_PER_WORD;
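/* Example (illustration): a four-word argument whose first word falls in
   slot 4 has two words in registers and two on the stack, so the function
   above reports 2 * UNITS_PER_WORD == 16 partial bytes.  */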
5648 /* Return true if TYPE must be returned in memory, instead of in registers. */
5650 static bool
5651 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5653 enum machine_mode mode = VOIDmode;
5654 int size;
5656 if (type)
5658 mode = TYPE_MODE (type);
5660 /* All aggregates are returned in memory, except on OpenVMS where
5661 records that fit 64 bits should be returned by immediate value
5662 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5663 if (TARGET_ABI_OPEN_VMS
5664 && TREE_CODE (type) != ARRAY_TYPE
5665 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
5666 return false;
5668 if (AGGREGATE_TYPE_P (type))
5669 return true;
5672 size = GET_MODE_SIZE (mode);
5673 switch (GET_MODE_CLASS (mode))
5675 case MODE_VECTOR_FLOAT:
5676 /* Pass all float vectors in memory, like an aggregate. */
5677 return true;
5679 case MODE_COMPLEX_FLOAT:
5680 /* We judge complex floats on the size of their element,
5681 not the size of the whole type. */
5682 size = GET_MODE_UNIT_SIZE (mode);
5683 break;
5685 case MODE_INT:
5686 case MODE_FLOAT:
5687 case MODE_COMPLEX_INT:
5688 case MODE_VECTOR_INT:
5689 break;
5691 default:
5692 /* ??? We get called on all sorts of random stuff from
5693 aggregate_value_p. We must return something, but it's not
5694 clear what's safe to return. Pretend it's a struct I
5695 guess. */
5696 return true;
5699 /* Otherwise types must fit in one register. */
5700 return size > UNITS_PER_WORD;
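/* For instance (illustration): a 16-byte struct is returned in memory,
   while a _Complex double, also 16 bytes, is judged by its 8-byte element
   above and is returned in floating-point registers.  */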
5703 /* Return true if TYPE should be passed by invisible reference. */
5705 static bool
5706 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5707 enum machine_mode mode,
5708 const_tree type ATTRIBUTE_UNUSED,
5709 bool named ATTRIBUTE_UNUSED)
5711 return mode == TFmode || mode == TCmode;
5714 /* Define how to find the value returned by a function. VALTYPE is the
5715 data type of the value (as a tree). If the precise function being
5716 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5717 MODE is set instead of VALTYPE for libcalls.
5719 On Alpha the value is found in $0 for integer functions and
5720 $f0 for floating-point functions. */
5723 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5724 enum machine_mode mode)
5726 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5727 enum mode_class mclass;
5729 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5731 if (valtype)
5732 mode = TYPE_MODE (valtype);
5734 mclass = GET_MODE_CLASS (mode);
5735 switch (mclass)
5737 case MODE_INT:
5738 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5739 where we have them returning both SImode and DImode. */
5740 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5741 PROMOTE_MODE (mode, dummy, valtype);
5742 /* FALLTHRU */
5744 case MODE_COMPLEX_INT:
5745 case MODE_VECTOR_INT:
5746 regnum = 0;
5747 break;
5749 case MODE_FLOAT:
5750 regnum = 32;
5751 break;
5753 case MODE_COMPLEX_FLOAT:
5755 enum machine_mode cmode = GET_MODE_INNER (mode);
5757 return gen_rtx_PARALLEL
5758 (VOIDmode,
5759 gen_rtvec (2,
5760 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5761 const0_rtx),
5762 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5763 GEN_INT (GET_MODE_SIZE (cmode)))));
5766 case MODE_RANDOM:
5767 /* We should only reach here for BLKmode on VMS. */
5768 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5769 regnum = 0;
5770 break;
5772 default:
5773 gcc_unreachable ();
5776 return gen_rtx_REG (mode, regnum);
5779 /* TCmode complex values are passed by invisible reference. We
5780 should not split these values. */
5782 static bool
5783 alpha_split_complex_arg (const_tree type)
5785 return TYPE_MODE (type) != TCmode;
5788 static tree
5789 alpha_build_builtin_va_list (void)
5791 tree base, ofs, space, record, type_decl;
5793 if (TARGET_ABI_OPEN_VMS)
5794 return ptr_type_node;
5796 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5797 type_decl = build_decl (BUILTINS_LOCATION,
5798 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5799 TYPE_STUB_DECL (record) = type_decl;
5800 TYPE_NAME (record) = type_decl;
5802 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5804 /* Dummy field to prevent alignment warnings. */
5805 space = build_decl (BUILTINS_LOCATION,
5806 FIELD_DECL, NULL_TREE, integer_type_node);
5807 DECL_FIELD_CONTEXT (space) = record;
5808 DECL_ARTIFICIAL (space) = 1;
5809 DECL_IGNORED_P (space) = 1;
5811 ofs = build_decl (BUILTINS_LOCATION,
5812 FIELD_DECL, get_identifier ("__offset"),
5813 integer_type_node);
5814 DECL_FIELD_CONTEXT (ofs) = record;
5815 DECL_CHAIN (ofs) = space;
5816 /* ??? This is a hack, __offset is marked volatile to prevent
5817 DCE that confuses stdarg optimization and results in
5818 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5819 TREE_THIS_VOLATILE (ofs) = 1;
5821 base = build_decl (BUILTINS_LOCATION,
5822 FIELD_DECL, get_identifier ("__base"),
5823 ptr_type_node);
5824 DECL_FIELD_CONTEXT (base) = record;
5825 DECL_CHAIN (base) = ofs;
5827 TYPE_FIELDS (record) = base;
5828 layout_type (record);
5830 va_list_gpr_counter_field = ofs;
5831 return record;
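/* Roughly equivalent C view of the record built above (illustration;
   nothing in the compiler uses this declaration):

     struct __va_list_tag
     {
       char *__base;      // base address from which arguments are fetched
       int __offset;      // byte offset of the next argument
       int __dummy;       // unnamed pad field, added above only to
                          // silence alignment warnings
     };  */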
5834 #if TARGET_ABI_OSF
5835 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5836 and constant additions. */
5838 static gimple
5839 va_list_skip_additions (tree lhs)
5841 gimple stmt;
5843 for (;;)
5845 enum tree_code code;
5847 stmt = SSA_NAME_DEF_STMT (lhs);
5849 if (gimple_code (stmt) == GIMPLE_PHI)
5850 return stmt;
5852 if (!is_gimple_assign (stmt)
5853 || gimple_assign_lhs (stmt) != lhs)
5854 return NULL;
5856 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5857 return stmt;
5858 code = gimple_assign_rhs_code (stmt);
5859 if (!CONVERT_EXPR_CODE_P (code)
5860 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5861 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5862 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5863 return stmt;
5865 lhs = gimple_assign_rhs1 (stmt);
5869 /* Check if LHS = RHS statement is
5870 LHS = *(ap.__base + ap.__offset + cst)
5872 LHS = *(ap.__base
5873 + ((ap.__offset + cst <= 47)
5874 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5875 If the former, indicate that GPR registers are needed,
5876 if the latter, indicate that FPR registers are needed.
5878 Also look for LHS = (*ptr).field, where ptr is one of the forms
5879 listed above.
5881 On alpha, cfun->va_list_gpr_size is used as the size of the needed
5882 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5883 registers are needed and bit 1 set if FPR registers are needed.
5884 Return true if va_list references should not be scanned for the
5885 current statement. */
5887 static bool
5888 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5890 tree base, offset, rhs;
5891 int offset_arg = 1;
5892 gimple base_stmt;
5894 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5895 != GIMPLE_SINGLE_RHS)
5896 return false;
5898 rhs = gimple_assign_rhs1 (stmt);
5899 while (handled_component_p (rhs))
5900 rhs = TREE_OPERAND (rhs, 0);
5901 if (TREE_CODE (rhs) != MEM_REF
5902 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5903 return false;
5905 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5906 if (stmt == NULL
5907 || !is_gimple_assign (stmt)
5908 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5909 return false;
5911 base = gimple_assign_rhs1 (stmt);
5912 if (TREE_CODE (base) == SSA_NAME)
5914 base_stmt = va_list_skip_additions (base);
5915 if (base_stmt
5916 && is_gimple_assign (base_stmt)
5917 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5918 base = gimple_assign_rhs1 (base_stmt);
5921 if (TREE_CODE (base) != COMPONENT_REF
5922 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5924 base = gimple_assign_rhs2 (stmt);
5925 if (TREE_CODE (base) == SSA_NAME)
5927 base_stmt = va_list_skip_additions (base);
5928 if (base_stmt
5929 && is_gimple_assign (base_stmt)
5930 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5931 base = gimple_assign_rhs1 (base_stmt);
5934 if (TREE_CODE (base) != COMPONENT_REF
5935 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5936 return false;
5938 offset_arg = 0;
5941 base = get_base_address (base);
5942 if (TREE_CODE (base) != VAR_DECL
5943 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
5944 return false;
5946 offset = gimple_op (stmt, 1 + offset_arg);
5947 if (TREE_CODE (offset) == SSA_NAME)
5949 gimple offset_stmt = va_list_skip_additions (offset);
5951 if (offset_stmt
5952 && gimple_code (offset_stmt) == GIMPLE_PHI)
5954 HOST_WIDE_INT sub;
5955 gimple arg1_stmt, arg2_stmt;
5956 tree arg1, arg2;
5957 enum tree_code code1, code2;
5959 if (gimple_phi_num_args (offset_stmt) != 2)
5960 goto escapes;
5962 arg1_stmt
5963 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5964 arg2_stmt
5965 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5966 if (arg1_stmt == NULL
5967 || !is_gimple_assign (arg1_stmt)
5968 || arg2_stmt == NULL
5969 || !is_gimple_assign (arg2_stmt))
5970 goto escapes;
5972 code1 = gimple_assign_rhs_code (arg1_stmt);
5973 code2 = gimple_assign_rhs_code (arg2_stmt);
5974 if (code1 == COMPONENT_REF
5975 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5976 /* Do nothing. */;
5977 else if (code2 == COMPONENT_REF
5978 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5980 gimple tem = arg1_stmt;
5981 code2 = code1;
5982 arg1_stmt = arg2_stmt;
5983 arg2_stmt = tem;
5985 else
5986 goto escapes;
5988 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5989 goto escapes;
5991 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5992 if (code2 == MINUS_EXPR)
5993 sub = -sub;
5994 if (sub < -48 || sub > -32)
5995 goto escapes;
5997 arg1 = gimple_assign_rhs1 (arg1_stmt);
5998 arg2 = gimple_assign_rhs1 (arg2_stmt);
5999 if (TREE_CODE (arg2) == SSA_NAME)
6001 arg2_stmt = va_list_skip_additions (arg2);
6002 if (arg2_stmt == NULL
6003 || !is_gimple_assign (arg2_stmt)
6004 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6005 goto escapes;
6006 arg2 = gimple_assign_rhs1 (arg2_stmt);
6008 if (arg1 != arg2)
6009 goto escapes;
6011 if (TREE_CODE (arg1) != COMPONENT_REF
6012 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6013 || get_base_address (arg1) != base)
6014 goto escapes;
6016 /* Need floating point regs. */
6017 cfun->va_list_fpr_size |= 2;
6018 return false;
6020 if (offset_stmt
6021 && is_gimple_assign (offset_stmt)
6022 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6023 offset = gimple_assign_rhs1 (offset_stmt);
6025 if (TREE_CODE (offset) != COMPONENT_REF
6026 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6027 || get_base_address (offset) != base)
6028 goto escapes;
6029 else
6030 /* Need general regs. */
6031 cfun->va_list_fpr_size |= 1;
6032 return false;
6034 escapes:
6035 si->va_list_escapes = true;
6036 return false;
6038 #endif
6040 /* Perform any actions needed for a function that is receiving a
6041 variable number of arguments. */
6043 static void
6044 alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
6045 tree type, int *pretend_size, int no_rtl)
6047 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6049 /* Skip the current argument. */
6050 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6051 true);
6053 #if TARGET_ABI_OPEN_VMS
6054 /* For VMS, we allocate space for all 6 arg registers plus a count.
6056 However, if NO registers need to be saved, don't allocate any space.
6057 This is not only because we won't need the space, but because AP
6058 includes the current_pretend_args_size and we don't want to mess up
6059 any ap-relative addresses already made. */
6060 if (cum.num_args < 6)
6062 if (!no_rtl)
6064 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6065 emit_insn (gen_arg_home ());
6067 *pretend_size = 7 * UNITS_PER_WORD;
6069 #else
6070 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6071 only push those that are remaining. However, if NO registers need to
6072 be saved, don't allocate any space. This is not only because we won't
6073 need the space, but because AP includes the current_pretend_args_size
6074 and we don't want to mess up any ap-relative addresses already made.
6076 If we are not to use the floating-point registers, save the integer
6077 registers where we would put the floating-point registers. This is
6078 not the most efficient way to implement varargs with just one register
6079 class, but it isn't worth doing anything more efficient in this rare
6080 case. */
6081 if (cum >= 6)
6082 return;
6084 if (!no_rtl)
6086 int count;
6087 alias_set_type set = get_varargs_alias_set ();
6088 rtx tmp;
6090 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6091 if (count > 6 - cum)
6092 count = 6 - cum;
6094 /* Detect whether integer registers or floating-point registers
6095 are needed by the detected va_arg statements. See above for
6096 how these values are computed. Note that the "escape" value
6097 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6098 these bits set. */
6099 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6101 if (cfun->va_list_fpr_size & 1)
6103 tmp = gen_rtx_MEM (BLKmode,
6104 plus_constant (Pmode, virtual_incoming_args_rtx,
6105 (cum + 6) * UNITS_PER_WORD));
6106 MEM_NOTRAP_P (tmp) = 1;
6107 set_mem_alias_set (tmp, set);
6108 move_block_from_reg (16 + cum, tmp, count);
6111 if (cfun->va_list_fpr_size & 2)
6113 tmp = gen_rtx_MEM (BLKmode,
6114 plus_constant (Pmode, virtual_incoming_args_rtx,
6115 cum * UNITS_PER_WORD));
6116 MEM_NOTRAP_P (tmp) = 1;
6117 set_mem_alias_set (tmp, set);
6118 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6121 *pretend_size = 12 * UNITS_PER_WORD;
6122 #endif
6125 static void
6126 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6128 HOST_WIDE_INT offset;
6129 tree t, offset_field, base_field;
6131 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6132 return;
6134 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6135 up by 48, storing fp arg registers in the first 48 bytes, and the
6136 integer arg registers in the next 48 bytes. This is only done,
6137 however, if any integer registers need to be stored.
6139 If no integer registers need be stored, then we must subtract 48
6140 in order to account for the integer arg registers which are counted
6141 in argsize above, but which are not actually stored on the stack.
6142 Must further be careful here about structures straddling the last
6143 integer argument register; that futzes with pretend_args_size,
6144 which changes the meaning of AP. */
6146 if (NUM_ARGS < 6)
6147 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6148 else
6149 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6151 if (TARGET_ABI_OPEN_VMS)
6153 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6154 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6155 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6156 TREE_SIDE_EFFECTS (t) = 1;
6157 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6159 else
6161 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6162 offset_field = DECL_CHAIN (base_field);
6164 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6165 valist, base_field, NULL_TREE);
6166 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6167 valist, offset_field, NULL_TREE);
6169 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6170 t = fold_build_pointer_plus_hwi (t, offset);
6171 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6172 TREE_SIDE_EFFECTS (t) = 1;
6173 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6175 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6176 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6177 TREE_SIDE_EFFECTS (t) = 1;
6178 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
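/* Worked example (illustration): on OSF with two named arguments,
   NUM_ARGS == 2 < 6, so __base is set 48 bytes past the incoming-args
   pointer (the first 48 bytes hold the saved FP argument registers, per
   the comment above) and __offset starts at 2 * UNITS_PER_WORD == 16.  */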
6182 static tree
6183 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6184 gimple_seq *pre_p)
6186 tree type_size, ptr_type, addend, t, addr;
6187 gimple_seq internal_post;
6189 /* If the type could not be passed in registers, skip the block
6190 reserved for the registers. */
6191 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6193 t = build_int_cst (TREE_TYPE (offset), 6*8);
6194 gimplify_assign (offset,
6195 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6196 pre_p);
6199 addend = offset;
6200 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6202 if (TREE_CODE (type) == COMPLEX_TYPE)
6204 tree real_part, imag_part, real_temp;
6206 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6207 offset, pre_p);
6209 /* Copy the value into a new temporary, lest the formal temporary
6210 be reused out from under us. */
6211 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6213 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6214 offset, pre_p);
6216 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6218 else if (TREE_CODE (type) == REAL_TYPE)
6220 tree fpaddend, cond, fourtyeight;
6222 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6223 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6224 addend, fourtyeight);
6225 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6226 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6227 fpaddend, addend);
6230 /* Build the final address and force that value into a temporary. */
6231 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6232 internal_post = NULL;
6233 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6234 gimple_seq_add_seq (pre_p, internal_post);
6236 /* Update the offset field. */
6237 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6238 if (type_size == NULL || TREE_OVERFLOW (type_size))
6239 t = size_zero_node;
6240 else
6242 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6243 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6244 t = size_binop (MULT_EXPR, t, size_int (8));
6246 t = fold_convert (TREE_TYPE (offset), t);
6247 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6248 pre_p);
6250 return build_va_arg_indirect_ref (addr);
6253 static tree
6254 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6255 gimple_seq *post_p)
6257 tree offset_field, base_field, offset, base, t, r;
6258 bool indirect;
6260 if (TARGET_ABI_OPEN_VMS)
6261 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6263 base_field = TYPE_FIELDS (va_list_type_node);
6264 offset_field = DECL_CHAIN (base_field);
6265 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6266 valist, base_field, NULL_TREE);
6267 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6268 valist, offset_field, NULL_TREE);
6270 /* Pull the fields of the structure out into temporaries. Since we never
6271 modify the base field, we can use a formal temporary. Sign-extend the
6272 offset field so that it's the proper width for pointer arithmetic. */
6273 base = get_formal_tmp_var (base_field, pre_p);
6275 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
6276 offset = get_initialized_tmp_var (t, pre_p, NULL);
6278 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6279 if (indirect)
6280 type = build_pointer_type_for_mode (type, ptr_mode, true);
6282 /* Find the value. Note that this will be a stable indirection, or
6283 a composite of stable indirections in the case of complex. */
6284 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6286 /* Stuff the offset temporary back into its field. */
6287 gimplify_assign (unshare_expr (offset_field),
6288 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6290 if (indirect)
6291 r = build_va_arg_indirect_ref (r);
6293 return r;
6296 /* Builtins. */
6298 enum alpha_builtin
6300 ALPHA_BUILTIN_CMPBGE,
6301 ALPHA_BUILTIN_EXTBL,
6302 ALPHA_BUILTIN_EXTWL,
6303 ALPHA_BUILTIN_EXTLL,
6304 ALPHA_BUILTIN_EXTQL,
6305 ALPHA_BUILTIN_EXTWH,
6306 ALPHA_BUILTIN_EXTLH,
6307 ALPHA_BUILTIN_EXTQH,
6308 ALPHA_BUILTIN_INSBL,
6309 ALPHA_BUILTIN_INSWL,
6310 ALPHA_BUILTIN_INSLL,
6311 ALPHA_BUILTIN_INSQL,
6312 ALPHA_BUILTIN_INSWH,
6313 ALPHA_BUILTIN_INSLH,
6314 ALPHA_BUILTIN_INSQH,
6315 ALPHA_BUILTIN_MSKBL,
6316 ALPHA_BUILTIN_MSKWL,
6317 ALPHA_BUILTIN_MSKLL,
6318 ALPHA_BUILTIN_MSKQL,
6319 ALPHA_BUILTIN_MSKWH,
6320 ALPHA_BUILTIN_MSKLH,
6321 ALPHA_BUILTIN_MSKQH,
6322 ALPHA_BUILTIN_UMULH,
6323 ALPHA_BUILTIN_ZAP,
6324 ALPHA_BUILTIN_ZAPNOT,
6325 ALPHA_BUILTIN_AMASK,
6326 ALPHA_BUILTIN_IMPLVER,
6327 ALPHA_BUILTIN_RPCC,
6328 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6329 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6331 /* TARGET_MAX */
6332 ALPHA_BUILTIN_MINUB8,
6333 ALPHA_BUILTIN_MINSB8,
6334 ALPHA_BUILTIN_MINUW4,
6335 ALPHA_BUILTIN_MINSW4,
6336 ALPHA_BUILTIN_MAXUB8,
6337 ALPHA_BUILTIN_MAXSB8,
6338 ALPHA_BUILTIN_MAXUW4,
6339 ALPHA_BUILTIN_MAXSW4,
6340 ALPHA_BUILTIN_PERR,
6341 ALPHA_BUILTIN_PKLB,
6342 ALPHA_BUILTIN_PKWB,
6343 ALPHA_BUILTIN_UNPKBL,
6344 ALPHA_BUILTIN_UNPKBW,
6346 /* TARGET_CIX */
6347 ALPHA_BUILTIN_CTTZ,
6348 ALPHA_BUILTIN_CTLZ,
6349 ALPHA_BUILTIN_CTPOP,
6351 ALPHA_BUILTIN_max
6354 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6355 CODE_FOR_builtin_cmpbge,
6356 CODE_FOR_extbl,
6357 CODE_FOR_extwl,
6358 CODE_FOR_extll,
6359 CODE_FOR_extql,
6360 CODE_FOR_extwh,
6361 CODE_FOR_extlh,
6362 CODE_FOR_extqh,
6363 CODE_FOR_builtin_insbl,
6364 CODE_FOR_builtin_inswl,
6365 CODE_FOR_builtin_insll,
6366 CODE_FOR_insql,
6367 CODE_FOR_inswh,
6368 CODE_FOR_inslh,
6369 CODE_FOR_insqh,
6370 CODE_FOR_mskbl,
6371 CODE_FOR_mskwl,
6372 CODE_FOR_mskll,
6373 CODE_FOR_mskql,
6374 CODE_FOR_mskwh,
6375 CODE_FOR_msklh,
6376 CODE_FOR_mskqh,
6377 CODE_FOR_umuldi3_highpart,
6378 CODE_FOR_builtin_zap,
6379 CODE_FOR_builtin_zapnot,
6380 CODE_FOR_builtin_amask,
6381 CODE_FOR_builtin_implver,
6382 CODE_FOR_builtin_rpcc,
6383 CODE_FOR_builtin_establish_vms_condition_handler,
6384 CODE_FOR_builtin_revert_vms_condition_handler,
6386 /* TARGET_MAX */
6387 CODE_FOR_builtin_minub8,
6388 CODE_FOR_builtin_minsb8,
6389 CODE_FOR_builtin_minuw4,
6390 CODE_FOR_builtin_minsw4,
6391 CODE_FOR_builtin_maxub8,
6392 CODE_FOR_builtin_maxsb8,
6393 CODE_FOR_builtin_maxuw4,
6394 CODE_FOR_builtin_maxsw4,
6395 CODE_FOR_builtin_perr,
6396 CODE_FOR_builtin_pklb,
6397 CODE_FOR_builtin_pkwb,
6398 CODE_FOR_builtin_unpkbl,
6399 CODE_FOR_builtin_unpkbw,
6401 /* TARGET_CIX */
6402 CODE_FOR_ctzdi2,
6403 CODE_FOR_clzdi2,
6404 CODE_FOR_popcountdi2
6407 struct alpha_builtin_def
6409 const char *name;
6410 enum alpha_builtin code;
6411 unsigned int target_mask;
6412 bool is_const;
6415 static struct alpha_builtin_def const zero_arg_builtins[] = {
6416 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6417 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6420 static struct alpha_builtin_def const one_arg_builtins[] = {
6421 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6422 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6423 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6424 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6425 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6426 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6427 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6428 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6431 static struct alpha_builtin_def const two_arg_builtins[] = {
6432 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6433 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6434 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6435 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6436 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6437 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6438 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6439 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6440 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6441 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6442 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6443 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6444 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6445 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6446 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6447 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6448 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6449 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6450 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6451 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6452 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6453 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6454 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6455 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6456 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6457 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6458 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6459 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6460 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6461 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6462 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6463 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6464 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6465 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6468 static GTY(()) tree alpha_dimode_u;
6469 static GTY(()) tree alpha_v8qi_u;
6470 static GTY(()) tree alpha_v8qi_s;
6471 static GTY(()) tree alpha_v4hi_u;
6472 static GTY(()) tree alpha_v4hi_s;
6474 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6476 /* Return the alpha builtin for CODE. */
6478 static tree
6479 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6481 if (code >= ALPHA_BUILTIN_max)
6482 return error_mark_node;
6483 return alpha_builtins[code];
6486 /* Helper function of alpha_init_builtins. Add the built-in specified
6487 by NAME, FTYPE, CODE, and ECF. */
6489 static void
6490 alpha_builtin_function (const char *name, tree ftype,
6491 enum alpha_builtin code, unsigned ecf)
6493 tree decl = add_builtin_function (name, ftype, (int) code,
6494 BUILT_IN_MD, NULL, NULL_TREE);
6496 if (ecf & ECF_CONST)
6497 TREE_READONLY (decl) = 1;
6498 if (ecf & ECF_NOTHROW)
6499 TREE_NOTHROW (decl) = 1;
6501 alpha_builtins [(int) code] = decl;
6504 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6505 functions pointed to by P, with function type FTYPE. */
6507 static void
6508 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6509 tree ftype)
6511 size_t i;
6513 for (i = 0; i < count; ++i, ++p)
6514 if ((target_flags & p->target_mask) == p->target_mask)
6515 alpha_builtin_function (p->name, ftype, p->code,
6516 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6519 static void
6520 alpha_init_builtins (void)
6522 tree ftype;
6524 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6525 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6526 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6527 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6528 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6530 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6531 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6533 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6534 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6536 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6537 alpha_dimode_u, NULL_TREE);
6538 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
6540 if (TARGET_ABI_OPEN_VMS)
6542 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6543 NULL_TREE);
6544 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6545 ftype,
6546 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6549 ftype = build_function_type_list (ptr_type_node, void_type_node,
6550 NULL_TREE);
6551 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6552 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6554 vms_patch_builtins ();
6558 /* Expand an expression EXP that calls a built-in function,
6559 with result going to TARGET if that's convenient
6560 (and in mode MODE if that's convenient).
6561 SUBTARGET may be used as the target for computing one of EXP's operands.
6562 IGNORE is nonzero if the value is to be ignored. */
6564 static rtx
6565 alpha_expand_builtin (tree exp, rtx target,
6566 rtx subtarget ATTRIBUTE_UNUSED,
6567 enum machine_mode mode ATTRIBUTE_UNUSED,
6568 int ignore ATTRIBUTE_UNUSED)
6570 #define MAX_ARGS 2
6572 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6573 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6574 tree arg;
6575 call_expr_arg_iterator iter;
6576 enum insn_code icode;
6577 rtx op[MAX_ARGS], pat;
6578 int arity;
6579 bool nonvoid;
6581 if (fcode >= ALPHA_BUILTIN_max)
6582 internal_error ("bad builtin fcode");
6583 icode = code_for_builtin[fcode];
6584 if (icode == 0)
6585 internal_error ("bad builtin fcode");
6587 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6589 arity = 0;
6590 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6592 const struct insn_operand_data *insn_op;
6594 if (arg == error_mark_node)
6595 return NULL_RTX;
6596 if (arity > MAX_ARGS)
6597 return NULL_RTX;
6599 insn_op = &insn_data[icode].operand[arity + nonvoid];
6601 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6603 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6604 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6605 arity++;
6608 if (nonvoid)
6610 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6611 if (!target
6612 || GET_MODE (target) != tmode
6613 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6614 target = gen_reg_rtx (tmode);
6617 switch (arity)
6619 case 0:
6620 pat = GEN_FCN (icode) (target);
6621 break;
6622 case 1:
6623 if (nonvoid)
6624 pat = GEN_FCN (icode) (target, op[0]);
6625 else
6626 pat = GEN_FCN (icode) (op[0]);
6627 break;
6628 case 2:
6629 pat = GEN_FCN (icode) (target, op[0], op[1]);
6630 break;
6631 default:
6632 gcc_unreachable ();
6634 if (!pat)
6635 return NULL_RTX;
6636 emit_insn (pat);
6638 if (nonvoid)
6639 return target;
6640 else
6641 return const0_rtx;
6645 /* Several bits below assume HWI >= 64 bits. This should be enforced
6646 by config.gcc. */
6647 #if HOST_BITS_PER_WIDE_INT < 64
6648 # error "HOST_WIDE_INT too small"
6649 #endif
6651 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6652 with an 8-bit output vector. OPINT contains the integer operands; bit N
6653 of OP_CONST is set if OPINT[N] is valid. */
6655 static tree
6656 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6658 if (op_const == 3)
6660 int i, val;
6661 for (i = 0, val = 0; i < 8; ++i)
6663 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6664 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6665 if (c0 >= c1)
6666 val |= 1 << i;
6668 return build_int_cst (alpha_dimode_u, val);
6670 else if (op_const == 2 && opint[1] == 0)
6671 return build_int_cst (alpha_dimode_u, 0xff);
6672 return NULL;
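/* Worked example of the fold above (illustrative only): CMPBGE sets bit
   N of the result when byte N of the first operand is unsigned
   greater-or-equal to byte N of the second.  So with both operands
   constant, __builtin_alpha_cmpbge (0x00ffffffffffffff, 0x0101010101010101)
   folds to 0x7f: bytes 0-6 are 0xff >= 0x01, byte 7 is 0x00 < 0x01.
   The op_const == 2 case relies on every byte being >= 0.  */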
6675 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6676 specialized form of an AND operation. Other byte manipulation instructions
6677 are defined in terms of this instruction, so this is also used as a
6678 subroutine for other builtins.
6680 OP contains the tree operands; OPINT contains the extracted integer values.
6681 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6682 OPINT may be considered. */
6684 static tree
6685 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6686 long op_const)
6688 if (op_const & 2)
6690 unsigned HOST_WIDE_INT mask = 0;
6691 int i;
6693 for (i = 0; i < 8; ++i)
6694 if ((opint[1] >> i) & 1)
6695 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6697 if (op_const & 1)
6698 return build_int_cst (alpha_dimode_u, opint[0] & mask);
6700 if (op)
6701 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6702 build_int_cst (alpha_dimode_u, mask));
6704 else if ((op_const & 1) && opint[0] == 0)
6705 return build_int_cst (alpha_dimode_u, 0);
6706 return NULL;
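/* Example of the mask expansion above (illustrative only): a constant
   second operand of 0x0f expands to the byte mask 0x00000000ffffffff,
   so __builtin_alpha_zapnot (x, 0x0f) folds to x & 0xffffffff, and a
   fully constant call folds to the corresponding constant.  */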
6709 /* Fold the builtins for the EXT family of instructions. */
6711 static tree
6712 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6713 long op_const, unsigned HOST_WIDE_INT bytemask,
6714 bool is_high)
6716 long zap_const = 2;
6717 tree *zap_op = NULL;
6719 if (op_const & 2)
6721 unsigned HOST_WIDE_INT loc;
6723 loc = opint[1] & 7;
6724 loc *= BITS_PER_UNIT;
6726 if (loc != 0)
6728 if (op_const & 1)
6730 unsigned HOST_WIDE_INT temp = opint[0];
6731 if (is_high)
6732 temp <<= loc;
6733 else
6734 temp >>= loc;
6735 opint[0] = temp;
6736 zap_const = 3;
6739 else
6740 zap_op = op;
6743 opint[1] = bytemask;
6744 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
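/* Worked example (illustrative only): for EXTBL the byte mask is 0x01
   and is_high is false, so __builtin_alpha_extbl (0x1122334455667788, 2)
   shifts the first operand right by 2*8 bits and keeps byte 0, folding
   to 0x66.  The EXTxH variants shift left instead.  */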
6747 /* Fold the builtins for the INS family of instructions. */
6749 static tree
6750 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6751 long op_const, unsigned HOST_WIDE_INT bytemask,
6752 bool is_high)
6754 if ((op_const & 1) && opint[0] == 0)
6755 return build_int_cst (alpha_dimode_u, 0);
6757 if (op_const & 2)
6759 unsigned HOST_WIDE_INT temp, loc, byteloc;
6760 tree *zap_op = NULL;
6762 loc = opint[1] & 7;
6763 bytemask <<= loc;
6765 temp = opint[0];
6766 if (is_high)
6768 byteloc = (64 - (loc * 8)) & 0x3f;
6769 if (byteloc == 0)
6770 zap_op = op;
6771 else
6772 temp >>= byteloc;
6773 bytemask >>= 8;
6775 else
6777 byteloc = loc * 8;
6778 if (byteloc == 0)
6779 zap_op = op;
6780 else
6781 temp <<= byteloc;
6784 opint[0] = temp;
6785 opint[1] = bytemask;
6786 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6789 return NULL;
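/* Worked example (illustrative only): for INSBL the byte mask is 0x01,
   so with a constant position __builtin_alpha_insbl (0xab, 3) shifts
   the low byte of the first operand up to byte 3 and masks the rest,
   folding to 0x00000000ab000000.  */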
6792 static tree
6793 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6794 long op_const, unsigned HOST_WIDE_INT bytemask,
6795 bool is_high)
6797 if (op_const & 2)
6799 unsigned HOST_WIDE_INT loc;
6801 loc = opint[1] & 7;
6802 bytemask <<= loc;
6804 if (is_high)
6805 bytemask >>= 8;
6807 opint[1] = bytemask ^ 0xff;
6810 return alpha_fold_builtin_zapnot (op, opint, op_const);
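/* Worked example (illustrative only): for MSKBL the byte mask is 0x01,
   so with a constant position the fold inverts the shifted mask and
   __builtin_alpha_mskbl (0x1122334455667788, 2) clears byte 2 only,
   folding to 0x1122334455007788.  */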
6813 static tree
6814 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6816 tree op0 = fold_convert (vtype, op[0]);
6817 tree op1 = fold_convert (vtype, op[1]);
6818 tree val = fold_build2 (code, vtype, op0, op1);
6819 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
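/* Example of the MIN/MAX folds (illustrative only): the two DImode
   operands are reinterpreted as vectors, so
   __builtin_alpha_minub8 (0x01ff, 0x02ee) takes the unsigned minimum
   in each of the eight byte lanes and folds to 0x01ee.  */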
6822 static tree
6823 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6825 unsigned HOST_WIDE_INT temp = 0;
6826 int i;
6828 if (op_const != 3)
6829 return NULL;
6831 for (i = 0; i < 8; ++i)
6833 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6834 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6835 if (a >= b)
6836 temp += a - b;
6837 else
6838 temp += b - a;
6841 return build_int_cst (alpha_dimode_u, temp);
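/* Worked example (illustrative only): PERR sums the absolute
   differences of the eight byte lanes, so with both operands constant
   __builtin_alpha_perr (0x0005, 0x0203) folds to
   |0x05 - 0x03| + |0x00 - 0x02| = 4.  */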
6844 static tree
6845 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6847 unsigned HOST_WIDE_INT temp;
6849 if (op_const == 0)
6850 return NULL;
6852 temp = opint[0] & 0xff;
6853 temp |= (opint[0] >> 24) & 0xff00;
6855 return build_int_cst (alpha_dimode_u, temp);
6858 static tree
6859 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6861 unsigned HOST_WIDE_INT temp;
6863 if (op_const == 0)
6864 return NULL;
6866 temp = opint[0] & 0xff;
6867 temp |= (opint[0] >> 8) & 0xff00;
6868 temp |= (opint[0] >> 16) & 0xff0000;
6869 temp |= (opint[0] >> 24) & 0xff000000;
6871 return build_int_cst (alpha_dimode_u, temp);
6874 static tree
6875 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6877 unsigned HOST_WIDE_INT temp;
6879 if (op_const == 0)
6880 return NULL;
6882 temp = opint[0] & 0xff;
6883 temp |= (opint[0] & 0xff00) << 24;
6885 return build_int_cst (alpha_dimode_u, temp);
6888 static tree
6889 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6891 unsigned HOST_WIDE_INT temp;
6893 if (op_const == 0)
6894 return NULL;
6896 temp = opint[0] & 0xff;
6897 temp |= (opint[0] & 0x0000ff00) << 8;
6898 temp |= (opint[0] & 0x00ff0000) << 16;
6899 temp |= (opint[0] & 0xff000000) << 24;
6901 return build_int_cst (alpha_dimode_u, temp);
6904 static tree
6905 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6907 unsigned HOST_WIDE_INT temp;
6909 if (op_const == 0)
6910 return NULL;
6912 if (opint[0] == 0)
6913 temp = 64;
6914 else
6915 temp = exact_log2 (opint[0] & -opint[0]);
6917 return build_int_cst (alpha_dimode_u, temp);
6920 static tree
6921 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6923 unsigned HOST_WIDE_INT temp;
6925 if (op_const == 0)
6926 return NULL;
6928 if (opint[0] == 0)
6929 temp = 64;
6930 else
6931 temp = 64 - floor_log2 (opint[0]) - 1;
6933 return build_int_cst (alpha_dimode_u, temp);
6936 static tree
6937 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6939 unsigned HOST_WIDE_INT temp, op;
6941 if (op_const == 0)
6942 return NULL;
6944 op = opint[0];
6945 temp = 0;
6946 while (op)
6947 temp++, op &= op - 1;
6949 return build_int_cst (alpha_dimode_u, temp);
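/* Worked examples for the three folds above (illustrative only):
   cttz (0x80) = 7 via exact_log2 of the isolated low bit,
   ctlz (1) = 64 - floor_log2 (1) - 1 = 63, and ctpop (0xf0f0) = 8.
   cttz and ctlz both fold a zero operand to 64, while ctpop (0)
   is simply 0.  */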
6952 /* Fold one of our builtin functions. */
6954 static tree
6955 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6956 bool ignore ATTRIBUTE_UNUSED)
6958 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6959 long op_const = 0;
6960 int i;
6962 if (n_args > MAX_ARGS)
6963 return NULL;
6965 for (i = 0; i < n_args; i++)
6967 tree arg = op[i];
6968 if (arg == error_mark_node)
6969 return NULL;
6971 opint[i] = 0;
6972 if (TREE_CODE (arg) == INTEGER_CST)
6974 op_const |= 1L << i;
6975 opint[i] = int_cst_value (arg);
6979 switch (DECL_FUNCTION_CODE (fndecl))
6981 case ALPHA_BUILTIN_CMPBGE:
6982 return alpha_fold_builtin_cmpbge (opint, op_const);
6984 case ALPHA_BUILTIN_EXTBL:
6985 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6986 case ALPHA_BUILTIN_EXTWL:
6987 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6988 case ALPHA_BUILTIN_EXTLL:
6989 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6990 case ALPHA_BUILTIN_EXTQL:
6991 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6992 case ALPHA_BUILTIN_EXTWH:
6993 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6994 case ALPHA_BUILTIN_EXTLH:
6995 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6996 case ALPHA_BUILTIN_EXTQH:
6997 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6999 case ALPHA_BUILTIN_INSBL:
7000 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7001 case ALPHA_BUILTIN_INSWL:
7002 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7003 case ALPHA_BUILTIN_INSLL:
7004 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7005 case ALPHA_BUILTIN_INSQL:
7006 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7007 case ALPHA_BUILTIN_INSWH:
7008 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7009 case ALPHA_BUILTIN_INSLH:
7010 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7011 case ALPHA_BUILTIN_INSQH:
7012 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7014 case ALPHA_BUILTIN_MSKBL:
7015 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7016 case ALPHA_BUILTIN_MSKWL:
7017 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7018 case ALPHA_BUILTIN_MSKLL:
7019 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7020 case ALPHA_BUILTIN_MSKQL:
7021 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7022 case ALPHA_BUILTIN_MSKWH:
7023 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7024 case ALPHA_BUILTIN_MSKLH:
7025 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7026 case ALPHA_BUILTIN_MSKQH:
7027 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7029 case ALPHA_BUILTIN_UMULH:
7030 return fold_build2 (MULT_HIGHPART_EXPR, alpha_dimode_u, op[0], op[1]);
7032 case ALPHA_BUILTIN_ZAP:
7033 opint[1] ^= 0xff;
7034 /* FALLTHRU */
7035 case ALPHA_BUILTIN_ZAPNOT:
7036 return alpha_fold_builtin_zapnot (op, opint, op_const);
7038 case ALPHA_BUILTIN_MINUB8:
7039 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7040 case ALPHA_BUILTIN_MINSB8:
7041 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7042 case ALPHA_BUILTIN_MINUW4:
7043 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7044 case ALPHA_BUILTIN_MINSW4:
7045 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7046 case ALPHA_BUILTIN_MAXUB8:
7047 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7048 case ALPHA_BUILTIN_MAXSB8:
7049 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7050 case ALPHA_BUILTIN_MAXUW4:
7051 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7052 case ALPHA_BUILTIN_MAXSW4:
7053 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7055 case ALPHA_BUILTIN_PERR:
7056 return alpha_fold_builtin_perr (opint, op_const);
7057 case ALPHA_BUILTIN_PKLB:
7058 return alpha_fold_builtin_pklb (opint, op_const);
7059 case ALPHA_BUILTIN_PKWB:
7060 return alpha_fold_builtin_pkwb (opint, op_const);
7061 case ALPHA_BUILTIN_UNPKBL:
7062 return alpha_fold_builtin_unpkbl (opint, op_const);
7063 case ALPHA_BUILTIN_UNPKBW:
7064 return alpha_fold_builtin_unpkbw (opint, op_const);
7066 case ALPHA_BUILTIN_CTTZ:
7067 return alpha_fold_builtin_cttz (opint, op_const);
7068 case ALPHA_BUILTIN_CTLZ:
7069 return alpha_fold_builtin_ctlz (opint, op_const);
7070 case ALPHA_BUILTIN_CTPOP:
7071 return alpha_fold_builtin_ctpop (opint, op_const);
7073 case ALPHA_BUILTIN_AMASK:
7074 case ALPHA_BUILTIN_IMPLVER:
7075 case ALPHA_BUILTIN_RPCC:
7076 /* None of these are foldable at compile-time. */
7077 default:
7078 return NULL;
7082 /* This page contains routines that are used to determine what the function
7083 prologue and epilogue code will do and write them out. */
7085 /* Compute the size of the save area in the stack. */
7087 /* These variables are used for communication between the following functions.
7088 They indicate various things about the current function being compiled
7089 that are used to tell what kind of prologue, epilogue and procedure
7090 descriptor to generate. */
7092 /* Nonzero if we need a stack procedure. */
7093 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7094 static enum alpha_procedure_types alpha_procedure_type;
7096 /* Register number (either FP or SP) that is used to unwind the frame. */
7097 static int vms_unwind_regno;
7099 /* Register number used to save FP. We need not have one for RA since
7100 we don't modify it for register procedures. This is only defined
7101 for register frame procedures. */
7102 static int vms_save_fp_regno;
7104 /* Register number used to reference objects off our PV. */
7105 static int vms_base_regno;
7107 /* Compute register masks for saved registers. */
7109 static void
7110 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7112 unsigned long imask = 0;
7113 unsigned long fmask = 0;
7114 unsigned int i;
7116 /* When outputting a thunk, we don't have valid register life info,
7117 but assemble_start_function wants to output .frame and .mask
7118 directives. */
7119 if (cfun->is_thunk)
7121 *imaskP = 0;
7122 *fmaskP = 0;
7123 return;
7126 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7127 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7129 /* One for every register we have to save. */
7130 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7131 if (! fixed_regs[i] && ! call_used_regs[i]
7132 && df_regs_ever_live_p (i) && i != REG_RA)
7134 if (i < 32)
7135 imask |= (1UL << i);
7136 else
7137 fmask |= (1UL << (i - 32));
7140 /* We need to restore these for the handler. */
7141 if (crtl->calls_eh_return)
7143 for (i = 0; ; ++i)
7145 unsigned regno = EH_RETURN_DATA_REGNO (i);
7146 if (regno == INVALID_REGNUM)
7147 break;
7148 imask |= 1UL << regno;
7152 /* If any register spilled, then spill the return address also. */
7153 /* ??? This is required by the Digital stack unwind specification
7154 and isn't needed if we're doing Dwarf2 unwinding. */
7155 if (imask || fmask || alpha_ra_ever_killed ())
7156 imask |= (1UL << REG_RA);
7158 *imaskP = imask;
7159 *fmaskP = fmask;
7163 alpha_sa_size (void)
7165 unsigned long mask[2];
7166 int sa_size = 0;
7167 int i, j;
7169 alpha_sa_mask (&mask[0], &mask[1]);
7171 for (j = 0; j < 2; ++j)
7172 for (i = 0; i < 32; ++i)
7173 if ((mask[j] >> i) & 1)
7174 sa_size++;
7176 if (TARGET_ABI_OPEN_VMS)
7178 /* Start with a stack procedure if we make any calls (REG_RA used), or
7179 need a frame pointer, with a register procedure if we otherwise need
7180 at least a slot, and with a null procedure in other cases. */
7181 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7182 alpha_procedure_type = PT_STACK;
7183 else if (get_frame_size() != 0)
7184 alpha_procedure_type = PT_REGISTER;
7185 else
7186 alpha_procedure_type = PT_NULL;
7188 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7189 made the final decision on stack procedure vs register procedure. */
7190 if (alpha_procedure_type == PT_STACK)
7191 sa_size -= 2;
7193 /* Decide whether to refer to objects off our PV via FP or PV.
7194 If we need FP for something else or if we receive a nonlocal
7195 goto (which expects PV to contain the value), we must use PV.
7196 Otherwise, start by assuming we can use FP. */
7198 vms_base_regno
7199 = (frame_pointer_needed
7200 || cfun->has_nonlocal_label
7201 || alpha_procedure_type == PT_STACK
7202 || crtl->outgoing_args_size)
7203 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7205 /* If we want to copy PV into FP, we need to find some register
7206 in which to save FP. */
7208 vms_save_fp_regno = -1;
7209 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7210 for (i = 0; i < 32; i++)
7211 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7212 vms_save_fp_regno = i;
7214 /* A VMS condition handler requires a stack procedure in our
7215 implementation, though the calling standard does not require one. */
7216 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7217 || cfun->machine->uses_condition_handler)
7218 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7219 else if (alpha_procedure_type == PT_NULL)
7220 vms_base_regno = REG_PV;
7222 /* Stack unwinding should be done via FP unless we use it for PV. */
7223 vms_unwind_regno = (vms_base_regno == REG_PV
7224 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7226 /* If this is a stack procedure, allow space for saving FP, RA and
7227 a condition handler slot if needed. */
7228 if (alpha_procedure_type == PT_STACK)
7229 sa_size += 2 + cfun->machine->uses_condition_handler;
7231 else
7233 /* Our size must be even (multiple of 16 bytes). */
7234 if (sa_size & 1)
7235 sa_size++;
7238 return sa_size * 8;
7241 /* Define the offset between two registers, one to be eliminated,
7242 and the other its replacement, at the start of a routine. */
7244 HOST_WIDE_INT
7245 alpha_initial_elimination_offset (unsigned int from,
7246 unsigned int to ATTRIBUTE_UNUSED)
7248 HOST_WIDE_INT ret;
7250 ret = alpha_sa_size ();
7251 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7253 switch (from)
7255 case FRAME_POINTER_REGNUM:
7256 break;
7258 case ARG_POINTER_REGNUM:
7259 ret += (ALPHA_ROUND (get_frame_size ()
7260 + crtl->args.pretend_args_size)
7261 - crtl->args.pretend_args_size);
7262 break;
7264 default:
7265 gcc_unreachable ();
7268 return ret;
7271 #if TARGET_ABI_OPEN_VMS
7273 /* Worker function for TARGET_CAN_ELIMINATE. */
7275 static bool
7276 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7278 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7279 alpha_sa_size ();
7281 switch (alpha_procedure_type)
7283 case PT_NULL:
7284 /* NULL procedures have no frame of their own and we only
7285 know how to resolve from the current stack pointer. */
7286 return to == STACK_POINTER_REGNUM;
7288 case PT_REGISTER:
7289 case PT_STACK:
7290 /* We always allow elimination, except to the stack pointer when a
7291 usable frame pointer is at hand. */
7292 return (to != STACK_POINTER_REGNUM
7293 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7296 gcc_unreachable ();
7299 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7300 designates the same location as FROM. */
7302 HOST_WIDE_INT
7303 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7305 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7306 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7307 on the proper computations and will need the register save area size
7308 in most cases. */
7310 HOST_WIDE_INT sa_size = alpha_sa_size ();
7312 /* PT_NULL procedures have no frame of their own and we only allow
7313 elimination to the stack pointer. This is the argument pointer and we
7314 resolve the soft frame pointer to that as well. */
7316 if (alpha_procedure_type == PT_NULL)
7317 return 0;
7319 /* For a PT_STACK procedure the frame layout looks as follows
7321 -----> decreasing addresses
7323 < size rounded up to 16 | likewise >
7324 --------------#------------------------------+++--------------+++-------#
7325 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7326 --------------#---------------------------------------------------------#
7327 ^ ^ ^ ^
7328 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7331 PT_REGISTER procedures are similar in that they may have a frame of their
7332 own. They have no regs-sa/pv/outgoing-args area.
7334 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7335 to STACK_PTR if need be. */
7338 HOST_WIDE_INT offset;
7339 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7341 switch (from)
7343 case FRAME_POINTER_REGNUM:
7344 offset = ALPHA_ROUND (sa_size + pv_save_size);
7345 break;
7346 case ARG_POINTER_REGNUM:
7347 offset = (ALPHA_ROUND (sa_size + pv_save_size
7348 + get_frame_size ()
7349 + crtl->args.pretend_args_size)
7350 - crtl->args.pretend_args_size);
7351 break;
7352 default:
7353 gcc_unreachable ();
7356 if (to == STACK_POINTER_REGNUM)
7357 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7359 return offset;
7363 #define COMMON_OBJECT "common_object"
7365 static tree
7366 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7367 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7368 bool *no_add_attrs ATTRIBUTE_UNUSED)
7370 tree decl = *node;
7371 gcc_assert (DECL_P (decl));
7373 DECL_COMMON (decl) = 1;
7374 return NULL_TREE;
7377 static const struct attribute_spec vms_attribute_table[] =
7379 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7380 affects_type_identity } */
7381 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7382 { NULL, 0, 0, false, false, false, NULL, false }
7385 void
7386 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7387 unsigned HOST_WIDE_INT size,
7388 unsigned int align)
7390 tree attr = DECL_ATTRIBUTES (decl);
7391 fprintf (file, "%s", COMMON_ASM_OP);
7392 assemble_name (file, name);
7393 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7394 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7395 fprintf (file, ",%u", align / BITS_PER_UNIT);
7396 if (attr)
7398 attr = lookup_attribute (COMMON_OBJECT, attr);
7399 if (attr)
7400 fprintf (file, ",%s",
7401 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7403 fputc ('\n', file);
7406 #undef COMMON_OBJECT
7408 #endif
7410 static int
7411 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7413 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7417 alpha_find_lo_sum_using_gp (rtx insn)
7419 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7422 static int
7423 alpha_does_function_need_gp (void)
7425 rtx insn;
7427 /* The GP being variable is an OSF abi thing. */
7428 if (! TARGET_ABI_OSF)
7429 return 0;
7431 /* We need the gp to load the address of __mcount. */
7432 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7433 return 1;
7435 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7436 if (cfun->is_thunk)
7437 return 1;
7439 /* The nonlocal receiver pattern assumes that the gp is valid for
7440 the nested function. Reasonable because it's almost always set
7441 correctly already. For the cases where that's wrong, make sure
7442 the nested function loads its gp on entry. */
7443 if (crtl->has_nonlocal_goto)
7444 return 1;
7446 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7447 Even if we are a static function, we still need to do this in case
7448 our address is taken and passed to something like qsort. */
7450 push_topmost_sequence ();
7451 insn = get_insns ();
7452 pop_topmost_sequence ();
7454 for (; insn; insn = NEXT_INSN (insn))
7455 if (NONDEBUG_INSN_P (insn)
7456 && GET_CODE (PATTERN (insn)) != USE
7457 && GET_CODE (PATTERN (insn)) != CLOBBER
7458 && get_attr_usegp (insn))
7459 return 1;
7461 return 0;
7465 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7466 sequences. */
7468 static rtx
7469 set_frame_related_p (void)
7471 rtx seq = get_insns ();
7472 rtx insn;
7474 end_sequence ();
7476 if (!seq)
7477 return NULL_RTX;
7479 if (INSN_P (seq))
7481 insn = seq;
7482 while (insn != NULL_RTX)
7484 RTX_FRAME_RELATED_P (insn) = 1;
7485 insn = NEXT_INSN (insn);
7487 seq = emit_insn (seq);
7489 else
7491 seq = emit_insn (seq);
7492 RTX_FRAME_RELATED_P (seq) = 1;
7494 return seq;
7497 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7499 /* Generates a store with the proper unwind info attached. VALUE is
7500 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7501 contains SP+FRAME_BIAS, and that is the unwind info that should be
7502 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7503 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7505 static void
7506 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7507 HOST_WIDE_INT base_ofs, rtx frame_reg)
7509 rtx addr, mem, insn;
7511 addr = plus_constant (Pmode, base_reg, base_ofs);
7512 mem = gen_frame_mem (DImode, addr);
7514 insn = emit_move_insn (mem, value);
7515 RTX_FRAME_RELATED_P (insn) = 1;
7517 if (frame_bias || value != frame_reg)
7519 if (frame_bias)
7521 addr = plus_constant (Pmode, stack_pointer_rtx,
7522 frame_bias + base_ofs);
7523 mem = gen_rtx_MEM (DImode, addr);
7526 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7527 gen_rtx_SET (VOIDmode, mem, frame_reg));
7531 static void
7532 emit_frame_store (unsigned int regno, rtx base_reg,
7533 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7535 rtx reg = gen_rtx_REG (DImode, regno);
7536 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7539 /* Compute the frame size. SIZE is the size of the "naked" frame
7540 and SA_SIZE is the size of the register save area. */
7542 static HOST_WIDE_INT
7543 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7545 if (TARGET_ABI_OPEN_VMS)
7546 return ALPHA_ROUND (sa_size
7547 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7548 + size
7549 + crtl->args.pretend_args_size);
7550 else
7551 return ALPHA_ROUND (crtl->outgoing_args_size)
7552 + sa_size
7553 + ALPHA_ROUND (size
7554 + crtl->args.pretend_args_size);
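/* Worked example for the non-VMS branch above (illustrative figures
   only, with ALPHA_ROUND rounding up to the 16-byte stack alignment):
   for outgoing_args_size = 40, sa_size = 32, a 20-byte frame and no
   pretend args, the result is 48 + 32 + 32 = 112 bytes.  */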
7557 /* Write function prologue. */
7559 /* On VMS we have two kinds of functions:
7561 - stack frame (PROC_STACK)
7562 these are 'normal' functions with local vars and which
7563 call other functions
7564 - register frame (PROC_REGISTER)
7565 keeps all data in registers, needs no stack
7567 We must pass this to the assembler so that it can generate the
7568 proper pdsc (procedure descriptor).
7569 This is done with the '.pdesc' command.
7571 On non-VMS targets we don't really differentiate between the two, as
7572 we can simply allocate stack without saving registers. */
7574 void
7575 alpha_expand_prologue (void)
7577 /* Registers to save. */
7578 unsigned long imask = 0;
7579 unsigned long fmask = 0;
7580 /* Stack space needed for pushing registers clobbered by us. */
7581 HOST_WIDE_INT sa_size, sa_bias;
7582 /* Complete stack size needed. */
7583 HOST_WIDE_INT frame_size;
7584 /* Probed stack size; it additionally includes the size of
7585 the "reserve region" if any. */
7586 HOST_WIDE_INT probed_size;
7587 /* Offset from base reg to register save area. */
7588 HOST_WIDE_INT reg_offset;
7589 rtx sa_reg;
7590 int i;
7592 sa_size = alpha_sa_size ();
7593 frame_size = compute_frame_size (get_frame_size (), sa_size);
7595 if (flag_stack_usage_info)
7596 current_function_static_stack_size = frame_size;
7598 if (TARGET_ABI_OPEN_VMS)
7599 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7600 else
7601 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7603 alpha_sa_mask (&imask, &fmask);
7605 /* Emit an insn to reload GP, if needed. */
7606 if (TARGET_ABI_OSF)
7608 alpha_function_needs_gp = alpha_does_function_need_gp ();
7609 if (alpha_function_needs_gp)
7610 emit_insn (gen_prologue_ldgp ());
7613 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7614 the call to mcount ourselves, rather than having the linker do it
7615 magically in response to -pg. Since _mcount has special linkage,
7616 don't represent the call as a call. */
7617 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7618 emit_insn (gen_prologue_mcount ());
7620 /* Adjust the stack by the frame size. If the frame size is > 4096
7621 bytes, we need to be sure we probe somewhere in the first and last
7622 4096 bytes (we can probably get away without the latter test) and
7623 every 8192 bytes in between. If the frame size is > 32768, we
7624 do this in a loop. Otherwise, we generate the explicit probe
7625 instructions.
7627 Note that we are only allowed to adjust sp once in the prologue. */
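/* Worked example of the strategy described above (illustrative only):
   for probed_size = 20000 the small-frame path below emits probes at
   SP-4096 and SP-12288; the next candidate, 20480, is past the frame,
   so if no registers are being saved (or -fstack-check is on) one
   extra probe is emitted at SP-20000 before the single SP adjustment.  */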
7629 probed_size = frame_size;
7630 if (flag_stack_check)
7631 probed_size += STACK_CHECK_PROTECT;
7633 if (probed_size <= 32768)
7635 if (probed_size > 4096)
7637 int probed;
7639 for (probed = 4096; probed < probed_size; probed += 8192)
7640 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7642 /* We only have to do this probe if we aren't saving registers or
7643 if we are probing beyond the frame because of -fstack-check. */
7644 if ((sa_size == 0 && probed_size > probed - 4096)
7645 || flag_stack_check)
7646 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7649 if (frame_size != 0)
7650 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7651 GEN_INT (-frame_size))));
7653 else
7655 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7656 number of 8192 byte blocks to probe. We then probe each block
7657 in the loop and then set SP to the proper location. If the
7658 amount remaining is > 4096, we have to do one more probe if we
7659 are not saving any registers or if we are probing beyond the
7660 frame because of -fstack-check. */
7662 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7663 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7664 rtx ptr = gen_rtx_REG (DImode, 22);
7665 rtx count = gen_rtx_REG (DImode, 23);
7666 rtx seq;
7668 emit_move_insn (count, GEN_INT (blocks));
7669 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7671 /* Because of the difficulty in emitting a new basic block this
7672 late in the compilation, generate the loop as a single insn. */
7673 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7675 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7677 rtx last = gen_rtx_MEM (DImode,
7678 plus_constant (Pmode, ptr, -leftover));
7679 MEM_VOLATILE_P (last) = 1;
7680 emit_move_insn (last, const0_rtx);
7683 if (flag_stack_check)
7685 /* If -fstack-check is specified we have to load the entire
7686 constant into a register and subtract from the sp in one go,
7687 because the probed stack size is not equal to the frame size. */
7688 HOST_WIDE_INT lo, hi;
7689 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7690 hi = frame_size - lo;
7692 emit_move_insn (ptr, GEN_INT (hi));
7693 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7694 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7695 ptr));
7697 else
7699 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7700 GEN_INT (-leftover)));
7703 /* This alternative is special, because the DWARF code cannot
7704 possibly intuit through the loop above. So we invent this
7705 note for it to look at instead. */
7706 RTX_FRAME_RELATED_P (seq) = 1;
7707 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7708 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7709 plus_constant (Pmode, stack_pointer_rtx,
7710 -frame_size)));
7713 /* Cope with very large offsets to the register save area. */
7714 sa_bias = 0;
7715 sa_reg = stack_pointer_rtx;
7716 if (reg_offset + sa_size > 0x8000)
7718 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7719 rtx sa_bias_rtx;
7721 if (low + sa_size <= 0x8000)
7722 sa_bias = reg_offset - low, reg_offset = low;
7723 else
7724 sa_bias = reg_offset, reg_offset = 0;
7726 sa_reg = gen_rtx_REG (DImode, 24);
7727 sa_bias_rtx = GEN_INT (sa_bias);
7729 if (add_operand (sa_bias_rtx, DImode))
7730 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7731 else
7733 emit_move_insn (sa_reg, sa_bias_rtx);
7734 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
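/* Worked example of the biasing above (illustrative only): with
   reg_offset = 0x9000 and sa_size = 0x100, low becomes -0x7000, which
   still covers the whole save area, so sa_bias = 0x10000 and the saves
   below use $24 = SP + 0x10000 with 16-bit displacements starting at
   -0x7000.  */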
7738 /* Save regs in stack order. Beginning with VMS PV. */
7739 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7740 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7742 /* Save register RA next. */
7743 if (imask & (1UL << REG_RA))
7745 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7746 imask &= ~(1UL << REG_RA);
7747 reg_offset += 8;
7750 /* Now save any other registers required to be saved. */
7751 for (i = 0; i < 31; i++)
7752 if (imask & (1UL << i))
7754 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7755 reg_offset += 8;
7758 for (i = 0; i < 31; i++)
7759 if (fmask & (1UL << i))
7761 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7762 reg_offset += 8;
7765 if (TARGET_ABI_OPEN_VMS)
7767 /* Register frame procedures save the fp. */
7768 if (alpha_procedure_type == PT_REGISTER)
7770 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7771 hard_frame_pointer_rtx);
7772 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7773 RTX_FRAME_RELATED_P (insn) = 1;
7776 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7777 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7778 gen_rtx_REG (DImode, REG_PV)));
7780 if (alpha_procedure_type != PT_NULL
7781 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7782 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7784 /* If we have to allocate space for outgoing args, do it now. */
7785 if (crtl->outgoing_args_size != 0)
7787 rtx seq
7788 = emit_move_insn (stack_pointer_rtx,
7789 plus_constant
7790 (Pmode, hard_frame_pointer_rtx,
7791 - (ALPHA_ROUND
7792 (crtl->outgoing_args_size))));
7794 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7795 if ! frame_pointer_needed. Setting the bit will change the CFA
7796 computation rule to use sp again, which would be wrong if we had
7797 frame_pointer_needed, as this means sp might move unpredictably
7798 later on.
7800 Also, note that
7801 frame_pointer_needed
7802 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7804 crtl->outgoing_args_size != 0
7805 => alpha_procedure_type != PT_NULL,
7807 so when we are not setting the bit here, we are guaranteed to
7808 have emitted an FRP frame pointer update just before. */
7809 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7812 else
7814 /* If we need a frame pointer, set it from the stack pointer. */
7815 if (frame_pointer_needed)
7817 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7818 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7819 else
7820 /* This must always be the last instruction in the
7821 prologue, thus we emit a special move + clobber. */
7822 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7823 stack_pointer_rtx, sa_reg)));
7827 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7828 the prologue, for exception handling reasons, we cannot do this for
7829 any insn that might fault. We could prevent this for mems with a
7830 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7831 have to prevent all such scheduling with a blockage.
7833 Linux, on the other hand, never bothered to implement OSF/1's
7834 exception handling, and so doesn't care about such things. Anyone
7835 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7837 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7838 emit_insn (gen_blockage ());
7841 /* Count the number of .file directives, so that .loc is up to date. */
7842 int num_source_filenames = 0;
7844 /* Output the textual info surrounding the prologue. */
7846 void
7847 alpha_start_function (FILE *file, const char *fnname,
7848 tree decl ATTRIBUTE_UNUSED)
7850 unsigned long imask = 0;
7851 unsigned long fmask = 0;
7852 /* Stack space needed for pushing registers clobbered by us. */
7853 HOST_WIDE_INT sa_size;
7854 /* Complete stack size needed. */
7855 unsigned HOST_WIDE_INT frame_size;
7856 /* The maximum debuggable frame size. */
7857 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
7858 /* Offset from base reg to register save area. */
7859 HOST_WIDE_INT reg_offset;
7860 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7861 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7862 int i;
7864 #if TARGET_ABI_OPEN_VMS
7865 vms_start_function (fnname);
7866 #endif
7868 alpha_fnname = fnname;
7869 sa_size = alpha_sa_size ();
7870 frame_size = compute_frame_size (get_frame_size (), sa_size);
7872 if (TARGET_ABI_OPEN_VMS)
7873 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7874 else
7875 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7877 alpha_sa_mask (&imask, &fmask);
7879 /* Issue function start and label. */
7880 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
7882 fputs ("\t.ent ", file);
7883 assemble_name (file, fnname);
7884 putc ('\n', file);
7886 /* If the function needs GP, we'll write the "..ng" label there.
7887 Otherwise, do it here. */
7888 if (TARGET_ABI_OSF
7889 && ! alpha_function_needs_gp
7890 && ! cfun->is_thunk)
7892 putc ('$', file);
7893 assemble_name (file, fnname);
7894 fputs ("..ng:\n", file);
7897 /* Nested functions on VMS that are potentially called via trampoline
7898 get a special transfer entry point that loads the called function's
7899 procedure descriptor and static chain. */
7900 if (TARGET_ABI_OPEN_VMS
7901 && !TREE_PUBLIC (decl)
7902 && DECL_CONTEXT (decl)
7903 && !TYPE_P (DECL_CONTEXT (decl))
7904 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
7906 strcpy (tramp_label, fnname);
7907 strcat (tramp_label, "..tr");
7908 ASM_OUTPUT_LABEL (file, tramp_label);
7909 fprintf (file, "\tldq $1,24($27)\n");
7910 fprintf (file, "\tldq $27,16($27)\n");
7913 strcpy (entry_label, fnname);
7914 if (TARGET_ABI_OPEN_VMS)
7915 strcat (entry_label, "..en");
7917 ASM_OUTPUT_LABEL (file, entry_label);
7918 inside_function = TRUE;
7920 if (TARGET_ABI_OPEN_VMS)
7921 fprintf (file, "\t.base $%d\n", vms_base_regno);
7923 if (TARGET_ABI_OSF
7924 && TARGET_IEEE_CONFORMANT
7925 && !flag_inhibit_size_directive)
7927 /* Set flags in procedure descriptor to request IEEE-conformant
7928 math-library routines. The value we set it to is PDSC_EXC_IEEE
7929 (/usr/include/pdsc.h). */
7930 fputs ("\t.eflag 48\n", file);
7933 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7934 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7935 alpha_arg_offset = -frame_size + 48;
7937 /* Describe our frame. If the frame size does not fit in a signed
7938 32-bit integer, print it as zero to avoid an assembler error. We won't be
7939 properly describing such a frame, but that's the best we can do. */
7940 if (TARGET_ABI_OPEN_VMS)
7941 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7942 HOST_WIDE_INT_PRINT_DEC "\n",
7943 vms_unwind_regno,
7944 frame_size >= (1UL << 31) ? 0 : frame_size,
7945 reg_offset);
7946 else if (!flag_inhibit_size_directive)
7947 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7948 (frame_pointer_needed
7949 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7950 frame_size >= max_frame_size ? 0 : frame_size,
7951 crtl->args.pretend_args_size);
7953 /* Describe which registers were spilled. */
7954 if (TARGET_ABI_OPEN_VMS)
7956 if (imask)
7957 /* ??? Does VMS care if mask contains ra? The old code didn't
7958 set it, so I don't set it here either. */
7959 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7960 if (fmask)
7961 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7962 if (alpha_procedure_type == PT_REGISTER)
7963 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7965 else if (!flag_inhibit_size_directive)
7967 if (imask)
7969 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7970 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7972 for (i = 0; i < 32; ++i)
7973 if (imask & (1UL << i))
7974 reg_offset += 8;
7977 if (fmask)
7978 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7979 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7982 #if TARGET_ABI_OPEN_VMS
7983 /* If a user condition handler has been installed at some point, emit
7984 the procedure descriptor bits to point the Condition Handling Facility
7985 at the indirection wrapper, and state the fp offset at which the user
7986 handler may be found. */
7987 if (cfun->machine->uses_condition_handler)
7989 fprintf (file, "\t.handler __gcc_shell_handler\n");
7990 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
7993 #ifdef TARGET_VMS_CRASH_DEBUG
7994 /* Support of minimal traceback info. */
7995 switch_to_section (readonly_data_section);
7996 fprintf (file, "\t.align 3\n");
7997 assemble_name (file, fnname); fputs ("..na:\n", file);
7998 fputs ("\t.ascii \"", file);
7999 assemble_name (file, fnname);
8000 fputs ("\\0\"\n", file);
8001 switch_to_section (text_section);
8002 #endif
8003 #endif /* TARGET_ABI_OPEN_VMS */
8006 /* Emit the .prologue note at the scheduled end of the prologue. */
8008 static void
8009 alpha_output_function_end_prologue (FILE *file)
8011 if (TARGET_ABI_OPEN_VMS)
8012 fputs ("\t.prologue\n", file);
8013 else if (!flag_inhibit_size_directive)
8014 fprintf (file, "\t.prologue %d\n",
8015 alpha_function_needs_gp || cfun->is_thunk);
8018 /* Write function epilogue. */
8020 void
8021 alpha_expand_epilogue (void)
8023 /* Registers to save. */
8024 unsigned long imask = 0;
8025 unsigned long fmask = 0;
8026 /* Stack space needed for pushing registers clobbered by us. */
8027 HOST_WIDE_INT sa_size;
8028 /* Complete stack size needed. */
8029 HOST_WIDE_INT frame_size;
8030 /* Offset from base reg to register save area. */
8031 HOST_WIDE_INT reg_offset;
8032 int fp_is_frame_pointer, fp_offset;
8033 rtx sa_reg, sa_reg_exp = NULL;
8034 rtx sp_adj1, sp_adj2, mem, reg, insn;
8035 rtx eh_ofs;
8036 rtx cfa_restores = NULL_RTX;
8037 int i;
8039 sa_size = alpha_sa_size ();
8040 frame_size = compute_frame_size (get_frame_size (), sa_size);
8042 if (TARGET_ABI_OPEN_VMS)
8044 if (alpha_procedure_type == PT_STACK)
8045 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8046 else
8047 reg_offset = 0;
8049 else
8050 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8052 alpha_sa_mask (&imask, &fmask);
8054 fp_is_frame_pointer
8055 = (TARGET_ABI_OPEN_VMS
8056 ? alpha_procedure_type == PT_STACK
8057 : frame_pointer_needed);
8058 fp_offset = 0;
8059 sa_reg = stack_pointer_rtx;
8061 if (crtl->calls_eh_return)
8062 eh_ofs = EH_RETURN_STACKADJ_RTX;
8063 else
8064 eh_ofs = NULL_RTX;
8066 if (sa_size)
8068 /* If we have a frame pointer, restore SP from it. */
8069 if (TARGET_ABI_OPEN_VMS
8070 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8071 : frame_pointer_needed)
8072 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8074 /* Cope with very large offsets to the register save area. */
8075 if (reg_offset + sa_size > 0x8000)
8077 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8078 HOST_WIDE_INT bias;
8080 if (low + sa_size <= 0x8000)
8081 bias = reg_offset - low, reg_offset = low;
8082 else
8083 bias = reg_offset, reg_offset = 0;
8085 sa_reg = gen_rtx_REG (DImode, 22);
8086 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
8088 emit_move_insn (sa_reg, sa_reg_exp);
8091 /* Restore registers in order, excepting a true frame pointer. */
8093 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8094 reg = gen_rtx_REG (DImode, REG_RA);
8095 emit_move_insn (reg, mem);
8096 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8098 reg_offset += 8;
8099 imask &= ~(1UL << REG_RA);
8101 for (i = 0; i < 31; ++i)
8102 if (imask & (1UL << i))
8104 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8105 fp_offset = reg_offset;
8106 else
8108 mem = gen_frame_mem (DImode,
8109 plus_constant (Pmode, sa_reg,
8110 reg_offset));
8111 reg = gen_rtx_REG (DImode, i);
8112 emit_move_insn (reg, mem);
8113 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8114 cfa_restores);
8116 reg_offset += 8;
8119 for (i = 0; i < 31; ++i)
8120 if (fmask & (1UL << i))
8122 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8123 reg_offset));
8124 reg = gen_rtx_REG (DFmode, i+32);
8125 emit_move_insn (reg, mem);
8126 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8127 reg_offset += 8;
8131 if (frame_size || eh_ofs)
8133 sp_adj1 = stack_pointer_rtx;
8135 if (eh_ofs)
8137 sp_adj1 = gen_rtx_REG (DImode, 23);
8138 emit_move_insn (sp_adj1,
8139 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8142 /* If the stack size is large, begin computation into a temporary
8143 register so as not to interfere with a potential fp restore,
8144 which must be consecutive with an SP restore. */
8145 if (frame_size < 32768 && !cfun->calls_alloca)
8146 sp_adj2 = GEN_INT (frame_size);
8147 else if (frame_size < 0x40007fffL)
8149 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8151 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8152 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8153 sp_adj1 = sa_reg;
8154 else
8156 sp_adj1 = gen_rtx_REG (DImode, 23);
8157 emit_move_insn (sp_adj1, sp_adj2);
8159 sp_adj2 = GEN_INT (low);
8161 else
8163 rtx tmp = gen_rtx_REG (DImode, 23);
8164 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8165 if (!sp_adj2)
8167 /* As far as we know, we can't force new constants into memory
8168 this late, so build the value up in pieces. */
8169 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8170 -(frame_size < 0));
8171 gcc_assert (sp_adj2);
8175 /* From now on, things must be in order. So emit blockages. */
8177 /* Restore the frame pointer. */
8178 if (fp_is_frame_pointer)
8180 emit_insn (gen_blockage ());
8181 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8182 fp_offset));
8183 emit_move_insn (hard_frame_pointer_rtx, mem);
8184 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8185 hard_frame_pointer_rtx, cfa_restores);
8187 else if (TARGET_ABI_OPEN_VMS)
8189 emit_insn (gen_blockage ());
8190 emit_move_insn (hard_frame_pointer_rtx,
8191 gen_rtx_REG (DImode, vms_save_fp_regno));
8192 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8193 hard_frame_pointer_rtx, cfa_restores);
8196 /* Restore the stack pointer. */
8197 emit_insn (gen_blockage ());
8198 if (sp_adj2 == const0_rtx)
8199 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8200 else
8201 insn = emit_move_insn (stack_pointer_rtx,
8202 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8203 REG_NOTES (insn) = cfa_restores;
8204 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8205 RTX_FRAME_RELATED_P (insn) = 1;
8207 else
8209 gcc_assert (cfa_restores == NULL);
8211 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8213 emit_insn (gen_blockage ());
8214 insn = emit_move_insn (hard_frame_pointer_rtx,
8215 gen_rtx_REG (DImode, vms_save_fp_regno));
8216 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8217 RTX_FRAME_RELATED_P (insn) = 1;
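/* The epilogue above copes with a large register-save-area offset by
   splitting it into a bias added to a scratch base register plus a
   displacement that fits the signed 16-bit field of Alpha memory
   instructions. A minimal standalone sketch of that decomposition
   (illustrative only, not used by the compiler; split_disp16 and the
   sample offsets are made up; plain C):  */
#if 0
#include <assert.h>

/* Split OFFSET into BIAS + LOW where LOW is the sign-extended value of
   OFFSET's low 16 bits, so -32768 <= LOW <= 32767 and BIAS + LOW == OFFSET.
   This mirrors the "((x & 0xffff) ^ 0x8000) - 0x8000" idiom above.  */
static void
split_disp16 (long long offset, long long *bias, long long *low)
{
  *low = ((offset & 0xffff) ^ 0x8000) - 0x8000;
  *bias = offset - *low;
}

static void
split_disp16_example (void)
{
  long long bias, low;

  /* 0x18000 splits into bias 0x20000 and displacement -0x8000.  */
  split_disp16 (0x18000, &bias, &low);
  assert (bias == 0x20000 && low == -0x8000 && bias + low == 0x18000);
}
#endif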
8222 /* Output the rest of the textual info surrounding the epilogue. */
8224 void
8225 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8227 rtx insn;
8229 /* We output a nop after noreturn calls at the very end of the function to
8230 ensure that the return address always remains in the caller's code range,
8231 as not doing so might confuse unwinding engines. */
8232 insn = get_last_insn ();
8233 if (!INSN_P (insn))
8234 insn = prev_active_insn (insn);
8235 if (insn && CALL_P (insn))
8236 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8238 #if TARGET_ABI_OPEN_VMS
8239 /* Write the linkage entries. */
8240 alpha_write_linkage (file, fnname);
8241 #endif
8243 /* End the function. */
8244 if (TARGET_ABI_OPEN_VMS
8245 || !flag_inhibit_size_directive)
8247 fputs ("\t.end ", file);
8248 assemble_name (file, fnname);
8249 putc ('\n', file);
8251 inside_function = FALSE;
8254 #if TARGET_ABI_OSF
8255 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8257 In order to avoid the hordes of differences between generated code
8258 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8259 lots of code loading up large constants, generate rtl and emit it
8260 instead of going straight to text.
8262 Not sure why this idea hasn't been explored before... */
8264 static void
8265 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8266 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8267 tree function)
8269 HOST_WIDE_INT hi, lo;
8270 rtx this_rtx, insn, funexp;
8272 /* We always require a valid GP. */
8273 emit_insn (gen_prologue_ldgp ());
8274 emit_note (NOTE_INSN_PROLOGUE_END);
8276 /* Find the "this" pointer. If the function returns a structure,
8277 the structure return pointer is in $16. */
8278 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8279 this_rtx = gen_rtx_REG (Pmode, 17);
8280 else
8281 this_rtx = gen_rtx_REG (Pmode, 16);
8283 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8284 entire constant for the add. */
8285 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8286 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8287 if (hi + lo == delta)
8289 if (hi)
8290 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8291 if (lo)
8292 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8294 else
8296 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8297 delta, -(delta < 0));
8298 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8301 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8302 if (vcall_offset)
8304 rtx tmp, tmp2;
8306 tmp = gen_rtx_REG (Pmode, 0);
8307 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8309 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8310 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8311 if (hi + lo == vcall_offset)
8313 if (hi)
8314 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8316 else
8318 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8319 vcall_offset, -(vcall_offset < 0));
8320 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8321 lo = 0;
8323 if (lo)
8324 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8325 else
8326 tmp2 = tmp;
8327 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8329 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8332 /* Generate a tail call to the target function. */
8333 if (! TREE_USED (function))
8335 assemble_external (function);
8336 TREE_USED (function) = 1;
8338 funexp = XEXP (DECL_RTL (function), 0);
8339 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8340 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8341 SIBLING_CALL_P (insn) = 1;
8343 /* Run just enough of rest_of_compilation to get the insns emitted.
8344 There's not really enough bulk here to make other passes such as
8345 instruction scheduling worthwhile. Note that use_thunk calls
8346 assemble_start_function and assemble_end_function. */
8347 insn = get_insns ();
8348 shorten_branches (insn);
8349 final_start_function (insn, file, 1);
8350 final (insn, file, 1);
8351 final_end_function ();
8353 #endif /* TARGET_ABI_OSF */
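/* The OSF thunk above adds DELTA (and VCALL_OFFSET) with an ldah/lda pair
   whenever the constant decomposes into a sign-extended 16-bit low part
   plus a high part that is 65536 times a sign-extended 16-bit value. A
   minimal sketch of that decomposition (illustrative only; split_ldah_lda
   and the sample value are made up; plain C):  */
#if 0
#include <assert.h>

/* Compute LO (the lda displacement) and HI (the ldah displacement times
   65536) for DELTA. Return nonzero when HI + LO == DELTA, i.e. when a
   single ldah/lda pair suffices; otherwise the full constant must be
   loaded, as in the fallback above.  */
static int
split_ldah_lda (long long delta, long long *hi, long long *lo)
{
  *lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  *hi = (((delta - *lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  return *hi + *lo == delta;
}

static void
split_ldah_lda_example (void)
{
  long long hi, lo;

  /* 100000 = 0x186a0 becomes ldah #2 (hi = 0x20000) and lda #-31072.  */
  assert (split_ldah_lda (100000, &hi, &lo));
  assert (hi == 0x20000 && lo == -31072);
}
#endif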
8355 /* Debugging support. */
8357 #include "gstab.h"
8359 /* Name of the file containing the current function. */
8361 static const char *current_function_file = "";
8363 /* Offsets to alpha virtual arg/local debugging pointers. */
8365 long alpha_arg_offset;
8366 long alpha_auto_offset;
8368 /* Emit a new filename to a stream. */
8370 void
8371 alpha_output_filename (FILE *stream, const char *name)
8373 static int first_time = TRUE;
8375 if (first_time)
8377 first_time = FALSE;
8378 ++num_source_filenames;
8379 current_function_file = name;
8380 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8381 output_quoted_string (stream, name);
8382 fprintf (stream, "\n");
8385 else if (name != current_function_file
8386 && strcmp (name, current_function_file) != 0)
8388 ++num_source_filenames;
8389 current_function_file = name;
8390 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8392 output_quoted_string (stream, name);
8393 fprintf (stream, "\n");
8397 /* Structure to show the current status of registers and memory. */
8399 struct shadow_summary
8401 struct {
8402 unsigned int i : 31; /* Mask of int regs */
8403 unsigned int fp : 31; /* Mask of fp regs */
8404 unsigned int mem : 1; /* mem == imem | fpmem */
8405 } used, defd;
8408 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8409 to the summary structure. SET is nonzero if the insn is setting the
8410 object, otherwise zero. */
8412 static void
8413 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8415 const char *format_ptr;
8416 int i, j;
8418 if (x == 0)
8419 return;
8421 switch (GET_CODE (x))
8423 /* ??? Note that this case would be incorrect if the Alpha had a
8424 ZERO_EXTRACT in SET_DEST. */
8425 case SET:
8426 summarize_insn (SET_SRC (x), sum, 0);
8427 summarize_insn (SET_DEST (x), sum, 1);
8428 break;
8430 case CLOBBER:
8431 summarize_insn (XEXP (x, 0), sum, 1);
8432 break;
8434 case USE:
8435 summarize_insn (XEXP (x, 0), sum, 0);
8436 break;
8438 case ASM_OPERANDS:
8439 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8440 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8441 break;
8443 case PARALLEL:
8444 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8445 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8446 break;
8448 case SUBREG:
8449 summarize_insn (SUBREG_REG (x), sum, 0);
8450 break;
8452 case REG:
8454 int regno = REGNO (x);
8455 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8457 if (regno == 31 || regno == 63)
8458 break;
8460 if (set)
8462 if (regno < 32)
8463 sum->defd.i |= mask;
8464 else
8465 sum->defd.fp |= mask;
8467 else
8469 if (regno < 32)
8470 sum->used.i |= mask;
8471 else
8472 sum->used.fp |= mask;
8475 break;
8477 case MEM:
8478 if (set)
8479 sum->defd.mem = 1;
8480 else
8481 sum->used.mem = 1;
8483 /* Find the regs used in memory address computation: */
8484 summarize_insn (XEXP (x, 0), sum, 0);
8485 break;
8487 case CONST_INT: case CONST_DOUBLE:
8488 case SYMBOL_REF: case LABEL_REF: case CONST:
8489 case SCRATCH: case ASM_INPUT:
8490 break;
8492 /* Handle common unary and binary ops for efficiency. */
8493 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8494 case MOD: case UDIV: case UMOD: case AND: case IOR:
8495 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8496 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8497 case NE: case EQ: case GE: case GT: case LE:
8498 case LT: case GEU: case GTU: case LEU: case LTU:
8499 summarize_insn (XEXP (x, 0), sum, 0);
8500 summarize_insn (XEXP (x, 1), sum, 0);
8501 break;
8503 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8504 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8505 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8506 case SQRT: case FFS:
8507 summarize_insn (XEXP (x, 0), sum, 0);
8508 break;
8510 default:
8511 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8512 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8513 switch (format_ptr[i])
8515 case 'e':
8516 summarize_insn (XEXP (x, i), sum, 0);
8517 break;
8519 case 'E':
8520 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8521 summarize_insn (XVECEXP (x, i, j), sum, 0);
8522 break;
8524 case 'i':
8525 break;
8527 default:
8528 gcc_unreachable ();
8533 /* Ensure a sufficient number of `trapb' insns are in the code when
8534 the user requests code with a trap precision of functions or
8535 instructions.
8537 In naive mode, when the user requests a trap-precision of
8538 "instruction", a trapb is needed after every instruction that may
8539 generate a trap. This ensures that the code is resumption safe but
8540 it is also slow.
8542 When optimizations are turned on, we delay issuing a trapb as long
8543 as possible. In this context, a trap shadow is the sequence of
8544 instructions that starts with a (potentially) trap generating
8545 instruction and extends to the next trapb or call_pal instruction
8546 (but GCC never generates call_pal by itself). We can delay (and
8547 therefore sometimes omit) a trapb subject to the following
8548 conditions:
8550 (a) On entry to the trap shadow, if any Alpha register or memory
8551 location contains a value that is used as an operand value by some
8552 instruction in the trap shadow (live on entry), then no instruction
8553 in the trap shadow may modify the register or memory location.
8555 (b) Within the trap shadow, the computation of the base register
8556 for a memory load or store instruction may not involve using the
8557 result of an instruction that might generate an UNPREDICTABLE
8558 result.
8560 (c) Within the trap shadow, no register may be used more than once
8561 as a destination register. (This is to make life easier for the
8562 trap-handler.)
8564 (d) The trap shadow may not include any branch instructions. */
8566 static void
8567 alpha_handle_trap_shadows (void)
8569 struct shadow_summary shadow;
8570 int trap_pending, exception_nesting;
8571 rtx i, n;
8573 trap_pending = 0;
8574 exception_nesting = 0;
8575 shadow.used.i = 0;
8576 shadow.used.fp = 0;
8577 shadow.used.mem = 0;
8578 shadow.defd = shadow.used;
8580 for (i = get_insns (); i ; i = NEXT_INSN (i))
8582 if (NOTE_P (i))
8584 switch (NOTE_KIND (i))
8586 case NOTE_INSN_EH_REGION_BEG:
8587 exception_nesting++;
8588 if (trap_pending)
8589 goto close_shadow;
8590 break;
8592 case NOTE_INSN_EH_REGION_END:
8593 exception_nesting--;
8594 if (trap_pending)
8595 goto close_shadow;
8596 break;
8598 case NOTE_INSN_EPILOGUE_BEG:
8599 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8600 goto close_shadow;
8601 break;
8604 else if (trap_pending)
8606 if (alpha_tp == ALPHA_TP_FUNC)
8608 if (JUMP_P (i)
8609 && GET_CODE (PATTERN (i)) == RETURN)
8610 goto close_shadow;
8612 else if (alpha_tp == ALPHA_TP_INSN)
8614 if (optimize > 0)
8616 struct shadow_summary sum;
8618 sum.used.i = 0;
8619 sum.used.fp = 0;
8620 sum.used.mem = 0;
8621 sum.defd = sum.used;
8623 switch (GET_CODE (i))
8625 case INSN:
8626 /* Annoyingly, get_attr_trap will die on these. */
8627 if (GET_CODE (PATTERN (i)) == USE
8628 || GET_CODE (PATTERN (i)) == CLOBBER)
8629 break;
8631 summarize_insn (PATTERN (i), &sum, 0);
8633 if ((sum.defd.i & shadow.defd.i)
8634 || (sum.defd.fp & shadow.defd.fp))
8636 /* (c) would be violated */
8637 goto close_shadow;
8640 /* Combine shadow with summary of current insn: */
8641 shadow.used.i |= sum.used.i;
8642 shadow.used.fp |= sum.used.fp;
8643 shadow.used.mem |= sum.used.mem;
8644 shadow.defd.i |= sum.defd.i;
8645 shadow.defd.fp |= sum.defd.fp;
8646 shadow.defd.mem |= sum.defd.mem;
8648 if ((sum.defd.i & shadow.used.i)
8649 || (sum.defd.fp & shadow.used.fp)
8650 || (sum.defd.mem & shadow.used.mem))
8652 /* (a) would be violated (also takes care of (b)) */
8653 gcc_assert (get_attr_trap (i) != TRAP_YES
8654 || (!(sum.defd.i & sum.used.i)
8655 && !(sum.defd.fp & sum.used.fp)));
8657 goto close_shadow;
8659 break;
8661 case JUMP_INSN:
8662 case CALL_INSN:
8663 case CODE_LABEL:
8664 goto close_shadow;
8666 default:
8667 gcc_unreachable ();
8670 else
8672 close_shadow:
8673 n = emit_insn_before (gen_trapb (), i);
8674 PUT_MODE (n, TImode);
8675 PUT_MODE (i, TImode);
8676 trap_pending = 0;
8677 shadow.used.i = 0;
8678 shadow.used.fp = 0;
8679 shadow.used.mem = 0;
8680 shadow.defd = shadow.used;
8685 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8686 && NONJUMP_INSN_P (i)
8687 && GET_CODE (PATTERN (i)) != USE
8688 && GET_CODE (PATTERN (i)) != CLOBBER
8689 && get_attr_trap (i) == TRAP_YES)
8691 if (optimize && !trap_pending)
8692 summarize_insn (PATTERN (i), &shadow, 0);
8693 trap_pending = 1;
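/* The two conflict tests above reduce to mask operations on struct
   shadow_summary: a new insn may not define something the current shadow
   has already defined (condition (c)), nor define something the shadow
   has already used (conditions (a) and (b)). A compact sketch of that
   decision (illustrative only; shadow_conflict_p is a made-up name, but
   the masks are the ones maintained by summarize_insn above):  */
#if 0
/* Return nonzero if adding an insn with summary INSN_SUM to SHADOW would
   violate the trap-shadow rules, i.e. the pending trapb must be emitted
   before this insn.  */
static int
shadow_conflict_p (const struct shadow_summary *shadow,
                   const struct shadow_summary *insn_sum)
{
  /* Condition (c): no register may be a destination twice in one shadow.  */
  if ((insn_sum->defd.i & shadow->defd.i)
      || (insn_sum->defd.fp & shadow->defd.fp))
    return 1;

  /* Conditions (a) and (b): nothing already used inside the shadow may be
     modified before the trapb.  */
  if ((insn_sum->defd.i & shadow->used.i)
      || (insn_sum->defd.fp & shadow->used.fp)
      || (insn_sum->defd.mem & shadow->used.mem))
    return 1;

  return 0;
}
#endif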
8698 /* The Alpha can only issue the instructions of a group simultaneously if
8699 the group is suitably aligned. This is very processor-specific. */
8700 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8701 that are marked "fake". These instructions do not exist on that target,
8702 but it is possible to see these insns with deranged combinations of
8703 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8704 choose a result at random. */
8706 enum alphaev4_pipe {
8707 EV4_STOP = 0,
8708 EV4_IB0 = 1,
8709 EV4_IB1 = 2,
8710 EV4_IBX = 4
8713 enum alphaev5_pipe {
8714 EV5_STOP = 0,
8715 EV5_NONE = 1,
8716 EV5_E01 = 2,
8717 EV5_E0 = 4,
8718 EV5_E1 = 8,
8719 EV5_FAM = 16,
8720 EV5_FA = 32,
8721 EV5_FM = 64
8724 static enum alphaev4_pipe
8725 alphaev4_insn_pipe (rtx insn)
8727 if (recog_memoized (insn) < 0)
8728 return EV4_STOP;
8729 if (get_attr_length (insn) != 4)
8730 return EV4_STOP;
8732 switch (get_attr_type (insn))
8734 case TYPE_ILD:
8735 case TYPE_LDSYM:
8736 case TYPE_FLD:
8737 case TYPE_LD_L:
8738 return EV4_IBX;
8740 case TYPE_IADD:
8741 case TYPE_ILOG:
8742 case TYPE_ICMOV:
8743 case TYPE_ICMP:
8744 case TYPE_FST:
8745 case TYPE_SHIFT:
8746 case TYPE_IMUL:
8747 case TYPE_FBR:
8748 case TYPE_MVI: /* fake */
8749 return EV4_IB0;
8751 case TYPE_IST:
8752 case TYPE_MISC:
8753 case TYPE_IBR:
8754 case TYPE_JSR:
8755 case TYPE_CALLPAL:
8756 case TYPE_FCPYS:
8757 case TYPE_FCMOV:
8758 case TYPE_FADD:
8759 case TYPE_FDIV:
8760 case TYPE_FMUL:
8761 case TYPE_ST_C:
8762 case TYPE_MB:
8763 case TYPE_FSQRT: /* fake */
8764 case TYPE_FTOI: /* fake */
8765 case TYPE_ITOF: /* fake */
8766 return EV4_IB1;
8768 default:
8769 gcc_unreachable ();
8773 static enum alphaev5_pipe
8774 alphaev5_insn_pipe (rtx insn)
8776 if (recog_memoized (insn) < 0)
8777 return EV5_STOP;
8778 if (get_attr_length (insn) != 4)
8779 return EV5_STOP;
8781 switch (get_attr_type (insn))
8783 case TYPE_ILD:
8784 case TYPE_FLD:
8785 case TYPE_LDSYM:
8786 case TYPE_IADD:
8787 case TYPE_ILOG:
8788 case TYPE_ICMOV:
8789 case TYPE_ICMP:
8790 return EV5_E01;
8792 case TYPE_IST:
8793 case TYPE_FST:
8794 case TYPE_SHIFT:
8795 case TYPE_IMUL:
8796 case TYPE_MISC:
8797 case TYPE_MVI:
8798 case TYPE_LD_L:
8799 case TYPE_ST_C:
8800 case TYPE_MB:
8801 case TYPE_FTOI: /* fake */
8802 case TYPE_ITOF: /* fake */
8803 return EV5_E0;
8805 case TYPE_IBR:
8806 case TYPE_JSR:
8807 case TYPE_CALLPAL:
8808 return EV5_E1;
8810 case TYPE_FCPYS:
8811 return EV5_FAM;
8813 case TYPE_FBR:
8814 case TYPE_FCMOV:
8815 case TYPE_FADD:
8816 case TYPE_FDIV:
8817 case TYPE_FSQRT: /* fake */
8818 return EV5_FA;
8820 case TYPE_FMUL:
8821 return EV5_FM;
8823 default:
8824 gcc_unreachable ();
8828 /* IN_USE is a mask of the slots currently filled within the insn group.
8829 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8830 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8832 LEN is, of course, the length of the group in bytes. */
8834 static rtx
8835 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8837 int len, in_use;
8839 len = in_use = 0;
8841 if (! INSN_P (insn)
8842 || GET_CODE (PATTERN (insn)) == CLOBBER
8843 || GET_CODE (PATTERN (insn)) == USE)
8844 goto next_and_done;
8846 while (1)
8848 enum alphaev4_pipe pipe;
8850 pipe = alphaev4_insn_pipe (insn);
8851 switch (pipe)
8853 case EV4_STOP:
8854 /* Force complex instructions to start new groups. */
8855 if (in_use)
8856 goto done;
8858 /* If this is a completely unrecognized insn, it's an asm.
8859 We don't know how long it is, so record length as -1 to
8860 signal a needed realignment. */
8861 if (recog_memoized (insn) < 0)
8862 len = -1;
8863 else
8864 len = get_attr_length (insn);
8865 goto next_and_done;
8867 case EV4_IBX:
8868 if (in_use & EV4_IB0)
8870 if (in_use & EV4_IB1)
8871 goto done;
8872 in_use |= EV4_IB1;
8874 else
8875 in_use |= EV4_IB0 | EV4_IBX;
8876 break;
8878 case EV4_IB0:
8879 if (in_use & EV4_IB0)
8881 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8882 goto done;
8883 in_use |= EV4_IB1;
8885 in_use |= EV4_IB0;
8886 break;
8888 case EV4_IB1:
8889 if (in_use & EV4_IB1)
8890 goto done;
8891 in_use |= EV4_IB1;
8892 break;
8894 default:
8895 gcc_unreachable ();
8897 len += 4;
8899 /* Haifa doesn't schedule branches well. */
8900 if (JUMP_P (insn))
8901 goto next_and_done;
8903 next:
8904 insn = next_nonnote_insn (insn);
8906 if (!insn || ! INSN_P (insn))
8907 goto done;
8909 /* Let Haifa tell us where it thinks insn group boundaries are. */
8910 if (GET_MODE (insn) == TImode)
8911 goto done;
8913 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8914 goto next;
8917 next_and_done:
8918 insn = next_nonnote_insn (insn);
8920 done:
8921 *plen = len;
8922 *pin_use = in_use;
8923 return insn;
8926 /* IN_USE is a mask of the slots currently filled within the insn group.
8927 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8928 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8930 LEN is, of course, the length of the group in bytes. */
8932 static rtx
8933 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8935 int len, in_use;
8937 len = in_use = 0;
8939 if (! INSN_P (insn)
8940 || GET_CODE (PATTERN (insn)) == CLOBBER
8941 || GET_CODE (PATTERN (insn)) == USE)
8942 goto next_and_done;
8944 while (1)
8946 enum alphaev5_pipe pipe;
8948 pipe = alphaev5_insn_pipe (insn);
8949 switch (pipe)
8951 case EV5_STOP:
8952 /* Force complex instructions to start new groups. */
8953 if (in_use)
8954 goto done;
8956 /* If this is a completely unrecognized insn, it's an asm.
8957 We don't know how long it is, so record length as -1 to
8958 signal a needed realignment. */
8959 if (recog_memoized (insn) < 0)
8960 len = -1;
8961 else
8962 len = get_attr_length (insn);
8963 goto next_and_done;
8965 /* ??? In most of the cases below we would like to assert that this
8966 can never happen, as that would indicate an error either in Haifa
8967 or in the scheduling description. Unfortunately, Haifa never
8968 schedules the last instruction of the BB, so we don't have an
8969 accurate TI bit to go off. */
8970 case EV5_E01:
8971 if (in_use & EV5_E0)
8973 if (in_use & EV5_E1)
8974 goto done;
8975 in_use |= EV5_E1;
8977 else
8978 in_use |= EV5_E0 | EV5_E01;
8979 break;
8981 case EV5_E0:
8982 if (in_use & EV5_E0)
8984 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8985 goto done;
8986 in_use |= EV5_E1;
8988 in_use |= EV5_E0;
8989 break;
8991 case EV5_E1:
8992 if (in_use & EV5_E1)
8993 goto done;
8994 in_use |= EV5_E1;
8995 break;
8997 case EV5_FAM:
8998 if (in_use & EV5_FA)
9000 if (in_use & EV5_FM)
9001 goto done;
9002 in_use |= EV5_FM;
9004 else
9005 in_use |= EV5_FA | EV5_FAM;
9006 break;
9008 case EV5_FA:
9009 if (in_use & EV5_FA)
9010 goto done;
9011 in_use |= EV5_FA;
9012 break;
9014 case EV5_FM:
9015 if (in_use & EV5_FM)
9016 goto done;
9017 in_use |= EV5_FM;
9018 break;
9020 case EV5_NONE:
9021 break;
9023 default:
9024 gcc_unreachable ();
9026 len += 4;
9028 /* Haifa doesn't schedule branches well. */
9029 /* ??? If this is predicted not-taken, slotting continues, except
9030 that no more IBR, FBR, or JSR insns may be slotted. */
9031 if (JUMP_P (insn))
9032 goto next_and_done;
9034 next:
9035 insn = next_nonnote_insn (insn);
9037 if (!insn || ! INSN_P (insn))
9038 goto done;
9040 /* Let Haifa tell us where it thinks insn group boundaries are. */
9041 if (GET_MODE (insn) == TImode)
9042 goto done;
9044 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9045 goto next;
9048 next_and_done:
9049 insn = next_nonnote_insn (insn);
9051 done:
9052 *plen = len;
9053 *pin_use = in_use;
9054 return insn;
9057 static rtx
9058 alphaev4_next_nop (int *pin_use)
9060 int in_use = *pin_use;
9061 rtx nop;
9063 if (!(in_use & EV4_IB0))
9065 in_use |= EV4_IB0;
9066 nop = gen_nop ();
9068 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9070 in_use |= EV4_IB1;
9071 nop = gen_nop ();
9073 else if (TARGET_FP && !(in_use & EV4_IB1))
9075 in_use |= EV4_IB1;
9076 nop = gen_fnop ();
9078 else
9079 nop = gen_unop ();
9081 *pin_use = in_use;
9082 return nop;
9085 static rtx
9086 alphaev5_next_nop (int *pin_use)
9088 int in_use = *pin_use;
9089 rtx nop;
9091 if (!(in_use & EV5_E1))
9093 in_use |= EV5_E1;
9094 nop = gen_nop ();
9096 else if (TARGET_FP && !(in_use & EV5_FA))
9098 in_use |= EV5_FA;
9099 nop = gen_fnop ();
9101 else if (TARGET_FP && !(in_use & EV5_FM))
9103 in_use |= EV5_FM;
9104 nop = gen_fnop ();
9106 else
9107 nop = gen_unop ();
9109 *pin_use = in_use;
9110 return nop;
9113 /* The instruction group alignment main loop. */
9115 static void
9116 alpha_align_insns (unsigned int max_align,
9117 rtx (*next_group) (rtx, int *, int *),
9118 rtx (*next_nop) (int *))
9120 /* ALIGN is the known alignment for the insn group. */
9121 unsigned int align;
9122 /* OFS is the offset of the current insn in the insn group. */
9123 int ofs;
9124 int prev_in_use, in_use, len, ldgp;
9125 rtx i, next;
9127 /* Let shorten branches care for assigning alignments to code labels. */
9128 shorten_branches (get_insns ());
9130 if (align_functions < 4)
9131 align = 4;
9132 else if ((unsigned int) align_functions < max_align)
9133 align = align_functions;
9134 else
9135 align = max_align;
9137 ofs = prev_in_use = 0;
9138 i = get_insns ();
9139 if (NOTE_P (i))
9140 i = next_nonnote_insn (i);
9142 ldgp = alpha_function_needs_gp ? 8 : 0;
9144 while (i)
9146 next = (*next_group) (i, &in_use, &len);
9148 /* When we see a label, resync alignment etc. */
9149 if (LABEL_P (i))
9151 unsigned int new_align = 1 << label_to_alignment (i);
9153 if (new_align >= align)
9155 align = new_align < max_align ? new_align : max_align;
9156 ofs = 0;
9159 else if (ofs & (new_align-1))
9160 ofs = (ofs | (new_align-1)) + 1;
9161 gcc_assert (!len);
9164 /* Handle complex instructions specially. */
9165 else if (in_use == 0)
9167 /* Asms will have length < 0. This is a signal that we have
9168 lost alignment knowledge. Assume, however, that the asm
9169 will not mis-align instructions. */
9170 if (len < 0)
9172 ofs = 0;
9173 align = 4;
9174 len = 0;
9178 /* If the known alignment is smaller than the recognized insn group,
9179 realign the output. */
9180 else if ((int) align < len)
9182 unsigned int new_log_align = len > 8 ? 4 : 3;
9183 rtx prev, where;
9185 where = prev = prev_nonnote_insn (i);
9186 if (!where || !LABEL_P (where))
9187 where = i;
9189 /* Can't realign between a call and its gp reload. */
9190 if (! (TARGET_EXPLICIT_RELOCS
9191 && prev && CALL_P (prev)))
9193 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9194 align = 1 << new_log_align;
9195 ofs = 0;
9199 /* We may not insert padding inside the initial ldgp sequence. */
9200 else if (ldgp > 0)
9201 ldgp -= len;
9203 /* If the group won't fit in the same INT16 as the previous,
9204 we need to add padding to keep the group together. Rather
9205 than simply leaving the insn filling to the assembler, we
9206 can make use of the knowledge of what sorts of instructions
9207 were issued in the previous group to make sure that all of
9208 the added nops are really free. */
9209 else if (ofs + len > (int) align)
9211 int nop_count = (align - ofs) / 4;
9212 rtx where;
9214 /* Insert nops before labels, branches, and calls to truly merge
9215 the execution of the nops with the previous instruction group. */
9216 where = prev_nonnote_insn (i);
9217 if (where)
9219 if (LABEL_P (where))
9221 rtx where2 = prev_nonnote_insn (where);
9222 if (where2 && JUMP_P (where2))
9223 where = where2;
9225 else if (NONJUMP_INSN_P (where))
9226 where = i;
9228 else
9229 where = i;
9232 emit_insn_before ((*next_nop)(&prev_in_use), where);
9233 while (--nop_count);
9234 ofs = 0;
9237 ofs = (ofs + len) & (align - 1);
9238 prev_in_use = in_use;
9239 i = next;
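/* The group bookkeeping above is modular arithmetic on the offset within
   an ALIGN-byte block: a group that would straddle a boundary is pushed to
   the next block with (align - ofs) / 4 nops. A minimal sketch of that
   common path (illustrative only; group_pad_nops is a made-up name and the
   asm/label/ldgp special cases above are not modelled):  */
#if 0
#include <assert.h>

/* Given the current offset OFS within an ALIGN-byte block (ALIGN a power
   of two) and a recognized group of LEN bytes, return the number of 4-byte
   nops needed so the group does not cross an ALIGN boundary, and store the
   offset after the padded group in *POFS.  */
static int
group_pad_nops (int align, int ofs, int len, int *pofs)
{
  int nops = 0;

  if (ofs + len > align)
    {
      nops = (align - ofs) / 4;
      ofs = 0;
    }
  *pofs = (ofs + len) & (align - 1);
  return nops;
}

static void
group_pad_example (void)
{
  int ofs;

  /* With 16-byte alignment, a 12-byte group starting at offset 8 needs
     two nops and ends at offset 12 of the next block.  */
  assert (group_pad_nops (16, 8, 12, &ofs) == 2 && ofs == 12);
}
#endif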
9243 /* Insert an unop between sibcall or noreturn function call and GP load. */
9245 static void
9246 alpha_pad_function_end (void)
9248 rtx insn, next;
9250 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9252 if (!CALL_P (insn)
9253 || !(SIBLING_CALL_P (insn)
9254 || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9255 continue;
9257 /* Make sure we do not split a call and its corresponding
9258 CALL_ARG_LOCATION note. */
9259 next = NEXT_INSN (insn);
9260 if (next == NULL)
9261 continue;
9262 if (BARRIER_P (next))
9264 next = NEXT_INSN (next);
9265 if (next == NULL)
9266 continue;
9268 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9269 insn = next;
9271 next = next_active_insn (insn);
9272 if (next)
9274 rtx pat = PATTERN (next);
9276 if (GET_CODE (pat) == SET
9277 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9278 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9279 emit_insn_after (gen_unop (), insn);
9284 /* Machine dependent reorg pass. */
9286 static void
9287 alpha_reorg (void)
9289 /* Workaround for a linker error that triggers when an exception
9290 handler immediately follows a sibcall or a noreturn function.
9292 In the sibcall case:
9294 The instruction stream from an object file:
9296 1d8: 00 00 fb 6b jmp (t12)
9297 1dc: 00 00 ba 27 ldah gp,0(ra)
9298 1e0: 00 00 bd 23 lda gp,0(gp)
9299 1e4: 00 00 7d a7 ldq t12,0(gp)
9300 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9302 was converted in the final link pass to:
9304 12003aa88: 67 fa ff c3 br 120039428 <...>
9305 12003aa8c: 00 00 fe 2f unop
9306 12003aa90: 00 00 fe 2f unop
9307 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9308 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9310 And in the noreturn case:
9312 The instruction stream from an object file:
9314 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9315 58: 00 00 ba 27 ldah gp,0(ra)
9316 5c: 00 00 bd 23 lda gp,0(gp)
9317 60: 00 00 7d a7 ldq t12,0(gp)
9318 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9320 was converted in the final link pass to:
9322 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9323 fdb28: 00 00 fe 2f unop
9324 fdb2c: 00 00 fe 2f unop
9325 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9326 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9328 GP load instructions were wrongly cleared by the linker relaxation
9329 pass. This workaround prevents removal of GP loads by inserting
9330 an unop instruction between a sibcall or noreturn function call and
9331 the exception handler prologue. */
9333 if (current_function_has_exception_handlers ())
9334 alpha_pad_function_end ();
9336 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9337 alpha_handle_trap_shadows ();
9339 /* Due to the number of extra trapb insns, don't bother fixing up
9340 alignment when trap precision is instruction. Moreover, we can
9341 only do our job when sched2 is run. */
9342 if (optimize && !optimize_size
9343 && alpha_tp != ALPHA_TP_INSN
9344 && flag_schedule_insns_after_reload)
9346 if (alpha_tune == PROCESSOR_EV4)
9347 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9348 else if (alpha_tune == PROCESSOR_EV5)
9349 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9353 static void
9354 alpha_file_start (void)
9356 default_file_start ();
9358 fputs ("\t.set noreorder\n", asm_out_file);
9359 fputs ("\t.set volatile\n", asm_out_file);
9360 if (TARGET_ABI_OSF)
9361 fputs ("\t.set noat\n", asm_out_file);
9362 if (TARGET_EXPLICIT_RELOCS)
9363 fputs ("\t.set nomacro\n", asm_out_file);
9364 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9366 const char *arch;
9368 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9369 arch = "ev6";
9370 else if (TARGET_MAX)
9371 arch = "pca56";
9372 else if (TARGET_BWX)
9373 arch = "ev56";
9374 else if (alpha_cpu == PROCESSOR_EV5)
9375 arch = "ev5";
9376 else
9377 arch = "ev4";
9379 fprintf (asm_out_file, "\t.arch %s\n", arch);
9383 /* Since we don't have a .dynbss section, we should not allow global
9384 relocations in the .rodata section. */
9386 static int
9387 alpha_elf_reloc_rw_mask (void)
9389 return flag_pic ? 3 : 2;
9392 /* Return a section for X. The only special thing we do here is to
9393 honor small data. */
9395 static section *
9396 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9397 unsigned HOST_WIDE_INT align)
9399 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9400 /* ??? Consider using mergeable sdata sections. */
9401 return sdata_section;
9402 else
9403 return default_elf_select_rtx_section (mode, x, align);
9406 static unsigned int
9407 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9409 unsigned int flags = 0;
9411 if (strcmp (name, ".sdata") == 0
9412 || strncmp (name, ".sdata.", 7) == 0
9413 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9414 || strcmp (name, ".sbss") == 0
9415 || strncmp (name, ".sbss.", 6) == 0
9416 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9417 flags = SECTION_SMALL;
9419 flags |= default_section_type_flags (decl, name, reloc);
9420 return flags;
9423 /* Structure to collect function names for final output in link section. */
9424 /* Note that items marked with GTY can't be ifdef'ed out. */
9426 enum reloc_kind
9428 KIND_LINKAGE,
9429 KIND_CODEADDR
9432 struct GTY(()) alpha_links
9434 rtx func;
9435 rtx linkage;
9436 enum reloc_kind rkind;
9439 #if TARGET_ABI_OPEN_VMS
9441 /* Return the VMS argument type corresponding to MODE. */
9443 enum avms_arg_type
9444 alpha_arg_type (enum machine_mode mode)
9446 switch (mode)
9448 case SFmode:
9449 return TARGET_FLOAT_VAX ? FF : FS;
9450 case DFmode:
9451 return TARGET_FLOAT_VAX ? FD : FT;
9452 default:
9453 return I64;
9457 /* Return an rtx for an integer representing the VMS Argument Information
9458 register value. */
9461 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9463 unsigned HOST_WIDE_INT regval = cum.num_args;
9464 int i;
9466 for (i = 0; i < 6; i++)
9467 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9469 return GEN_INT (regval);
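/* The Argument Information value built above holds the argument count in
   the low bits and one 3-bit type code per register argument starting at
   bit 8. A minimal sketch of the packing (illustrative only;
   pack_vms_arg_info is a made-up name and the sample 3-bit codes are
   arbitrary, not actual enum avms_arg_type values):  */
#if 0
/* Pack NUM_ARGS plus six 3-bit argument-type codes into the VMS Argument
   Information register value, mirroring the shifts above.  */
static unsigned long
pack_vms_arg_info (unsigned long num_args, const int types[6])
{
  unsigned long regval = num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= (unsigned long) types[i] << (i * 3 + 8);
  return regval;
}

/* Example: two arguments with type codes 1 and 4 (remaining codes 0) give
   2 | 1 << 8 | 4 << 11 == 0x2102.  */
#endif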
9473 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9474 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9475 this is the reference to the linkage pointer value, 0 if this is the
9476 reference to the function entry value. RFLAG is 1 if this is a reduced
9477 reference (code address only), 0 if this is a full reference. */
9480 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9482 struct alpha_links *al = NULL;
9483 const char *name = XSTR (func, 0);
9485 if (cfun->machine->links)
9487 splay_tree_node lnode;
9489 /* Is this name already defined? */
9490 lnode = splay_tree_lookup (cfun->machine->links, (splay_tree_key) name);
9491 if (lnode)
9492 al = (struct alpha_links *) lnode->value;
9494 else
9495 cfun->machine->links = splay_tree_new_ggc
9496 ((splay_tree_compare_fn) strcmp,
9497 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9498 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9500 if (al == NULL)
9502 size_t buf_len;
9503 char *linksym;
9504 tree id;
9506 if (name[0] == '*')
9507 name++;
9509 /* Follow transparent alias, as this is used for CRTL translations. */
9510 id = maybe_get_identifier (name);
9511 if (id)
9513 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9514 id = TREE_CHAIN (id);
9515 name = IDENTIFIER_POINTER (id);
9518 buf_len = strlen (name) + 8 + 9;
9519 linksym = (char *) alloca (buf_len);
9520 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
9522 al = ggc_alloc_alpha_links ();
9523 al->func = func;
9524 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9526 splay_tree_insert (cfun->machine->links,
9527 (splay_tree_key) ggc_strdup (name),
9528 (splay_tree_value) al);
9531 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9533 if (lflag)
9534 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
9535 else
9536 return al->linkage;
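/* The linkage symbol built above is the caller's funcdef number and the
   callee's name glued into one identifier; lflag then selects offset 8 of
   that entry (the linkage pointer) rather than offset 0 (the entry-point
   value), matching the two .quad's that alpha_write_one_linkage emits for
   a locally defined function. A minimal sketch of the naming (illustrative
   only; the funcdef number 5 and the name "foo" are made up):  */
#if 0
#include <stdio.h>

static void
linkage_name_example (void)
{
  char linksym[64];

  /* Yields "$5..foo..lk", matching the snprintf format used above.  */
  snprintf (linksym, sizeof linksym, "$%d..%s..lk", 5, "foo");
}
#endif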
9539 static int
9540 alpha_write_one_linkage (splay_tree_node node, void *data)
9542 const char *const name = (const char *) node->key;
9543 struct alpha_links *link = (struct alpha_links *) node->value;
9544 FILE *stream = (FILE *) data;
9546 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9547 if (link->rkind == KIND_CODEADDR)
9549 /* External and used, request code address. */
9550 fprintf (stream, "\t.code_address ");
9552 else
9554 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9555 && SYMBOL_REF_LOCAL_P (link->func))
9557 /* Locally defined, build linkage pair. */
9558 fprintf (stream, "\t.quad %s..en\n", name);
9559 fprintf (stream, "\t.quad ");
9561 else
9563 /* External, request linkage pair. */
9564 fprintf (stream, "\t.linkage ");
9567 assemble_name (stream, name);
9568 fputs ("\n", stream);
9570 return 0;
9573 static void
9574 alpha_write_linkage (FILE *stream, const char *funname)
9576 fprintf (stream, "\t.link\n");
9577 fprintf (stream, "\t.align 3\n");
9578 in_section = NULL;
9580 #ifdef TARGET_VMS_CRASH_DEBUG
9581 fputs ("\t.name ", stream);
9582 assemble_name (stream, funname);
9583 fputs ("..na\n", stream);
9584 #endif
9586 ASM_OUTPUT_LABEL (stream, funname);
9587 fprintf (stream, "\t.pdesc ");
9588 assemble_name (stream, funname);
9589 fprintf (stream, "..en,%s\n",
9590 alpha_procedure_type == PT_STACK ? "stack"
9591 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9593 if (cfun->machine->links)
9595 splay_tree_foreach (cfun->machine->links, alpha_write_one_linkage, stream);
9596 /* splay_tree_delete (func->links); */
9600 /* Switch to an arbitrary section NAME with attributes as specified
9601 by FLAGS. ALIGN specifies any known alignment requirements for
9602 the section; 0 if the default should be used. */
9604 static void
9605 vms_asm_named_section (const char *name, unsigned int flags,
9606 tree decl ATTRIBUTE_UNUSED)
9608 fputc ('\n', asm_out_file);
9609 fprintf (asm_out_file, ".section\t%s", name);
9611 if (flags & SECTION_DEBUG)
9612 fprintf (asm_out_file, ",NOWRT");
9614 fputc ('\n', asm_out_file);
9617 /* Record an element in the table of global constructors. SYMBOL is
9618 a SYMBOL_REF of the function to be called; PRIORITY is a number
9619 between 0 and MAX_INIT_PRIORITY.
9621 Differs from default_ctors_section_asm_out_constructor in that the
9622 width of the .ctors entry is always 64 bits, rather than the 32 bits
9623 used by a normal pointer. */
9625 static void
9626 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9628 switch_to_section (ctors_section);
9629 assemble_align (BITS_PER_WORD);
9630 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9633 static void
9634 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9636 switch_to_section (dtors_section);
9637 assemble_align (BITS_PER_WORD);
9638 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9640 #else
9642 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9643 bool lflag ATTRIBUTE_UNUSED,
9644 bool rflag ATTRIBUTE_UNUSED)
9646 return NULL_RTX;
9649 #endif /* TARGET_ABI_OPEN_VMS */
9651 static void
9652 alpha_init_libfuncs (void)
9654 if (TARGET_ABI_OPEN_VMS)
9656 /* Use the VMS runtime library functions for division and
9657 remainder. */
9658 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9659 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9660 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9661 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9662 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9663 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9664 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9665 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9666 abort_libfunc = init_one_libfunc ("decc$abort");
9667 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
9668 #ifdef MEM_LIBFUNCS_INIT
9669 MEM_LIBFUNCS_INIT;
9670 #endif
9674 /* On the Alpha, we use this to disable the floating-point registers
9675 when they don't exist. */
9677 static void
9678 alpha_conditional_register_usage (void)
9680 int i;
9681 if (! TARGET_FPREGS)
9682 for (i = 32; i < 63; i++)
9683 fixed_regs[i] = call_used_regs[i] = 1;
9686 /* Canonicalize a comparison from one we don't have to one we do have. */
9688 static void
9689 alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
9690 bool op0_preserve_value)
9692 if (!op0_preserve_value
9693 && (*code == GE || *code == GT || *code == GEU || *code == GTU)
9694 && (REG_P (*op1) || *op1 == const0_rtx))
9696 rtx tem = *op0;
9697 *op0 = *op1;
9698 *op1 = tem;
9699 *code = (int)swap_condition ((enum rtx_code)*code);
9702 if ((*code == LT || *code == LTU)
9703 && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
9705 *code = *code == LT ? LE : LEU;
9706 *op1 = GEN_INT (255);
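/* Both rewrites above preserve the comparison: swapping the operands turns
   GE/GT (GEU/GTU) into LE/LT (LEU/LTU), and for integers "x < 256" is the
   same test as "x <= 255", which trades the constant for one that is
   presumably easier to encode. A minimal check of those equivalences on
   plain integers (illustrative only; the sample values are arbitrary):  */
#if 0
#include <assert.h>

static void
canonicalize_comparison_example (void)
{
  int x = 200, y = 100;

  /* Swapped operand order with the condition reversed.  */
  assert ((x > y) == (y < x));

  /* LT 256 versus LE 255.  */
  assert ((x < 256) == (x <= 255));
}
#endif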
9710 /* Initialize the GCC target structure. */
9711 #if TARGET_ABI_OPEN_VMS
9712 # undef TARGET_ATTRIBUTE_TABLE
9713 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9714 # undef TARGET_CAN_ELIMINATE
9715 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
9716 #endif
9718 #undef TARGET_IN_SMALL_DATA_P
9719 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9721 #undef TARGET_ASM_ALIGNED_HI_OP
9722 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9723 #undef TARGET_ASM_ALIGNED_DI_OP
9724 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9726 /* Default unaligned ops are provided for ELF systems. To get unaligned
9727 data for non-ELF systems, we have to turn off auto alignment. */
9728 #if TARGET_ABI_OPEN_VMS
9729 #undef TARGET_ASM_UNALIGNED_HI_OP
9730 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9731 #undef TARGET_ASM_UNALIGNED_SI_OP
9732 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9733 #undef TARGET_ASM_UNALIGNED_DI_OP
9734 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9735 #endif
9737 #undef TARGET_ASM_RELOC_RW_MASK
9738 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
9739 #undef TARGET_ASM_SELECT_RTX_SECTION
9740 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9741 #undef TARGET_SECTION_TYPE_FLAGS
9742 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
9744 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9745 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9747 #undef TARGET_INIT_LIBFUNCS
9748 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9750 #undef TARGET_LEGITIMIZE_ADDRESS
9751 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
9752 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
9753 #define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
9755 #undef TARGET_ASM_FILE_START
9756 #define TARGET_ASM_FILE_START alpha_file_start
9758 #undef TARGET_SCHED_ADJUST_COST
9759 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9760 #undef TARGET_SCHED_ISSUE_RATE
9761 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
9762 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9763 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9764 alpha_multipass_dfa_lookahead
9766 #undef TARGET_HAVE_TLS
9767 #define TARGET_HAVE_TLS HAVE_AS_TLS
9769 #undef TARGET_BUILTIN_DECL
9770 #define TARGET_BUILTIN_DECL alpha_builtin_decl
9771 #undef TARGET_INIT_BUILTINS
9772 #define TARGET_INIT_BUILTINS alpha_init_builtins
9773 #undef TARGET_EXPAND_BUILTIN
9774 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
9775 #undef TARGET_FOLD_BUILTIN
9776 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
9778 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
9779 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
9780 #undef TARGET_CANNOT_COPY_INSN_P
9781 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
9782 #undef TARGET_LEGITIMATE_CONSTANT_P
9783 #define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
9784 #undef TARGET_CANNOT_FORCE_CONST_MEM
9785 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
9787 #if TARGET_ABI_OSF
9788 #undef TARGET_ASM_OUTPUT_MI_THUNK
9789 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
9790 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
9791 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
9792 #undef TARGET_STDARG_OPTIMIZE_HOOK
9793 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
9794 #endif
9796 /* Use 16-bit anchors. */
9797 #undef TARGET_MIN_ANCHOR_OFFSET
9798 #define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
9799 #undef TARGET_MAX_ANCHOR_OFFSET
9800 #define TARGET_MAX_ANCHOR_OFFSET 0x7fff
9801 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
9802 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
9804 #undef TARGET_RTX_COSTS
9805 #define TARGET_RTX_COSTS alpha_rtx_costs
9806 #undef TARGET_ADDRESS_COST
9807 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
9809 #undef TARGET_MACHINE_DEPENDENT_REORG
9810 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
9812 #undef TARGET_PROMOTE_FUNCTION_MODE
9813 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
9814 #undef TARGET_PROMOTE_PROTOTYPES
9815 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
9816 #undef TARGET_RETURN_IN_MEMORY
9817 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
9818 #undef TARGET_PASS_BY_REFERENCE
9819 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
9820 #undef TARGET_SETUP_INCOMING_VARARGS
9821 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
9822 #undef TARGET_STRICT_ARGUMENT_NAMING
9823 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
9824 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
9825 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
9826 #undef TARGET_SPLIT_COMPLEX_ARG
9827 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
9828 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
9829 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
9830 #undef TARGET_ARG_PARTIAL_BYTES
9831 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
9832 #undef TARGET_FUNCTION_ARG
9833 #define TARGET_FUNCTION_ARG alpha_function_arg
9834 #undef TARGET_FUNCTION_ARG_ADVANCE
9835 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
9836 #undef TARGET_TRAMPOLINE_INIT
9837 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
9839 #undef TARGET_INSTANTIATE_DECLS
9840 #define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
9842 #undef TARGET_SECONDARY_RELOAD
9843 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
9845 #undef TARGET_SCALAR_MODE_SUPPORTED_P
9846 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
9847 #undef TARGET_VECTOR_MODE_SUPPORTED_P
9848 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
9850 #undef TARGET_BUILD_BUILTIN_VA_LIST
9851 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
9853 #undef TARGET_EXPAND_BUILTIN_VA_START
9854 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
9856 /* The Alpha architecture does not require sequential consistency. See
9857 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
9858 for an example of how it can be violated in practice. */
9859 #undef TARGET_RELAXED_ORDERING
9860 #define TARGET_RELAXED_ORDERING true
9862 #undef TARGET_OPTION_OVERRIDE
9863 #define TARGET_OPTION_OVERRIDE alpha_option_override
9865 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9866 #undef TARGET_MANGLE_TYPE
9867 #define TARGET_MANGLE_TYPE alpha_mangle_type
9868 #endif
9870 #undef TARGET_LEGITIMATE_ADDRESS_P
9871 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
9873 #undef TARGET_CONDITIONAL_REGISTER_USAGE
9874 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
9876 #undef TARGET_CANONICALIZE_COMPARISON
9877 #define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison
9879 struct gcc_target targetm = TARGET_INITIALIZER;
9882 #include "gt-alpha.h"