PR middle-end/54635
[official-gcc.git] / gcc / config / alpha / alpha.c
blobd726b5acca79236686fef85b642fccc17abffb7a
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "diagnostic-core.h"
45 #include "ggc.h"
46 #include "tm_p.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "common/common-target.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include "splay-tree.h"
53 #include "gimple.h"
54 #include "tree-flow.h"
55 #include "tree-stdarg.h"
56 #include "tm-constrs.h"
57 #include "df.h"
58 #include "libfuncs.h"
59 #include "opts.h"
60 #include "params.h"
62 /* Specify which cpu to schedule for. */
63 enum processor_type alpha_tune;
65 /* Which cpu we're generating code for. */
66 enum processor_type alpha_cpu;
68 static const char * const alpha_cpu_name[] =
70 "ev4", "ev5", "ev6"
73 /* Specify how accurate floating-point traps need to be. */
75 enum alpha_trap_precision alpha_tp;
77 /* Specify the floating-point rounding mode. */
79 enum alpha_fp_rounding_mode alpha_fprm;
81 /* Specify which things cause traps. */
83 enum alpha_fp_trap_mode alpha_fptm;
85 /* Nonzero if inside of a function, because the Alpha asm can't
86 handle .files inside of functions. */
88 static int inside_function = FALSE;
90 /* The number of cycles of latency we should assume on memory reads. */
92 int alpha_memory_latency = 3;
94 /* Whether the function needs the GP. */
96 static int alpha_function_needs_gp;
98 /* The assembler name of the current function. */
100 static const char *alpha_fnname;
102 /* The next explicit relocation sequence number. */
103 extern GTY(()) int alpha_next_sequence_number;
104 int alpha_next_sequence_number = 1;
106 /* The literal and gpdisp sequence numbers for this insn, as printed
107 by %# and %* respectively. */
108 extern GTY(()) int alpha_this_literal_sequence_number;
109 extern GTY(()) int alpha_this_gpdisp_sequence_number;
110 int alpha_this_literal_sequence_number;
111 int alpha_this_gpdisp_sequence_number;
113 /* Costs of various operations on the different architectures. */
115 struct alpha_rtx_cost_data
117 unsigned char fp_add;
118 unsigned char fp_mult;
119 unsigned char fp_div_sf;
120 unsigned char fp_div_df;
121 unsigned char int_mult_si;
122 unsigned char int_mult_di;
123 unsigned char int_shift;
124 unsigned char int_cmov;
125 unsigned short int_div;
128 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
130 { /* EV4 */
131 COSTS_N_INSNS (6), /* fp_add */
132 COSTS_N_INSNS (6), /* fp_mult */
133 COSTS_N_INSNS (34), /* fp_div_sf */
134 COSTS_N_INSNS (63), /* fp_div_df */
135 COSTS_N_INSNS (23), /* int_mult_si */
136 COSTS_N_INSNS (23), /* int_mult_di */
137 COSTS_N_INSNS (2), /* int_shift */
138 COSTS_N_INSNS (2), /* int_cmov */
139 COSTS_N_INSNS (97), /* int_div */
141 { /* EV5 */
142 COSTS_N_INSNS (4), /* fp_add */
143 COSTS_N_INSNS (4), /* fp_mult */
144 COSTS_N_INSNS (15), /* fp_div_sf */
145 COSTS_N_INSNS (22), /* fp_div_df */
146 COSTS_N_INSNS (8), /* int_mult_si */
147 COSTS_N_INSNS (12), /* int_mult_di */
148 COSTS_N_INSNS (1) + 1, /* int_shift */
149 COSTS_N_INSNS (1), /* int_cmov */
150 COSTS_N_INSNS (83), /* int_div */
152 { /* EV6 */
153 COSTS_N_INSNS (4), /* fp_add */
154 COSTS_N_INSNS (4), /* fp_mult */
155 COSTS_N_INSNS (12), /* fp_div_sf */
156 COSTS_N_INSNS (15), /* fp_div_df */
157 COSTS_N_INSNS (7), /* int_mult_si */
158 COSTS_N_INSNS (7), /* int_mult_di */
159 COSTS_N_INSNS (1), /* int_shift */
160 COSTS_N_INSNS (2), /* int_cmov */
161 COSTS_N_INSNS (86), /* int_div */
165 /* Similar but tuned for code size instead of execution latency. The
166 extra +N is fractional cost tuning based on latency. It's used to
167 encourage use of cheaper insns like shift, but only if there's just
168 one of them. */
170 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
172 COSTS_N_INSNS (1), /* fp_add */
173 COSTS_N_INSNS (1), /* fp_mult */
174 COSTS_N_INSNS (1), /* fp_div_sf */
175 COSTS_N_INSNS (1) + 1, /* fp_div_df */
176 COSTS_N_INSNS (1) + 1, /* int_mult_si */
177 COSTS_N_INSNS (1) + 2, /* int_mult_di */
178 COSTS_N_INSNS (1), /* int_shift */
179 COSTS_N_INSNS (1), /* int_cmov */
180 COSTS_N_INSNS (6), /* int_div */
183 /* Get the number of args of a function in one of two ways. */
184 #if TARGET_ABI_OPEN_VMS
185 #define NUM_ARGS crtl->args.info.num_args
186 #else
187 #define NUM_ARGS crtl->args.info
188 #endif
190 #define REG_PV 27
191 #define REG_RA 26
193 /* Declarations of static functions. */
194 static struct machine_function *alpha_init_machine_status (void);
195 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
197 #if TARGET_ABI_OPEN_VMS
198 static void alpha_write_linkage (FILE *, const char *);
199 static bool vms_valid_pointer_mode (enum machine_mode);
200 #else
201 #define vms_patch_builtins() gcc_unreachable()
202 #endif
204 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
205 /* Implement TARGET_MANGLE_TYPE. */
207 static const char *
208 alpha_mangle_type (const_tree type)
210 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
211 && TARGET_LONG_DOUBLE_128)
212 return "g";
214 /* For all other types, use normal C++ mangling. */
215 return NULL;
217 #endif
219 /* Parse target option strings. */
221 static void
222 alpha_option_override (void)
224 static const struct cpu_table {
225 const char *const name;
226 const enum processor_type processor;
227 const int flags;
228 const unsigned short line_size; /* in bytes */
229 const unsigned short l1_size; /* in kb. */
230 const unsigned short l2_size; /* in kb. */
231 } cpu_table[] = {
232 /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
233 EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45
234 had 64k to 8M 8-byte direct Bcache. */
235 { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
236 { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
237 { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 },
239 /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
240 and 1M to 16M 64 byte L3 (not modeled).
241 PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
242 PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache. */
243 { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 },
244 { "21164", PROCESSOR_EV5, 0, 32, 8, 96 },
245 { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
246 { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
247 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
248 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
249 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
251 /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */
252 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
253 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
254 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
255 64, 64, 16*1024 },
256 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
257 64, 64, 16*1024 }
260 int const ct_size = ARRAY_SIZE (cpu_table);
261 int line_size = 0, l1_size = 0, l2_size = 0;
262 int i;
264 #ifdef SUBTARGET_OVERRIDE_OPTIONS
265 SUBTARGET_OVERRIDE_OPTIONS;
266 #endif
268 /* Default to full IEEE compliance mode for Go language. */
269 if (strcmp (lang_hooks.name, "GNU Go") == 0
270 && !(target_flags_explicit & MASK_IEEE))
271 target_flags |= MASK_IEEE;
273 alpha_fprm = ALPHA_FPRM_NORM;
274 alpha_tp = ALPHA_TP_PROG;
275 alpha_fptm = ALPHA_FPTM_N;
277 if (TARGET_IEEE)
279 alpha_tp = ALPHA_TP_INSN;
280 alpha_fptm = ALPHA_FPTM_SU;
282 if (TARGET_IEEE_WITH_INEXACT)
284 alpha_tp = ALPHA_TP_INSN;
285 alpha_fptm = ALPHA_FPTM_SUI;
288 if (alpha_tp_string)
290 if (! strcmp (alpha_tp_string, "p"))
291 alpha_tp = ALPHA_TP_PROG;
292 else if (! strcmp (alpha_tp_string, "f"))
293 alpha_tp = ALPHA_TP_FUNC;
294 else if (! strcmp (alpha_tp_string, "i"))
295 alpha_tp = ALPHA_TP_INSN;
296 else
297 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
300 if (alpha_fprm_string)
302 if (! strcmp (alpha_fprm_string, "n"))
303 alpha_fprm = ALPHA_FPRM_NORM;
304 else if (! strcmp (alpha_fprm_string, "m"))
305 alpha_fprm = ALPHA_FPRM_MINF;
306 else if (! strcmp (alpha_fprm_string, "c"))
307 alpha_fprm = ALPHA_FPRM_CHOP;
308 else if (! strcmp (alpha_fprm_string,"d"))
309 alpha_fprm = ALPHA_FPRM_DYN;
310 else
311 error ("bad value %qs for -mfp-rounding-mode switch",
312 alpha_fprm_string);
315 if (alpha_fptm_string)
317 if (strcmp (alpha_fptm_string, "n") == 0)
318 alpha_fptm = ALPHA_FPTM_N;
319 else if (strcmp (alpha_fptm_string, "u") == 0)
320 alpha_fptm = ALPHA_FPTM_U;
321 else if (strcmp (alpha_fptm_string, "su") == 0)
322 alpha_fptm = ALPHA_FPTM_SU;
323 else if (strcmp (alpha_fptm_string, "sui") == 0)
324 alpha_fptm = ALPHA_FPTM_SUI;
325 else
326 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
329 if (alpha_cpu_string)
331 for (i = 0; i < ct_size; i++)
332 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
334 alpha_tune = alpha_cpu = cpu_table[i].processor;
335 line_size = cpu_table[i].line_size;
336 l1_size = cpu_table[i].l1_size;
337 l2_size = cpu_table[i].l2_size;
338 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
339 target_flags |= cpu_table[i].flags;
340 break;
342 if (i == ct_size)
343 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
346 if (alpha_tune_string)
348 for (i = 0; i < ct_size; i++)
349 if (! strcmp (alpha_tune_string, cpu_table [i].name))
351 alpha_tune = cpu_table[i].processor;
352 line_size = cpu_table[i].line_size;
353 l1_size = cpu_table[i].l1_size;
354 l2_size = cpu_table[i].l2_size;
355 break;
357 if (i == ct_size)
358 error ("bad value %qs for -mtune switch", alpha_tune_string);
361 if (line_size)
362 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
363 global_options.x_param_values,
364 global_options_set.x_param_values);
365 if (l1_size)
366 maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
367 global_options.x_param_values,
368 global_options_set.x_param_values);
369 if (l2_size)
370 maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
371 global_options.x_param_values,
372 global_options_set.x_param_values);
374 /* Do some sanity checks on the above options. */
376 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
377 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
379 warning (0, "fp software completion requires -mtrap-precision=i");
380 alpha_tp = ALPHA_TP_INSN;
383 if (alpha_cpu == PROCESSOR_EV6)
385 /* Except for EV6 pass 1 (not released), we always have precise
386 arithmetic traps. Which means we can do software completion
387 without minding trap shadows. */
388 alpha_tp = ALPHA_TP_PROG;
391 if (TARGET_FLOAT_VAX)
393 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
395 warning (0, "rounding mode not supported for VAX floats");
396 alpha_fprm = ALPHA_FPRM_NORM;
398 if (alpha_fptm == ALPHA_FPTM_SUI)
400 warning (0, "trap mode not supported for VAX floats");
401 alpha_fptm = ALPHA_FPTM_SU;
403 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
404 warning (0, "128-bit long double not supported for VAX floats");
405 target_flags &= ~MASK_LONG_DOUBLE_128;
409 char *end;
410 int lat;
412 if (!alpha_mlat_string)
413 alpha_mlat_string = "L1";
415 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
416 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
418 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
419 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
420 && alpha_mlat_string[2] == '\0')
422 static int const cache_latency[][4] =
424 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
425 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
426 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
429 lat = alpha_mlat_string[1] - '0';
430 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
432 warning (0, "L%d cache latency unknown for %s",
433 lat, alpha_cpu_name[alpha_tune]);
434 lat = 3;
436 else
437 lat = cache_latency[alpha_tune][lat-1];
439 else if (! strcmp (alpha_mlat_string, "main"))
441 /* Most current memories have about 370ns latency. This is
442 a reasonable guess for a fast cpu. */
443 lat = 150;
445 else
447 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
448 lat = 3;
451 alpha_memory_latency = lat;
454 /* Default the definition of "small data" to 8 bytes. */
455 if (!global_options_set.x_g_switch_value)
456 g_switch_value = 8;
458 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
459 if (flag_pic == 1)
460 target_flags |= MASK_SMALL_DATA;
461 else if (flag_pic == 2)
462 target_flags &= ~MASK_SMALL_DATA;
464 /* Align labels and loops for optimal branching. */
465 /* ??? Kludge these by not doing anything if we don't optimize. */
466 if (optimize > 0)
468 if (align_loops <= 0)
469 align_loops = 16;
470 if (align_jumps <= 0)
471 align_jumps = 16;
473 if (align_functions <= 0)
474 align_functions = 16;
476 /* Register variables and functions with the garbage collector. */
478 /* Set up function hooks. */
479 init_machine_status = alpha_init_machine_status;
481 /* Tell the compiler when we're using VAX floating point. */
482 if (TARGET_FLOAT_VAX)
484 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
485 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
486 REAL_MODE_FORMAT (TFmode) = NULL;
489 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
490 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
491 target_flags |= MASK_LONG_DOUBLE_128;
492 #endif
495 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
498 zap_mask (HOST_WIDE_INT value)
500 int i;
502 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
503 i++, value >>= 8)
504 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
505 return 0;
507 return 1;
510 /* Return true if OP is valid for a particular TLS relocation.
511 We are already guaranteed that OP is a CONST. */
514 tls_symbolic_operand_1 (rtx op, int size, int unspec)
516 op = XEXP (op, 0);
518 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
519 return 0;
520 op = XVECEXP (op, 0, 0);
522 if (GET_CODE (op) != SYMBOL_REF)
523 return 0;
525 switch (SYMBOL_REF_TLS_MODEL (op))
527 case TLS_MODEL_LOCAL_DYNAMIC:
528 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
529 case TLS_MODEL_INITIAL_EXEC:
530 return unspec == UNSPEC_TPREL && size == 64;
531 case TLS_MODEL_LOCAL_EXEC:
532 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
533 default:
534 gcc_unreachable ();
538 /* Used by aligned_memory_operand and unaligned_memory_operand to
539 resolve what reload is going to do with OP if it's a register. */
542 resolve_reload_operand (rtx op)
544 if (reload_in_progress)
546 rtx tmp = op;
547 if (GET_CODE (tmp) == SUBREG)
548 tmp = SUBREG_REG (tmp);
549 if (REG_P (tmp)
550 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
552 op = reg_equiv_memory_loc (REGNO (tmp));
553 if (op == 0)
554 return 0;
557 return op;
560 /* The scalar modes supported differs from the default check-what-c-supports
561 version in that sometimes TFmode is available even when long double
562 indicates only DFmode. */
564 static bool
565 alpha_scalar_mode_supported_p (enum machine_mode mode)
567 switch (mode)
569 case QImode:
570 case HImode:
571 case SImode:
572 case DImode:
573 case TImode: /* via optabs.c */
574 return true;
576 case SFmode:
577 case DFmode:
578 return true;
580 case TFmode:
581 return TARGET_HAS_XFLOATING_LIBS;
583 default:
584 return false;
588 /* Alpha implements a couple of integer vector mode operations when
589 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
590 which allows the vectorizer to operate on e.g. move instructions,
591 or when expand_vector_operations can do something useful. */
593 static bool
594 alpha_vector_mode_supported_p (enum machine_mode mode)
596 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
599 /* Return 1 if this function can directly return via $26. */
602 direct_return (void)
604 return (TARGET_ABI_OSF
605 && reload_completed
606 && alpha_sa_size () == 0
607 && get_frame_size () == 0
608 && crtl->outgoing_args_size == 0
609 && crtl->args.pretend_args_size == 0);
612 /* Return the TLS model to use for SYMBOL. */
614 static enum tls_model
615 tls_symbolic_operand_type (rtx symbol)
617 enum tls_model model;
619 if (GET_CODE (symbol) != SYMBOL_REF)
620 return TLS_MODEL_NONE;
621 model = SYMBOL_REF_TLS_MODEL (symbol);
623 /* Local-exec with a 64-bit size is the same code as initial-exec. */
624 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
625 model = TLS_MODEL_INITIAL_EXEC;
627 return model;
630 /* Return true if the function DECL will share the same GP as any
631 function in the current unit of translation. */
633 static bool
634 decl_has_samegp (const_tree decl)
636 /* Functions that are not local can be overridden, and thus may
637 not share the same gp. */
638 if (!(*targetm.binds_local_p) (decl))
639 return false;
641 /* If -msmall-data is in effect, assume that there is only one GP
642 for the module, and so any local symbol has this property. We
643 need explicit relocations to be able to enforce this for symbols
644 not defined in this unit of translation, however. */
645 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
646 return true;
648 /* Functions that are not external are defined in this UoT. */
649 /* ??? Irritatingly, static functions not yet emitted are still
650 marked "external". Apply this to non-static functions only. */
651 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
654 /* Return true if EXP should be placed in the small data section. */
656 static bool
657 alpha_in_small_data_p (const_tree exp)
659 /* We want to merge strings, so we never consider them small data. */
660 if (TREE_CODE (exp) == STRING_CST)
661 return false;
663 /* Functions are never in the small data area. Duh. */
664 if (TREE_CODE (exp) == FUNCTION_DECL)
665 return false;
667 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
669 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
670 if (strcmp (section, ".sdata") == 0
671 || strcmp (section, ".sbss") == 0)
672 return true;
674 else
676 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
678 /* If this is an incomplete type with size 0, then we can't put it
679 in sdata because it might be too big when completed. */
680 if (size > 0 && size <= g_switch_value)
681 return true;
684 return false;
687 #if TARGET_ABI_OPEN_VMS
688 static bool
689 vms_valid_pointer_mode (enum machine_mode mode)
691 return (mode == SImode || mode == DImode);
694 static bool
695 alpha_linkage_symbol_p (const char *symname)
697 int symlen = strlen (symname);
699 if (symlen > 4)
700 return strcmp (&symname [symlen - 4], "..lk") == 0;
702 return false;
705 #define LINKAGE_SYMBOL_REF_P(X) \
706 ((GET_CODE (X) == SYMBOL_REF \
707 && alpha_linkage_symbol_p (XSTR (X, 0))) \
708 || (GET_CODE (X) == CONST \
709 && GET_CODE (XEXP (X, 0)) == PLUS \
710 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
711 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
712 #endif
714 /* legitimate_address_p recognizes an RTL expression that is a valid
715 memory address for an instruction. The MODE argument is the
716 machine mode for the MEM expression that wants to use this address.
718 For Alpha, we have either a constant address or the sum of a
719 register and a constant address, or just a register. For DImode,
720 any of those forms can be surrounded with an AND that clear the
721 low-order three bits; this is an "unaligned" access. */
723 static bool
724 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
726 /* If this is an ldq_u type address, discard the outer AND. */
727 if (mode == DImode
728 && GET_CODE (x) == AND
729 && CONST_INT_P (XEXP (x, 1))
730 && INTVAL (XEXP (x, 1)) == -8)
731 x = XEXP (x, 0);
733 /* Discard non-paradoxical subregs. */
734 if (GET_CODE (x) == SUBREG
735 && (GET_MODE_SIZE (GET_MODE (x))
736 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
737 x = SUBREG_REG (x);
739 /* Unadorned general registers are valid. */
740 if (REG_P (x)
741 && (strict
742 ? STRICT_REG_OK_FOR_BASE_P (x)
743 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
744 return true;
746 /* Constant addresses (i.e. +/- 32k) are valid. */
747 if (CONSTANT_ADDRESS_P (x))
748 return true;
750 #if TARGET_ABI_OPEN_VMS
751 if (LINKAGE_SYMBOL_REF_P (x))
752 return true;
753 #endif
755 /* Register plus a small constant offset is valid. */
756 if (GET_CODE (x) == PLUS)
758 rtx ofs = XEXP (x, 1);
759 x = XEXP (x, 0);
761 /* Discard non-paradoxical subregs. */
762 if (GET_CODE (x) == SUBREG
763 && (GET_MODE_SIZE (GET_MODE (x))
764 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
765 x = SUBREG_REG (x);
767 if (REG_P (x))
769 if (! strict
770 && NONSTRICT_REG_OK_FP_BASE_P (x)
771 && CONST_INT_P (ofs))
772 return true;
773 if ((strict
774 ? STRICT_REG_OK_FOR_BASE_P (x)
775 : NONSTRICT_REG_OK_FOR_BASE_P (x))
776 && CONSTANT_ADDRESS_P (ofs))
777 return true;
781 /* If we're managing explicit relocations, LO_SUM is valid, as are small
782 data symbols. Avoid explicit relocations of modes larger than word
783 mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */
784 else if (TARGET_EXPLICIT_RELOCS
785 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
787 if (small_symbolic_operand (x, Pmode))
788 return true;
790 if (GET_CODE (x) == LO_SUM)
792 rtx ofs = XEXP (x, 1);
793 x = XEXP (x, 0);
795 /* Discard non-paradoxical subregs. */
796 if (GET_CODE (x) == SUBREG
797 && (GET_MODE_SIZE (GET_MODE (x))
798 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
799 x = SUBREG_REG (x);
801 /* Must have a valid base register. */
802 if (! (REG_P (x)
803 && (strict
804 ? STRICT_REG_OK_FOR_BASE_P (x)
805 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
806 return false;
808 /* The symbol must be local. */
809 if (local_symbolic_operand (ofs, Pmode)
810 || dtp32_symbolic_operand (ofs, Pmode)
811 || tp32_symbolic_operand (ofs, Pmode))
812 return true;
816 return false;
819 /* Build the SYMBOL_REF for __tls_get_addr. */
821 static GTY(()) rtx tls_get_addr_libfunc;
823 static rtx
824 get_tls_get_addr (void)
826 if (!tls_get_addr_libfunc)
827 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
828 return tls_get_addr_libfunc;
831 /* Try machine-dependent ways of modifying an illegitimate address
832 to be legitimate. If we find one, return the new, valid address. */
834 static rtx
835 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
837 HOST_WIDE_INT addend;
839 /* If the address is (plus reg const_int) and the CONST_INT is not a
840 valid offset, compute the high part of the constant and add it to
841 the register. Then our address is (plus temp low-part-const). */
842 if (GET_CODE (x) == PLUS
843 && REG_P (XEXP (x, 0))
844 && CONST_INT_P (XEXP (x, 1))
845 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
847 addend = INTVAL (XEXP (x, 1));
848 x = XEXP (x, 0);
849 goto split_addend;
852 /* If the address is (const (plus FOO const_int)), find the low-order
853 part of the CONST_INT. Then load FOO plus any high-order part of the
854 CONST_INT into a register. Our address is (plus reg low-part-const).
855 This is done to reduce the number of GOT entries. */
856 if (can_create_pseudo_p ()
857 && GET_CODE (x) == CONST
858 && GET_CODE (XEXP (x, 0)) == PLUS
859 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
861 addend = INTVAL (XEXP (XEXP (x, 0), 1));
862 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
863 goto split_addend;
866 /* If we have a (plus reg const), emit the load as in (2), then add
867 the two registers, and finally generate (plus reg low-part-const) as
868 our address. */
869 if (can_create_pseudo_p ()
870 && GET_CODE (x) == PLUS
871 && REG_P (XEXP (x, 0))
872 && GET_CODE (XEXP (x, 1)) == CONST
873 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
874 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
876 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
877 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
878 XEXP (XEXP (XEXP (x, 1), 0), 0),
879 NULL_RTX, 1, OPTAB_LIB_WIDEN);
880 goto split_addend;
883 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
884 Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
885 around +/- 32k offset. */
886 if (TARGET_EXPLICIT_RELOCS
887 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
888 && symbolic_operand (x, Pmode))
890 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
892 switch (tls_symbolic_operand_type (x))
894 case TLS_MODEL_NONE:
895 break;
897 case TLS_MODEL_GLOBAL_DYNAMIC:
898 start_sequence ();
900 r0 = gen_rtx_REG (Pmode, 0);
901 r16 = gen_rtx_REG (Pmode, 16);
902 tga = get_tls_get_addr ();
903 dest = gen_reg_rtx (Pmode);
904 seq = GEN_INT (alpha_next_sequence_number++);
906 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
907 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
908 insn = emit_call_insn (insn);
909 RTL_CONST_CALL_P (insn) = 1;
910 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
912 insn = get_insns ();
913 end_sequence ();
915 emit_libcall_block (insn, dest, r0, x);
916 return dest;
918 case TLS_MODEL_LOCAL_DYNAMIC:
919 start_sequence ();
921 r0 = gen_rtx_REG (Pmode, 0);
922 r16 = gen_rtx_REG (Pmode, 16);
923 tga = get_tls_get_addr ();
924 scratch = gen_reg_rtx (Pmode);
925 seq = GEN_INT (alpha_next_sequence_number++);
927 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
928 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
929 insn = emit_call_insn (insn);
930 RTL_CONST_CALL_P (insn) = 1;
931 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
933 insn = get_insns ();
934 end_sequence ();
936 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
937 UNSPEC_TLSLDM_CALL);
938 emit_libcall_block (insn, scratch, r0, eqv);
940 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
941 eqv = gen_rtx_CONST (Pmode, eqv);
943 if (alpha_tls_size == 64)
945 dest = gen_reg_rtx (Pmode);
946 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
947 emit_insn (gen_adddi3 (dest, dest, scratch));
948 return dest;
950 if (alpha_tls_size == 32)
952 insn = gen_rtx_HIGH (Pmode, eqv);
953 insn = gen_rtx_PLUS (Pmode, scratch, insn);
954 scratch = gen_reg_rtx (Pmode);
955 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
957 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
959 case TLS_MODEL_INITIAL_EXEC:
960 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
961 eqv = gen_rtx_CONST (Pmode, eqv);
962 tp = gen_reg_rtx (Pmode);
963 scratch = gen_reg_rtx (Pmode);
964 dest = gen_reg_rtx (Pmode);
966 emit_insn (gen_load_tp (tp));
967 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
968 emit_insn (gen_adddi3 (dest, tp, scratch));
969 return dest;
971 case TLS_MODEL_LOCAL_EXEC:
972 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
973 eqv = gen_rtx_CONST (Pmode, eqv);
974 tp = gen_reg_rtx (Pmode);
976 emit_insn (gen_load_tp (tp));
977 if (alpha_tls_size == 32)
979 insn = gen_rtx_HIGH (Pmode, eqv);
980 insn = gen_rtx_PLUS (Pmode, tp, insn);
981 tp = gen_reg_rtx (Pmode);
982 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
984 return gen_rtx_LO_SUM (Pmode, tp, eqv);
986 default:
987 gcc_unreachable ();
990 if (local_symbolic_operand (x, Pmode))
992 if (small_symbolic_operand (x, Pmode))
993 return x;
994 else
996 if (can_create_pseudo_p ())
997 scratch = gen_reg_rtx (Pmode);
998 emit_insn (gen_rtx_SET (VOIDmode, scratch,
999 gen_rtx_HIGH (Pmode, x)));
1000 return gen_rtx_LO_SUM (Pmode, scratch, x);
1005 return NULL;
1007 split_addend:
1009 HOST_WIDE_INT low, high;
1011 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1012 addend -= low;
1013 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1014 addend -= high;
1016 if (addend)
1017 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1018 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1019 1, OPTAB_LIB_WIDEN);
1020 if (high)
1021 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1022 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1023 1, OPTAB_LIB_WIDEN);
1025 return plus_constant (Pmode, x, low);
1030 /* Try machine-dependent ways of modifying an illegitimate address
1031 to be legitimate. Return X or the new, valid address. */
1033 static rtx
1034 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1035 enum machine_mode mode)
1037 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1038 return new_x ? new_x : x;
1041 /* Return true if ADDR has an effect that depends on the machine mode it
1042 is used for. On the Alpha this is true only for the unaligned modes.
1043 We can simplify the test since we know that the address must be valid. */
1045 static bool
1046 alpha_mode_dependent_address_p (const_rtx addr,
1047 addr_space_t as ATTRIBUTE_UNUSED)
1049 return GET_CODE (addr) == AND;
1052 /* Primarily this is required for TLS symbols, but given that our move
1053 patterns *ought* to be able to handle any symbol at any time, we
1054 should never be spilling symbolic operands to the constant pool, ever. */
1056 static bool
1057 alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1059 enum rtx_code code = GET_CODE (x);
1060 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1063 /* We do not allow indirect calls to be optimized into sibling calls, nor
1064 can we allow a call to a function with a different GP to be optimized
1065 into a sibcall. */
1067 static bool
1068 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1070 /* Can't do indirect tail calls, since we don't know if the target
1071 uses the same GP. */
1072 if (!decl)
1073 return false;
1075 /* Otherwise, we can make a tail call if the target function shares
1076 the same GP. */
1077 return decl_has_samegp (decl);
1081 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1083 rtx x = *px;
1085 /* Don't re-split. */
1086 if (GET_CODE (x) == LO_SUM)
1087 return -1;
1089 return small_symbolic_operand (x, Pmode) != 0;
1092 static int
1093 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1095 rtx x = *px;
1097 /* Don't re-split. */
1098 if (GET_CODE (x) == LO_SUM)
1099 return -1;
1101 if (small_symbolic_operand (x, Pmode))
1103 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1104 *px = x;
1105 return -1;
1108 return 0;
1112 split_small_symbolic_operand (rtx x)
1114 x = copy_insn (x);
1115 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1116 return x;
1119 /* Indicate that INSN cannot be duplicated. This is true for any insn
1120 that we've marked with gpdisp relocs, since those have to stay in
1121 1-1 correspondence with one another.
1123 Technically we could copy them if we could set up a mapping from one
1124 sequence number to another, across the set of insns to be duplicated.
1125 This seems overly complicated and error-prone since interblock motion
1126 from sched-ebb could move one of the pair of insns to a different block.
1128 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1129 then they'll be in a different block from their ldgp. Which could lead
1130 the bb reorder code to think that it would be ok to copy just the block
1131 containing the call and branch to the block containing the ldgp. */
1133 static bool
1134 alpha_cannot_copy_insn_p (rtx insn)
1136 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1137 return false;
1138 if (recog_memoized (insn) >= 0)
1139 return get_attr_cannot_copy (insn);
1140 else
1141 return false;
1145 /* Try a machine-dependent way of reloading an illegitimate address
1146 operand. If we find one, push the reload and return the new rtx. */
1149 alpha_legitimize_reload_address (rtx x,
1150 enum machine_mode mode ATTRIBUTE_UNUSED,
1151 int opnum, int type,
1152 int ind_levels ATTRIBUTE_UNUSED)
1154 /* We must recognize output that we have already generated ourselves. */
1155 if (GET_CODE (x) == PLUS
1156 && GET_CODE (XEXP (x, 0)) == PLUS
1157 && REG_P (XEXP (XEXP (x, 0), 0))
1158 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1159 && CONST_INT_P (XEXP (x, 1)))
1161 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1162 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1163 opnum, (enum reload_type) type);
1164 return x;
1167 /* We wish to handle large displacements off a base register by
1168 splitting the addend across an ldah and the mem insn. This
1169 cuts number of extra insns needed from 3 to 1. */
1170 if (GET_CODE (x) == PLUS
1171 && REG_P (XEXP (x, 0))
1172 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1173 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1174 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1176 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1177 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1178 HOST_WIDE_INT high
1179 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1181 /* Check for 32-bit overflow. */
1182 if (high + low != val)
1183 return NULL_RTX;
1185 /* Reload the high part into a base reg; leave the low part
1186 in the mem directly. */
1187 x = gen_rtx_PLUS (GET_MODE (x),
1188 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1189 GEN_INT (high)),
1190 GEN_INT (low));
1192 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1193 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1194 opnum, (enum reload_type) type);
1195 return x;
1198 return NULL_RTX;
1201 /* Compute a (partial) cost for rtx X. Return true if the complete
1202 cost has been computed, and false if subexpressions should be
1203 scanned. In either case, *TOTAL contains the cost result. */
1205 static bool
1206 alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
1207 bool speed)
1209 enum machine_mode mode = GET_MODE (x);
1210 bool float_mode_p = FLOAT_MODE_P (mode);
1211 const struct alpha_rtx_cost_data *cost_data;
1213 if (!speed)
1214 cost_data = &alpha_rtx_cost_size;
1215 else
1216 cost_data = &alpha_rtx_cost_data[alpha_tune];
1218 switch (code)
1220 case CONST_INT:
1221 /* If this is an 8-bit constant, return zero since it can be used
1222 nearly anywhere with no cost. If it is a valid operand for an
1223 ADD or AND, likewise return 0 if we know it will be used in that
1224 context. Otherwise, return 2 since it might be used there later.
1225 All other constants take at least two insns. */
1226 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1228 *total = 0;
1229 return true;
1231 /* FALLTHRU */
1233 case CONST_DOUBLE:
1234 if (x == CONST0_RTX (mode))
1235 *total = 0;
1236 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1237 || (outer_code == AND && and_operand (x, VOIDmode)))
1238 *total = 0;
1239 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1240 *total = 2;
1241 else
1242 *total = COSTS_N_INSNS (2);
1243 return true;
1245 case CONST:
1246 case SYMBOL_REF:
1247 case LABEL_REF:
1248 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1249 *total = COSTS_N_INSNS (outer_code != MEM);
1250 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1251 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1252 else if (tls_symbolic_operand_type (x))
1253 /* Estimate of cost for call_pal rduniq. */
1254 /* ??? How many insns do we emit here? More than one... */
1255 *total = COSTS_N_INSNS (15);
1256 else
1257 /* Otherwise we do a load from the GOT. */
1258 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1259 return true;
1261 case HIGH:
1262 /* This is effectively an add_operand. */
1263 *total = 2;
1264 return true;
1266 case PLUS:
1267 case MINUS:
1268 if (float_mode_p)
1269 *total = cost_data->fp_add;
1270 else if (GET_CODE (XEXP (x, 0)) == MULT
1271 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1273 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1274 (enum rtx_code) outer_code, opno, speed)
1275 + rtx_cost (XEXP (x, 1),
1276 (enum rtx_code) outer_code, opno, speed)
1277 + COSTS_N_INSNS (1));
1278 return true;
1280 return false;
1282 case MULT:
1283 if (float_mode_p)
1284 *total = cost_data->fp_mult;
1285 else if (mode == DImode)
1286 *total = cost_data->int_mult_di;
1287 else
1288 *total = cost_data->int_mult_si;
1289 return false;
1291 case ASHIFT:
1292 if (CONST_INT_P (XEXP (x, 1))
1293 && INTVAL (XEXP (x, 1)) <= 3)
1295 *total = COSTS_N_INSNS (1);
1296 return false;
1298 /* FALLTHRU */
1300 case ASHIFTRT:
1301 case LSHIFTRT:
1302 *total = cost_data->int_shift;
1303 return false;
1305 case IF_THEN_ELSE:
1306 if (float_mode_p)
1307 *total = cost_data->fp_add;
1308 else
1309 *total = cost_data->int_cmov;
1310 return false;
1312 case DIV:
1313 case UDIV:
1314 case MOD:
1315 case UMOD:
1316 if (!float_mode_p)
1317 *total = cost_data->int_div;
1318 else if (mode == SFmode)
1319 *total = cost_data->fp_div_sf;
1320 else
1321 *total = cost_data->fp_div_df;
1322 return false;
1324 case MEM:
1325 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1326 return true;
1328 case NEG:
1329 if (! float_mode_p)
1331 *total = COSTS_N_INSNS (1);
1332 return false;
1334 /* FALLTHRU */
1336 case ABS:
1337 if (! float_mode_p)
1339 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1340 return false;
1342 /* FALLTHRU */
1344 case FLOAT:
1345 case UNSIGNED_FLOAT:
1346 case FIX:
1347 case UNSIGNED_FIX:
1348 case FLOAT_TRUNCATE:
1349 *total = cost_data->fp_add;
1350 return false;
1352 case FLOAT_EXTEND:
1353 if (MEM_P (XEXP (x, 0)))
1354 *total = 0;
1355 else
1356 *total = cost_data->fp_add;
1357 return false;
1359 default:
1360 return false;
1364 /* REF is an alignable memory location. Place an aligned SImode
1365 reference into *PALIGNED_MEM and the number of bits to shift into
1366 *PBITNUM. SCRATCH is a free register for use in reloading out
1367 of range stack slots. */
1369 void
1370 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1372 rtx base;
1373 HOST_WIDE_INT disp, offset;
1375 gcc_assert (MEM_P (ref));
1377 if (reload_in_progress
1378 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1380 base = find_replacement (&XEXP (ref, 0));
1381 gcc_assert (memory_address_p (GET_MODE (ref), base));
1383 else
1384 base = XEXP (ref, 0);
1386 if (GET_CODE (base) == PLUS)
1387 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1388 else
1389 disp = 0;
1391 /* Find the byte offset within an aligned word. If the memory itself is
1392 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1393 will have examined the base register and determined it is aligned, and
1394 thus displacements from it are naturally alignable. */
1395 if (MEM_ALIGN (ref) >= 32)
1396 offset = 0;
1397 else
1398 offset = disp & 3;
1400 /* The location should not cross aligned word boundary. */
1401 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1402 <= GET_MODE_SIZE (SImode));
1404 /* Access the entire aligned word. */
1405 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1407 /* Convert the byte offset within the word to a bit offset. */
1408 offset *= BITS_PER_UNIT;
1409 *pbitnum = GEN_INT (offset);
1412 /* Similar, but just get the address. Handle the two reload cases.
1413 Add EXTRA_OFFSET to the address we return. */
1416 get_unaligned_address (rtx ref)
1418 rtx base;
1419 HOST_WIDE_INT offset = 0;
1421 gcc_assert (MEM_P (ref));
1423 if (reload_in_progress
1424 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1426 base = find_replacement (&XEXP (ref, 0));
1428 gcc_assert (memory_address_p (GET_MODE (ref), base));
1430 else
1431 base = XEXP (ref, 0);
1433 if (GET_CODE (base) == PLUS)
1434 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1436 return plus_constant (Pmode, base, offset);
1439 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1440 X is always returned in a register. */
1443 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1445 if (GET_CODE (addr) == PLUS)
1447 ofs += INTVAL (XEXP (addr, 1));
1448 addr = XEXP (addr, 0);
1451 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1452 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1455 /* On the Alpha, all (non-symbolic) constants except zero go into
1456 a floating-point register via memory. Note that we cannot
1457 return anything that is not a subset of RCLASS, and that some
1458 symbolic constants cannot be dropped to memory. */
1460 enum reg_class
1461 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1463 /* Zero is present in any register class. */
1464 if (x == CONST0_RTX (GET_MODE (x)))
1465 return rclass;
1467 /* These sorts of constants we can easily drop to memory. */
1468 if (CONST_INT_P (x)
1469 || GET_CODE (x) == CONST_DOUBLE
1470 || GET_CODE (x) == CONST_VECTOR)
1472 if (rclass == FLOAT_REGS)
1473 return NO_REGS;
1474 if (rclass == ALL_REGS)
1475 return GENERAL_REGS;
1476 return rclass;
1479 /* All other kinds of constants should not (and in the case of HIGH
1480 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1481 secondary reload. */
1482 if (CONSTANT_P (x))
1483 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1485 return rclass;
1488 /* Inform reload about cases where moving X with a mode MODE to a register in
1489 RCLASS requires an extra scratch or immediate register. Return the class
1490 needed for the immediate register. */
1492 static reg_class_t
1493 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1494 enum machine_mode mode, secondary_reload_info *sri)
1496 enum reg_class rclass = (enum reg_class) rclass_i;
1498 /* Loading and storing HImode or QImode values to and from memory
1499 usually requires a scratch register. */
1500 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1502 if (any_memory_operand (x, mode))
1504 if (in_p)
1506 if (!aligned_memory_operand (x, mode))
1507 sri->icode = direct_optab_handler (reload_in_optab, mode);
1509 else
1510 sri->icode = direct_optab_handler (reload_out_optab, mode);
1511 return NO_REGS;
1515 /* We also cannot do integral arithmetic into FP regs, as might result
1516 from register elimination into a DImode fp register. */
1517 if (rclass == FLOAT_REGS)
1519 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1520 return GENERAL_REGS;
1521 if (in_p && INTEGRAL_MODE_P (mode)
1522 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1523 return GENERAL_REGS;
1526 return NO_REGS;
1529 /* Subfunction of the following function. Update the flags of any MEM
1530 found in part of X. */
1532 static int
1533 alpha_set_memflags_1 (rtx *xp, void *data)
1535 rtx x = *xp, orig = (rtx) data;
1537 if (!MEM_P (x))
1538 return 0;
1540 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1541 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1542 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1544 /* Sadly, we cannot use alias sets because the extra aliasing
1545 produced by the AND interferes. Given that two-byte quantities
1546 are the only thing we would be able to differentiate anyway,
1547 there does not seem to be any point in convoluting the early
1548 out of the alias check. */
1550 return -1;
1553 /* Given SEQ, which is an INSN list, look for any MEMs in either
1554 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1555 volatile flags from REF into each of the MEMs found. If REF is not
1556 a MEM, don't do anything. */
1558 void
1559 alpha_set_memflags (rtx seq, rtx ref)
1561 rtx insn;
1563 if (!MEM_P (ref))
1564 return;
1566 /* This is only called from alpha.md, after having had something
1567 generated from one of the insn patterns. So if everything is
1568 zero, the pattern is already up-to-date. */
1569 if (!MEM_VOLATILE_P (ref)
1570 && !MEM_NOTRAP_P (ref)
1571 && !MEM_READONLY_P (ref))
1572 return;
1574 for (insn = seq; insn; insn = NEXT_INSN (insn))
1575 if (INSN_P (insn))
1576 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1577 else
1578 gcc_unreachable ();
1581 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1582 int, bool);
1584 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1585 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1586 and return pc_rtx if successful. */
1588 static rtx
1589 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1590 HOST_WIDE_INT c, int n, bool no_output)
1592 HOST_WIDE_INT new_const;
1593 int i, bits;
1594 /* Use a pseudo if highly optimizing and still generating RTL. */
1595 rtx subtarget
1596 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1597 rtx temp, insn;
1599 /* If this is a sign-extended 32-bit constant, we can do this in at most
1600 three insns, so do it if we have enough insns left. We always have
1601 a sign-extended 32-bit constant when compiling on a narrow machine. */
1603 if (HOST_BITS_PER_WIDE_INT != 64
1604 || c >> 31 == -1 || c >> 31 == 0)
1606 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1607 HOST_WIDE_INT tmp1 = c - low;
1608 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1609 HOST_WIDE_INT extra = 0;
1611 /* If HIGH will be interpreted as negative but the constant is
1612 positive, we must adjust it to do two ldha insns. */
1614 if ((high & 0x8000) != 0 && c >= 0)
1616 extra = 0x4000;
1617 tmp1 -= 0x40000000;
1618 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1621 if (c == low || (low == 0 && extra == 0))
1623 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1624 but that meant that we can't handle INT_MIN on 32-bit machines
1625 (like NT/Alpha), because we recurse indefinitely through
1626 emit_move_insn to gen_movdi. So instead, since we know exactly
1627 what we want, create it explicitly. */
1629 if (no_output)
1630 return pc_rtx;
1631 if (target == NULL)
1632 target = gen_reg_rtx (mode);
1633 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1634 return target;
1636 else if (n >= 2 + (extra != 0))
1638 if (no_output)
1639 return pc_rtx;
1640 if (!can_create_pseudo_p ())
1642 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1643 temp = target;
1645 else
1646 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1647 subtarget, mode);
1649 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1650 This means that if we go through expand_binop, we'll try to
1651 generate extensions, etc, which will require new pseudos, which
1652 will fail during some split phases. The SImode add patterns
1653 still exist, but are not named. So build the insns by hand. */
1655 if (extra != 0)
1657 if (! subtarget)
1658 subtarget = gen_reg_rtx (mode);
1659 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1660 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1661 emit_insn (insn);
1662 temp = subtarget;
1665 if (target == NULL)
1666 target = gen_reg_rtx (mode);
1667 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1668 insn = gen_rtx_SET (VOIDmode, target, insn);
1669 emit_insn (insn);
1670 return target;
1674 /* If we couldn't do it that way, try some other methods. But if we have
1675 no instructions left, don't bother. Likewise, if this is SImode and
1676 we can't make pseudos, we can't do anything since the expand_binop
1677 and expand_unop calls will widen and try to make pseudos. */
1679 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1680 return 0;
1682 /* Next, see if we can load a related constant and then shift and possibly
1683 negate it to get the constant we want. Try this once each increasing
1684 numbers of insns. */
1686 for (i = 1; i < n; i++)
1688 /* First, see if minus some low bits, we've an easy load of
1689 high bits. */
1691 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1692 if (new_const != 0)
1694 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1695 if (temp)
1697 if (no_output)
1698 return temp;
1699 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1700 target, 0, OPTAB_WIDEN);
1704 /* Next try complementing. */
1705 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1706 if (temp)
1708 if (no_output)
1709 return temp;
1710 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1713 /* Next try to form a constant and do a left shift. We can do this
1714 if some low-order bits are zero; the exact_log2 call below tells
1715 us that information. The bits we are shifting out could be any
1716 value, but here we'll just try the 0- and sign-extended forms of
1717 the constant. To try to increase the chance of having the same
1718 constant in more than one insn, start at the highest number of
1719 bits to shift, but try all possibilities in case a ZAPNOT will
1720 be useful. */
1722 bits = exact_log2 (c & -c);
1723 if (bits > 0)
1724 for (; bits > 0; bits--)
1726 new_const = c >> bits;
1727 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1728 if (!temp && c < 0)
1730 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1731 temp = alpha_emit_set_const (subtarget, mode, new_const,
1732 i, no_output);
1734 if (temp)
1736 if (no_output)
1737 return temp;
1738 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1739 target, 0, OPTAB_WIDEN);
1743 /* Now try high-order zero bits. Here we try the shifted-in bits as
1744 all zero and all ones. Be careful to avoid shifting outside the
1745 mode and to avoid shifting outside the host wide int size. */
1746 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1747 confuse the recursive call and set all of the high 32 bits. */
1749 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1750 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1751 if (bits > 0)
1752 for (; bits > 0; bits--)
1754 new_const = c << bits;
1755 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1756 if (!temp)
1758 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1759 temp = alpha_emit_set_const (subtarget, mode, new_const,
1760 i, no_output);
1762 if (temp)
1764 if (no_output)
1765 return temp;
1766 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1767 target, 1, OPTAB_WIDEN);
1771 /* Now try high-order 1 bits. We get that with a sign-extension.
1772 But one bit isn't enough here. Be careful to avoid shifting outside
1773 the mode and to avoid shifting outside the host wide int size. */
1775 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1776 - floor_log2 (~ c) - 2);
1777 if (bits > 0)
1778 for (; bits > 0; bits--)
1780 new_const = c << bits;
1781 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1782 if (!temp)
1784 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1785 temp = alpha_emit_set_const (subtarget, mode, new_const,
1786 i, no_output);
1788 if (temp)
1790 if (no_output)
1791 return temp;
1792 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1793 target, 0, OPTAB_WIDEN);
1798 #if HOST_BITS_PER_WIDE_INT == 64
1799 /* Finally, see if can load a value into the target that is the same as the
1800 constant except that all bytes that are 0 are changed to be 0xff. If we
1801 can, then we can do a ZAPNOT to obtain the desired constant. */
1803 new_const = c;
1804 for (i = 0; i < 64; i += 8)
1805 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1806 new_const |= (HOST_WIDE_INT) 0xff << i;
1808 /* We are only called for SImode and DImode. If this is SImode, ensure that
1809 we are sign extended to a full word. */
1811 if (mode == SImode)
1812 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1814 if (new_const != c)
1816 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1817 if (temp)
1819 if (no_output)
1820 return temp;
1821 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1822 target, 0, OPTAB_WIDEN);
1825 #endif
1827 return 0;
1830 /* Try to output insns to set TARGET equal to the constant C if it can be
1831 done in less than N insns. Do all computations in MODE. Returns the place
1832 where the output has been placed if it can be done and the insns have been
1833 emitted. If it would take more than N insns, zero is returned and no
1834 insns and emitted. */
1836 static rtx
1837 alpha_emit_set_const (rtx target, enum machine_mode mode,
1838 HOST_WIDE_INT c, int n, bool no_output)
1840 enum machine_mode orig_mode = mode;
1841 rtx orig_target = target;
1842 rtx result = 0;
1843 int i;
1845 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1846 can't load this constant in one insn, do this in DImode. */
1847 if (!can_create_pseudo_p () && mode == SImode
1848 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1850 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1851 if (result)
1852 return result;
1854 target = no_output ? NULL : gen_lowpart (DImode, target);
1855 mode = DImode;
1857 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1859 target = no_output ? NULL : gen_lowpart (DImode, target);
1860 mode = DImode;
1863 /* Try 1 insn, then 2, then up to N. */
1864 for (i = 1; i <= n; i++)
1866 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1867 if (result)
1869 rtx insn, set;
1871 if (no_output)
1872 return result;
1874 insn = get_last_insn ();
1875 set = single_set (insn);
1876 if (! CONSTANT_P (SET_SRC (set)))
1877 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1878 break;
1882 /* Allow for the case where we changed the mode of TARGET. */
1883 if (result)
1885 if (result == target)
1886 result = orig_target;
1887 else if (mode != orig_mode)
1888 result = gen_lowpart (orig_mode, result);
1891 return result;
1894 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1895 fall back to a straight forward decomposition. We do this to avoid
1896 exponential run times encountered when looking for longer sequences
1897 with alpha_emit_set_const. */
1899 static rtx
1900 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1902 HOST_WIDE_INT d1, d2, d3, d4;
1904 /* Decompose the entire word */
1905 #if HOST_BITS_PER_WIDE_INT >= 64
1906 gcc_assert (c2 == -(c1 < 0));
1907 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1908 c1 -= d1;
1909 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1910 c1 = (c1 - d2) >> 32;
1911 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1912 c1 -= d3;
1913 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1914 gcc_assert (c1 == d4);
1915 #else
1916 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1917 c1 -= d1;
1918 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1919 gcc_assert (c1 == d2);
1920 c2 += (d2 < 0);
1921 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1922 c2 -= d3;
1923 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1924 gcc_assert (c2 == d4);
1925 #endif
1927 /* Construct the high word */
1928 if (d4)
1930 emit_move_insn (target, GEN_INT (d4));
1931 if (d3)
1932 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1934 else
1935 emit_move_insn (target, GEN_INT (d3));
1937 /* Shift it into place */
1938 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1940 /* Add in the low bits. */
1941 if (d2)
1942 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1943 if (d1)
1944 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1946 return target;
1949 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
1950 the low 64 bits. */
1952 static void
1953 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
1955 HOST_WIDE_INT i0, i1;
1957 if (GET_CODE (x) == CONST_VECTOR)
1958 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
1961 if (CONST_INT_P (x))
1963 i0 = INTVAL (x);
1964 i1 = -(i0 < 0);
1966 else if (HOST_BITS_PER_WIDE_INT >= 64)
1968 i0 = CONST_DOUBLE_LOW (x);
1969 i1 = -(i0 < 0);
1971 else
1973 i0 = CONST_DOUBLE_LOW (x);
1974 i1 = CONST_DOUBLE_HIGH (x);
1977 *p0 = i0;
1978 *p1 = i1;
1981 /* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
1982 we are willing to load the value into a register via a move pattern.
1983 Normally this is all symbolic constants, integral constants that
1984 take three or fewer instructions, and floating-point zero. */
1986 bool
1987 alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
1989 HOST_WIDE_INT i0, i1;
1991 switch (GET_CODE (x))
1993 case LABEL_REF:
1994 case HIGH:
1995 return true;
1997 case CONST:
1998 if (GET_CODE (XEXP (x, 0)) == PLUS
1999 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2000 x = XEXP (XEXP (x, 0), 0);
2001 else
2002 return true;
2004 if (GET_CODE (x) != SYMBOL_REF)
2005 return true;
2007 /* FALLTHRU */
2009 case SYMBOL_REF:
2010 /* TLS symbols are never valid. */
2011 return SYMBOL_REF_TLS_MODEL (x) == 0;
2013 case CONST_DOUBLE:
2014 if (x == CONST0_RTX (mode))
2015 return true;
2016 if (FLOAT_MODE_P (mode))
2017 return false;
2018 goto do_integer;
2020 case CONST_VECTOR:
2021 if (x == CONST0_RTX (mode))
2022 return true;
2023 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2024 return false;
2025 if (GET_MODE_SIZE (mode) != 8)
2026 return false;
2027 goto do_integer;
2029 case CONST_INT:
2030 do_integer:
2031 if (TARGET_BUILD_CONSTANTS)
2032 return true;
2033 alpha_extract_integer (x, &i0, &i1);
2034 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2035 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2036 return false;
2038 default:
2039 return false;
2043 /* Operand 1 is known to be a constant, and should require more than one
2044 instruction to load. Emit that multi-part load. */
2046 bool
2047 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2049 HOST_WIDE_INT i0, i1;
2050 rtx temp = NULL_RTX;
2052 alpha_extract_integer (operands[1], &i0, &i1);
2054 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2055 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2057 if (!temp && TARGET_BUILD_CONSTANTS)
2058 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2060 if (temp)
2062 if (!rtx_equal_p (operands[0], temp))
2063 emit_move_insn (operands[0], temp);
2064 return true;
2067 return false;
2070 /* Expand a move instruction; return true if all work is done.
2071 We don't handle non-bwx subword loads here. */
2073 bool
2074 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2076 rtx tmp;
2078 /* If the output is not a register, the input must be. */
2079 if (MEM_P (operands[0])
2080 && ! reg_or_0_operand (operands[1], mode))
2081 operands[1] = force_reg (mode, operands[1]);
2083 /* Allow legitimize_address to perform some simplifications. */
2084 if (mode == Pmode && symbolic_operand (operands[1], mode))
2086 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2087 if (tmp)
2089 if (tmp == operands[0])
2090 return true;
2091 operands[1] = tmp;
2092 return false;
2096 /* Early out for non-constants and valid constants. */
2097 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2098 return false;
2100 /* Split large integers. */
2101 if (CONST_INT_P (operands[1])
2102 || GET_CODE (operands[1]) == CONST_DOUBLE
2103 || GET_CODE (operands[1]) == CONST_VECTOR)
2105 if (alpha_split_const_mov (mode, operands))
2106 return true;
2109 /* Otherwise we've nothing left but to drop the thing to memory. */
2110 tmp = force_const_mem (mode, operands[1]);
2112 if (tmp == NULL_RTX)
2113 return false;
2115 if (reload_in_progress)
2117 emit_move_insn (operands[0], XEXP (tmp, 0));
2118 operands[1] = replace_equiv_address (tmp, operands[0]);
2120 else
2121 operands[1] = validize_mem (tmp);
2122 return false;
2125 /* Expand a non-bwx QImode or HImode move instruction;
2126 return true if all work is done. */
2128 bool
2129 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2131 rtx seq;
2133 /* If the output is not a register, the input must be. */
2134 if (MEM_P (operands[0]))
2135 operands[1] = force_reg (mode, operands[1]);
2137 /* Handle four memory cases, unaligned and aligned for either the input
2138 or the output. The only case where we can be called during reload is
2139 for aligned loads; all other cases require temporaries. */
2141 if (any_memory_operand (operands[1], mode))
2143 if (aligned_memory_operand (operands[1], mode))
2145 if (reload_in_progress)
2147 if (mode == QImode)
2148 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2149 else
2150 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2151 emit_insn (seq);
2153 else
2155 rtx aligned_mem, bitnum;
2156 rtx scratch = gen_reg_rtx (SImode);
2157 rtx subtarget;
2158 bool copyout;
2160 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2162 subtarget = operands[0];
2163 if (REG_P (subtarget))
2164 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2165 else
2166 subtarget = gen_reg_rtx (DImode), copyout = true;
2168 if (mode == QImode)
2169 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2170 bitnum, scratch);
2171 else
2172 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2173 bitnum, scratch);
2174 emit_insn (seq);
2176 if (copyout)
2177 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2180 else
2182 /* Don't pass these as parameters since that makes the generated
2183 code depend on parameter evaluation order which will cause
2184 bootstrap failures. */
2186 rtx temp1, temp2, subtarget, ua;
2187 bool copyout;
2189 temp1 = gen_reg_rtx (DImode);
2190 temp2 = gen_reg_rtx (DImode);
2192 subtarget = operands[0];
2193 if (REG_P (subtarget))
2194 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2195 else
2196 subtarget = gen_reg_rtx (DImode), copyout = true;
2198 ua = get_unaligned_address (operands[1]);
2199 if (mode == QImode)
2200 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2201 else
2202 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2204 alpha_set_memflags (seq, operands[1]);
2205 emit_insn (seq);
2207 if (copyout)
2208 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2210 return true;
2213 if (any_memory_operand (operands[0], mode))
2215 if (aligned_memory_operand (operands[0], mode))
2217 rtx aligned_mem, bitnum;
2218 rtx temp1 = gen_reg_rtx (SImode);
2219 rtx temp2 = gen_reg_rtx (SImode);
2221 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2223 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2224 temp1, temp2));
2226 else
2228 rtx temp1 = gen_reg_rtx (DImode);
2229 rtx temp2 = gen_reg_rtx (DImode);
2230 rtx temp3 = gen_reg_rtx (DImode);
2231 rtx ua = get_unaligned_address (operands[0]);
2233 if (mode == QImode)
2234 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2235 else
2236 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2238 alpha_set_memflags (seq, operands[0]);
2239 emit_insn (seq);
2241 return true;
2244 return false;
2247 /* Implement the movmisalign patterns. One of the operands is a memory
2248 that is not naturally aligned. Emit instructions to load it. */
2250 void
2251 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2253 /* Honor misaligned loads, for those we promised to do so. */
2254 if (MEM_P (operands[1]))
2256 rtx tmp;
2258 if (register_operand (operands[0], mode))
2259 tmp = operands[0];
2260 else
2261 tmp = gen_reg_rtx (mode);
2263 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2264 if (tmp != operands[0])
2265 emit_move_insn (operands[0], tmp);
2267 else if (MEM_P (operands[0]))
2269 if (!reg_or_0_operand (operands[1], mode))
2270 operands[1] = force_reg (mode, operands[1]);
2271 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2273 else
2274 gcc_unreachable ();
2277 /* Generate an unsigned DImode to FP conversion. This is the same code
2278 optabs would emit if we didn't have TFmode patterns.
2280 For SFmode, this is the only construction I've found that can pass
2281 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2282 intermediates will work, because you'll get intermediate rounding
2283 that ruins the end result. Some of this could be fixed by turning
2284 on round-to-positive-infinity, but that requires diddling the fpsr,
2285 which kills performance. I tried turning this around and converting
2286 to a negative number, so that I could turn on /m, but either I did
2287 it wrong or there's something else cause I wound up with the exact
2288 same single-bit error. There is a branch-less form of this same code:
2290 srl $16,1,$1
2291 and $16,1,$2
2292 cmplt $16,0,$3
2293 or $1,$2,$2
2294 cmovge $16,$16,$2
2295 itoft $3,$f10
2296 itoft $2,$f11
2297 cvtqs $f11,$f11
2298 adds $f11,$f11,$f0
2299 fcmoveq $f10,$f11,$f0
2301 I'm not using it because it's the same number of instructions as
2302 this branch-full form, and it has more serialized long latency
2303 instructions on the critical path.
2305 For DFmode, we can avoid rounding errors by breaking up the word
2306 into two pieces, converting them separately, and adding them back:
2308 LC0: .long 0,0x5f800000
2310 itoft $16,$f11
2311 lda $2,LC0
2312 cmplt $16,0,$1
2313 cpyse $f11,$f31,$f10
2314 cpyse $f31,$f11,$f11
2315 s4addq $1,$2,$1
2316 lds $f12,0($1)
2317 cvtqt $f10,$f10
2318 cvtqt $f11,$f11
2319 addt $f12,$f10,$f0
2320 addt $f0,$f11,$f0
2322 This doesn't seem to be a clear-cut win over the optabs form.
2323 It probably all depends on the distribution of numbers being
2324 converted -- in the optabs form, all but high-bit-set has a
2325 much lower minimum execution time. */
2327 void
2328 alpha_emit_floatuns (rtx operands[2])
2330 rtx neglab, donelab, i0, i1, f0, in, out;
2331 enum machine_mode mode;
2333 out = operands[0];
2334 in = force_reg (DImode, operands[1]);
2335 mode = GET_MODE (out);
2336 neglab = gen_label_rtx ();
2337 donelab = gen_label_rtx ();
2338 i0 = gen_reg_rtx (DImode);
2339 i1 = gen_reg_rtx (DImode);
2340 f0 = gen_reg_rtx (mode);
2342 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2344 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2345 emit_jump_insn (gen_jump (donelab));
2346 emit_barrier ();
2348 emit_label (neglab);
2350 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2351 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2352 emit_insn (gen_iordi3 (i0, i0, i1));
2353 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2354 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2356 emit_label (donelab);
2359 /* Generate the comparison for a conditional branch. */
2361 void
2362 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2364 enum rtx_code cmp_code, branch_code;
2365 enum machine_mode branch_mode = VOIDmode;
2366 enum rtx_code code = GET_CODE (operands[0]);
2367 rtx op0 = operands[1], op1 = operands[2];
2368 rtx tem;
2370 if (cmp_mode == TFmode)
2372 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2373 op1 = const0_rtx;
2374 cmp_mode = DImode;
2377 /* The general case: fold the comparison code to the types of compares
2378 that we have, choosing the branch as necessary. */
2379 switch (code)
2381 case EQ: case LE: case LT: case LEU: case LTU:
2382 case UNORDERED:
2383 /* We have these compares. */
2384 cmp_code = code, branch_code = NE;
2385 break;
2387 case NE:
2388 case ORDERED:
2389 /* These must be reversed. */
2390 cmp_code = reverse_condition (code), branch_code = EQ;
2391 break;
2393 case GE: case GT: case GEU: case GTU:
2394 /* For FP, we swap them, for INT, we reverse them. */
2395 if (cmp_mode == DFmode)
2397 cmp_code = swap_condition (code);
2398 branch_code = NE;
2399 tem = op0, op0 = op1, op1 = tem;
2401 else
2403 cmp_code = reverse_condition (code);
2404 branch_code = EQ;
2406 break;
2408 default:
2409 gcc_unreachable ();
2412 if (cmp_mode == DFmode)
2414 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2416 /* When we are not as concerned about non-finite values, and we
2417 are comparing against zero, we can branch directly. */
2418 if (op1 == CONST0_RTX (DFmode))
2419 cmp_code = UNKNOWN, branch_code = code;
2420 else if (op0 == CONST0_RTX (DFmode))
2422 /* Undo the swap we probably did just above. */
2423 tem = op0, op0 = op1, op1 = tem;
2424 branch_code = swap_condition (cmp_code);
2425 cmp_code = UNKNOWN;
2428 else
2430 /* ??? We mark the branch mode to be CCmode to prevent the
2431 compare and branch from being combined, since the compare
2432 insn follows IEEE rules that the branch does not. */
2433 branch_mode = CCmode;
2436 else
2438 /* The following optimizations are only for signed compares. */
2439 if (code != LEU && code != LTU && code != GEU && code != GTU)
2441 /* Whee. Compare and branch against 0 directly. */
2442 if (op1 == const0_rtx)
2443 cmp_code = UNKNOWN, branch_code = code;
2445 /* If the constants doesn't fit into an immediate, but can
2446 be generated by lda/ldah, we adjust the argument and
2447 compare against zero, so we can use beq/bne directly. */
2448 /* ??? Don't do this when comparing against symbols, otherwise
2449 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2450 be declared false out of hand (at least for non-weak). */
2451 else if (CONST_INT_P (op1)
2452 && (code == EQ || code == NE)
2453 && !(symbolic_operand (op0, VOIDmode)
2454 || (REG_P (op0) && REG_POINTER (op0))))
2456 rtx n_op1 = GEN_INT (-INTVAL (op1));
2458 if (! satisfies_constraint_I (op1)
2459 && (satisfies_constraint_K (n_op1)
2460 || satisfies_constraint_L (n_op1)))
2461 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2465 if (!reg_or_0_operand (op0, DImode))
2466 op0 = force_reg (DImode, op0);
2467 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2468 op1 = force_reg (DImode, op1);
2471 /* Emit an initial compare instruction, if necessary. */
2472 tem = op0;
2473 if (cmp_code != UNKNOWN)
2475 tem = gen_reg_rtx (cmp_mode);
2476 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2479 /* Emit the branch instruction. */
2480 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2481 gen_rtx_IF_THEN_ELSE (VOIDmode,
2482 gen_rtx_fmt_ee (branch_code,
2483 branch_mode, tem,
2484 CONST0_RTX (cmp_mode)),
2485 gen_rtx_LABEL_REF (VOIDmode,
2486 operands[3]),
2487 pc_rtx));
2488 emit_jump_insn (tem);
2491 /* Certain simplifications can be done to make invalid setcc operations
2492 valid. Return the final comparison, or NULL if we can't work. */
2494 bool
2495 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2497 enum rtx_code cmp_code;
2498 enum rtx_code code = GET_CODE (operands[1]);
2499 rtx op0 = operands[2], op1 = operands[3];
2500 rtx tmp;
2502 if (cmp_mode == TFmode)
2504 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2505 op1 = const0_rtx;
2506 cmp_mode = DImode;
2509 if (cmp_mode == DFmode && !TARGET_FIX)
2510 return 0;
2512 /* The general case: fold the comparison code to the types of compares
2513 that we have, choosing the branch as necessary. */
2515 cmp_code = UNKNOWN;
2516 switch (code)
2518 case EQ: case LE: case LT: case LEU: case LTU:
2519 case UNORDERED:
2520 /* We have these compares. */
2521 if (cmp_mode == DFmode)
2522 cmp_code = code, code = NE;
2523 break;
2525 case NE:
2526 if (cmp_mode == DImode && op1 == const0_rtx)
2527 break;
2528 /* FALLTHRU */
2530 case ORDERED:
2531 cmp_code = reverse_condition (code);
2532 code = EQ;
2533 break;
2535 case GE: case GT: case GEU: case GTU:
2536 /* These normally need swapping, but for integer zero we have
2537 special patterns that recognize swapped operands. */
2538 if (cmp_mode == DImode && op1 == const0_rtx)
2539 break;
2540 code = swap_condition (code);
2541 if (cmp_mode == DFmode)
2542 cmp_code = code, code = NE;
2543 tmp = op0, op0 = op1, op1 = tmp;
2544 break;
2546 default:
2547 gcc_unreachable ();
2550 if (cmp_mode == DImode)
2552 if (!register_operand (op0, DImode))
2553 op0 = force_reg (DImode, op0);
2554 if (!reg_or_8bit_operand (op1, DImode))
2555 op1 = force_reg (DImode, op1);
2558 /* Emit an initial compare instruction, if necessary. */
2559 if (cmp_code != UNKNOWN)
2561 tmp = gen_reg_rtx (cmp_mode);
2562 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2563 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2565 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2566 op1 = const0_rtx;
2569 /* Emit the setcc instruction. */
2570 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2571 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2572 return true;
2576 /* Rewrite a comparison against zero CMP of the form
2577 (CODE (cc0) (const_int 0)) so it can be written validly in
2578 a conditional move (if_then_else CMP ...).
2579 If both of the operands that set cc0 are nonzero we must emit
2580 an insn to perform the compare (it can't be done within
2581 the conditional move). */
2584 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2586 enum rtx_code code = GET_CODE (cmp);
2587 enum rtx_code cmov_code = NE;
2588 rtx op0 = XEXP (cmp, 0);
2589 rtx op1 = XEXP (cmp, 1);
2590 enum machine_mode cmp_mode
2591 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2592 enum machine_mode cmov_mode = VOIDmode;
2593 int local_fast_math = flag_unsafe_math_optimizations;
2594 rtx tem;
2596 if (cmp_mode == TFmode)
2598 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2599 op1 = const0_rtx;
2600 cmp_mode = DImode;
2603 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2605 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2607 enum rtx_code cmp_code;
2609 if (! TARGET_FIX)
2610 return 0;
2612 /* If we have fp<->int register move instructions, do a cmov by
2613 performing the comparison in fp registers, and move the
2614 zero/nonzero value to integer registers, where we can then
2615 use a normal cmov, or vice-versa. */
2617 switch (code)
2619 case EQ: case LE: case LT: case LEU: case LTU:
2620 case UNORDERED:
2621 /* We have these compares. */
2622 cmp_code = code, code = NE;
2623 break;
2625 case NE:
2626 case ORDERED:
2627 /* These must be reversed. */
2628 cmp_code = reverse_condition (code), code = EQ;
2629 break;
2631 case GE: case GT: case GEU: case GTU:
2632 /* These normally need swapping, but for integer zero we have
2633 special patterns that recognize swapped operands. */
2634 if (cmp_mode == DImode && op1 == const0_rtx)
2635 cmp_code = code, code = NE;
2636 else
2638 cmp_code = swap_condition (code);
2639 code = NE;
2640 tem = op0, op0 = op1, op1 = tem;
2642 break;
2644 default:
2645 gcc_unreachable ();
2648 if (cmp_mode == DImode)
2650 if (!reg_or_0_operand (op0, DImode))
2651 op0 = force_reg (DImode, op0);
2652 if (!reg_or_8bit_operand (op1, DImode))
2653 op1 = force_reg (DImode, op1);
2656 tem = gen_reg_rtx (cmp_mode);
2657 emit_insn (gen_rtx_SET (VOIDmode, tem,
2658 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2659 op0, op1)));
2661 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2662 op0 = gen_lowpart (cmp_mode, tem);
2663 op1 = CONST0_RTX (cmp_mode);
2664 local_fast_math = 1;
2667 if (cmp_mode == DImode)
2669 if (!reg_or_0_operand (op0, DImode))
2670 op0 = force_reg (DImode, op0);
2671 if (!reg_or_8bit_operand (op1, DImode))
2672 op1 = force_reg (DImode, op1);
2675 /* We may be able to use a conditional move directly.
2676 This avoids emitting spurious compares. */
2677 if (signed_comparison_operator (cmp, VOIDmode)
2678 && (cmp_mode == DImode || local_fast_math)
2679 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2680 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2682 /* We can't put the comparison inside the conditional move;
2683 emit a compare instruction and put that inside the
2684 conditional move. Make sure we emit only comparisons we have;
2685 swap or reverse as necessary. */
2687 if (!can_create_pseudo_p ())
2688 return NULL_RTX;
2690 switch (code)
2692 case EQ: case LE: case LT: case LEU: case LTU:
2693 case UNORDERED:
2694 /* We have these compares: */
2695 break;
2697 case NE:
2698 case ORDERED:
2699 /* These must be reversed. */
2700 code = reverse_condition (code);
2701 cmov_code = EQ;
2702 break;
2704 case GE: case GT: case GEU: case GTU:
2705 /* These must be swapped. */
2706 if (op1 != CONST0_RTX (cmp_mode))
2708 code = swap_condition (code);
2709 tem = op0, op0 = op1, op1 = tem;
2711 break;
2713 default:
2714 gcc_unreachable ();
2717 if (cmp_mode == DImode)
2719 if (!reg_or_0_operand (op0, DImode))
2720 op0 = force_reg (DImode, op0);
2721 if (!reg_or_8bit_operand (op1, DImode))
2722 op1 = force_reg (DImode, op1);
2725 /* ??? We mark the branch mode to be CCmode to prevent the compare
2726 and cmov from being combined, since the compare insn follows IEEE
2727 rules that the cmov does not. */
2728 if (cmp_mode == DFmode && !local_fast_math)
2729 cmov_mode = CCmode;
2731 tem = gen_reg_rtx (cmp_mode);
2732 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2733 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2736 /* Simplify a conditional move of two constants into a setcc with
2737 arithmetic. This is done with a splitter since combine would
2738 just undo the work if done during code generation. It also catches
2739 cases we wouldn't have before cse. */
2742 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2743 rtx t_rtx, rtx f_rtx)
2745 HOST_WIDE_INT t, f, diff;
2746 enum machine_mode mode;
2747 rtx target, subtarget, tmp;
2749 mode = GET_MODE (dest);
2750 t = INTVAL (t_rtx);
2751 f = INTVAL (f_rtx);
2752 diff = t - f;
2754 if (((code == NE || code == EQ) && diff < 0)
2755 || (code == GE || code == GT))
2757 code = reverse_condition (code);
2758 diff = t, t = f, f = diff;
2759 diff = t - f;
2762 subtarget = target = dest;
2763 if (mode != DImode)
2765 target = gen_lowpart (DImode, dest);
2766 if (can_create_pseudo_p ())
2767 subtarget = gen_reg_rtx (DImode);
2768 else
2769 subtarget = target;
2771 /* Below, we must be careful to use copy_rtx on target and subtarget
2772 in intermediate insns, as they may be a subreg rtx, which may not
2773 be shared. */
2775 if (f == 0 && exact_log2 (diff) > 0
2776 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2777 viable over a longer latency cmove. On EV5, the E0 slot is a
2778 scarce resource, and on EV4 shift has the same latency as a cmove. */
2779 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2781 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2782 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2784 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2785 GEN_INT (exact_log2 (t)));
2786 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2788 else if (f == 0 && t == -1)
2790 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2791 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2793 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2795 else if (diff == 1 || diff == 4 || diff == 8)
2797 rtx add_op;
2799 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2800 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2802 if (diff == 1)
2803 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2804 else
2806 add_op = GEN_INT (f);
2807 if (sext_add_operand (add_op, mode))
2809 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2810 GEN_INT (diff));
2811 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2812 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2814 else
2815 return 0;
2818 else
2819 return 0;
2821 return 1;
2824 /* Look up the function X_floating library function name for the
2825 given operation. */
2827 struct GTY(()) xfloating_op
2829 const enum rtx_code code;
2830 const char *const GTY((skip)) osf_func;
2831 const char *const GTY((skip)) vms_func;
2832 rtx libcall;
2835 static GTY(()) struct xfloating_op xfloating_ops[] =
2837 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2838 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2839 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2840 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2841 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2842 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2843 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2844 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2845 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2846 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2847 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2848 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2849 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2850 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2851 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2854 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2856 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2857 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2860 static rtx
2861 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2863 struct xfloating_op *ops = xfloating_ops;
2864 long n = ARRAY_SIZE (xfloating_ops);
2865 long i;
2867 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2869 /* How irritating. Nothing to key off for the main table. */
2870 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2872 ops = vax_cvt_ops;
2873 n = ARRAY_SIZE (vax_cvt_ops);
2876 for (i = 0; i < n; ++i, ++ops)
2877 if (ops->code == code)
2879 rtx func = ops->libcall;
2880 if (!func)
2882 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2883 ? ops->vms_func : ops->osf_func);
2884 ops->libcall = func;
2886 return func;
2889 gcc_unreachable ();
2892 /* Most X_floating operations take the rounding mode as an argument.
2893 Compute that here. */
2895 static int
2896 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2897 enum alpha_fp_rounding_mode round)
2899 int mode;
2901 switch (round)
2903 case ALPHA_FPRM_NORM:
2904 mode = 2;
2905 break;
2906 case ALPHA_FPRM_MINF:
2907 mode = 1;
2908 break;
2909 case ALPHA_FPRM_CHOP:
2910 mode = 0;
2911 break;
2912 case ALPHA_FPRM_DYN:
2913 mode = 4;
2914 break;
2915 default:
2916 gcc_unreachable ();
2918 /* XXX For reference, round to +inf is mode = 3. */
2921 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2922 mode |= 0x10000;
2924 return mode;
2927 /* Emit an X_floating library function call.
2929 Note that these functions do not follow normal calling conventions:
2930 TFmode arguments are passed in two integer registers (as opposed to
2931 indirect); TFmode return values appear in R16+R17.
2933 FUNC is the function to call.
2934 TARGET is where the output belongs.
2935 OPERANDS are the inputs.
2936 NOPERANDS is the count of inputs.
2937 EQUIV is the expression equivalent for the function.
2940 static void
2941 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2942 int noperands, rtx equiv)
2944 rtx usage = NULL_RTX, tmp, reg;
2945 int regno = 16, i;
2947 start_sequence ();
2949 for (i = 0; i < noperands; ++i)
2951 switch (GET_MODE (operands[i]))
2953 case TFmode:
2954 reg = gen_rtx_REG (TFmode, regno);
2955 regno += 2;
2956 break;
2958 case DFmode:
2959 reg = gen_rtx_REG (DFmode, regno + 32);
2960 regno += 1;
2961 break;
2963 case VOIDmode:
2964 gcc_assert (CONST_INT_P (operands[i]));
2965 /* FALLTHRU */
2966 case DImode:
2967 reg = gen_rtx_REG (DImode, regno);
2968 regno += 1;
2969 break;
2971 default:
2972 gcc_unreachable ();
2975 emit_move_insn (reg, operands[i]);
2976 use_reg (&usage, reg);
2979 switch (GET_MODE (target))
2981 case TFmode:
2982 reg = gen_rtx_REG (TFmode, 16);
2983 break;
2984 case DFmode:
2985 reg = gen_rtx_REG (DFmode, 32);
2986 break;
2987 case DImode:
2988 reg = gen_rtx_REG (DImode, 0);
2989 break;
2990 default:
2991 gcc_unreachable ();
2994 tmp = gen_rtx_MEM (QImode, func);
2995 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
2996 const0_rtx, const0_rtx));
2997 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
2998 RTL_CONST_CALL_P (tmp) = 1;
3000 tmp = get_insns ();
3001 end_sequence ();
3003 emit_libcall_block (tmp, target, reg, equiv);
3006 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3008 void
3009 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3011 rtx func;
3012 int mode;
3013 rtx out_operands[3];
3015 func = alpha_lookup_xfloating_lib_func (code);
3016 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3018 out_operands[0] = operands[1];
3019 out_operands[1] = operands[2];
3020 out_operands[2] = GEN_INT (mode);
3021 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3022 gen_rtx_fmt_ee (code, TFmode, operands[1],
3023 operands[2]));
3026 /* Emit an X_floating library function call for a comparison. */
3028 static rtx
3029 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3031 enum rtx_code cmp_code, res_code;
3032 rtx func, out, operands[2], note;
3034 /* X_floating library comparison functions return
3035 -1 unordered
3036 0 false
3037 1 true
3038 Convert the compare against the raw return value. */
3040 cmp_code = *pcode;
3041 switch (cmp_code)
3043 case UNORDERED:
3044 cmp_code = EQ;
3045 res_code = LT;
3046 break;
3047 case ORDERED:
3048 cmp_code = EQ;
3049 res_code = GE;
3050 break;
3051 case NE:
3052 res_code = NE;
3053 break;
3054 case EQ:
3055 case LT:
3056 case GT:
3057 case LE:
3058 case GE:
3059 res_code = GT;
3060 break;
3061 default:
3062 gcc_unreachable ();
3064 *pcode = res_code;
3066 func = alpha_lookup_xfloating_lib_func (cmp_code);
3068 operands[0] = op0;
3069 operands[1] = op1;
3070 out = gen_reg_rtx (DImode);
3072 /* What's actually returned is -1,0,1, not a proper boolean value,
3073 so use an EXPR_LIST as with a generic libcall instead of a
3074 comparison type expression. */
3075 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3076 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3077 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3078 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3080 return out;
3083 /* Emit an X_floating library function call for a conversion. */
3085 void
3086 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3088 int noperands = 1, mode;
3089 rtx out_operands[2];
3090 rtx func;
3091 enum rtx_code code = orig_code;
3093 if (code == UNSIGNED_FIX)
3094 code = FIX;
3096 func = alpha_lookup_xfloating_lib_func (code);
3098 out_operands[0] = operands[1];
3100 switch (code)
3102 case FIX:
3103 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3104 out_operands[1] = GEN_INT (mode);
3105 noperands = 2;
3106 break;
3107 case FLOAT_TRUNCATE:
3108 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3109 out_operands[1] = GEN_INT (mode);
3110 noperands = 2;
3111 break;
3112 default:
3113 break;
3116 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3117 gen_rtx_fmt_e (orig_code,
3118 GET_MODE (operands[0]),
3119 operands[1]));
3122 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3123 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3124 guarantee that the sequence
3125 set (OP[0] OP[2])
3126 set (OP[1] OP[3])
3127 is valid. Naturally, output operand ordering is little-endian.
3128 This is used by *movtf_internal and *movti_internal. */
3130 void
3131 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3132 bool fixup_overlap)
3134 switch (GET_CODE (operands[1]))
3136 case REG:
3137 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3138 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3139 break;
3141 case MEM:
3142 operands[3] = adjust_address (operands[1], DImode, 8);
3143 operands[2] = adjust_address (operands[1], DImode, 0);
3144 break;
3146 case CONST_INT:
3147 case CONST_DOUBLE:
3148 gcc_assert (operands[1] == CONST0_RTX (mode));
3149 operands[2] = operands[3] = const0_rtx;
3150 break;
3152 default:
3153 gcc_unreachable ();
3156 switch (GET_CODE (operands[0]))
3158 case REG:
3159 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3160 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3161 break;
3163 case MEM:
3164 operands[1] = adjust_address (operands[0], DImode, 8);
3165 operands[0] = adjust_address (operands[0], DImode, 0);
3166 break;
3168 default:
3169 gcc_unreachable ();
3172 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3174 rtx tmp;
3175 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3176 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3180 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3181 op2 is a register containing the sign bit, operation is the
3182 logical operation to be performed. */
3184 void
3185 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3187 rtx high_bit = operands[2];
3188 rtx scratch;
3189 int move;
3191 alpha_split_tmode_pair (operands, TFmode, false);
3193 /* Detect three flavors of operand overlap. */
3194 move = 1;
3195 if (rtx_equal_p (operands[0], operands[2]))
3196 move = 0;
3197 else if (rtx_equal_p (operands[1], operands[2]))
3199 if (rtx_equal_p (operands[0], high_bit))
3200 move = 2;
3201 else
3202 move = -1;
3205 if (move < 0)
3206 emit_move_insn (operands[0], operands[2]);
3208 /* ??? If the destination overlaps both source tf and high_bit, then
3209 assume source tf is dead in its entirety and use the other half
3210 for a scratch register. Otherwise "scratch" is just the proper
3211 destination register. */
3212 scratch = operands[move < 2 ? 1 : 3];
3214 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3216 if (move > 0)
3218 emit_move_insn (operands[0], operands[2]);
3219 if (move > 1)
3220 emit_move_insn (operands[1], scratch);
3224 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3225 unaligned data:
3227 unsigned: signed:
3228 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3229 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3230 lda r3,X(r11) lda r3,X+2(r11)
3231 extwl r1,r3,r1 extql r1,r3,r1
3232 extwh r2,r3,r2 extqh r2,r3,r2
3233 or r1.r2.r1 or r1,r2,r1
3234 sra r1,48,r1
3236 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3237 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3238 lda r3,X(r11) lda r3,X(r11)
3239 extll r1,r3,r1 extll r1,r3,r1
3240 extlh r2,r3,r2 extlh r2,r3,r2
3241 or r1.r2.r1 addl r1,r2,r1
3243 quad: ldq_u r1,X(r11)
3244 ldq_u r2,X+7(r11)
3245 lda r3,X(r11)
3246 extql r1,r3,r1
3247 extqh r2,r3,r2
3248 or r1.r2.r1
3251 void
3252 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3253 HOST_WIDE_INT ofs, int sign)
3255 rtx meml, memh, addr, extl, exth, tmp, mema;
3256 enum machine_mode mode;
3258 if (TARGET_BWX && size == 2)
3260 meml = adjust_address (mem, QImode, ofs);
3261 memh = adjust_address (mem, QImode, ofs+1);
3262 extl = gen_reg_rtx (DImode);
3263 exth = gen_reg_rtx (DImode);
3264 emit_insn (gen_zero_extendqidi2 (extl, meml));
3265 emit_insn (gen_zero_extendqidi2 (exth, memh));
3266 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3267 NULL, 1, OPTAB_LIB_WIDEN);
3268 addr = expand_simple_binop (DImode, IOR, extl, exth,
3269 NULL, 1, OPTAB_LIB_WIDEN);
3271 if (sign && GET_MODE (tgt) != HImode)
3273 addr = gen_lowpart (HImode, addr);
3274 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3276 else
3278 if (GET_MODE (tgt) != DImode)
3279 addr = gen_lowpart (GET_MODE (tgt), addr);
3280 emit_move_insn (tgt, addr);
3282 return;
3285 meml = gen_reg_rtx (DImode);
3286 memh = gen_reg_rtx (DImode);
3287 addr = gen_reg_rtx (DImode);
3288 extl = gen_reg_rtx (DImode);
3289 exth = gen_reg_rtx (DImode);
3291 mema = XEXP (mem, 0);
3292 if (GET_CODE (mema) == LO_SUM)
3293 mema = force_reg (Pmode, mema);
3295 /* AND addresses cannot be in any alias set, since they may implicitly
3296 alias surrounding code. Ideally we'd have some alias set that
3297 covered all types except those with alignment 8 or higher. */
3299 tmp = change_address (mem, DImode,
3300 gen_rtx_AND (DImode,
3301 plus_constant (DImode, mema, ofs),
3302 GEN_INT (-8)));
3303 set_mem_alias_set (tmp, 0);
3304 emit_move_insn (meml, tmp);
3306 tmp = change_address (mem, DImode,
3307 gen_rtx_AND (DImode,
3308 plus_constant (DImode, mema,
3309 ofs + size - 1),
3310 GEN_INT (-8)));
3311 set_mem_alias_set (tmp, 0);
3312 emit_move_insn (memh, tmp);
3314 if (sign && size == 2)
3316 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
3318 emit_insn (gen_extql (extl, meml, addr));
3319 emit_insn (gen_extqh (exth, memh, addr));
3321 /* We must use tgt here for the target. Alpha-vms port fails if we use
3322 addr for the target, because addr is marked as a pointer and combine
3323 knows that pointers are always sign-extended 32-bit values. */
3324 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3325 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3326 addr, 1, OPTAB_WIDEN);
3328 else
3330 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
3331 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3332 switch ((int) size)
3334 case 2:
3335 emit_insn (gen_extwh (exth, memh, addr));
3336 mode = HImode;
3337 break;
3338 case 4:
3339 emit_insn (gen_extlh (exth, memh, addr));
3340 mode = SImode;
3341 break;
3342 case 8:
3343 emit_insn (gen_extqh (exth, memh, addr));
3344 mode = DImode;
3345 break;
3346 default:
3347 gcc_unreachable ();
3350 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3351 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3352 sign, OPTAB_WIDEN);
3355 if (addr != tgt)
3356 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3359 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3361 void
3362 alpha_expand_unaligned_store (rtx dst, rtx src,
3363 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3365 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3367 if (TARGET_BWX && size == 2)
3369 if (src != const0_rtx)
3371 dstl = gen_lowpart (QImode, src);
3372 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3373 NULL, 1, OPTAB_LIB_WIDEN);
3374 dsth = gen_lowpart (QImode, dsth);
3376 else
3377 dstl = dsth = const0_rtx;
3379 meml = adjust_address (dst, QImode, ofs);
3380 memh = adjust_address (dst, QImode, ofs+1);
3382 emit_move_insn (meml, dstl);
3383 emit_move_insn (memh, dsth);
3384 return;
3387 dstl = gen_reg_rtx (DImode);
3388 dsth = gen_reg_rtx (DImode);
3389 insl = gen_reg_rtx (DImode);
3390 insh = gen_reg_rtx (DImode);
3392 dsta = XEXP (dst, 0);
3393 if (GET_CODE (dsta) == LO_SUM)
3394 dsta = force_reg (Pmode, dsta);
3396 /* AND addresses cannot be in any alias set, since they may implicitly
3397 alias surrounding code. Ideally we'd have some alias set that
3398 covered all types except those with alignment 8 or higher. */
3400 meml = change_address (dst, DImode,
3401 gen_rtx_AND (DImode,
3402 plus_constant (DImode, dsta, ofs),
3403 GEN_INT (-8)));
3404 set_mem_alias_set (meml, 0);
3406 memh = change_address (dst, DImode,
3407 gen_rtx_AND (DImode,
3408 plus_constant (DImode, dsta,
3409 ofs + size - 1),
3410 GEN_INT (-8)));
3411 set_mem_alias_set (memh, 0);
3413 emit_move_insn (dsth, memh);
3414 emit_move_insn (dstl, meml);
3416 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
3418 if (src != CONST0_RTX (GET_MODE (src)))
3420 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3421 GEN_INT (size*8), addr));
3423 switch ((int) size)
3425 case 2:
3426 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3427 break;
3428 case 4:
3429 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3430 break;
3431 case 8:
3432 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3433 break;
3434 default:
3435 gcc_unreachable ();
3439 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3441 switch ((int) size)
3443 case 2:
3444 emit_insn (gen_mskwl (dstl, dstl, addr));
3445 break;
3446 case 4:
3447 emit_insn (gen_mskll (dstl, dstl, addr));
3448 break;
3449 case 8:
3450 emit_insn (gen_mskql (dstl, dstl, addr));
3451 break;
3452 default:
3453 gcc_unreachable ();
3456 if (src != CONST0_RTX (GET_MODE (src)))
3458 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3459 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3462 /* Must store high before low for degenerate case of aligned. */
3463 emit_move_insn (memh, dsth);
3464 emit_move_insn (meml, dstl);
3467 /* The block move code tries to maximize speed by separating loads and
3468 stores at the expense of register pressure: we load all of the data
3469 before we store it back out. There are two secondary effects worth
3470 mentioning, that this speeds copying to/from aligned and unaligned
3471 buffers, and that it makes the code significantly easier to write. */
3473 #define MAX_MOVE_WORDS 8
3475 /* Load an integral number of consecutive unaligned quadwords. */
3477 static void
3478 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3479 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3481 rtx const im8 = GEN_INT (-8);
3482 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3483 rtx sreg, areg, tmp, smema;
3484 HOST_WIDE_INT i;
3486 smema = XEXP (smem, 0);
3487 if (GET_CODE (smema) == LO_SUM)
3488 smema = force_reg (Pmode, smema);
3490 /* Generate all the tmp registers we need. */
3491 for (i = 0; i < words; ++i)
3493 data_regs[i] = out_regs[i];
3494 ext_tmps[i] = gen_reg_rtx (DImode);
3496 data_regs[words] = gen_reg_rtx (DImode);
3498 if (ofs != 0)
3499 smem = adjust_address (smem, GET_MODE (smem), ofs);
3501 /* Load up all of the source data. */
3502 for (i = 0; i < words; ++i)
3504 tmp = change_address (smem, DImode,
3505 gen_rtx_AND (DImode,
3506 plus_constant (DImode, smema, 8*i),
3507 im8));
3508 set_mem_alias_set (tmp, 0);
3509 emit_move_insn (data_regs[i], tmp);
3512 tmp = change_address (smem, DImode,
3513 gen_rtx_AND (DImode,
3514 plus_constant (DImode, smema,
3515 8*words - 1),
3516 im8));
3517 set_mem_alias_set (tmp, 0);
3518 emit_move_insn (data_regs[words], tmp);
3520 /* Extract the half-word fragments. Unfortunately DEC decided to make
3521 extxh with offset zero a noop instead of zeroing the register, so
3522 we must take care of that edge condition ourselves with cmov. */
3524 sreg = copy_addr_to_reg (smema);
3525 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3526 1, OPTAB_WIDEN);
3527 for (i = 0; i < words; ++i)
3529 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3530 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3531 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3532 gen_rtx_IF_THEN_ELSE (DImode,
3533 gen_rtx_EQ (DImode, areg,
3534 const0_rtx),
3535 const0_rtx, ext_tmps[i])));
3538 /* Merge the half-words into whole words. */
3539 for (i = 0; i < words; ++i)
3541 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3542 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3546 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3547 may be NULL to store zeros. */
3549 static void
3550 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3551 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3553 rtx const im8 = GEN_INT (-8);
3554 rtx ins_tmps[MAX_MOVE_WORDS];
3555 rtx st_tmp_1, st_tmp_2, dreg;
3556 rtx st_addr_1, st_addr_2, dmema;
3557 HOST_WIDE_INT i;
3559 dmema = XEXP (dmem, 0);
3560 if (GET_CODE (dmema) == LO_SUM)
3561 dmema = force_reg (Pmode, dmema);
3563 /* Generate all the tmp registers we need. */
3564 if (data_regs != NULL)
3565 for (i = 0; i < words; ++i)
3566 ins_tmps[i] = gen_reg_rtx(DImode);
3567 st_tmp_1 = gen_reg_rtx(DImode);
3568 st_tmp_2 = gen_reg_rtx(DImode);
3570 if (ofs != 0)
3571 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3573 st_addr_2 = change_address (dmem, DImode,
3574 gen_rtx_AND (DImode,
3575 plus_constant (DImode, dmema,
3576 words*8 - 1),
3577 im8));
3578 set_mem_alias_set (st_addr_2, 0);
3580 st_addr_1 = change_address (dmem, DImode,
3581 gen_rtx_AND (DImode, dmema, im8));
3582 set_mem_alias_set (st_addr_1, 0);
3584 /* Load up the destination end bits. */
3585 emit_move_insn (st_tmp_2, st_addr_2);
3586 emit_move_insn (st_tmp_1, st_addr_1);
3588 /* Shift the input data into place. */
3589 dreg = copy_addr_to_reg (dmema);
3590 if (data_regs != NULL)
3592 for (i = words-1; i >= 0; --i)
3594 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3595 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3597 for (i = words-1; i > 0; --i)
3599 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3600 ins_tmps[i-1], ins_tmps[i-1], 1,
3601 OPTAB_WIDEN);
3605 /* Split and merge the ends with the destination data. */
3606 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3607 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3609 if (data_regs != NULL)
3611 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3612 st_tmp_2, 1, OPTAB_WIDEN);
3613 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3614 st_tmp_1, 1, OPTAB_WIDEN);
3617 /* Store it all. */
3618 emit_move_insn (st_addr_2, st_tmp_2);
3619 for (i = words-1; i > 0; --i)
3621 rtx tmp = change_address (dmem, DImode,
3622 gen_rtx_AND (DImode,
3623 plus_constant (DImode,
3624 dmema, i*8),
3625 im8));
3626 set_mem_alias_set (tmp, 0);
3627 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3629 emit_move_insn (st_addr_1, st_tmp_1);
3633 /* Expand string/block move operations.
3635 operands[0] is the pointer to the destination.
3636 operands[1] is the pointer to the source.
3637 operands[2] is the number of bytes to move.
3638 operands[3] is the alignment. */
3641 alpha_expand_block_move (rtx operands[])
3643 rtx bytes_rtx = operands[2];
3644 rtx align_rtx = operands[3];
3645 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3646 HOST_WIDE_INT bytes = orig_bytes;
3647 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3648 HOST_WIDE_INT dst_align = src_align;
3649 rtx orig_src = operands[1];
3650 rtx orig_dst = operands[0];
3651 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3652 rtx tmp;
3653 unsigned int i, words, ofs, nregs = 0;
3655 if (orig_bytes <= 0)
3656 return 1;
3657 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3658 return 0;
3660 /* Look for additional alignment information from recorded register info. */
3662 tmp = XEXP (orig_src, 0);
3663 if (REG_P (tmp))
3664 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3665 else if (GET_CODE (tmp) == PLUS
3666 && REG_P (XEXP (tmp, 0))
3667 && CONST_INT_P (XEXP (tmp, 1)))
3669 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3670 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3672 if (a > src_align)
3674 if (a >= 64 && c % 8 == 0)
3675 src_align = 64;
3676 else if (a >= 32 && c % 4 == 0)
3677 src_align = 32;
3678 else if (a >= 16 && c % 2 == 0)
3679 src_align = 16;
3683 tmp = XEXP (orig_dst, 0);
3684 if (REG_P (tmp))
3685 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3686 else if (GET_CODE (tmp) == PLUS
3687 && REG_P (XEXP (tmp, 0))
3688 && CONST_INT_P (XEXP (tmp, 1)))
3690 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3691 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3693 if (a > dst_align)
3695 if (a >= 64 && c % 8 == 0)
3696 dst_align = 64;
3697 else if (a >= 32 && c % 4 == 0)
3698 dst_align = 32;
3699 else if (a >= 16 && c % 2 == 0)
3700 dst_align = 16;
3704 ofs = 0;
3705 if (src_align >= 64 && bytes >= 8)
3707 words = bytes / 8;
3709 for (i = 0; i < words; ++i)
3710 data_regs[nregs + i] = gen_reg_rtx (DImode);
3712 for (i = 0; i < words; ++i)
3713 emit_move_insn (data_regs[nregs + i],
3714 adjust_address (orig_src, DImode, ofs + i * 8));
3716 nregs += words;
3717 bytes -= words * 8;
3718 ofs += words * 8;
3721 if (src_align >= 32 && bytes >= 4)
3723 words = bytes / 4;
3725 for (i = 0; i < words; ++i)
3726 data_regs[nregs + i] = gen_reg_rtx (SImode);
3728 for (i = 0; i < words; ++i)
3729 emit_move_insn (data_regs[nregs + i],
3730 adjust_address (orig_src, SImode, ofs + i * 4));
3732 nregs += words;
3733 bytes -= words * 4;
3734 ofs += words * 4;
3737 if (bytes >= 8)
3739 words = bytes / 8;
3741 for (i = 0; i < words+1; ++i)
3742 data_regs[nregs + i] = gen_reg_rtx (DImode);
3744 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3745 words, ofs);
3747 nregs += words;
3748 bytes -= words * 8;
3749 ofs += words * 8;
3752 if (! TARGET_BWX && bytes >= 4)
3754 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3755 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3756 bytes -= 4;
3757 ofs += 4;
3760 if (bytes >= 2)
3762 if (src_align >= 16)
3764 do {
3765 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3766 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3767 bytes -= 2;
3768 ofs += 2;
3769 } while (bytes >= 2);
3771 else if (! TARGET_BWX)
3773 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3774 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3775 bytes -= 2;
3776 ofs += 2;
3780 while (bytes > 0)
3782 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3783 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3784 bytes -= 1;
3785 ofs += 1;
3788 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3790 /* Now save it back out again. */
3792 i = 0, ofs = 0;
3794 /* Write out the data in whatever chunks reading the source allowed. */
3795 if (dst_align >= 64)
3797 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3799 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3800 data_regs[i]);
3801 ofs += 8;
3802 i++;
3806 if (dst_align >= 32)
3808 /* If the source has remaining DImode regs, write them out in
3809 two pieces. */
3810 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3812 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3813 NULL_RTX, 1, OPTAB_WIDEN);
3815 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3816 gen_lowpart (SImode, data_regs[i]));
3817 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3818 gen_lowpart (SImode, tmp));
3819 ofs += 8;
3820 i++;
3823 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3825 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3826 data_regs[i]);
3827 ofs += 4;
3828 i++;
3832 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3834 /* Write out a remaining block of words using unaligned methods. */
3836 for (words = 1; i + words < nregs; words++)
3837 if (GET_MODE (data_regs[i + words]) != DImode)
3838 break;
3840 if (words == 1)
3841 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3842 else
3843 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3844 words, ofs);
3846 i += words;
3847 ofs += words * 8;
3850 /* Due to the above, this won't be aligned. */
3851 /* ??? If we have more than one of these, consider constructing full
3852 words in registers and using alpha_expand_unaligned_store_words. */
3853 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3855 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3856 ofs += 4;
3857 i++;
3860 if (dst_align >= 16)
3861 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3863 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3864 i++;
3865 ofs += 2;
3867 else
3868 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3870 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3871 i++;
3872 ofs += 2;
3875 /* The remainder must be byte copies. */
3876 while (i < nregs)
3878 gcc_assert (GET_MODE (data_regs[i]) == QImode);
3879 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
3880 i++;
3881 ofs += 1;
3884 return 1;
3888 alpha_expand_block_clear (rtx operands[])
3890 rtx bytes_rtx = operands[1];
3891 rtx align_rtx = operands[3];
3892 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3893 HOST_WIDE_INT bytes = orig_bytes;
3894 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3895 HOST_WIDE_INT alignofs = 0;
3896 rtx orig_dst = operands[0];
3897 rtx tmp;
3898 int i, words, ofs = 0;
3900 if (orig_bytes <= 0)
3901 return 1;
3902 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3903 return 0;
3905 /* Look for stricter alignment. */
3906 tmp = XEXP (orig_dst, 0);
3907 if (REG_P (tmp))
3908 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3909 else if (GET_CODE (tmp) == PLUS
3910 && REG_P (XEXP (tmp, 0))
3911 && CONST_INT_P (XEXP (tmp, 1)))
3913 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3914 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3916 if (a > align)
3918 if (a >= 64)
3919 align = a, alignofs = 8 - c % 8;
3920 else if (a >= 32)
3921 align = a, alignofs = 4 - c % 4;
3922 else if (a >= 16)
3923 align = a, alignofs = 2 - c % 2;
3927 /* Handle an unaligned prefix first. */
3929 if (alignofs > 0)
3931 #if HOST_BITS_PER_WIDE_INT >= 64
3932 /* Given that alignofs is bounded by align, the only time BWX could
3933 generate three stores is for a 7 byte fill. Prefer two individual
3934 stores over a load/mask/store sequence. */
3935 if ((!TARGET_BWX || alignofs == 7)
3936 && align >= 32
3937 && !(alignofs == 4 && bytes >= 4))
3939 enum machine_mode mode = (align >= 64 ? DImode : SImode);
3940 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3941 rtx mem, tmp;
3942 HOST_WIDE_INT mask;
3944 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3945 set_mem_alias_set (mem, 0);
3947 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3948 if (bytes < alignofs)
3950 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3951 ofs += bytes;
3952 bytes = 0;
3954 else
3956 bytes -= alignofs;
3957 ofs += alignofs;
3959 alignofs = 0;
3961 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3962 NULL_RTX, 1, OPTAB_WIDEN);
3964 emit_move_insn (mem, tmp);
3966 #endif
3968 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
3970 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
3971 bytes -= 1;
3972 ofs += 1;
3973 alignofs -= 1;
3975 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
3977 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
3978 bytes -= 2;
3979 ofs += 2;
3980 alignofs -= 2;
3982 if (alignofs == 4 && bytes >= 4)
3984 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3985 bytes -= 4;
3986 ofs += 4;
3987 alignofs = 0;
3990 /* If we've not used the extra lead alignment information by now,
3991 we won't be able to. Downgrade align to match what's left over. */
3992 if (alignofs > 0)
3994 alignofs = alignofs & -alignofs;
3995 align = MIN (align, alignofs * BITS_PER_UNIT);
3999 /* Handle a block of contiguous long-words. */
4001 if (align >= 64 && bytes >= 8)
4003 words = bytes / 8;
4005 for (i = 0; i < words; ++i)
4006 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4007 const0_rtx);
4009 bytes -= words * 8;
4010 ofs += words * 8;
4013 /* If the block is large and appropriately aligned, emit a single
4014 store followed by a sequence of stq_u insns. */
4016 if (align >= 32 && bytes > 16)
4018 rtx orig_dsta;
4020 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4021 bytes -= 4;
4022 ofs += 4;
4024 orig_dsta = XEXP (orig_dst, 0);
4025 if (GET_CODE (orig_dsta) == LO_SUM)
4026 orig_dsta = force_reg (Pmode, orig_dsta);
4028 words = bytes / 8;
4029 for (i = 0; i < words; ++i)
4031 rtx mem
4032 = change_address (orig_dst, DImode,
4033 gen_rtx_AND (DImode,
4034 plus_constant (DImode, orig_dsta,
4035 ofs + i*8),
4036 GEN_INT (-8)));
4037 set_mem_alias_set (mem, 0);
4038 emit_move_insn (mem, const0_rtx);
4041 /* Depending on the alignment, the first stq_u may have overlapped
4042 with the initial stl, which means that the last stq_u didn't
4043 write as much as it would appear. Leave those questionable bytes
4044 unaccounted for. */
4045 bytes -= words * 8 - 4;
4046 ofs += words * 8 - 4;
4049 /* Handle a smaller block of aligned words. */
4051 if ((align >= 64 && bytes == 4)
4052 || (align == 32 && bytes >= 4))
4054 words = bytes / 4;
4056 for (i = 0; i < words; ++i)
4057 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4058 const0_rtx);
4060 bytes -= words * 4;
4061 ofs += words * 4;
4064 /* An unaligned block uses stq_u stores for as many as possible. */
4066 if (bytes >= 8)
4068 words = bytes / 8;
4070 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4072 bytes -= words * 8;
4073 ofs += words * 8;
4076 /* Next clean up any trailing pieces. */
4078 #if HOST_BITS_PER_WIDE_INT >= 64
4079 /* Count the number of bits in BYTES for which aligned stores could
4080 be emitted. */
4081 words = 0;
4082 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4083 if (bytes & i)
4084 words += 1;
4086 /* If we have appropriate alignment (and it wouldn't take too many
4087 instructions otherwise), mask out the bytes we need. */
4088 if (TARGET_BWX ? words > 2 : bytes > 0)
4090 if (align >= 64)
4092 rtx mem, tmp;
4093 HOST_WIDE_INT mask;
4095 mem = adjust_address (orig_dst, DImode, ofs);
4096 set_mem_alias_set (mem, 0);
4098 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4100 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4101 NULL_RTX, 1, OPTAB_WIDEN);
4103 emit_move_insn (mem, tmp);
4104 return 1;
4106 else if (align >= 32 && bytes < 4)
4108 rtx mem, tmp;
4109 HOST_WIDE_INT mask;
4111 mem = adjust_address (orig_dst, SImode, ofs);
4112 set_mem_alias_set (mem, 0);
4114 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4116 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4117 NULL_RTX, 1, OPTAB_WIDEN);
4119 emit_move_insn (mem, tmp);
4120 return 1;
4123 #endif
4125 if (!TARGET_BWX && bytes >= 4)
4127 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4128 bytes -= 4;
4129 ofs += 4;
4132 if (bytes >= 2)
4134 if (align >= 16)
4136 do {
4137 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4138 const0_rtx);
4139 bytes -= 2;
4140 ofs += 2;
4141 } while (bytes >= 2);
4143 else if (! TARGET_BWX)
4145 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4146 bytes -= 2;
4147 ofs += 2;
4151 while (bytes > 0)
4153 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4154 bytes -= 1;
4155 ofs += 1;
4158 return 1;
4161 /* Returns a mask so that zap(x, value) == x & mask. */
4164 alpha_expand_zap_mask (HOST_WIDE_INT value)
4166 rtx result;
4167 int i;
4169 if (HOST_BITS_PER_WIDE_INT >= 64)
4171 HOST_WIDE_INT mask = 0;
4173 for (i = 7; i >= 0; --i)
4175 mask <<= 8;
4176 if (!((value >> i) & 1))
4177 mask |= 0xff;
4180 result = gen_int_mode (mask, DImode);
4182 else
4184 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4186 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4188 for (i = 7; i >= 4; --i)
4190 mask_hi <<= 8;
4191 if (!((value >> i) & 1))
4192 mask_hi |= 0xff;
4195 for (i = 3; i >= 0; --i)
4197 mask_lo <<= 8;
4198 if (!((value >> i) & 1))
4199 mask_lo |= 0xff;
4202 result = immed_double_const (mask_lo, mask_hi, DImode);
4205 return result;
4208 void
4209 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4210 enum machine_mode mode,
4211 rtx op0, rtx op1, rtx op2)
4213 op0 = gen_lowpart (mode, op0);
4215 if (op1 == const0_rtx)
4216 op1 = CONST0_RTX (mode);
4217 else
4218 op1 = gen_lowpart (mode, op1);
4220 if (op2 == const0_rtx)
4221 op2 = CONST0_RTX (mode);
4222 else
4223 op2 = gen_lowpart (mode, op2);
4225 emit_insn ((*gen) (op0, op1, op2));
4228 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4229 COND is true. Mark the jump as unlikely to be taken. */
4231 static void
4232 emit_unlikely_jump (rtx cond, rtx label)
4234 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4235 rtx x;
4237 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4238 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4239 add_reg_note (x, REG_BR_PROB, very_unlikely);
4242 /* A subroutine of the atomic operation splitters. Emit a load-locked
4243 instruction in MODE. */
4245 static void
4246 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4248 rtx (*fn) (rtx, rtx) = NULL;
4249 if (mode == SImode)
4250 fn = gen_load_locked_si;
4251 else if (mode == DImode)
4252 fn = gen_load_locked_di;
4253 emit_insn (fn (reg, mem));
4256 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4257 instruction in MODE. */
4259 static void
4260 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4262 rtx (*fn) (rtx, rtx, rtx) = NULL;
4263 if (mode == SImode)
4264 fn = gen_store_conditional_si;
4265 else if (mode == DImode)
4266 fn = gen_store_conditional_di;
4267 emit_insn (fn (res, mem, val));
4270 /* Subroutines of the atomic operation splitters. Emit barriers
4271 as needed for the memory MODEL. */
4273 static void
4274 alpha_pre_atomic_barrier (enum memmodel model)
4276 if (need_atomic_barrier_p (model, true))
4277 emit_insn (gen_memory_barrier ());
4280 static void
4281 alpha_post_atomic_barrier (enum memmodel model)
4283 if (need_atomic_barrier_p (model, false))
4284 emit_insn (gen_memory_barrier ());
4287 /* A subroutine of the atomic operation splitters. Emit an insxl
4288 instruction in MODE. */
4290 static rtx
4291 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4293 rtx ret = gen_reg_rtx (DImode);
4294 rtx (*fn) (rtx, rtx, rtx);
4296 switch (mode)
4298 case QImode:
4299 fn = gen_insbl;
4300 break;
4301 case HImode:
4302 fn = gen_inswl;
4303 break;
4304 case SImode:
4305 fn = gen_insll;
4306 break;
4307 case DImode:
4308 fn = gen_insql;
4309 break;
4310 default:
4311 gcc_unreachable ();
4314 op1 = force_reg (mode, op1);
4315 emit_insn (fn (ret, op1, op2));
4317 return ret;
4320 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4321 to perform. MEM is the memory on which to operate. VAL is the second
4322 operand of the binary operator. BEFORE and AFTER are optional locations to
4323 return the value of MEM either before of after the operation. SCRATCH is
4324 a scratch register. */
4326 void
4327 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4328 rtx after, rtx scratch, enum memmodel model)
4330 enum machine_mode mode = GET_MODE (mem);
4331 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4333 alpha_pre_atomic_barrier (model);
4335 label = gen_label_rtx ();
4336 emit_label (label);
4337 label = gen_rtx_LABEL_REF (DImode, label);
4339 if (before == NULL)
4340 before = scratch;
4341 emit_load_locked (mode, before, mem);
4343 if (code == NOT)
4345 x = gen_rtx_AND (mode, before, val);
4346 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4348 x = gen_rtx_NOT (mode, val);
4350 else
4351 x = gen_rtx_fmt_ee (code, mode, before, val);
4352 if (after)
4353 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4354 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4356 emit_store_conditional (mode, cond, mem, scratch);
4358 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4359 emit_unlikely_jump (x, label);
4361 alpha_post_atomic_barrier (model);
4364 /* Expand a compare and swap operation. */
4366 void
4367 alpha_split_compare_and_swap (rtx operands[])
4369 rtx cond, retval, mem, oldval, newval;
4370 bool is_weak;
4371 enum memmodel mod_s, mod_f;
4372 enum machine_mode mode;
4373 rtx label1, label2, x;
4375 cond = operands[0];
4376 retval = operands[1];
4377 mem = operands[2];
4378 oldval = operands[3];
4379 newval = operands[4];
4380 is_weak = (operands[5] != const0_rtx);
4381 mod_s = (enum memmodel) INTVAL (operands[6]);
4382 mod_f = (enum memmodel) INTVAL (operands[7]);
4383 mode = GET_MODE (mem);
4385 alpha_pre_atomic_barrier (mod_s);
4387 label1 = NULL_RTX;
4388 if (!is_weak)
4390 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4391 emit_label (XEXP (label1, 0));
4393 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4395 emit_load_locked (mode, retval, mem);
4397 x = gen_lowpart (DImode, retval);
4398 if (oldval == const0_rtx)
4400 emit_move_insn (cond, const0_rtx);
4401 x = gen_rtx_NE (DImode, x, const0_rtx);
4403 else
4405 x = gen_rtx_EQ (DImode, x, oldval);
4406 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4407 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4409 emit_unlikely_jump (x, label2);
4411 emit_move_insn (cond, newval);
4412 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4414 if (!is_weak)
4416 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4417 emit_unlikely_jump (x, label1);
4420 if (mod_f != MEMMODEL_RELAXED)
4421 emit_label (XEXP (label2, 0));
4423 alpha_post_atomic_barrier (mod_s);
4425 if (mod_f == MEMMODEL_RELAXED)
4426 emit_label (XEXP (label2, 0));
4429 void
4430 alpha_expand_compare_and_swap_12 (rtx operands[])
4432 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4433 enum machine_mode mode;
4434 rtx addr, align, wdst;
4435 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4437 cond = operands[0];
4438 dst = operands[1];
4439 mem = operands[2];
4440 oldval = operands[3];
4441 newval = operands[4];
4442 is_weak = operands[5];
4443 mod_s = operands[6];
4444 mod_f = operands[7];
4445 mode = GET_MODE (mem);
4447 /* We forced the address into a register via mem_noofs_operand. */
4448 addr = XEXP (mem, 0);
4449 gcc_assert (register_operand (addr, DImode));
4451 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4452 NULL_RTX, 1, OPTAB_DIRECT);
4454 oldval = convert_modes (DImode, mode, oldval, 1);
4456 if (newval != const0_rtx)
4457 newval = emit_insxl (mode, newval, addr);
4459 wdst = gen_reg_rtx (DImode);
4460 if (mode == QImode)
4461 gen = gen_atomic_compare_and_swapqi_1;
4462 else
4463 gen = gen_atomic_compare_and_swaphi_1;
4464 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4465 is_weak, mod_s, mod_f));
4467 emit_move_insn (dst, gen_lowpart (mode, wdst));
4470 void
4471 alpha_split_compare_and_swap_12 (rtx operands[])
4473 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4474 enum machine_mode mode;
4475 bool is_weak;
4476 enum memmodel mod_s, mod_f;
4477 rtx label1, label2, mem, addr, width, mask, x;
4479 cond = operands[0];
4480 dest = operands[1];
4481 orig_mem = operands[2];
4482 oldval = operands[3];
4483 newval = operands[4];
4484 align = operands[5];
4485 is_weak = (operands[6] != const0_rtx);
4486 mod_s = (enum memmodel) INTVAL (operands[7]);
4487 mod_f = (enum memmodel) INTVAL (operands[8]);
4488 scratch = operands[9];
4489 mode = GET_MODE (orig_mem);
4490 addr = XEXP (orig_mem, 0);
4492 mem = gen_rtx_MEM (DImode, align);
4493 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4494 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4495 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4497 alpha_pre_atomic_barrier (mod_s);
4499 label1 = NULL_RTX;
4500 if (!is_weak)
4502 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4503 emit_label (XEXP (label1, 0));
4505 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4507 emit_load_locked (DImode, scratch, mem);
4509 width = GEN_INT (GET_MODE_BITSIZE (mode));
4510 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4511 emit_insn (gen_extxl (dest, scratch, width, addr));
4513 if (oldval == const0_rtx)
4515 emit_move_insn (cond, const0_rtx);
4516 x = gen_rtx_NE (DImode, dest, const0_rtx);
4518 else
4520 x = gen_rtx_EQ (DImode, dest, oldval);
4521 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4522 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4524 emit_unlikely_jump (x, label2);
4526 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4528 if (newval != const0_rtx)
4529 emit_insn (gen_iordi3 (cond, cond, newval));
4531 emit_store_conditional (DImode, cond, mem, cond);
4533 if (!is_weak)
4535 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4536 emit_unlikely_jump (x, label1);
4539 if (mod_f != MEMMODEL_RELAXED)
4540 emit_label (XEXP (label2, 0));
4542 alpha_post_atomic_barrier (mod_s);
4544 if (mod_f == MEMMODEL_RELAXED)
4545 emit_label (XEXP (label2, 0));
4548 /* Expand an atomic exchange operation. */
4550 void
4551 alpha_split_atomic_exchange (rtx operands[])
4553 rtx retval, mem, val, scratch;
4554 enum memmodel model;
4555 enum machine_mode mode;
4556 rtx label, x, cond;
4558 retval = operands[0];
4559 mem = operands[1];
4560 val = operands[2];
4561 model = (enum memmodel) INTVAL (operands[3]);
4562 scratch = operands[4];
4563 mode = GET_MODE (mem);
4564 cond = gen_lowpart (DImode, scratch);
4566 alpha_pre_atomic_barrier (model);
4568 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4569 emit_label (XEXP (label, 0));
4571 emit_load_locked (mode, retval, mem);
4572 emit_move_insn (scratch, val);
4573 emit_store_conditional (mode, cond, mem, scratch);
4575 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4576 emit_unlikely_jump (x, label);
4578 alpha_post_atomic_barrier (model);
4581 void
4582 alpha_expand_atomic_exchange_12 (rtx operands[])
4584 rtx dst, mem, val, model;
4585 enum machine_mode mode;
4586 rtx addr, align, wdst;
4587 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4589 dst = operands[0];
4590 mem = operands[1];
4591 val = operands[2];
4592 model = operands[3];
4593 mode = GET_MODE (mem);
4595 /* We forced the address into a register via mem_noofs_operand. */
4596 addr = XEXP (mem, 0);
4597 gcc_assert (register_operand (addr, DImode));
4599 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4600 NULL_RTX, 1, OPTAB_DIRECT);
4602 /* Insert val into the correct byte location within the word. */
4603 if (val != const0_rtx)
4604 val = emit_insxl (mode, val, addr);
4606 wdst = gen_reg_rtx (DImode);
4607 if (mode == QImode)
4608 gen = gen_atomic_exchangeqi_1;
4609 else
4610 gen = gen_atomic_exchangehi_1;
4611 emit_insn (gen (wdst, mem, val, align, model));
4613 emit_move_insn (dst, gen_lowpart (mode, wdst));
4616 void
4617 alpha_split_atomic_exchange_12 (rtx operands[])
4619 rtx dest, orig_mem, addr, val, align, scratch;
4620 rtx label, mem, width, mask, x;
4621 enum machine_mode mode;
4622 enum memmodel model;
4624 dest = operands[0];
4625 orig_mem = operands[1];
4626 val = operands[2];
4627 align = operands[3];
4628 model = (enum memmodel) INTVAL (operands[4]);
4629 scratch = operands[5];
4630 mode = GET_MODE (orig_mem);
4631 addr = XEXP (orig_mem, 0);
4633 mem = gen_rtx_MEM (DImode, align);
4634 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4635 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4636 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4638 alpha_pre_atomic_barrier (model);
4640 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4641 emit_label (XEXP (label, 0));
4643 emit_load_locked (DImode, scratch, mem);
4645 width = GEN_INT (GET_MODE_BITSIZE (mode));
4646 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4647 emit_insn (gen_extxl (dest, scratch, width, addr));
4648 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4649 if (val != const0_rtx)
4650 emit_insn (gen_iordi3 (scratch, scratch, val));
4652 emit_store_conditional (DImode, scratch, mem, scratch);
4654 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4655 emit_unlikely_jump (x, label);
4657 alpha_post_atomic_barrier (model);
4660 /* Adjust the cost of a scheduling dependency. Return the new cost of
4661 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4663 static int
4664 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4666 enum attr_type dep_insn_type;
4668 /* If the dependence is an anti-dependence, there is no cost. For an
4669 output dependence, there is sometimes a cost, but it doesn't seem
4670 worth handling those few cases. */
4671 if (REG_NOTE_KIND (link) != 0)
4672 return cost;
4674 /* If we can't recognize the insns, we can't really do anything. */
4675 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4676 return cost;
4678 dep_insn_type = get_attr_type (dep_insn);
4680 /* Bring in the user-defined memory latency. */
4681 if (dep_insn_type == TYPE_ILD
4682 || dep_insn_type == TYPE_FLD
4683 || dep_insn_type == TYPE_LDSYM)
4684 cost += alpha_memory_latency-1;
4686 /* Everything else handled in DFA bypasses now. */
4688 return cost;
4691 /* The number of instructions that can be issued per cycle. */
4693 static int
4694 alpha_issue_rate (void)
4696 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4699 /* How many alternative schedules to try. This should be as wide as the
4700 scheduling freedom in the DFA, but no wider. Making this value too
4701 large results extra work for the scheduler.
4703 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4704 alternative schedules. For EV5, we can choose between E0/E1 and
4705 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4707 static int
4708 alpha_multipass_dfa_lookahead (void)
4710 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4713 /* Machine-specific function data. */
4715 struct GTY(()) alpha_links;
4717 struct GTY(()) machine_function
4719 /* For OSF. */
4720 const char *some_ld_name;
4722 /* For flag_reorder_blocks_and_partition. */
4723 rtx gp_save_rtx;
4725 /* For VMS condition handlers. */
4726 bool uses_condition_handler;
4728 /* Linkage entries. */
4729 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
4730 links;
4733 /* How to allocate a 'struct machine_function'. */
4735 static struct machine_function *
4736 alpha_init_machine_status (void)
4738 return ggc_alloc_cleared_machine_function ();
4741 /* Support for frame based VMS condition handlers. */
4743 /* A VMS condition handler may be established for a function with a call to
4744 __builtin_establish_vms_condition_handler, and cancelled with a call to
4745 __builtin_revert_vms_condition_handler.
4747 The VMS Condition Handling Facility knows about the existence of a handler
4748 from the procedure descriptor .handler field. As the VMS native compilers,
4749 we store the user specified handler's address at a fixed location in the
4750 stack frame and point the procedure descriptor at a common wrapper which
4751 fetches the real handler's address and issues an indirect call.
4753 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4755 We force the procedure kind to PT_STACK, and the fixed frame location is
4756 fp+8, just before the register save area. We use the handler_data field in
4757 the procedure descriptor to state the fp offset at which the installed
4758 handler address can be found. */
4760 #define VMS_COND_HANDLER_FP_OFFSET 8
4762 /* Expand code to store the currently installed user VMS condition handler
4763 into TARGET and install HANDLER as the new condition handler. */
4765 void
4766 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4768 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4769 VMS_COND_HANDLER_FP_OFFSET);
4771 rtx handler_slot
4772 = gen_rtx_MEM (DImode, handler_slot_address);
4774 emit_move_insn (target, handler_slot);
4775 emit_move_insn (handler_slot, handler);
4777 /* Notify the start/prologue/epilogue emitters that the condition handler
4778 slot is needed. In addition to reserving the slot space, this will force
4779 the procedure kind to PT_STACK so ensure that the hard_frame_pointer_rtx
4780 use above is correct. */
4781 cfun->machine->uses_condition_handler = true;
4784 /* Expand code to store the current VMS condition handler into TARGET and
4785 nullify it. */
4787 void
4788 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4790 /* We implement this by establishing a null condition handler, with the tiny
4791 side effect of setting uses_condition_handler. This is a little bit
4792 pessimistic if no actual builtin_establish call is ever issued, which is
4793 not a real problem and expected never to happen anyway. */
4795 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4798 /* Functions to save and restore alpha_return_addr_rtx. */
4800 /* Start the ball rolling with RETURN_ADDR_RTX. */
4803 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4805 if (count != 0)
4806 return const0_rtx;
4808 return get_hard_reg_initial_val (Pmode, REG_RA);
4811 /* Return or create a memory slot containing the gp value for the current
4812 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4815 alpha_gp_save_rtx (void)
4817 rtx seq, m = cfun->machine->gp_save_rtx;
4819 if (m == NULL)
4821 start_sequence ();
4823 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4824 m = validize_mem (m);
4825 emit_move_insn (m, pic_offset_table_rtx);
4827 seq = get_insns ();
4828 end_sequence ();
4830 /* We used to simply emit the sequence after entry_of_function.
4831 However this breaks the CFG if the first instruction in the
4832 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4833 label. Emit the sequence properly on the edge. We are only
4834 invoked from dw2_build_landing_pads and finish_eh_generation
4835 will call commit_edge_insertions thanks to a kludge. */
4836 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4838 cfun->machine->gp_save_rtx = m;
4841 return m;
4844 static void
4845 alpha_instantiate_decls (void)
4847 if (cfun->machine->gp_save_rtx != NULL_RTX)
4848 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
4851 static int
4852 alpha_ra_ever_killed (void)
4854 rtx top;
4856 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4857 return (int)df_regs_ever_live_p (REG_RA);
4859 push_topmost_sequence ();
4860 top = get_insns ();
4861 pop_topmost_sequence ();
4863 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4867 /* Return the trap mode suffix applicable to the current
4868 instruction, or NULL. */
4870 static const char *
4871 get_trap_mode_suffix (void)
4873 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4875 switch (s)
4877 case TRAP_SUFFIX_NONE:
4878 return NULL;
4880 case TRAP_SUFFIX_SU:
4881 if (alpha_fptm >= ALPHA_FPTM_SU)
4882 return "su";
4883 return NULL;
4885 case TRAP_SUFFIX_SUI:
4886 if (alpha_fptm >= ALPHA_FPTM_SUI)
4887 return "sui";
4888 return NULL;
4890 case TRAP_SUFFIX_V_SV:
4891 switch (alpha_fptm)
4893 case ALPHA_FPTM_N:
4894 return NULL;
4895 case ALPHA_FPTM_U:
4896 return "v";
4897 case ALPHA_FPTM_SU:
4898 case ALPHA_FPTM_SUI:
4899 return "sv";
4900 default:
4901 gcc_unreachable ();
4904 case TRAP_SUFFIX_V_SV_SVI:
4905 switch (alpha_fptm)
4907 case ALPHA_FPTM_N:
4908 return NULL;
4909 case ALPHA_FPTM_U:
4910 return "v";
4911 case ALPHA_FPTM_SU:
4912 return "sv";
4913 case ALPHA_FPTM_SUI:
4914 return "svi";
4915 default:
4916 gcc_unreachable ();
4918 break;
4920 case TRAP_SUFFIX_U_SU_SUI:
4921 switch (alpha_fptm)
4923 case ALPHA_FPTM_N:
4924 return NULL;
4925 case ALPHA_FPTM_U:
4926 return "u";
4927 case ALPHA_FPTM_SU:
4928 return "su";
4929 case ALPHA_FPTM_SUI:
4930 return "sui";
4931 default:
4932 gcc_unreachable ();
4934 break;
4936 default:
4937 gcc_unreachable ();
4939 gcc_unreachable ();
4942 /* Return the rounding mode suffix applicable to the current
4943 instruction, or NULL. */
4945 static const char *
4946 get_round_mode_suffix (void)
4948 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4950 switch (s)
4952 case ROUND_SUFFIX_NONE:
4953 return NULL;
4954 case ROUND_SUFFIX_NORMAL:
4955 switch (alpha_fprm)
4957 case ALPHA_FPRM_NORM:
4958 return NULL;
4959 case ALPHA_FPRM_MINF:
4960 return "m";
4961 case ALPHA_FPRM_CHOP:
4962 return "c";
4963 case ALPHA_FPRM_DYN:
4964 return "d";
4965 default:
4966 gcc_unreachable ();
4968 break;
4970 case ROUND_SUFFIX_C:
4971 return "c";
4973 default:
4974 gcc_unreachable ();
4976 gcc_unreachable ();
4979 /* Locate some local-dynamic symbol still in use by this function
4980 so that we can print its name in some movdi_er_tlsldm pattern. */
4982 static int
4983 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4985 rtx x = *px;
4987 if (GET_CODE (x) == SYMBOL_REF
4988 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4990 cfun->machine->some_ld_name = XSTR (x, 0);
4991 return 1;
4994 return 0;
4997 static const char *
4998 get_some_local_dynamic_name (void)
5000 rtx insn;
5002 if (cfun->machine->some_ld_name)
5003 return cfun->machine->some_ld_name;
5005 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5006 if (INSN_P (insn)
5007 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5008 return cfun->machine->some_ld_name;
5010 gcc_unreachable ();
5013 /* Print an operand. Recognize special options, documented below. */
5015 void
5016 print_operand (FILE *file, rtx x, int code)
5018 int i;
5020 switch (code)
5022 case '~':
5023 /* Print the assembler name of the current function. */
5024 assemble_name (file, alpha_fnname);
5025 break;
5027 case '&':
5028 assemble_name (file, get_some_local_dynamic_name ());
5029 break;
5031 case '/':
5033 const char *trap = get_trap_mode_suffix ();
5034 const char *round = get_round_mode_suffix ();
5036 if (trap || round)
5037 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
5038 break;
5041 case ',':
5042 /* Generates single precision instruction suffix. */
5043 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5044 break;
5046 case '-':
5047 /* Generates double precision instruction suffix. */
5048 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5049 break;
5051 case '#':
5052 if (alpha_this_literal_sequence_number == 0)
5053 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5054 fprintf (file, "%d", alpha_this_literal_sequence_number);
5055 break;
5057 case '*':
5058 if (alpha_this_gpdisp_sequence_number == 0)
5059 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5060 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5061 break;
5063 case 'H':
5064 if (GET_CODE (x) == HIGH)
5065 output_addr_const (file, XEXP (x, 0));
5066 else
5067 output_operand_lossage ("invalid %%H value");
5068 break;
5070 case 'J':
5072 const char *lituse;
5074 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5076 x = XVECEXP (x, 0, 0);
5077 lituse = "lituse_tlsgd";
5079 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5081 x = XVECEXP (x, 0, 0);
5082 lituse = "lituse_tlsldm";
5084 else if (CONST_INT_P (x))
5085 lituse = "lituse_jsr";
5086 else
5088 output_operand_lossage ("invalid %%J value");
5089 break;
5092 if (x != const0_rtx)
5093 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5095 break;
5097 case 'j':
5099 const char *lituse;
5101 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5102 lituse = "lituse_jsrdirect";
5103 #else
5104 lituse = "lituse_jsr";
5105 #endif
5107 gcc_assert (INTVAL (x) != 0);
5108 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5110 break;
5111 case 'r':
5112 /* If this operand is the constant zero, write it as "$31". */
5113 if (REG_P (x))
5114 fprintf (file, "%s", reg_names[REGNO (x)]);
5115 else if (x == CONST0_RTX (GET_MODE (x)))
5116 fprintf (file, "$31");
5117 else
5118 output_operand_lossage ("invalid %%r value");
5119 break;
5121 case 'R':
5122 /* Similar, but for floating-point. */
5123 if (REG_P (x))
5124 fprintf (file, "%s", reg_names[REGNO (x)]);
5125 else if (x == CONST0_RTX (GET_MODE (x)))
5126 fprintf (file, "$f31");
5127 else
5128 output_operand_lossage ("invalid %%R value");
5129 break;
5131 case 'N':
5132 /* Write the 1's complement of a constant. */
5133 if (!CONST_INT_P (x))
5134 output_operand_lossage ("invalid %%N value");
5136 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5137 break;
5139 case 'P':
5140 /* Write 1 << C, for a constant C. */
5141 if (!CONST_INT_P (x))
5142 output_operand_lossage ("invalid %%P value");
5144 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5145 break;
5147 case 'h':
5148 /* Write the high-order 16 bits of a constant, sign-extended. */
5149 if (!CONST_INT_P (x))
5150 output_operand_lossage ("invalid %%h value");
5152 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5153 break;
5155 case 'L':
5156 /* Write the low-order 16 bits of a constant, sign-extended. */
5157 if (!CONST_INT_P (x))
5158 output_operand_lossage ("invalid %%L value");
5160 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5161 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5162 break;
5164 case 'm':
5165 /* Write mask for ZAP insn. */
5166 if (GET_CODE (x) == CONST_DOUBLE)
5168 HOST_WIDE_INT mask = 0;
5169 HOST_WIDE_INT value;
5171 value = CONST_DOUBLE_LOW (x);
5172 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5173 i++, value >>= 8)
5174 if (value & 0xff)
5175 mask |= (1 << i);
5177 value = CONST_DOUBLE_HIGH (x);
5178 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5179 i++, value >>= 8)
5180 if (value & 0xff)
5181 mask |= (1 << (i + sizeof (int)));
5183 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5186 else if (CONST_INT_P (x))
5188 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5190 for (i = 0; i < 8; i++, value >>= 8)
5191 if (value & 0xff)
5192 mask |= (1 << i);
5194 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5196 else
5197 output_operand_lossage ("invalid %%m value");
5198 break;
5200 case 'M':
5201 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5202 if (!CONST_INT_P (x)
5203 || (INTVAL (x) != 8 && INTVAL (x) != 16
5204 && INTVAL (x) != 32 && INTVAL (x) != 64))
5205 output_operand_lossage ("invalid %%M value");
5207 fprintf (file, "%s",
5208 (INTVAL (x) == 8 ? "b"
5209 : INTVAL (x) == 16 ? "w"
5210 : INTVAL (x) == 32 ? "l"
5211 : "q"));
5212 break;
5214 case 'U':
5215 /* Similar, except do it from the mask. */
5216 if (CONST_INT_P (x))
5218 HOST_WIDE_INT value = INTVAL (x);
5220 if (value == 0xff)
5222 fputc ('b', file);
5223 break;
5225 if (value == 0xffff)
5227 fputc ('w', file);
5228 break;
5230 if (value == 0xffffffff)
5232 fputc ('l', file);
5233 break;
5235 if (value == -1)
5237 fputc ('q', file);
5238 break;
5241 else if (HOST_BITS_PER_WIDE_INT == 32
5242 && GET_CODE (x) == CONST_DOUBLE
5243 && CONST_DOUBLE_LOW (x) == 0xffffffff
5244 && CONST_DOUBLE_HIGH (x) == 0)
5246 fputc ('l', file);
5247 break;
5249 output_operand_lossage ("invalid %%U value");
5250 break;
5252 case 's':
5253 /* Write the constant value divided by 8. */
5254 if (!CONST_INT_P (x)
5255 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5256 || (INTVAL (x) & 7) != 0)
5257 output_operand_lossage ("invalid %%s value");
5259 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5260 break;
5262 case 'S':
5263 /* Same, except compute (64 - c) / 8 */
5265 if (!CONST_INT_P (x)
5266 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5267 && (INTVAL (x) & 7) != 8)
5268 output_operand_lossage ("invalid %%s value");
5270 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5271 break;
5273 case 'C': case 'D': case 'c': case 'd':
5274 /* Write out comparison name. */
5276 enum rtx_code c = GET_CODE (x);
5278 if (!COMPARISON_P (x))
5279 output_operand_lossage ("invalid %%C value");
5281 else if (code == 'D')
5282 c = reverse_condition (c);
5283 else if (code == 'c')
5284 c = swap_condition (c);
5285 else if (code == 'd')
5286 c = swap_condition (reverse_condition (c));
5288 if (c == LEU)
5289 fprintf (file, "ule");
5290 else if (c == LTU)
5291 fprintf (file, "ult");
5292 else if (c == UNORDERED)
5293 fprintf (file, "un");
5294 else
5295 fprintf (file, "%s", GET_RTX_NAME (c));
5297 break;
5299 case 'E':
5300 /* Write the divide or modulus operator. */
5301 switch (GET_CODE (x))
5303 case DIV:
5304 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5305 break;
5306 case UDIV:
5307 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5308 break;
5309 case MOD:
5310 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5311 break;
5312 case UMOD:
5313 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5314 break;
5315 default:
5316 output_operand_lossage ("invalid %%E value");
5317 break;
5319 break;
5321 case 'A':
5322 /* Write "_u" for unaligned access. */
5323 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5324 fprintf (file, "_u");
5325 break;
5327 case 0:
5328 if (REG_P (x))
5329 fprintf (file, "%s", reg_names[REGNO (x)]);
5330 else if (MEM_P (x))
5331 output_address (XEXP (x, 0));
5332 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5334 switch (XINT (XEXP (x, 0), 1))
5336 case UNSPEC_DTPREL:
5337 case UNSPEC_TPREL:
5338 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5339 break;
5340 default:
5341 output_operand_lossage ("unknown relocation unspec");
5342 break;
5345 else
5346 output_addr_const (file, x);
5347 break;
5349 default:
5350 output_operand_lossage ("invalid %%xn code");
5354 void
5355 print_operand_address (FILE *file, rtx addr)
5357 int basereg = 31;
5358 HOST_WIDE_INT offset = 0;
5360 if (GET_CODE (addr) == AND)
5361 addr = XEXP (addr, 0);
5363 if (GET_CODE (addr) == PLUS
5364 && CONST_INT_P (XEXP (addr, 1)))
5366 offset = INTVAL (XEXP (addr, 1));
5367 addr = XEXP (addr, 0);
5370 if (GET_CODE (addr) == LO_SUM)
5372 const char *reloc16, *reloclo;
5373 rtx op1 = XEXP (addr, 1);
5375 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5377 op1 = XEXP (op1, 0);
5378 switch (XINT (op1, 1))
5380 case UNSPEC_DTPREL:
5381 reloc16 = NULL;
5382 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5383 break;
5384 case UNSPEC_TPREL:
5385 reloc16 = NULL;
5386 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5387 break;
5388 default:
5389 output_operand_lossage ("unknown relocation unspec");
5390 return;
5393 output_addr_const (file, XVECEXP (op1, 0, 0));
5395 else
5397 reloc16 = "gprel";
5398 reloclo = "gprellow";
5399 output_addr_const (file, op1);
5402 if (offset)
5403 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5405 addr = XEXP (addr, 0);
5406 switch (GET_CODE (addr))
5408 case REG:
5409 basereg = REGNO (addr);
5410 break;
5412 case SUBREG:
5413 basereg = subreg_regno (addr);
5414 break;
5416 default:
5417 gcc_unreachable ();
5420 fprintf (file, "($%d)\t\t!%s", basereg,
5421 (basereg == 29 ? reloc16 : reloclo));
5422 return;
5425 switch (GET_CODE (addr))
5427 case REG:
5428 basereg = REGNO (addr);
5429 break;
5431 case SUBREG:
5432 basereg = subreg_regno (addr);
5433 break;
5435 case CONST_INT:
5436 offset = INTVAL (addr);
5437 break;
5439 #if TARGET_ABI_OPEN_VMS
5440 case SYMBOL_REF:
5441 fprintf (file, "%s", XSTR (addr, 0));
5442 return;
5444 case CONST:
5445 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5446 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5447 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5448 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5449 INTVAL (XEXP (XEXP (addr, 0), 1)));
5450 return;
5452 #endif
5453 default:
5454 gcc_unreachable ();
5457 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5460 /* Emit RTL insns to initialize the variable parts of a trampoline at
5461 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5462 for the static chain value for the function. */
5464 static void
5465 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5467 rtx fnaddr, mem, word1, word2;
5469 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5471 #ifdef POINTERS_EXTEND_UNSIGNED
5472 fnaddr = convert_memory_address (Pmode, fnaddr);
5473 chain_value = convert_memory_address (Pmode, chain_value);
5474 #endif
5476 if (TARGET_ABI_OPEN_VMS)
5478 const char *fnname;
5479 char *trname;
5481 /* Construct the name of the trampoline entry point. */
5482 fnname = XSTR (fnaddr, 0);
5483 trname = (char *) alloca (strlen (fnname) + 5);
5484 strcpy (trname, fnname);
5485 strcat (trname, "..tr");
5486 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5487 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5489 /* Trampoline (or "bounded") procedure descriptor is constructed from
5490 the function's procedure descriptor with certain fields zeroed IAW
5491 the VMS calling standard. This is stored in the first quadword. */
5492 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5493 word1 = expand_and (DImode, word1,
5494 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5495 NULL);
5497 else
5499 /* These 4 instructions are:
5500 ldq $1,24($27)
5501 ldq $27,16($27)
5502 jmp $31,($27),0
5504 We don't bother setting the HINT field of the jump; the nop
5505 is merely there for padding. */
5506 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5507 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
5510 /* Store the first two words, as computed above. */
5511 mem = adjust_address (m_tramp, DImode, 0);
5512 emit_move_insn (mem, word1);
5513 mem = adjust_address (m_tramp, DImode, 8);
5514 emit_move_insn (mem, word2);
5516 /* Store function address and static chain value. */
5517 mem = adjust_address (m_tramp, Pmode, 16);
5518 emit_move_insn (mem, fnaddr);
5519 mem = adjust_address (m_tramp, Pmode, 24);
5520 emit_move_insn (mem, chain_value);
5522 if (TARGET_ABI_OSF)
5524 emit_insn (gen_imb ());
5525 #ifdef HAVE_ENABLE_EXECUTE_STACK
5526 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5527 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5528 #endif
5532 /* Determine where to put an argument to a function.
5533 Value is zero to push the argument on the stack,
5534 or a hard register in which to store the argument.
5536 MODE is the argument's machine mode.
5537 TYPE is the data type of the argument (as a tree).
5538 This is null for libcalls where that information may
5539 not be available.
5540 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5541 the preceding args and about the function being called.
5542 NAMED is nonzero if this argument is a named parameter
5543 (otherwise it is an extra parameter matching an ellipsis).
5545 On Alpha the first 6 words of args are normally in registers
5546 and the rest are pushed. */
5548 static rtx
5549 alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
5550 const_tree type, bool named ATTRIBUTE_UNUSED)
5552 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5553 int basereg;
5554 int num_args;
5556 /* Don't get confused and pass small structures in FP registers. */
5557 if (type && AGGREGATE_TYPE_P (type))
5558 basereg = 16;
5559 else
5561 #ifdef ENABLE_CHECKING
5562 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5563 values here. */
5564 gcc_assert (!COMPLEX_MODE_P (mode));
5565 #endif
5567 /* Set up defaults for FP operands passed in FP registers, and
5568 integral operands passed in integer registers. */
5569 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5570 basereg = 32 + 16;
5571 else
5572 basereg = 16;
5575 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5576 the two platforms, so we can't avoid conditional compilation. */
5577 #if TARGET_ABI_OPEN_VMS
5579 if (mode == VOIDmode)
5580 return alpha_arg_info_reg_val (*cum);
5582 num_args = cum->num_args;
5583 if (num_args >= 6
5584 || targetm.calls.must_pass_in_stack (mode, type))
5585 return NULL_RTX;
5587 #elif TARGET_ABI_OSF
5589 if (*cum >= 6)
5590 return NULL_RTX;
5591 num_args = *cum;
5593 /* VOID is passed as a special flag for "last argument". */
5594 if (type == void_type_node)
5595 basereg = 16;
5596 else if (targetm.calls.must_pass_in_stack (mode, type))
5597 return NULL_RTX;
5599 #else
5600 #error Unhandled ABI
5601 #endif
5603 return gen_rtx_REG (mode, num_args + basereg);
5606 /* Update the data in CUM to advance over an argument
5607 of mode MODE and data type TYPE.
5608 (TYPE is null for libcalls where that information may not be available.) */
5610 static void
5611 alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
5612 const_tree type, bool named ATTRIBUTE_UNUSED)
5614 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5615 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5616 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5618 #if TARGET_ABI_OSF
5619 *cum += increment;
5620 #else
5621 if (!onstack && cum->num_args < 6)
5622 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5623 cum->num_args += increment;
5624 #endif
5627 static int
5628 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5629 enum machine_mode mode ATTRIBUTE_UNUSED,
5630 tree type ATTRIBUTE_UNUSED,
5631 bool named ATTRIBUTE_UNUSED)
5633 int words = 0;
5634 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5636 #if TARGET_ABI_OPEN_VMS
5637 if (cum->num_args < 6
5638 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5639 words = 6 - cum->num_args;
5640 #elif TARGET_ABI_OSF
5641 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5642 words = 6 - *cum;
5643 #else
5644 #error Unhandled ABI
5645 #endif
5647 return words * UNITS_PER_WORD;
5651 /* Return true if TYPE must be returned in memory, instead of in registers. */
5653 static bool
5654 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5656 enum machine_mode mode = VOIDmode;
5657 int size;
5659 if (type)
5661 mode = TYPE_MODE (type);
5663 /* All aggregates are returned in memory, except on OpenVMS where
5664 records that fit 64 bits should be returned by immediate value
5665 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5666 if (TARGET_ABI_OPEN_VMS
5667 && TREE_CODE (type) != ARRAY_TYPE
5668 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5669 return false;
5671 if (AGGREGATE_TYPE_P (type))
5672 return true;
5675 size = GET_MODE_SIZE (mode);
5676 switch (GET_MODE_CLASS (mode))
5678 case MODE_VECTOR_FLOAT:
5679 /* Pass all float vectors in memory, like an aggregate. */
5680 return true;
5682 case MODE_COMPLEX_FLOAT:
5683 /* We judge complex floats on the size of their element,
5684 not the size of the whole type. */
5685 size = GET_MODE_UNIT_SIZE (mode);
5686 break;
5688 case MODE_INT:
5689 case MODE_FLOAT:
5690 case MODE_COMPLEX_INT:
5691 case MODE_VECTOR_INT:
5692 break;
5694 default:
5695 /* ??? We get called on all sorts of random stuff from
5696 aggregate_value_p. We must return something, but it's not
5697 clear what's safe to return. Pretend it's a struct I
5698 guess. */
5699 return true;
5702 /* Otherwise types must fit in one register. */
5703 return size > UNITS_PER_WORD;
5706 /* Return true if TYPE should be passed by invisible reference. */
5708 static bool
5709 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5710 enum machine_mode mode,
5711 const_tree type ATTRIBUTE_UNUSED,
5712 bool named ATTRIBUTE_UNUSED)
5714 return mode == TFmode || mode == TCmode;
5717 /* Define how to find the value returned by a function. VALTYPE is the
5718 data type of the value (as a tree). If the precise function being
5719 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5720 MODE is set instead of VALTYPE for libcalls.
5722 On Alpha the value is found in $0 for integer functions and
5723 $f0 for floating-point functions. */
5726 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5727 enum machine_mode mode)
5729 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5730 enum mode_class mclass;
5732 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5734 if (valtype)
5735 mode = TYPE_MODE (valtype);
5737 mclass = GET_MODE_CLASS (mode);
5738 switch (mclass)
5740 case MODE_INT:
5741 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5742 where we have them returning both SImode and DImode. */
5743 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5744 PROMOTE_MODE (mode, dummy, valtype);
5745 /* FALLTHRU */
5747 case MODE_COMPLEX_INT:
5748 case MODE_VECTOR_INT:
5749 regnum = 0;
5750 break;
5752 case MODE_FLOAT:
5753 regnum = 32;
5754 break;
5756 case MODE_COMPLEX_FLOAT:
5758 enum machine_mode cmode = GET_MODE_INNER (mode);
5760 return gen_rtx_PARALLEL
5761 (VOIDmode,
5762 gen_rtvec (2,
5763 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5764 const0_rtx),
5765 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5766 GEN_INT (GET_MODE_SIZE (cmode)))));
5769 case MODE_RANDOM:
5770 /* We should only reach here for BLKmode on VMS. */
5771 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5772 regnum = 0;
5773 break;
5775 default:
5776 gcc_unreachable ();
5779 return gen_rtx_REG (mode, regnum);
5782 /* TCmode complex values are passed by invisible reference. We
5783 should not split these values. */
5785 static bool
5786 alpha_split_complex_arg (const_tree type)
5788 return TYPE_MODE (type) != TCmode;
5791 static tree
5792 alpha_build_builtin_va_list (void)
5794 tree base, ofs, space, record, type_decl;
5796 if (TARGET_ABI_OPEN_VMS)
5797 return ptr_type_node;
5799 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5800 type_decl = build_decl (BUILTINS_LOCATION,
5801 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5802 TYPE_STUB_DECL (record) = type_decl;
5803 TYPE_NAME (record) = type_decl;
5805 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5807 /* Dummy field to prevent alignment warnings. */
5808 space = build_decl (BUILTINS_LOCATION,
5809 FIELD_DECL, NULL_TREE, integer_type_node);
5810 DECL_FIELD_CONTEXT (space) = record;
5811 DECL_ARTIFICIAL (space) = 1;
5812 DECL_IGNORED_P (space) = 1;
5814 ofs = build_decl (BUILTINS_LOCATION,
5815 FIELD_DECL, get_identifier ("__offset"),
5816 integer_type_node);
5817 DECL_FIELD_CONTEXT (ofs) = record;
5818 DECL_CHAIN (ofs) = space;
5819 /* ??? This is a hack, __offset is marked volatile to prevent
5820 DCE that confuses stdarg optimization and results in
5821 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5822 TREE_THIS_VOLATILE (ofs) = 1;
5824 base = build_decl (BUILTINS_LOCATION,
5825 FIELD_DECL, get_identifier ("__base"),
5826 ptr_type_node);
5827 DECL_FIELD_CONTEXT (base) = record;
5828 DECL_CHAIN (base) = ofs;
5830 TYPE_FIELDS (record) = base;
5831 layout_type (record);
5833 va_list_gpr_counter_field = ofs;
5834 return record;
5837 #if TARGET_ABI_OSF
5838 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5839 and constant additions. */
5841 static gimple
5842 va_list_skip_additions (tree lhs)
5844 gimple stmt;
5846 for (;;)
5848 enum tree_code code;
5850 stmt = SSA_NAME_DEF_STMT (lhs);
5852 if (gimple_code (stmt) == GIMPLE_PHI)
5853 return stmt;
5855 if (!is_gimple_assign (stmt)
5856 || gimple_assign_lhs (stmt) != lhs)
5857 return NULL;
5859 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5860 return stmt;
5861 code = gimple_assign_rhs_code (stmt);
5862 if (!CONVERT_EXPR_CODE_P (code)
5863 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5864 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5865 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5866 return stmt;
5868 lhs = gimple_assign_rhs1 (stmt);
5872 /* Check if LHS = RHS statement is
5873 LHS = *(ap.__base + ap.__offset + cst)
5875 LHS = *(ap.__base
5876 + ((ap.__offset + cst <= 47)
5877 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5878 If the former, indicate that GPR registers are needed,
5879 if the latter, indicate that FPR registers are needed.
5881 Also look for LHS = (*ptr).field, where ptr is one of the forms
5882 listed above.
5884 On alpha, cfun->va_list_gpr_size is used as size of the needed
5885 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5886 registers are needed and bit 1 set if FPR registers are needed.
5887 Return true if va_list references should not be scanned for the
5888 current statement. */
5890 static bool
5891 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5893 tree base, offset, rhs;
5894 int offset_arg = 1;
5895 gimple base_stmt;
5897 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5898 != GIMPLE_SINGLE_RHS)
5899 return false;
5901 rhs = gimple_assign_rhs1 (stmt);
5902 while (handled_component_p (rhs))
5903 rhs = TREE_OPERAND (rhs, 0);
5904 if (TREE_CODE (rhs) != MEM_REF
5905 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5906 return false;
5908 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5909 if (stmt == NULL
5910 || !is_gimple_assign (stmt)
5911 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5912 return false;
5914 base = gimple_assign_rhs1 (stmt);
5915 if (TREE_CODE (base) == SSA_NAME)
5917 base_stmt = va_list_skip_additions (base);
5918 if (base_stmt
5919 && is_gimple_assign (base_stmt)
5920 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5921 base = gimple_assign_rhs1 (base_stmt);
5924 if (TREE_CODE (base) != COMPONENT_REF
5925 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5927 base = gimple_assign_rhs2 (stmt);
5928 if (TREE_CODE (base) == SSA_NAME)
5930 base_stmt = va_list_skip_additions (base);
5931 if (base_stmt
5932 && is_gimple_assign (base_stmt)
5933 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5934 base = gimple_assign_rhs1 (base_stmt);
5937 if (TREE_CODE (base) != COMPONENT_REF
5938 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5939 return false;
5941 offset_arg = 0;
5944 base = get_base_address (base);
5945 if (TREE_CODE (base) != VAR_DECL
5946 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
5947 return false;
5949 offset = gimple_op (stmt, 1 + offset_arg);
5950 if (TREE_CODE (offset) == SSA_NAME)
5952 gimple offset_stmt = va_list_skip_additions (offset);
5954 if (offset_stmt
5955 && gimple_code (offset_stmt) == GIMPLE_PHI)
5957 HOST_WIDE_INT sub;
5958 gimple arg1_stmt, arg2_stmt;
5959 tree arg1, arg2;
5960 enum tree_code code1, code2;
5962 if (gimple_phi_num_args (offset_stmt) != 2)
5963 goto escapes;
5965 arg1_stmt
5966 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5967 arg2_stmt
5968 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5969 if (arg1_stmt == NULL
5970 || !is_gimple_assign (arg1_stmt)
5971 || arg2_stmt == NULL
5972 || !is_gimple_assign (arg2_stmt))
5973 goto escapes;
5975 code1 = gimple_assign_rhs_code (arg1_stmt);
5976 code2 = gimple_assign_rhs_code (arg2_stmt);
5977 if (code1 == COMPONENT_REF
5978 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5979 /* Do nothing. */;
5980 else if (code2 == COMPONENT_REF
5981 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5983 gimple tem = arg1_stmt;
5984 code2 = code1;
5985 arg1_stmt = arg2_stmt;
5986 arg2_stmt = tem;
5988 else
5989 goto escapes;
5991 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5992 goto escapes;
5994 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5995 if (code2 == MINUS_EXPR)
5996 sub = -sub;
5997 if (sub < -48 || sub > -32)
5998 goto escapes;
6000 arg1 = gimple_assign_rhs1 (arg1_stmt);
6001 arg2 = gimple_assign_rhs1 (arg2_stmt);
6002 if (TREE_CODE (arg2) == SSA_NAME)
6004 arg2_stmt = va_list_skip_additions (arg2);
6005 if (arg2_stmt == NULL
6006 || !is_gimple_assign (arg2_stmt)
6007 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6008 goto escapes;
6009 arg2 = gimple_assign_rhs1 (arg2_stmt);
6011 if (arg1 != arg2)
6012 goto escapes;
6014 if (TREE_CODE (arg1) != COMPONENT_REF
6015 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6016 || get_base_address (arg1) != base)
6017 goto escapes;
6019 /* Need floating point regs. */
6020 cfun->va_list_fpr_size |= 2;
6021 return false;
6023 if (offset_stmt
6024 && is_gimple_assign (offset_stmt)
6025 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6026 offset = gimple_assign_rhs1 (offset_stmt);
6028 if (TREE_CODE (offset) != COMPONENT_REF
6029 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6030 || get_base_address (offset) != base)
6031 goto escapes;
6032 else
6033 /* Need general regs. */
6034 cfun->va_list_fpr_size |= 1;
6035 return false;
6037 escapes:
6038 si->va_list_escapes = true;
6039 return false;
6041 #endif
6043 /* Perform any needed actions needed for a function that is receiving a
6044 variable number of arguments. */
6046 static void
6047 alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
6048 tree type, int *pretend_size, int no_rtl)
6050 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6052 /* Skip the current argument. */
6053 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6054 true);
6056 #if TARGET_ABI_OPEN_VMS
6057 /* For VMS, we allocate space for all 6 arg registers plus a count.
6059 However, if NO registers need to be saved, don't allocate any space.
6060 This is not only because we won't need the space, but because AP
6061 includes the current_pretend_args_size and we don't want to mess up
6062 any ap-relative addresses already made. */
6063 if (cum.num_args < 6)
6065 if (!no_rtl)
6067 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6068 emit_insn (gen_arg_home ());
6070 *pretend_size = 7 * UNITS_PER_WORD;
6072 #else
6073 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6074 only push those that are remaining. However, if NO registers need to
6075 be saved, don't allocate any space. This is not only because we won't
6076 need the space, but because AP includes the current_pretend_args_size
6077 and we don't want to mess up any ap-relative addresses already made.
6079 If we are not to use the floating-point registers, save the integer
6080 registers where we would put the floating-point registers. This is
6081 not the most efficient way to implement varargs with just one register
6082 class, but it isn't worth doing anything more efficient in this rare
6083 case. */
6084 if (cum >= 6)
6085 return;
6087 if (!no_rtl)
6089 int count;
6090 alias_set_type set = get_varargs_alias_set ();
6091 rtx tmp;
6093 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6094 if (count > 6 - cum)
6095 count = 6 - cum;
6097 /* Detect whether integer registers or floating-point registers
6098 are needed by the detected va_arg statements. See above for
6099 how these values are computed. Note that the "escape" value
6100 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6101 these bits set. */
6102 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6104 if (cfun->va_list_fpr_size & 1)
6106 tmp = gen_rtx_MEM (BLKmode,
6107 plus_constant (Pmode, virtual_incoming_args_rtx,
6108 (cum + 6) * UNITS_PER_WORD));
6109 MEM_NOTRAP_P (tmp) = 1;
6110 set_mem_alias_set (tmp, set);
6111 move_block_from_reg (16 + cum, tmp, count);
6114 if (cfun->va_list_fpr_size & 2)
6116 tmp = gen_rtx_MEM (BLKmode,
6117 plus_constant (Pmode, virtual_incoming_args_rtx,
6118 cum * UNITS_PER_WORD));
6119 MEM_NOTRAP_P (tmp) = 1;
6120 set_mem_alias_set (tmp, set);
6121 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6124 *pretend_size = 12 * UNITS_PER_WORD;
6125 #endif
6128 static void
6129 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6131 HOST_WIDE_INT offset;
6132 tree t, offset_field, base_field;
6134 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6135 return;
6137 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6138 up by 48, storing fp arg registers in the first 48 bytes, and the
6139 integer arg registers in the next 48 bytes. This is only done,
6140 however, if any integer registers need to be stored.
6142 If no integer registers need be stored, then we must subtract 48
6143 in order to account for the integer arg registers which are counted
6144 in argsize above, but which are not actually stored on the stack.
6145 Must further be careful here about structures straddling the last
6146 integer argument register; that futzes with pretend_args_size,
6147 which changes the meaning of AP. */
6149 if (NUM_ARGS < 6)
6150 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6151 else
6152 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6154 if (TARGET_ABI_OPEN_VMS)
6156 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6157 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6158 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6159 TREE_SIDE_EFFECTS (t) = 1;
6160 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6162 else
6164 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6165 offset_field = DECL_CHAIN (base_field);
6167 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6168 valist, base_field, NULL_TREE);
6169 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6170 valist, offset_field, NULL_TREE);
6172 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6173 t = fold_build_pointer_plus_hwi (t, offset);
6174 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6175 TREE_SIDE_EFFECTS (t) = 1;
6176 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6178 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6179 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6180 TREE_SIDE_EFFECTS (t) = 1;
6181 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6185 static tree
6186 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6187 gimple_seq *pre_p)
6189 tree type_size, ptr_type, addend, t, addr;
6190 gimple_seq internal_post;
6192 /* If the type could not be passed in registers, skip the block
6193 reserved for the registers. */
6194 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6196 t = build_int_cst (TREE_TYPE (offset), 6*8);
6197 gimplify_assign (offset,
6198 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6199 pre_p);
6202 addend = offset;
6203 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6205 if (TREE_CODE (type) == COMPLEX_TYPE)
6207 tree real_part, imag_part, real_temp;
6209 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6210 offset, pre_p);
6212 /* Copy the value into a new temporary, lest the formal temporary
6213 be reused out from under us. */
6214 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6216 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6217 offset, pre_p);
6219 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6221 else if (TREE_CODE (type) == REAL_TYPE)
6223 tree fpaddend, cond, fourtyeight;
6225 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6226 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6227 addend, fourtyeight);
6228 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6229 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6230 fpaddend, addend);
6233 /* Build the final address and force that value into a temporary. */
6234 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6235 internal_post = NULL;
6236 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6237 gimple_seq_add_seq (pre_p, internal_post);
6239 /* Update the offset field. */
6240 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6241 if (type_size == NULL || TREE_OVERFLOW (type_size))
6242 t = size_zero_node;
6243 else
6245 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6246 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6247 t = size_binop (MULT_EXPR, t, size_int (8));
6249 t = fold_convert (TREE_TYPE (offset), t);
6250 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6251 pre_p);
6253 return build_va_arg_indirect_ref (addr);
6256 static tree
6257 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6258 gimple_seq *post_p)
6260 tree offset_field, base_field, offset, base, t, r;
6261 bool indirect;
6263 if (TARGET_ABI_OPEN_VMS)
6264 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6266 base_field = TYPE_FIELDS (va_list_type_node);
6267 offset_field = DECL_CHAIN (base_field);
6268 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6269 valist, base_field, NULL_TREE);
6270 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6271 valist, offset_field, NULL_TREE);
6273 /* Pull the fields of the structure out into temporaries. Since we never
6274 modify the base field, we can use a formal temporary. Sign-extend the
6275 offset field so that it's the proper width for pointer arithmetic. */
6276 base = get_formal_tmp_var (base_field, pre_p);
6278 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
6279 offset = get_initialized_tmp_var (t, pre_p, NULL);
6281 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6282 if (indirect)
6283 type = build_pointer_type_for_mode (type, ptr_mode, true);
6285 /* Find the value. Note that this will be a stable indirection, or
6286 a composite of stable indirections in the case of complex. */
6287 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6289 /* Stuff the offset temporary back into its field. */
6290 gimplify_assign (unshare_expr (offset_field),
6291 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6293 if (indirect)
6294 r = build_va_arg_indirect_ref (r);
6296 return r;
6299 /* Builtins. */
6301 enum alpha_builtin
6303 ALPHA_BUILTIN_CMPBGE,
6304 ALPHA_BUILTIN_EXTBL,
6305 ALPHA_BUILTIN_EXTWL,
6306 ALPHA_BUILTIN_EXTLL,
6307 ALPHA_BUILTIN_EXTQL,
6308 ALPHA_BUILTIN_EXTWH,
6309 ALPHA_BUILTIN_EXTLH,
6310 ALPHA_BUILTIN_EXTQH,
6311 ALPHA_BUILTIN_INSBL,
6312 ALPHA_BUILTIN_INSWL,
6313 ALPHA_BUILTIN_INSLL,
6314 ALPHA_BUILTIN_INSQL,
6315 ALPHA_BUILTIN_INSWH,
6316 ALPHA_BUILTIN_INSLH,
6317 ALPHA_BUILTIN_INSQH,
6318 ALPHA_BUILTIN_MSKBL,
6319 ALPHA_BUILTIN_MSKWL,
6320 ALPHA_BUILTIN_MSKLL,
6321 ALPHA_BUILTIN_MSKQL,
6322 ALPHA_BUILTIN_MSKWH,
6323 ALPHA_BUILTIN_MSKLH,
6324 ALPHA_BUILTIN_MSKQH,
6325 ALPHA_BUILTIN_UMULH,
6326 ALPHA_BUILTIN_ZAP,
6327 ALPHA_BUILTIN_ZAPNOT,
6328 ALPHA_BUILTIN_AMASK,
6329 ALPHA_BUILTIN_IMPLVER,
6330 ALPHA_BUILTIN_RPCC,
6331 ALPHA_BUILTIN_THREAD_POINTER,
6332 ALPHA_BUILTIN_SET_THREAD_POINTER,
6333 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6334 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6336 /* TARGET_MAX */
6337 ALPHA_BUILTIN_MINUB8,
6338 ALPHA_BUILTIN_MINSB8,
6339 ALPHA_BUILTIN_MINUW4,
6340 ALPHA_BUILTIN_MINSW4,
6341 ALPHA_BUILTIN_MAXUB8,
6342 ALPHA_BUILTIN_MAXSB8,
6343 ALPHA_BUILTIN_MAXUW4,
6344 ALPHA_BUILTIN_MAXSW4,
6345 ALPHA_BUILTIN_PERR,
6346 ALPHA_BUILTIN_PKLB,
6347 ALPHA_BUILTIN_PKWB,
6348 ALPHA_BUILTIN_UNPKBL,
6349 ALPHA_BUILTIN_UNPKBW,
6351 /* TARGET_CIX */
6352 ALPHA_BUILTIN_CTTZ,
6353 ALPHA_BUILTIN_CTLZ,
6354 ALPHA_BUILTIN_CTPOP,
6356 ALPHA_BUILTIN_max
6359 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6360 CODE_FOR_builtin_cmpbge,
6361 CODE_FOR_extbl,
6362 CODE_FOR_extwl,
6363 CODE_FOR_extll,
6364 CODE_FOR_extql,
6365 CODE_FOR_extwh,
6366 CODE_FOR_extlh,
6367 CODE_FOR_extqh,
6368 CODE_FOR_builtin_insbl,
6369 CODE_FOR_builtin_inswl,
6370 CODE_FOR_builtin_insll,
6371 CODE_FOR_insql,
6372 CODE_FOR_inswh,
6373 CODE_FOR_inslh,
6374 CODE_FOR_insqh,
6375 CODE_FOR_mskbl,
6376 CODE_FOR_mskwl,
6377 CODE_FOR_mskll,
6378 CODE_FOR_mskql,
6379 CODE_FOR_mskwh,
6380 CODE_FOR_msklh,
6381 CODE_FOR_mskqh,
6382 CODE_FOR_umuldi3_highpart,
6383 CODE_FOR_builtin_zap,
6384 CODE_FOR_builtin_zapnot,
6385 CODE_FOR_builtin_amask,
6386 CODE_FOR_builtin_implver,
6387 CODE_FOR_builtin_rpcc,
6388 CODE_FOR_load_tp,
6389 CODE_FOR_set_tp,
6390 CODE_FOR_builtin_establish_vms_condition_handler,
6391 CODE_FOR_builtin_revert_vms_condition_handler,
6393 /* TARGET_MAX */
6394 CODE_FOR_builtin_minub8,
6395 CODE_FOR_builtin_minsb8,
6396 CODE_FOR_builtin_minuw4,
6397 CODE_FOR_builtin_minsw4,
6398 CODE_FOR_builtin_maxub8,
6399 CODE_FOR_builtin_maxsb8,
6400 CODE_FOR_builtin_maxuw4,
6401 CODE_FOR_builtin_maxsw4,
6402 CODE_FOR_builtin_perr,
6403 CODE_FOR_builtin_pklb,
6404 CODE_FOR_builtin_pkwb,
6405 CODE_FOR_builtin_unpkbl,
6406 CODE_FOR_builtin_unpkbw,
6408 /* TARGET_CIX */
6409 CODE_FOR_ctzdi2,
6410 CODE_FOR_clzdi2,
6411 CODE_FOR_popcountdi2
6414 struct alpha_builtin_def
6416 const char *name;
6417 enum alpha_builtin code;
6418 unsigned int target_mask;
6419 bool is_const;
6422 static struct alpha_builtin_def const zero_arg_builtins[] = {
6423 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6424 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6427 static struct alpha_builtin_def const one_arg_builtins[] = {
6428 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6429 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6430 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6431 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6432 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6433 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6434 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6435 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6438 static struct alpha_builtin_def const two_arg_builtins[] = {
6439 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6440 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6441 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6442 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6443 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6444 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6445 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6446 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6447 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6448 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6449 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6450 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6451 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6452 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6453 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6454 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6455 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6456 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6457 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6458 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6459 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6460 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6461 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6462 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6463 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6464 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6465 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6466 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6467 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6468 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6469 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6470 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6471 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6472 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6475 static GTY(()) tree alpha_dimode_u;
6476 static GTY(()) tree alpha_v8qi_u;
6477 static GTY(()) tree alpha_v8qi_s;
6478 static GTY(()) tree alpha_v4hi_u;
6479 static GTY(()) tree alpha_v4hi_s;
6481 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6483 /* Return the alpha builtin for CODE. */
6485 static tree
6486 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6488 if (code >= ALPHA_BUILTIN_max)
6489 return error_mark_node;
6490 return alpha_builtins[code];
6493 /* Helper function of alpha_init_builtins. Add the built-in specified
6494 by NAME, TYPE, CODE, and ECF. */
6496 static void
6497 alpha_builtin_function (const char *name, tree ftype,
6498 enum alpha_builtin code, unsigned ecf)
6500 tree decl = add_builtin_function (name, ftype, (int) code,
6501 BUILT_IN_MD, NULL, NULL_TREE);
6503 if (ecf & ECF_CONST)
6504 TREE_READONLY (decl) = 1;
6505 if (ecf & ECF_NOTHROW)
6506 TREE_NOTHROW (decl) = 1;
6508 alpha_builtins [(int) code] = decl;
6511 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6512 functions pointed to by P, with function type FTYPE. */
6514 static void
6515 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6516 tree ftype)
6518 size_t i;
6520 for (i = 0; i < count; ++i, ++p)
6521 if ((target_flags & p->target_mask) == p->target_mask)
6522 alpha_builtin_function (p->name, ftype, p->code,
6523 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6526 static void
6527 alpha_init_builtins (void)
6529 tree ftype;
6531 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6532 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6533 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6534 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6535 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6537 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6538 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6540 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6541 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6543 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6544 alpha_dimode_u, NULL_TREE);
6545 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
6547 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
6548 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6549 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6551 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6552 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6553 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6555 if (TARGET_ABI_OPEN_VMS)
6557 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6558 NULL_TREE);
6559 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6560 ftype,
6561 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6564 ftype = build_function_type_list (ptr_type_node, void_type_node,
6565 NULL_TREE);
6566 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6567 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6569 vms_patch_builtins ();
6573 /* Expand an expression EXP that calls a built-in function,
6574 with result going to TARGET if that's convenient
6575 (and in mode MODE if that's convenient).
6576 SUBTARGET may be used as the target for computing one of EXP's operands.
6577 IGNORE is nonzero if the value is to be ignored. */
6579 static rtx
6580 alpha_expand_builtin (tree exp, rtx target,
6581 rtx subtarget ATTRIBUTE_UNUSED,
6582 enum machine_mode mode ATTRIBUTE_UNUSED,
6583 int ignore ATTRIBUTE_UNUSED)
6585 #define MAX_ARGS 2
6587 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6588 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6589 tree arg;
6590 call_expr_arg_iterator iter;
6591 enum insn_code icode;
6592 rtx op[MAX_ARGS], pat;
6593 int arity;
6594 bool nonvoid;
6596 if (fcode >= ALPHA_BUILTIN_max)
6597 internal_error ("bad builtin fcode");
6598 icode = code_for_builtin[fcode];
6599 if (icode == 0)
6600 internal_error ("bad builtin fcode");
6602 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6604 arity = 0;
6605 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6607 const struct insn_operand_data *insn_op;
6609 if (arg == error_mark_node)
6610 return NULL_RTX;
6611 if (arity > MAX_ARGS)
6612 return NULL_RTX;
6614 insn_op = &insn_data[icode].operand[arity + nonvoid];
6616 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6618 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6619 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6620 arity++;
6623 if (nonvoid)
6625 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6626 if (!target
6627 || GET_MODE (target) != tmode
6628 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6629 target = gen_reg_rtx (tmode);
6632 switch (arity)
6634 case 0:
6635 pat = GEN_FCN (icode) (target);
6636 break;
6637 case 1:
6638 if (nonvoid)
6639 pat = GEN_FCN (icode) (target, op[0]);
6640 else
6641 pat = GEN_FCN (icode) (op[0]);
6642 break;
6643 case 2:
6644 pat = GEN_FCN (icode) (target, op[0], op[1]);
6645 break;
6646 default:
6647 gcc_unreachable ();
6649 if (!pat)
6650 return NULL_RTX;
6651 emit_insn (pat);
6653 if (nonvoid)
6654 return target;
6655 else
6656 return const0_rtx;
6660 /* Several bits below assume HWI >= 64 bits. This should be enforced
6661 by config.gcc. */
6662 #if HOST_BITS_PER_WIDE_INT < 64
6663 # error "HOST_WIDE_INT too small"
6664 #endif
6666 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6667 with an 8-bit output vector. OPINT contains the integer operands; bit N
6668 of OP_CONST is set if OPINT[N] is valid. */
6670 static tree
6671 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6673 if (op_const == 3)
6675 int i, val;
6676 for (i = 0, val = 0; i < 8; ++i)
6678 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6679 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6680 if (c0 >= c1)
6681 val |= 1 << i;
6683 return build_int_cst (alpha_dimode_u, val);
6685 else if (op_const == 2 && opint[1] == 0)
6686 return build_int_cst (alpha_dimode_u, 0xff);
6687 return NULL;
6690 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6691 specialized form of an AND operation. Other byte manipulation instructions
6692 are defined in terms of this instruction, so this is also used as a
6693 subroutine for other builtins.
6695 OP contains the tree operands; OPINT contains the extracted integer values.
6696 Bit N of OP_CONST it set if OPINT[N] is valid. OP may be null if only
6697 OPINT may be considered. */
6699 static tree
6700 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6701 long op_const)
6703 if (op_const & 2)
6705 unsigned HOST_WIDE_INT mask = 0;
6706 int i;
6708 for (i = 0; i < 8; ++i)
6709 if ((opint[1] >> i) & 1)
6710 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6712 if (op_const & 1)
6713 return build_int_cst (alpha_dimode_u, opint[0] & mask);
6715 if (op)
6716 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6717 build_int_cst (alpha_dimode_u, mask));
6719 else if ((op_const & 1) && opint[0] == 0)
6720 return build_int_cst (alpha_dimode_u, 0);
6721 return NULL;
6724 /* Fold the builtins for the EXT family of instructions. */
6726 static tree
6727 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6728 long op_const, unsigned HOST_WIDE_INT bytemask,
6729 bool is_high)
6731 long zap_const = 2;
6732 tree *zap_op = NULL;
6734 if (op_const & 2)
6736 unsigned HOST_WIDE_INT loc;
6738 loc = opint[1] & 7;
6739 loc *= BITS_PER_UNIT;
6741 if (loc != 0)
6743 if (op_const & 1)
6745 unsigned HOST_WIDE_INT temp = opint[0];
6746 if (is_high)
6747 temp <<= loc;
6748 else
6749 temp >>= loc;
6750 opint[0] = temp;
6751 zap_const = 3;
6754 else
6755 zap_op = op;
6758 opint[1] = bytemask;
6759 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6762 /* Fold the builtins for the INS family of instructions. */
6764 static tree
6765 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6766 long op_const, unsigned HOST_WIDE_INT bytemask,
6767 bool is_high)
6769 if ((op_const & 1) && opint[0] == 0)
6770 return build_int_cst (alpha_dimode_u, 0);
6772 if (op_const & 2)
6774 unsigned HOST_WIDE_INT temp, loc, byteloc;
6775 tree *zap_op = NULL;
6777 loc = opint[1] & 7;
6778 bytemask <<= loc;
6780 temp = opint[0];
6781 if (is_high)
6783 byteloc = (64 - (loc * 8)) & 0x3f;
6784 if (byteloc == 0)
6785 zap_op = op;
6786 else
6787 temp >>= byteloc;
6788 bytemask >>= 8;
6790 else
6792 byteloc = loc * 8;
6793 if (byteloc == 0)
6794 zap_op = op;
6795 else
6796 temp <<= byteloc;
6799 opint[0] = temp;
6800 opint[1] = bytemask;
6801 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6804 return NULL;
6807 static tree
6808 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6809 long op_const, unsigned HOST_WIDE_INT bytemask,
6810 bool is_high)
6812 if (op_const & 2)
6814 unsigned HOST_WIDE_INT loc;
6816 loc = opint[1] & 7;
6817 bytemask <<= loc;
6819 if (is_high)
6820 bytemask >>= 8;
6822 opint[1] = bytemask ^ 0xff;
6825 return alpha_fold_builtin_zapnot (op, opint, op_const);
6828 static tree
6829 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6831 tree op0 = fold_convert (vtype, op[0]);
6832 tree op1 = fold_convert (vtype, op[1]);
6833 tree val = fold_build2 (code, vtype, op0, op1);
6834 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
6837 static tree
6838 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6840 unsigned HOST_WIDE_INT temp = 0;
6841 int i;
6843 if (op_const != 3)
6844 return NULL;
6846 for (i = 0; i < 8; ++i)
6848 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6849 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6850 if (a >= b)
6851 temp += a - b;
6852 else
6853 temp += b - a;
6856 return build_int_cst (alpha_dimode_u, temp);
6859 static tree
6860 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6862 unsigned HOST_WIDE_INT temp;
6864 if (op_const == 0)
6865 return NULL;
6867 temp = opint[0] & 0xff;
6868 temp |= (opint[0] >> 24) & 0xff00;
6870 return build_int_cst (alpha_dimode_u, temp);
6873 static tree
6874 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6876 unsigned HOST_WIDE_INT temp;
6878 if (op_const == 0)
6879 return NULL;
6881 temp = opint[0] & 0xff;
6882 temp |= (opint[0] >> 8) & 0xff00;
6883 temp |= (opint[0] >> 16) & 0xff0000;
6884 temp |= (opint[0] >> 24) & 0xff000000;
6886 return build_int_cst (alpha_dimode_u, temp);
6889 static tree
6890 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6892 unsigned HOST_WIDE_INT temp;
6894 if (op_const == 0)
6895 return NULL;
6897 temp = opint[0] & 0xff;
6898 temp |= (opint[0] & 0xff00) << 24;
6900 return build_int_cst (alpha_dimode_u, temp);
6903 static tree
6904 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6906 unsigned HOST_WIDE_INT temp;
6908 if (op_const == 0)
6909 return NULL;
6911 temp = opint[0] & 0xff;
6912 temp |= (opint[0] & 0x0000ff00) << 8;
6913 temp |= (opint[0] & 0x00ff0000) << 16;
6914 temp |= (opint[0] & 0xff000000) << 24;
6916 return build_int_cst (alpha_dimode_u, temp);
6919 static tree
6920 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6922 unsigned HOST_WIDE_INT temp;
6924 if (op_const == 0)
6925 return NULL;
6927 if (opint[0] == 0)
6928 temp = 64;
6929 else
6930 temp = exact_log2 (opint[0] & -opint[0]);
6932 return build_int_cst (alpha_dimode_u, temp);
6935 static tree
6936 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6938 unsigned HOST_WIDE_INT temp;
6940 if (op_const == 0)
6941 return NULL;
6943 if (opint[0] == 0)
6944 temp = 64;
6945 else
6946 temp = 64 - floor_log2 (opint[0]) - 1;
6948 return build_int_cst (alpha_dimode_u, temp);
6951 static tree
6952 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6954 unsigned HOST_WIDE_INT temp, op;
6956 if (op_const == 0)
6957 return NULL;
6959 op = opint[0];
6960 temp = 0;
6961 while (op)
6962 temp++, op &= op - 1;
6964 return build_int_cst (alpha_dimode_u, temp);
6967 /* Fold one of our builtin functions. */
6969 static tree
6970 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6971 bool ignore ATTRIBUTE_UNUSED)
6973 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6974 long op_const = 0;
6975 int i;
6977 if (n_args > MAX_ARGS)
6978 return NULL;
6980 for (i = 0; i < n_args; i++)
6982 tree arg = op[i];
6983 if (arg == error_mark_node)
6984 return NULL;
6986 opint[i] = 0;
6987 if (TREE_CODE (arg) == INTEGER_CST)
6989 op_const |= 1L << i;
6990 opint[i] = int_cst_value (arg);
6994 switch (DECL_FUNCTION_CODE (fndecl))
6996 case ALPHA_BUILTIN_CMPBGE:
6997 return alpha_fold_builtin_cmpbge (opint, op_const);
6999 case ALPHA_BUILTIN_EXTBL:
7000 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7001 case ALPHA_BUILTIN_EXTWL:
7002 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7003 case ALPHA_BUILTIN_EXTLL:
7004 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7005 case ALPHA_BUILTIN_EXTQL:
7006 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7007 case ALPHA_BUILTIN_EXTWH:
7008 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7009 case ALPHA_BUILTIN_EXTLH:
7010 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7011 case ALPHA_BUILTIN_EXTQH:
7012 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7014 case ALPHA_BUILTIN_INSBL:
7015 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7016 case ALPHA_BUILTIN_INSWL:
7017 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7018 case ALPHA_BUILTIN_INSLL:
7019 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7020 case ALPHA_BUILTIN_INSQL:
7021 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7022 case ALPHA_BUILTIN_INSWH:
7023 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7024 case ALPHA_BUILTIN_INSLH:
7025 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7026 case ALPHA_BUILTIN_INSQH:
7027 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7029 case ALPHA_BUILTIN_MSKBL:
7030 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7031 case ALPHA_BUILTIN_MSKWL:
7032 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7033 case ALPHA_BUILTIN_MSKLL:
7034 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7035 case ALPHA_BUILTIN_MSKQL:
7036 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7037 case ALPHA_BUILTIN_MSKWH:
7038 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7039 case ALPHA_BUILTIN_MSKLH:
7040 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7041 case ALPHA_BUILTIN_MSKQH:
7042 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7044 case ALPHA_BUILTIN_UMULH:
7045 return fold_build2 (MULT_HIGHPART_EXPR, alpha_dimode_u, op[0], op[1]);
7047 case ALPHA_BUILTIN_ZAP:
7048 opint[1] ^= 0xff;
7049 /* FALLTHRU */
7050 case ALPHA_BUILTIN_ZAPNOT:
7051 return alpha_fold_builtin_zapnot (op, opint, op_const);
7053 case ALPHA_BUILTIN_MINUB8:
7054 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7055 case ALPHA_BUILTIN_MINSB8:
7056 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7057 case ALPHA_BUILTIN_MINUW4:
7058 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7059 case ALPHA_BUILTIN_MINSW4:
7060 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7061 case ALPHA_BUILTIN_MAXUB8:
7062 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7063 case ALPHA_BUILTIN_MAXSB8:
7064 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7065 case ALPHA_BUILTIN_MAXUW4:
7066 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7067 case ALPHA_BUILTIN_MAXSW4:
7068 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7070 case ALPHA_BUILTIN_PERR:
7071 return alpha_fold_builtin_perr (opint, op_const);
7072 case ALPHA_BUILTIN_PKLB:
7073 return alpha_fold_builtin_pklb (opint, op_const);
7074 case ALPHA_BUILTIN_PKWB:
7075 return alpha_fold_builtin_pkwb (opint, op_const);
7076 case ALPHA_BUILTIN_UNPKBL:
7077 return alpha_fold_builtin_unpkbl (opint, op_const);
7078 case ALPHA_BUILTIN_UNPKBW:
7079 return alpha_fold_builtin_unpkbw (opint, op_const);
7081 case ALPHA_BUILTIN_CTTZ:
7082 return alpha_fold_builtin_cttz (opint, op_const);
7083 case ALPHA_BUILTIN_CTLZ:
7084 return alpha_fold_builtin_ctlz (opint, op_const);
7085 case ALPHA_BUILTIN_CTPOP:
7086 return alpha_fold_builtin_ctpop (opint, op_const);
7088 case ALPHA_BUILTIN_AMASK:
7089 case ALPHA_BUILTIN_IMPLVER:
7090 case ALPHA_BUILTIN_RPCC:
7091 case ALPHA_BUILTIN_THREAD_POINTER:
7092 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7093 /* None of these are foldable at compile-time. */
7094 default:
7095 return NULL;
7099 /* This page contains routines that are used to determine what the function
7100 prologue and epilogue code will do and write them out. */
7102 /* Compute the size of the save area in the stack. */
7104 /* These variables are used for communication between the following functions.
7105 They indicate various things about the current function being compiled
7106 that are used to tell what kind of prologue, epilogue and procedure
7107 descriptor to generate. */
7109 /* Nonzero if we need a stack procedure. */
7110 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7111 static enum alpha_procedure_types alpha_procedure_type;
7113 /* Register number (either FP or SP) that is used to unwind the frame. */
7114 static int vms_unwind_regno;
7116 /* Register number used to save FP. We need not have one for RA since
7117 we don't modify it for register procedures. This is only defined
7118 for register frame procedures. */
7119 static int vms_save_fp_regno;
7121 /* Register number used to reference objects off our PV. */
7122 static int vms_base_regno;
7124 /* Compute register masks for saved registers. */
7126 static void
7127 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7129 unsigned long imask = 0;
7130 unsigned long fmask = 0;
7131 unsigned int i;
7133 /* When outputting a thunk, we don't have valid register life info,
7134 but assemble_start_function wants to output .frame and .mask
7135 directives. */
7136 if (cfun->is_thunk)
7138 *imaskP = 0;
7139 *fmaskP = 0;
7140 return;
7143 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7144 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7146 /* One for every register we have to save. */
7147 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7148 if (! fixed_regs[i] && ! call_used_regs[i]
7149 && df_regs_ever_live_p (i) && i != REG_RA)
7151 if (i < 32)
7152 imask |= (1UL << i);
7153 else
7154 fmask |= (1UL << (i - 32));
7157 /* We need to restore these for the handler. */
7158 if (crtl->calls_eh_return)
7160 for (i = 0; ; ++i)
7162 unsigned regno = EH_RETURN_DATA_REGNO (i);
7163 if (regno == INVALID_REGNUM)
7164 break;
7165 imask |= 1UL << regno;
7169 /* If any register spilled, then spill the return address also. */
7170 /* ??? This is required by the Digital stack unwind specification
7171 and isn't needed if we're doing Dwarf2 unwinding. */
7172 if (imask || fmask || alpha_ra_ever_killed ())
7173 imask |= (1UL << REG_RA);
7175 *imaskP = imask;
7176 *fmaskP = fmask;
7180 alpha_sa_size (void)
7182 unsigned long mask[2];
7183 int sa_size = 0;
7184 int i, j;
7186 alpha_sa_mask (&mask[0], &mask[1]);
7188 for (j = 0; j < 2; ++j)
7189 for (i = 0; i < 32; ++i)
7190 if ((mask[j] >> i) & 1)
7191 sa_size++;
7193 if (TARGET_ABI_OPEN_VMS)
7195 /* Start with a stack procedure if we make any calls (REG_RA used), or
7196 need a frame pointer, with a register procedure if we otherwise need
7197 at least a slot, and with a null procedure in other cases. */
7198 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7199 alpha_procedure_type = PT_STACK;
7200 else if (get_frame_size() != 0)
7201 alpha_procedure_type = PT_REGISTER;
7202 else
7203 alpha_procedure_type = PT_NULL;
7205 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7206 made the final decision on stack procedure vs register procedure. */
7207 if (alpha_procedure_type == PT_STACK)
7208 sa_size -= 2;
7210 /* Decide whether to refer to objects off our PV via FP or PV.
7211 If we need FP for something else or if we receive a nonlocal
7212 goto (which expects PV to contain the value), we must use PV.
7213 Otherwise, start by assuming we can use FP. */
7215 vms_base_regno
7216 = (frame_pointer_needed
7217 || cfun->has_nonlocal_label
7218 || alpha_procedure_type == PT_STACK
7219 || crtl->outgoing_args_size)
7220 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7222 /* If we want to copy PV into FP, we need to find some register
7223 in which to save FP. */
7225 vms_save_fp_regno = -1;
7226 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7227 for (i = 0; i < 32; i++)
7228 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7229 vms_save_fp_regno = i;
7231 /* A VMS condition handler requires a stack procedure in our
7232 implementation. (not required by the calling standard). */
7233 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7234 || cfun->machine->uses_condition_handler)
7235 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7236 else if (alpha_procedure_type == PT_NULL)
7237 vms_base_regno = REG_PV;
7239 /* Stack unwinding should be done via FP unless we use it for PV. */
7240 vms_unwind_regno = (vms_base_regno == REG_PV
7241 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7243 /* If this is a stack procedure, allow space for saving FP, RA and
7244 a condition handler slot if needed. */
7245 if (alpha_procedure_type == PT_STACK)
7246 sa_size += 2 + cfun->machine->uses_condition_handler;
7248 else
7250 /* Our size must be even (multiple of 16 bytes). */
7251 if (sa_size & 1)
7252 sa_size++;
7255 return sa_size * 8;
7258 /* Define the offset between two registers, one to be eliminated,
7259 and the other its replacement, at the start of a routine. */
7261 HOST_WIDE_INT
7262 alpha_initial_elimination_offset (unsigned int from,
7263 unsigned int to ATTRIBUTE_UNUSED)
7265 HOST_WIDE_INT ret;
7267 ret = alpha_sa_size ();
7268 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7270 switch (from)
7272 case FRAME_POINTER_REGNUM:
7273 break;
7275 case ARG_POINTER_REGNUM:
7276 ret += (ALPHA_ROUND (get_frame_size ()
7277 + crtl->args.pretend_args_size)
7278 - crtl->args.pretend_args_size);
7279 break;
7281 default:
7282 gcc_unreachable ();
7285 return ret;
7288 #if TARGET_ABI_OPEN_VMS
7290 /* Worker function for TARGET_CAN_ELIMINATE. */
7292 static bool
7293 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7295 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7296 alpha_sa_size ();
7298 switch (alpha_procedure_type)
7300 case PT_NULL:
7301 /* NULL procedures have no frame of their own and we only
7302 know how to resolve from the current stack pointer. */
7303 return to == STACK_POINTER_REGNUM;
7305 case PT_REGISTER:
7306 case PT_STACK:
7307 /* We always eliminate except to the stack pointer if there is no
7308 usable frame pointer at hand. */
7309 return (to != STACK_POINTER_REGNUM
7310 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7313 gcc_unreachable ();
7316 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7317 designates the same location as FROM. */
7319 HOST_WIDE_INT
7320 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7322 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7323 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7324 on the proper computations and will need the register save area size
7325 in most cases. */
7327 HOST_WIDE_INT sa_size = alpha_sa_size ();
7329 /* PT_NULL procedures have no frame of their own and we only allow
7330 elimination to the stack pointer. This is the argument pointer and we
7331 resolve the soft frame pointer to that as well. */
7333 if (alpha_procedure_type == PT_NULL)
7334 return 0;
7336 /* For a PT_STACK procedure the frame layout looks as follows
7338 -----> decreasing addresses
7340 < size rounded up to 16 | likewise >
7341 --------------#------------------------------+++--------------+++-------#
7342 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7343 --------------#---------------------------------------------------------#
7344 ^ ^ ^ ^
7345 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7348 PT_REGISTER procedures are similar in that they may have a frame of their
7349 own. They have no regs-sa/pv/outgoing-args area.
7351 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7352 to STACK_PTR if need be. */
7355 HOST_WIDE_INT offset;
7356 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7358 switch (from)
7360 case FRAME_POINTER_REGNUM:
7361 offset = ALPHA_ROUND (sa_size + pv_save_size);
7362 break;
7363 case ARG_POINTER_REGNUM:
7364 offset = (ALPHA_ROUND (sa_size + pv_save_size
7365 + get_frame_size ()
7366 + crtl->args.pretend_args_size)
7367 - crtl->args.pretend_args_size);
7368 break;
7369 default:
7370 gcc_unreachable ();
7373 if (to == STACK_POINTER_REGNUM)
7374 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7376 return offset;
7380 #define COMMON_OBJECT "common_object"
7382 static tree
7383 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7384 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7385 bool *no_add_attrs ATTRIBUTE_UNUSED)
7387 tree decl = *node;
7388 gcc_assert (DECL_P (decl));
7390 DECL_COMMON (decl) = 1;
7391 return NULL_TREE;
7394 static const struct attribute_spec vms_attribute_table[] =
7396 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7397 affects_type_identity } */
7398 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7399 { NULL, 0, 0, false, false, false, NULL, false }
7402 void
7403 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7404 unsigned HOST_WIDE_INT size,
7405 unsigned int align)
7407 tree attr = DECL_ATTRIBUTES (decl);
7408 fprintf (file, "%s", COMMON_ASM_OP);
7409 assemble_name (file, name);
7410 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7411 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7412 fprintf (file, ",%u", align / BITS_PER_UNIT);
7413 if (attr)
7415 attr = lookup_attribute (COMMON_OBJECT, attr);
7416 if (attr)
7417 fprintf (file, ",%s",
7418 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7420 fputc ('\n', file);
7423 #undef COMMON_OBJECT
7425 #endif
7427 static int
7428 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7430 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7434 alpha_find_lo_sum_using_gp (rtx insn)
7436 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7439 static int
7440 alpha_does_function_need_gp (void)
7442 rtx insn;
7444 /* The GP being variable is an OSF abi thing. */
7445 if (! TARGET_ABI_OSF)
7446 return 0;
7448 /* We need the gp to load the address of __mcount. */
7449 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7450 return 1;
7452 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7453 if (cfun->is_thunk)
7454 return 1;
7456 /* The nonlocal receiver pattern assumes that the gp is valid for
7457 the nested function. Reasonable because it's almost always set
7458 correctly already. For the cases where that's wrong, make sure
7459 the nested function loads its gp on entry. */
7460 if (crtl->has_nonlocal_goto)
7461 return 1;
7463 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7464 Even if we are a static function, we still need to do this in case
7465 our address is taken and passed to something like qsort. */
7467 push_topmost_sequence ();
7468 insn = get_insns ();
7469 pop_topmost_sequence ();
7471 for (; insn; insn = NEXT_INSN (insn))
7472 if (NONDEBUG_INSN_P (insn)
7473 && ! JUMP_TABLE_DATA_P (insn)
7474 && GET_CODE (PATTERN (insn)) != USE
7475 && GET_CODE (PATTERN (insn)) != CLOBBER
7476 && get_attr_usegp (insn))
7477 return 1;
7479 return 0;
7483 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7484 sequences. */
7486 static rtx
7487 set_frame_related_p (void)
7489 rtx seq = get_insns ();
7490 rtx insn;
7492 end_sequence ();
7494 if (!seq)
7495 return NULL_RTX;
7497 if (INSN_P (seq))
7499 insn = seq;
7500 while (insn != NULL_RTX)
7502 RTX_FRAME_RELATED_P (insn) = 1;
7503 insn = NEXT_INSN (insn);
7505 seq = emit_insn (seq);
7507 else
7509 seq = emit_insn (seq);
7510 RTX_FRAME_RELATED_P (seq) = 1;
7512 return seq;
7515 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7517 /* Generates a store with the proper unwind info attached. VALUE is
7518 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7519 contains SP+FRAME_BIAS, and that is the unwind info that should be
7520 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7521 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7523 static void
7524 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7525 HOST_WIDE_INT base_ofs, rtx frame_reg)
7527 rtx addr, mem, insn;
7529 addr = plus_constant (Pmode, base_reg, base_ofs);
7530 mem = gen_frame_mem (DImode, addr);
7532 insn = emit_move_insn (mem, value);
7533 RTX_FRAME_RELATED_P (insn) = 1;
7535 if (frame_bias || value != frame_reg)
7537 if (frame_bias)
7539 addr = plus_constant (Pmode, stack_pointer_rtx,
7540 frame_bias + base_ofs);
7541 mem = gen_rtx_MEM (DImode, addr);
7544 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7545 gen_rtx_SET (VOIDmode, mem, frame_reg));
7549 static void
7550 emit_frame_store (unsigned int regno, rtx base_reg,
7551 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7553 rtx reg = gen_rtx_REG (DImode, regno);
7554 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7557 /* Compute the frame size. SIZE is the size of the "naked" frame
7558 and SA_SIZE is the size of the register save area. */
7560 static HOST_WIDE_INT
7561 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7563 if (TARGET_ABI_OPEN_VMS)
7564 return ALPHA_ROUND (sa_size
7565 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7566 + size
7567 + crtl->args.pretend_args_size);
7568 else
7569 return ALPHA_ROUND (crtl->outgoing_args_size)
7570 + sa_size
7571 + ALPHA_ROUND (size
7572 + crtl->args.pretend_args_size);
7575 /* Write function prologue. */
7577 /* On vms we have two kinds of functions:
7579 - stack frame (PROC_STACK)
7580 these are 'normal' functions with local vars and which are
7581 calling other functions
7582 - register frame (PROC_REGISTER)
7583 keeps all data in registers, needs no stack
7585 We must pass this to the assembler so it can generate the
7586 proper pdsc (procedure descriptor)
7587 This is done with the '.pdesc' command.
7589 On not-vms, we don't really differentiate between the two, as we can
7590 simply allocate stack without saving registers. */
7592 void
7593 alpha_expand_prologue (void)
7595 /* Registers to save. */
7596 unsigned long imask = 0;
7597 unsigned long fmask = 0;
7598 /* Stack space needed for pushing registers clobbered by us. */
7599 HOST_WIDE_INT sa_size, sa_bias;
7600 /* Complete stack size needed. */
7601 HOST_WIDE_INT frame_size;
7602 /* Probed stack size; it additionally includes the size of
7603 the "reserve region" if any. */
7604 HOST_WIDE_INT probed_size;
7605 /* Offset from base reg to register save area. */
7606 HOST_WIDE_INT reg_offset;
7607 rtx sa_reg;
7608 int i;
7610 sa_size = alpha_sa_size ();
7611 frame_size = compute_frame_size (get_frame_size (), sa_size);
7613 if (flag_stack_usage_info)
7614 current_function_static_stack_size = frame_size;
7616 if (TARGET_ABI_OPEN_VMS)
7617 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7618 else
7619 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7621 alpha_sa_mask (&imask, &fmask);
7623 /* Emit an insn to reload GP, if needed. */
7624 if (TARGET_ABI_OSF)
7626 alpha_function_needs_gp = alpha_does_function_need_gp ();
7627 if (alpha_function_needs_gp)
7628 emit_insn (gen_prologue_ldgp ());
7631 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7632 the call to mcount ourselves, rather than having the linker do it
7633 magically in response to -pg. Since _mcount has special linkage,
7634 don't represent the call as a call. */
7635 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7636 emit_insn (gen_prologue_mcount ());
7638 /* Adjust the stack by the frame size. If the frame size is > 4096
7639 bytes, we need to be sure we probe somewhere in the first and last
7640 4096 bytes (we can probably get away without the latter test) and
7641 every 8192 bytes in between. If the frame size is > 32768, we
7642 do this in a loop. Otherwise, we generate the explicit probe
7643 instructions.
7645 Note that we are only allowed to adjust sp once in the prologue. */
7647 probed_size = frame_size;
7648 if (flag_stack_check)
7649 probed_size += STACK_CHECK_PROTECT;
7651 if (probed_size <= 32768)
7653 if (probed_size > 4096)
7655 int probed;
7657 for (probed = 4096; probed < probed_size; probed += 8192)
7658 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7660 /* We only have to do this probe if we aren't saving registers or
7661 if we are probing beyond the frame because of -fstack-check. */
7662 if ((sa_size == 0 && probed_size > probed - 4096)
7663 || flag_stack_check)
7664 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7667 if (frame_size != 0)
7668 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7669 GEN_INT (-frame_size))));
7671 else
7673 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7674 number of 8192 byte blocks to probe. We then probe each block
7675 in the loop and then set SP to the proper location. If the
7676 amount remaining is > 4096, we have to do one more probe if we
7677 are not saving any registers or if we are probing beyond the
7678 frame because of -fstack-check. */
7680 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7681 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7682 rtx ptr = gen_rtx_REG (DImode, 22);
7683 rtx count = gen_rtx_REG (DImode, 23);
7684 rtx seq;
7686 emit_move_insn (count, GEN_INT (blocks));
7687 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7689 /* Because of the difficulty in emitting a new basic block this
7690 late in the compilation, generate the loop as a single insn. */
7691 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7693 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7695 rtx last = gen_rtx_MEM (DImode,
7696 plus_constant (Pmode, ptr, -leftover));
7697 MEM_VOLATILE_P (last) = 1;
7698 emit_move_insn (last, const0_rtx);
7701 if (flag_stack_check)
7703 /* If -fstack-check is specified we have to load the entire
7704 constant into a register and subtract from the sp in one go,
7705 because the probed stack size is not equal to the frame size. */
7706 HOST_WIDE_INT lo, hi;
7707 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7708 hi = frame_size - lo;
7710 emit_move_insn (ptr, GEN_INT (hi));
7711 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7712 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7713 ptr));
7715 else
7717 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7718 GEN_INT (-leftover)));
7721 /* This alternative is special, because the DWARF code cannot
7722 possibly intuit through the loop above. So we invent this
7723 note it looks at instead. */
7724 RTX_FRAME_RELATED_P (seq) = 1;
7725 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7726 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7727 plus_constant (Pmode, stack_pointer_rtx,
7728 -frame_size)));
7731 /* Cope with very large offsets to the register save area. */
7732 sa_bias = 0;
7733 sa_reg = stack_pointer_rtx;
7734 if (reg_offset + sa_size > 0x8000)
7736 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7737 rtx sa_bias_rtx;
7739 if (low + sa_size <= 0x8000)
7740 sa_bias = reg_offset - low, reg_offset = low;
7741 else
7742 sa_bias = reg_offset, reg_offset = 0;
7744 sa_reg = gen_rtx_REG (DImode, 24);
7745 sa_bias_rtx = GEN_INT (sa_bias);
7747 if (add_operand (sa_bias_rtx, DImode))
7748 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7749 else
7751 emit_move_insn (sa_reg, sa_bias_rtx);
7752 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7756 /* Save regs in stack order. Beginning with VMS PV. */
7757 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7758 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7760 /* Save register RA next. */
7761 if (imask & (1UL << REG_RA))
7763 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7764 imask &= ~(1UL << REG_RA);
7765 reg_offset += 8;
7768 /* Now save any other registers required to be saved. */
7769 for (i = 0; i < 31; i++)
7770 if (imask & (1UL << i))
7772 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7773 reg_offset += 8;
7776 for (i = 0; i < 31; i++)
7777 if (fmask & (1UL << i))
7779 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7780 reg_offset += 8;
7783 if (TARGET_ABI_OPEN_VMS)
7785 /* Register frame procedures save the fp. */
7786 if (alpha_procedure_type == PT_REGISTER)
7788 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7789 hard_frame_pointer_rtx);
7790 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7791 RTX_FRAME_RELATED_P (insn) = 1;
7794 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7795 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7796 gen_rtx_REG (DImode, REG_PV)));
7798 if (alpha_procedure_type != PT_NULL
7799 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7800 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7802 /* If we have to allocate space for outgoing args, do it now. */
7803 if (crtl->outgoing_args_size != 0)
7805 rtx seq
7806 = emit_move_insn (stack_pointer_rtx,
7807 plus_constant
7808 (Pmode, hard_frame_pointer_rtx,
7809 - (ALPHA_ROUND
7810 (crtl->outgoing_args_size))));
7812 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7813 if ! frame_pointer_needed. Setting the bit will change the CFA
7814 computation rule to use sp again, which would be wrong if we had
7815 frame_pointer_needed, as this means sp might move unpredictably
7816 later on.
7818 Also, note that
7819 frame_pointer_needed
7820 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7822 crtl->outgoing_args_size != 0
7823 => alpha_procedure_type != PT_NULL,
7825 so when we are not setting the bit here, we are guaranteed to
7826 have emitted an FRP frame pointer update just before. */
7827 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7830 else
7832 /* If we need a frame pointer, set it from the stack pointer. */
7833 if (frame_pointer_needed)
7835 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7836 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7837 else
7838 /* This must always be the last instruction in the
7839 prologue, thus we emit a special move + clobber. */
7840 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7841 stack_pointer_rtx, sa_reg)));
7845 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7846 the prologue, for exception handling reasons, we cannot do this for
7847 any insn that might fault. We could prevent this for mems with a
7848 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7849 have to prevent all such scheduling with a blockage.
7851 Linux, on the other hand, never bothered to implement OSF/1's
7852 exception handling, and so doesn't care about such things. Anyone
7853 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7855 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7856 emit_insn (gen_blockage ());
7859 /* Count the number of .file directives, so that .loc is up to date. */
7860 int num_source_filenames = 0;
7862 /* Output the textual info surrounding the prologue. */
7864 void
7865 alpha_start_function (FILE *file, const char *fnname,
7866 tree decl ATTRIBUTE_UNUSED)
7868 unsigned long imask = 0;
7869 unsigned long fmask = 0;
7870 /* Stack space needed for pushing registers clobbered by us. */
7871 HOST_WIDE_INT sa_size;
7872 /* Complete stack size needed. */
7873 unsigned HOST_WIDE_INT frame_size;
7874 /* The maximum debuggable frame size. */
7875 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
7876 /* Offset from base reg to register save area. */
7877 HOST_WIDE_INT reg_offset;
7878 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7879 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7880 int i;
7882 #if TARGET_ABI_OPEN_VMS
7883 vms_start_function (fnname);
7884 #endif
7886 alpha_fnname = fnname;
7887 sa_size = alpha_sa_size ();
7888 frame_size = compute_frame_size (get_frame_size (), sa_size);
7890 if (TARGET_ABI_OPEN_VMS)
7891 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7892 else
7893 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7895 alpha_sa_mask (&imask, &fmask);
7897 /* Issue function start and label. */
7898 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
7900 fputs ("\t.ent ", file);
7901 assemble_name (file, fnname);
7902 putc ('\n', file);
7904 /* If the function needs GP, we'll write the "..ng" label there.
7905 Otherwise, do it here. */
7906 if (TARGET_ABI_OSF
7907 && ! alpha_function_needs_gp
7908 && ! cfun->is_thunk)
7910 putc ('$', file);
7911 assemble_name (file, fnname);
7912 fputs ("..ng:\n", file);
7915 /* Nested functions on VMS that are potentially called via trampoline
7916 get a special transfer entry point that loads the called functions
7917 procedure descriptor and static chain. */
7918 if (TARGET_ABI_OPEN_VMS
7919 && !TREE_PUBLIC (decl)
7920 && DECL_CONTEXT (decl)
7921 && !TYPE_P (DECL_CONTEXT (decl))
7922 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
7924 strcpy (tramp_label, fnname);
7925 strcat (tramp_label, "..tr");
7926 ASM_OUTPUT_LABEL (file, tramp_label);
7927 fprintf (file, "\tldq $1,24($27)\n");
7928 fprintf (file, "\tldq $27,16($27)\n");
7931 strcpy (entry_label, fnname);
7932 if (TARGET_ABI_OPEN_VMS)
7933 strcat (entry_label, "..en");
7935 ASM_OUTPUT_LABEL (file, entry_label);
7936 inside_function = TRUE;
7938 if (TARGET_ABI_OPEN_VMS)
7939 fprintf (file, "\t.base $%d\n", vms_base_regno);
7941 if (TARGET_ABI_OSF
7942 && TARGET_IEEE_CONFORMANT
7943 && !flag_inhibit_size_directive)
7945 /* Set flags in procedure descriptor to request IEEE-conformant
7946 math-library routines. The value we set it to is PDSC_EXC_IEEE
7947 (/usr/include/pdsc.h). */
7948 fputs ("\t.eflag 48\n", file);
7951 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7952 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7953 alpha_arg_offset = -frame_size + 48;
7955 /* Describe our frame. If the frame size is larger than an integer,
7956 print it as zero to avoid an assembler error. We won't be
7957 properly describing such a frame, but that's the best we can do. */
7958 if (TARGET_ABI_OPEN_VMS)
7959 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7960 HOST_WIDE_INT_PRINT_DEC "\n",
7961 vms_unwind_regno,
7962 frame_size >= (1UL << 31) ? 0 : frame_size,
7963 reg_offset);
7964 else if (!flag_inhibit_size_directive)
7965 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7966 (frame_pointer_needed
7967 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7968 frame_size >= max_frame_size ? 0 : frame_size,
7969 crtl->args.pretend_args_size);
7971 /* Describe which registers were spilled. */
7972 if (TARGET_ABI_OPEN_VMS)
7974 if (imask)
7975 /* ??? Does VMS care if mask contains ra? The old code didn't
7976 set it, so I don't here. */
7977 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7978 if (fmask)
7979 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7980 if (alpha_procedure_type == PT_REGISTER)
7981 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7983 else if (!flag_inhibit_size_directive)
7985 if (imask)
7987 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7988 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7990 for (i = 0; i < 32; ++i)
7991 if (imask & (1UL << i))
7992 reg_offset += 8;
7995 if (fmask)
7996 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7997 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8000 #if TARGET_ABI_OPEN_VMS
8001 /* If a user condition handler has been installed at some point, emit
8002 the procedure descriptor bits to point the Condition Handling Facility
8003 at the indirection wrapper, and state the fp offset at which the user
8004 handler may be found. */
8005 if (cfun->machine->uses_condition_handler)
8007 fprintf (file, "\t.handler __gcc_shell_handler\n");
8008 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8011 #ifdef TARGET_VMS_CRASH_DEBUG
8012 /* Support of minimal traceback info. */
8013 switch_to_section (readonly_data_section);
8014 fprintf (file, "\t.align 3\n");
8015 assemble_name (file, fnname); fputs ("..na:\n", file);
8016 fputs ("\t.ascii \"", file);
8017 assemble_name (file, fnname);
8018 fputs ("\\0\"\n", file);
8019 switch_to_section (text_section);
8020 #endif
8021 #endif /* TARGET_ABI_OPEN_VMS */
8024 /* Emit the .prologue note at the scheduled end of the prologue. */
8026 static void
8027 alpha_output_function_end_prologue (FILE *file)
8029 if (TARGET_ABI_OPEN_VMS)
8030 fputs ("\t.prologue\n", file);
8031 else if (!flag_inhibit_size_directive)
8032 fprintf (file, "\t.prologue %d\n",
8033 alpha_function_needs_gp || cfun->is_thunk);
8036 /* Write function epilogue. */
8038 void
8039 alpha_expand_epilogue (void)
8041 /* Registers to save. */
8042 unsigned long imask = 0;
8043 unsigned long fmask = 0;
8044 /* Stack space needed for pushing registers clobbered by us. */
8045 HOST_WIDE_INT sa_size;
8046 /* Complete stack size needed. */
8047 HOST_WIDE_INT frame_size;
8048 /* Offset from base reg to register save area. */
8049 HOST_WIDE_INT reg_offset;
8050 int fp_is_frame_pointer, fp_offset;
8051 rtx sa_reg, sa_reg_exp = NULL;
8052 rtx sp_adj1, sp_adj2, mem, reg, insn;
8053 rtx eh_ofs;
8054 rtx cfa_restores = NULL_RTX;
8055 int i;
8057 sa_size = alpha_sa_size ();
8058 frame_size = compute_frame_size (get_frame_size (), sa_size);
8060 if (TARGET_ABI_OPEN_VMS)
8062 if (alpha_procedure_type == PT_STACK)
8063 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8064 else
8065 reg_offset = 0;
8067 else
8068 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8070 alpha_sa_mask (&imask, &fmask);
8072 fp_is_frame_pointer
8073 = (TARGET_ABI_OPEN_VMS
8074 ? alpha_procedure_type == PT_STACK
8075 : frame_pointer_needed);
8076 fp_offset = 0;
8077 sa_reg = stack_pointer_rtx;
8079 if (crtl->calls_eh_return)
8080 eh_ofs = EH_RETURN_STACKADJ_RTX;
8081 else
8082 eh_ofs = NULL_RTX;
8084 if (sa_size)
8086 /* If we have a frame pointer, restore SP from it. */
8087 if (TARGET_ABI_OPEN_VMS
8088 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8089 : frame_pointer_needed)
8090 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8092 /* Cope with very large offsets to the register save area. */
8093 if (reg_offset + sa_size > 0x8000)
8095 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8096 HOST_WIDE_INT bias;
8098 if (low + sa_size <= 0x8000)
8099 bias = reg_offset - low, reg_offset = low;
8100 else
8101 bias = reg_offset, reg_offset = 0;
8103 sa_reg = gen_rtx_REG (DImode, 22);
8104 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
8106 emit_move_insn (sa_reg, sa_reg_exp);
8109 /* Restore registers in order, excepting a true frame pointer. */
8111 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8112 reg = gen_rtx_REG (DImode, REG_RA);
8113 emit_move_insn (reg, mem);
8114 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8116 reg_offset += 8;
8117 imask &= ~(1UL << REG_RA);
8119 for (i = 0; i < 31; ++i)
8120 if (imask & (1UL << i))
8122 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8123 fp_offset = reg_offset;
8124 else
8126 mem = gen_frame_mem (DImode,
8127 plus_constant (Pmode, sa_reg,
8128 reg_offset));
8129 reg = gen_rtx_REG (DImode, i);
8130 emit_move_insn (reg, mem);
8131 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8132 cfa_restores);
8134 reg_offset += 8;
8137 for (i = 0; i < 31; ++i)
8138 if (fmask & (1UL << i))
8140 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8141 reg_offset));
8142 reg = gen_rtx_REG (DFmode, i+32);
8143 emit_move_insn (reg, mem);
8144 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8145 reg_offset += 8;
8149 if (frame_size || eh_ofs)
8151 sp_adj1 = stack_pointer_rtx;
8153 if (eh_ofs)
8155 sp_adj1 = gen_rtx_REG (DImode, 23);
8156 emit_move_insn (sp_adj1,
8157 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8160 /* If the stack size is large, begin computation into a temporary
8161 register so as not to interfere with a potential fp restore,
8162 which must be consecutive with an SP restore. */
8163 if (frame_size < 32768 && !cfun->calls_alloca)
8164 sp_adj2 = GEN_INT (frame_size);
8165 else if (frame_size < 0x40007fffL)
8167 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8169 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8170 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8171 sp_adj1 = sa_reg;
8172 else
8174 sp_adj1 = gen_rtx_REG (DImode, 23);
8175 emit_move_insn (sp_adj1, sp_adj2);
8177 sp_adj2 = GEN_INT (low);
8179 else
8181 rtx tmp = gen_rtx_REG (DImode, 23);
8182 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8183 if (!sp_adj2)
8185 /* We can't drop new things to memory this late, afaik,
8186 so build it up by pieces. */
8187 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8188 -(frame_size < 0));
8189 gcc_assert (sp_adj2);
8193 /* From now on, things must be in order. So emit blockages. */
8195 /* Restore the frame pointer. */
8196 if (fp_is_frame_pointer)
8198 emit_insn (gen_blockage ());
8199 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8200 fp_offset));
8201 emit_move_insn (hard_frame_pointer_rtx, mem);
8202 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8203 hard_frame_pointer_rtx, cfa_restores);
8205 else if (TARGET_ABI_OPEN_VMS)
8207 emit_insn (gen_blockage ());
8208 emit_move_insn (hard_frame_pointer_rtx,
8209 gen_rtx_REG (DImode, vms_save_fp_regno));
8210 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8211 hard_frame_pointer_rtx, cfa_restores);
8214 /* Restore the stack pointer. */
8215 emit_insn (gen_blockage ());
8216 if (sp_adj2 == const0_rtx)
8217 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8218 else
8219 insn = emit_move_insn (stack_pointer_rtx,
8220 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8221 REG_NOTES (insn) = cfa_restores;
8222 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8223 RTX_FRAME_RELATED_P (insn) = 1;
8225 else
8227 gcc_assert (cfa_restores == NULL);
8229 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8231 emit_insn (gen_blockage ());
8232 insn = emit_move_insn (hard_frame_pointer_rtx,
8233 gen_rtx_REG (DImode, vms_save_fp_regno));
8234 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8235 RTX_FRAME_RELATED_P (insn) = 1;
8240 /* Output the rest of the textual info surrounding the epilogue. */
8242 void
8243 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8245 rtx insn;
8247 /* We output a nop after noreturn calls at the very end of the function to
8248 ensure that the return address always remains in the caller's code range,
8249 as not doing so might confuse unwinding engines. */
8250 insn = get_last_insn ();
8251 if (!INSN_P (insn))
8252 insn = prev_active_insn (insn);
8253 if (insn && CALL_P (insn))
8254 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8256 #if TARGET_ABI_OPEN_VMS
8257 /* Write the linkage entries. */
8258 alpha_write_linkage (file, fnname);
8259 #endif
8261 /* End the function. */
8262 if (TARGET_ABI_OPEN_VMS
8263 || !flag_inhibit_size_directive)
8265 fputs ("\t.end ", file);
8266 assemble_name (file, fnname);
8267 putc ('\n', file);
8269 inside_function = FALSE;
8272 #if TARGET_ABI_OSF
8273 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8275 In order to avoid the hordes of differences between generated code
8276 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8277 lots of code loading up large constants, generate rtl and emit it
8278 instead of going straight to text.
8280 Not sure why this idea hasn't been explored before... */
8282 static void
8283 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8284 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8285 tree function)
8287 HOST_WIDE_INT hi, lo;
8288 rtx this_rtx, insn, funexp;
8290 /* We always require a valid GP. */
8291 emit_insn (gen_prologue_ldgp ());
8292 emit_note (NOTE_INSN_PROLOGUE_END);
8294 /* Find the "this" pointer. If the function returns a structure,
8295 the structure return pointer is in $16. */
8296 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8297 this_rtx = gen_rtx_REG (Pmode, 17);
8298 else
8299 this_rtx = gen_rtx_REG (Pmode, 16);
8301 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8302 entire constant for the add. */
8303 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8304 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8305 if (hi + lo == delta)
8307 if (hi)
8308 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8309 if (lo)
8310 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8312 else
8314 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8315 delta, -(delta < 0));
8316 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8319 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8320 if (vcall_offset)
8322 rtx tmp, tmp2;
8324 tmp = gen_rtx_REG (Pmode, 0);
8325 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8327 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8328 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8329 if (hi + lo == vcall_offset)
8331 if (hi)
8332 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8334 else
8336 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8337 vcall_offset, -(vcall_offset < 0));
8338 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8339 lo = 0;
8341 if (lo)
8342 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8343 else
8344 tmp2 = tmp;
8345 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8347 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8350 /* Generate a tail call to the target function. */
8351 if (! TREE_USED (function))
8353 assemble_external (function);
8354 TREE_USED (function) = 1;
8356 funexp = XEXP (DECL_RTL (function), 0);
8357 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8358 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8359 SIBLING_CALL_P (insn) = 1;
8361 /* Run just enough of rest_of_compilation to get the insns emitted.
8362 There's not really enough bulk here to make other passes such as
8363 instruction scheduling worth while. Note that use_thunk calls
8364 assemble_start_function and assemble_end_function. */
8365 insn = get_insns ();
8366 shorten_branches (insn);
8367 final_start_function (insn, file, 1);
8368 final (insn, file, 1);
8369 final_end_function ();
8371 #endif /* TARGET_ABI_OSF */
8373 /* Debugging support. */
8375 #include "gstab.h"
8377 /* Name of the file containing the current function. */
8379 static const char *current_function_file = "";
8381 /* Offsets to alpha virtual arg/local debugging pointers. */
8383 long alpha_arg_offset;
8384 long alpha_auto_offset;
8386 /* Emit a new filename to a stream. */
8388 void
8389 alpha_output_filename (FILE *stream, const char *name)
8391 static int first_time = TRUE;
8393 if (first_time)
8395 first_time = FALSE;
8396 ++num_source_filenames;
8397 current_function_file = name;
8398 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8399 output_quoted_string (stream, name);
8400 fprintf (stream, "\n");
8403 else if (name != current_function_file
8404 && strcmp (name, current_function_file) != 0)
8406 ++num_source_filenames;
8407 current_function_file = name;
8408 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8410 output_quoted_string (stream, name);
8411 fprintf (stream, "\n");
8415 /* Structure to show the current status of registers and memory. */
8417 struct shadow_summary
8419 struct {
8420 unsigned int i : 31; /* Mask of int regs */
8421 unsigned int fp : 31; /* Mask of fp regs */
8422 unsigned int mem : 1; /* mem == imem | fpmem */
8423 } used, defd;
8426 /* Summary the effects of expression X on the machine. Update SUM, a pointer
8427 to the summary structure. SET is nonzero if the insn is setting the
8428 object, otherwise zero. */
8430 static void
8431 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8433 const char *format_ptr;
8434 int i, j;
8436 if (x == 0)
8437 return;
8439 switch (GET_CODE (x))
8441 /* ??? Note that this case would be incorrect if the Alpha had a
8442 ZERO_EXTRACT in SET_DEST. */
8443 case SET:
8444 summarize_insn (SET_SRC (x), sum, 0);
8445 summarize_insn (SET_DEST (x), sum, 1);
8446 break;
8448 case CLOBBER:
8449 summarize_insn (XEXP (x, 0), sum, 1);
8450 break;
8452 case USE:
8453 summarize_insn (XEXP (x, 0), sum, 0);
8454 break;
8456 case ASM_OPERANDS:
8457 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8458 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8459 break;
8461 case PARALLEL:
8462 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8463 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8464 break;
8466 case SUBREG:
8467 summarize_insn (SUBREG_REG (x), sum, 0);
8468 break;
8470 case REG:
8472 int regno = REGNO (x);
8473 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8475 if (regno == 31 || regno == 63)
8476 break;
8478 if (set)
8480 if (regno < 32)
8481 sum->defd.i |= mask;
8482 else
8483 sum->defd.fp |= mask;
8485 else
8487 if (regno < 32)
8488 sum->used.i |= mask;
8489 else
8490 sum->used.fp |= mask;
8493 break;
8495 case MEM:
8496 if (set)
8497 sum->defd.mem = 1;
8498 else
8499 sum->used.mem = 1;
8501 /* Find the regs used in memory address computation: */
8502 summarize_insn (XEXP (x, 0), sum, 0);
8503 break;
8505 case CONST_INT: case CONST_DOUBLE:
8506 case SYMBOL_REF: case LABEL_REF: case CONST:
8507 case SCRATCH: case ASM_INPUT:
8508 break;
8510 /* Handle common unary and binary ops for efficiency. */
8511 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8512 case MOD: case UDIV: case UMOD: case AND: case IOR:
8513 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8514 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8515 case NE: case EQ: case GE: case GT: case LE:
8516 case LT: case GEU: case GTU: case LEU: case LTU:
8517 summarize_insn (XEXP (x, 0), sum, 0);
8518 summarize_insn (XEXP (x, 1), sum, 0);
8519 break;
8521 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8522 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8523 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8524 case SQRT: case FFS:
8525 summarize_insn (XEXP (x, 0), sum, 0);
8526 break;
8528 default:
8529 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8530 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8531 switch (format_ptr[i])
8533 case 'e':
8534 summarize_insn (XEXP (x, i), sum, 0);
8535 break;
8537 case 'E':
8538 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8539 summarize_insn (XVECEXP (x, i, j), sum, 0);
8540 break;
8542 case 'i':
8543 break;
8545 default:
8546 gcc_unreachable ();
8551 /* Ensure a sufficient number of `trapb' insns are in the code when
8552 the user requests code with a trap precision of functions or
8553 instructions.
8555 In naive mode, when the user requests a trap-precision of
8556 "instruction", a trapb is needed after every instruction that may
8557 generate a trap. This ensures that the code is resumption safe but
8558 it is also slow.
8560 When optimizations are turned on, we delay issuing a trapb as long
8561 as possible. In this context, a trap shadow is the sequence of
8562 instructions that starts with a (potentially) trap generating
8563 instruction and extends to the next trapb or call_pal instruction
8564 (but GCC never generates call_pal by itself). We can delay (and
8565 therefore sometimes omit) a trapb subject to the following
8566 conditions:
8568 (a) On entry to the trap shadow, if any Alpha register or memory
8569 location contains a value that is used as an operand value by some
8570 instruction in the trap shadow (live on entry), then no instruction
8571 in the trap shadow may modify the register or memory location.
8573 (b) Within the trap shadow, the computation of the base register
8574 for a memory load or store instruction may not involve using the
8575 result of an instruction that might generate an UNPREDICTABLE
8576 result.
8578 (c) Within the trap shadow, no register may be used more than once
8579 as a destination register. (This is to make life easier for the
8580 trap-handler.)
8582 (d) The trap shadow may not include any branch instructions. */
8584 static void
8585 alpha_handle_trap_shadows (void)
8587 struct shadow_summary shadow;
8588 int trap_pending, exception_nesting;
8589 rtx i, n;
8591 trap_pending = 0;
8592 exception_nesting = 0;
8593 shadow.used.i = 0;
8594 shadow.used.fp = 0;
8595 shadow.used.mem = 0;
8596 shadow.defd = shadow.used;
8598 for (i = get_insns (); i ; i = NEXT_INSN (i))
8600 if (NOTE_P (i))
8602 switch (NOTE_KIND (i))
8604 case NOTE_INSN_EH_REGION_BEG:
8605 exception_nesting++;
8606 if (trap_pending)
8607 goto close_shadow;
8608 break;
8610 case NOTE_INSN_EH_REGION_END:
8611 exception_nesting--;
8612 if (trap_pending)
8613 goto close_shadow;
8614 break;
8616 case NOTE_INSN_EPILOGUE_BEG:
8617 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8618 goto close_shadow;
8619 break;
8622 else if (trap_pending)
8624 if (alpha_tp == ALPHA_TP_FUNC)
8626 if (JUMP_P (i)
8627 && GET_CODE (PATTERN (i)) == RETURN)
8628 goto close_shadow;
8630 else if (alpha_tp == ALPHA_TP_INSN)
8632 if (optimize > 0)
8634 struct shadow_summary sum;
8636 sum.used.i = 0;
8637 sum.used.fp = 0;
8638 sum.used.mem = 0;
8639 sum.defd = sum.used;
8641 switch (GET_CODE (i))
8643 case INSN:
8644 /* Annoyingly, get_attr_trap will die on these. */
8645 if (GET_CODE (PATTERN (i)) == USE
8646 || GET_CODE (PATTERN (i)) == CLOBBER)
8647 break;
8649 summarize_insn (PATTERN (i), &sum, 0);
8651 if ((sum.defd.i & shadow.defd.i)
8652 || (sum.defd.fp & shadow.defd.fp))
8654 /* (c) would be violated */
8655 goto close_shadow;
8658 /* Combine shadow with summary of current insn: */
8659 shadow.used.i |= sum.used.i;
8660 shadow.used.fp |= sum.used.fp;
8661 shadow.used.mem |= sum.used.mem;
8662 shadow.defd.i |= sum.defd.i;
8663 shadow.defd.fp |= sum.defd.fp;
8664 shadow.defd.mem |= sum.defd.mem;
8666 if ((sum.defd.i & shadow.used.i)
8667 || (sum.defd.fp & shadow.used.fp)
8668 || (sum.defd.mem & shadow.used.mem))
8670 /* (a) would be violated (also takes care of (b)) */
8671 gcc_assert (get_attr_trap (i) != TRAP_YES
8672 || (!(sum.defd.i & sum.used.i)
8673 && !(sum.defd.fp & sum.used.fp)));
8675 goto close_shadow;
8677 break;
8679 case JUMP_INSN:
8680 case CALL_INSN:
8681 case CODE_LABEL:
8682 goto close_shadow;
8684 default:
8685 gcc_unreachable ();
8688 else
8690 close_shadow:
8691 n = emit_insn_before (gen_trapb (), i);
8692 PUT_MODE (n, TImode);
8693 PUT_MODE (i, TImode);
8694 trap_pending = 0;
8695 shadow.used.i = 0;
8696 shadow.used.fp = 0;
8697 shadow.used.mem = 0;
8698 shadow.defd = shadow.used;
8703 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8704 && NONJUMP_INSN_P (i)
8705 && GET_CODE (PATTERN (i)) != USE
8706 && GET_CODE (PATTERN (i)) != CLOBBER
8707 && get_attr_trap (i) == TRAP_YES)
8709 if (optimize && !trap_pending)
8710 summarize_insn (PATTERN (i), &shadow, 0);
8711 trap_pending = 1;
8716 /* Alpha can only issue instruction groups simultaneously if they are
8717 suitably aligned. This is very processor-specific. */
8718 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8719 that are marked "fake". These instructions do not exist on that target,
8720 but it is possible to see these insns with deranged combinations of
8721 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8722 choose a result at random. */
8724 enum alphaev4_pipe {
8725 EV4_STOP = 0,
8726 EV4_IB0 = 1,
8727 EV4_IB1 = 2,
8728 EV4_IBX = 4
8731 enum alphaev5_pipe {
8732 EV5_STOP = 0,
8733 EV5_NONE = 1,
8734 EV5_E01 = 2,
8735 EV5_E0 = 4,
8736 EV5_E1 = 8,
8737 EV5_FAM = 16,
8738 EV5_FA = 32,
8739 EV5_FM = 64
8742 static enum alphaev4_pipe
8743 alphaev4_insn_pipe (rtx insn)
8745 if (recog_memoized (insn) < 0)
8746 return EV4_STOP;
8747 if (get_attr_length (insn) != 4)
8748 return EV4_STOP;
8750 switch (get_attr_type (insn))
8752 case TYPE_ILD:
8753 case TYPE_LDSYM:
8754 case TYPE_FLD:
8755 case TYPE_LD_L:
8756 return EV4_IBX;
8758 case TYPE_IADD:
8759 case TYPE_ILOG:
8760 case TYPE_ICMOV:
8761 case TYPE_ICMP:
8762 case TYPE_FST:
8763 case TYPE_SHIFT:
8764 case TYPE_IMUL:
8765 case TYPE_FBR:
8766 case TYPE_MVI: /* fake */
8767 return EV4_IB0;
8769 case TYPE_IST:
8770 case TYPE_MISC:
8771 case TYPE_IBR:
8772 case TYPE_JSR:
8773 case TYPE_CALLPAL:
8774 case TYPE_FCPYS:
8775 case TYPE_FCMOV:
8776 case TYPE_FADD:
8777 case TYPE_FDIV:
8778 case TYPE_FMUL:
8779 case TYPE_ST_C:
8780 case TYPE_MB:
8781 case TYPE_FSQRT: /* fake */
8782 case TYPE_FTOI: /* fake */
8783 case TYPE_ITOF: /* fake */
8784 return EV4_IB1;
8786 default:
8787 gcc_unreachable ();
8791 static enum alphaev5_pipe
8792 alphaev5_insn_pipe (rtx insn)
8794 if (recog_memoized (insn) < 0)
8795 return EV5_STOP;
8796 if (get_attr_length (insn) != 4)
8797 return EV5_STOP;
8799 switch (get_attr_type (insn))
8801 case TYPE_ILD:
8802 case TYPE_FLD:
8803 case TYPE_LDSYM:
8804 case TYPE_IADD:
8805 case TYPE_ILOG:
8806 case TYPE_ICMOV:
8807 case TYPE_ICMP:
8808 return EV5_E01;
8810 case TYPE_IST:
8811 case TYPE_FST:
8812 case TYPE_SHIFT:
8813 case TYPE_IMUL:
8814 case TYPE_MISC:
8815 case TYPE_MVI:
8816 case TYPE_LD_L:
8817 case TYPE_ST_C:
8818 case TYPE_MB:
8819 case TYPE_FTOI: /* fake */
8820 case TYPE_ITOF: /* fake */
8821 return EV5_E0;
8823 case TYPE_IBR:
8824 case TYPE_JSR:
8825 case TYPE_CALLPAL:
8826 return EV5_E1;
8828 case TYPE_FCPYS:
8829 return EV5_FAM;
8831 case TYPE_FBR:
8832 case TYPE_FCMOV:
8833 case TYPE_FADD:
8834 case TYPE_FDIV:
8835 case TYPE_FSQRT: /* fake */
8836 return EV5_FA;
8838 case TYPE_FMUL:
8839 return EV5_FM;
8841 default:
8842 gcc_unreachable ();
8846 /* IN_USE is a mask of the slots currently filled within the insn group.
8847 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8848 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8850 LEN is, of course, the length of the group in bytes. */
8852 static rtx
8853 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8855 int len, in_use;
8857 len = in_use = 0;
8859 if (! INSN_P (insn)
8860 || GET_CODE (PATTERN (insn)) == CLOBBER
8861 || GET_CODE (PATTERN (insn)) == USE)
8862 goto next_and_done;
8864 while (1)
8866 enum alphaev4_pipe pipe;
8868 pipe = alphaev4_insn_pipe (insn);
8869 switch (pipe)
8871 case EV4_STOP:
8872 /* Force complex instructions to start new groups. */
8873 if (in_use)
8874 goto done;
8876 /* If this is a completely unrecognized insn, it's an asm.
8877 We don't know how long it is, so record length as -1 to
8878 signal a needed realignment. */
8879 if (recog_memoized (insn) < 0)
8880 len = -1;
8881 else
8882 len = get_attr_length (insn);
8883 goto next_and_done;
8885 case EV4_IBX:
8886 if (in_use & EV4_IB0)
8888 if (in_use & EV4_IB1)
8889 goto done;
8890 in_use |= EV4_IB1;
8892 else
8893 in_use |= EV4_IB0 | EV4_IBX;
8894 break;
8896 case EV4_IB0:
8897 if (in_use & EV4_IB0)
8899 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8900 goto done;
8901 in_use |= EV4_IB1;
8903 in_use |= EV4_IB0;
8904 break;
8906 case EV4_IB1:
8907 if (in_use & EV4_IB1)
8908 goto done;
8909 in_use |= EV4_IB1;
8910 break;
8912 default:
8913 gcc_unreachable ();
8915 len += 4;
8917 /* Haifa doesn't do well scheduling branches. */
8918 if (JUMP_P (insn))
8919 goto next_and_done;
8921 next:
8922 insn = next_nonnote_insn (insn);
8924 if (!insn || ! INSN_P (insn))
8925 goto done;
8927 /* Let Haifa tell us where it thinks insn group boundaries are. */
8928 if (GET_MODE (insn) == TImode)
8929 goto done;
8931 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8932 goto next;
8935 next_and_done:
8936 insn = next_nonnote_insn (insn);
8938 done:
8939 *plen = len;
8940 *pin_use = in_use;
8941 return insn;
8944 /* IN_USE is a mask of the slots currently filled within the insn group.
8945 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8946 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8948 LEN is, of course, the length of the group in bytes. */
8950 static rtx
8951 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8953 int len, in_use;
8955 len = in_use = 0;
8957 if (! INSN_P (insn)
8958 || GET_CODE (PATTERN (insn)) == CLOBBER
8959 || GET_CODE (PATTERN (insn)) == USE)
8960 goto next_and_done;
8962 while (1)
8964 enum alphaev5_pipe pipe;
8966 pipe = alphaev5_insn_pipe (insn);
8967 switch (pipe)
8969 case EV5_STOP:
8970 /* Force complex instructions to start new groups. */
8971 if (in_use)
8972 goto done;
8974 /* If this is a completely unrecognized insn, it's an asm.
8975 We don't know how long it is, so record length as -1 to
8976 signal a needed realignment. */
8977 if (recog_memoized (insn) < 0)
8978 len = -1;
8979 else
8980 len = get_attr_length (insn);
8981 goto next_and_done;
8983 /* ??? Most of the places below, we would like to assert never
8984 happen, as it would indicate an error either in Haifa, or
8985 in the scheduling description. Unfortunately, Haifa never
8986 schedules the last instruction of the BB, so we don't have
8987 an accurate TI bit to go off. */
8988 case EV5_E01:
8989 if (in_use & EV5_E0)
8991 if (in_use & EV5_E1)
8992 goto done;
8993 in_use |= EV5_E1;
8995 else
8996 in_use |= EV5_E0 | EV5_E01;
8997 break;
8999 case EV5_E0:
9000 if (in_use & EV5_E0)
9002 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9003 goto done;
9004 in_use |= EV5_E1;
9006 in_use |= EV5_E0;
9007 break;
9009 case EV5_E1:
9010 if (in_use & EV5_E1)
9011 goto done;
9012 in_use |= EV5_E1;
9013 break;
9015 case EV5_FAM:
9016 if (in_use & EV5_FA)
9018 if (in_use & EV5_FM)
9019 goto done;
9020 in_use |= EV5_FM;
9022 else
9023 in_use |= EV5_FA | EV5_FAM;
9024 break;
9026 case EV5_FA:
9027 if (in_use & EV5_FA)
9028 goto done;
9029 in_use |= EV5_FA;
9030 break;
9032 case EV5_FM:
9033 if (in_use & EV5_FM)
9034 goto done;
9035 in_use |= EV5_FM;
9036 break;
9038 case EV5_NONE:
9039 break;
9041 default:
9042 gcc_unreachable ();
9044 len += 4;
9046 /* Haifa doesn't do well scheduling branches. */
9047 /* ??? If this is predicted not-taken, slotting continues, except
9048 that no more IBR, FBR, or JSR insns may be slotted. */
9049 if (JUMP_P (insn))
9050 goto next_and_done;
9052 next:
9053 insn = next_nonnote_insn (insn);
9055 if (!insn || ! INSN_P (insn))
9056 goto done;
9058 /* Let Haifa tell us where it thinks insn group boundaries are. */
9059 if (GET_MODE (insn) == TImode)
9060 goto done;
9062 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9063 goto next;
9066 next_and_done:
9067 insn = next_nonnote_insn (insn);
9069 done:
9070 *plen = len;
9071 *pin_use = in_use;
9072 return insn;
9075 static rtx
9076 alphaev4_next_nop (int *pin_use)
9078 int in_use = *pin_use;
9079 rtx nop;
9081 if (!(in_use & EV4_IB0))
9083 in_use |= EV4_IB0;
9084 nop = gen_nop ();
9086 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9088 in_use |= EV4_IB1;
9089 nop = gen_nop ();
9091 else if (TARGET_FP && !(in_use & EV4_IB1))
9093 in_use |= EV4_IB1;
9094 nop = gen_fnop ();
9096 else
9097 nop = gen_unop ();
9099 *pin_use = in_use;
9100 return nop;
9103 static rtx
9104 alphaev5_next_nop (int *pin_use)
9106 int in_use = *pin_use;
9107 rtx nop;
9109 if (!(in_use & EV5_E1))
9111 in_use |= EV5_E1;
9112 nop = gen_nop ();
9114 else if (TARGET_FP && !(in_use & EV5_FA))
9116 in_use |= EV5_FA;
9117 nop = gen_fnop ();
9119 else if (TARGET_FP && !(in_use & EV5_FM))
9121 in_use |= EV5_FM;
9122 nop = gen_fnop ();
9124 else
9125 nop = gen_unop ();
9127 *pin_use = in_use;
9128 return nop;
9131 /* The instruction group alignment main loop. */
9133 static void
9134 alpha_align_insns (unsigned int max_align,
9135 rtx (*next_group) (rtx, int *, int *),
9136 rtx (*next_nop) (int *))
9138 /* ALIGN is the known alignment for the insn group. */
9139 unsigned int align;
9140 /* OFS is the offset of the current insn in the insn group. */
9141 int ofs;
9142 int prev_in_use, in_use, len, ldgp;
9143 rtx i, next;
9145 /* Let shorten branches care for assigning alignments to code labels. */
9146 shorten_branches (get_insns ());
9148 if (align_functions < 4)
9149 align = 4;
9150 else if ((unsigned int) align_functions < max_align)
9151 align = align_functions;
9152 else
9153 align = max_align;
9155 ofs = prev_in_use = 0;
9156 i = get_insns ();
9157 if (NOTE_P (i))
9158 i = next_nonnote_insn (i);
9160 ldgp = alpha_function_needs_gp ? 8 : 0;
9162 while (i)
9164 next = (*next_group) (i, &in_use, &len);
9166 /* When we see a label, resync alignment etc. */
9167 if (LABEL_P (i))
9169 unsigned int new_align = 1 << label_to_alignment (i);
9171 if (new_align >= align)
9173 align = new_align < max_align ? new_align : max_align;
9174 ofs = 0;
9177 else if (ofs & (new_align-1))
9178 ofs = (ofs | (new_align-1)) + 1;
9179 gcc_assert (!len);
9182 /* Handle complex instructions special. */
9183 else if (in_use == 0)
9185 /* Asms will have length < 0. This is a signal that we have
9186 lost alignment knowledge. Assume, however, that the asm
9187 will not mis-align instructions. */
9188 if (len < 0)
9190 ofs = 0;
9191 align = 4;
9192 len = 0;
9196 /* If the known alignment is smaller than the recognized insn group,
9197 realign the output. */
9198 else if ((int) align < len)
9200 unsigned int new_log_align = len > 8 ? 4 : 3;
9201 rtx prev, where;
9203 where = prev = prev_nonnote_insn (i);
9204 if (!where || !LABEL_P (where))
9205 where = i;
9207 /* Can't realign between a call and its gp reload. */
9208 if (! (TARGET_EXPLICIT_RELOCS
9209 && prev && CALL_P (prev)))
9211 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9212 align = 1 << new_log_align;
9213 ofs = 0;
9217 /* We may not insert padding inside the initial ldgp sequence. */
9218 else if (ldgp > 0)
9219 ldgp -= len;
9221 /* If the group won't fit in the same INT16 as the previous,
9222 we need to add padding to keep the group together. Rather
9223 than simply leaving the insn filling to the assembler, we
9224 can make use of the knowledge of what sorts of instructions
9225 were issued in the previous group to make sure that all of
9226 the added nops are really free. */
9227 else if (ofs + len > (int) align)
9229 int nop_count = (align - ofs) / 4;
9230 rtx where;
9232 /* Insert nops before labels, branches, and calls to truly merge
9233 the execution of the nops with the previous instruction group. */
9234 where = prev_nonnote_insn (i);
9235 if (where)
9237 if (LABEL_P (where))
9239 rtx where2 = prev_nonnote_insn (where);
9240 if (where2 && JUMP_P (where2))
9241 where = where2;
9243 else if (NONJUMP_INSN_P (where))
9244 where = i;
9246 else
9247 where = i;
9250 emit_insn_before ((*next_nop)(&prev_in_use), where);
9251 while (--nop_count);
9252 ofs = 0;
9255 ofs = (ofs + len) & (align - 1);
9256 prev_in_use = in_use;
9257 i = next;
9261 /* Insert an unop between sibcall or noreturn function call and GP load. */
9263 static void
9264 alpha_pad_function_end (void)
9266 rtx insn, next;
9268 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9270 if (! (CALL_P (insn)
9271 && (SIBLING_CALL_P (insn)
9272 || find_reg_note (insn, REG_NORETURN, NULL_RTX))))
9273 continue;
9275 /* Make sure we do not split a call and its corresponding
9276 CALL_ARG_LOCATION note. */
9277 if (CALL_P (insn))
9279 next = NEXT_INSN (insn);
9280 if (next && NOTE_P (next)
9281 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9282 insn = next;
9285 next = next_active_insn (insn);
9287 if (next)
9289 rtx pat = PATTERN (next);
9291 if (GET_CODE (pat) == SET
9292 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9293 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9294 emit_insn_after (gen_unop (), insn);
9299 /* Machine dependent reorg pass. */
9301 static void
9302 alpha_reorg (void)
9304 /* Workaround for a linker error that triggers when an exception
9305 handler immediatelly follows a sibcall or a noreturn function.
9307 In the sibcall case:
9309 The instruction stream from an object file:
9311 1d8: 00 00 fb 6b jmp (t12)
9312 1dc: 00 00 ba 27 ldah gp,0(ra)
9313 1e0: 00 00 bd 23 lda gp,0(gp)
9314 1e4: 00 00 7d a7 ldq t12,0(gp)
9315 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9317 was converted in the final link pass to:
9319 12003aa88: 67 fa ff c3 br 120039428 <...>
9320 12003aa8c: 00 00 fe 2f unop
9321 12003aa90: 00 00 fe 2f unop
9322 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9323 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9325 And in the noreturn case:
9327 The instruction stream from an object file:
9329 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9330 58: 00 00 ba 27 ldah gp,0(ra)
9331 5c: 00 00 bd 23 lda gp,0(gp)
9332 60: 00 00 7d a7 ldq t12,0(gp)
9333 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9335 was converted in the final link pass to:
9337 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9338 fdb28: 00 00 fe 2f unop
9339 fdb2c: 00 00 fe 2f unop
9340 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9341 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9343 GP load instructions were wrongly cleared by the linker relaxation
9344 pass. This workaround prevents removal of GP loads by inserting
9345 an unop instruction between a sibcall or noreturn function call and
9346 exception handler prologue. */
9348 if (current_function_has_exception_handlers ())
9349 alpha_pad_function_end ();
9351 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9352 alpha_handle_trap_shadows ();
9354 /* Due to the number of extra trapb insns, don't bother fixing up
9355 alignment when trap precision is instruction. Moreover, we can
9356 only do our job when sched2 is run. */
9357 if (optimize && !optimize_size
9358 && alpha_tp != ALPHA_TP_INSN
9359 && flag_schedule_insns_after_reload)
9361 if (alpha_tune == PROCESSOR_EV4)
9362 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9363 else if (alpha_tune == PROCESSOR_EV5)
9364 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9368 static void
9369 alpha_file_start (void)
9371 default_file_start ();
9373 fputs ("\t.set noreorder\n", asm_out_file);
9374 fputs ("\t.set volatile\n", asm_out_file);
9375 if (TARGET_ABI_OSF)
9376 fputs ("\t.set noat\n", asm_out_file);
9377 if (TARGET_EXPLICIT_RELOCS)
9378 fputs ("\t.set nomacro\n", asm_out_file);
9379 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9381 const char *arch;
9383 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9384 arch = "ev6";
9385 else if (TARGET_MAX)
9386 arch = "pca56";
9387 else if (TARGET_BWX)
9388 arch = "ev56";
9389 else if (alpha_cpu == PROCESSOR_EV5)
9390 arch = "ev5";
9391 else
9392 arch = "ev4";
9394 fprintf (asm_out_file, "\t.arch %s\n", arch);
9398 /* Since we don't have a .dynbss section, we should not allow global
9399 relocations in the .rodata section. */
9401 static int
9402 alpha_elf_reloc_rw_mask (void)
9404 return flag_pic ? 3 : 2;
9407 /* Return a section for X. The only special thing we do here is to
9408 honor small data. */
9410 static section *
9411 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9412 unsigned HOST_WIDE_INT align)
9414 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9415 /* ??? Consider using mergeable sdata sections. */
9416 return sdata_section;
9417 else
9418 return default_elf_select_rtx_section (mode, x, align);
9421 static unsigned int
9422 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9424 unsigned int flags = 0;
9426 if (strcmp (name, ".sdata") == 0
9427 || strncmp (name, ".sdata.", 7) == 0
9428 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9429 || strcmp (name, ".sbss") == 0
9430 || strncmp (name, ".sbss.", 6) == 0
9431 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9432 flags = SECTION_SMALL;
9434 flags |= default_section_type_flags (decl, name, reloc);
9435 return flags;
9438 /* Structure to collect function names for final output in link section. */
9439 /* Note that items marked with GTY can't be ifdef'ed out. */
9441 enum reloc_kind
9443 KIND_LINKAGE,
9444 KIND_CODEADDR
9447 struct GTY(()) alpha_links
9449 rtx func;
9450 rtx linkage;
9451 enum reloc_kind rkind;
9454 #if TARGET_ABI_OPEN_VMS
9456 /* Return the VMS argument type corresponding to MODE. */
9458 enum avms_arg_type
9459 alpha_arg_type (enum machine_mode mode)
9461 switch (mode)
9463 case SFmode:
9464 return TARGET_FLOAT_VAX ? FF : FS;
9465 case DFmode:
9466 return TARGET_FLOAT_VAX ? FD : FT;
9467 default:
9468 return I64;
9472 /* Return an rtx for an integer representing the VMS Argument Information
9473 register value. */
9476 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9478 unsigned HOST_WIDE_INT regval = cum.num_args;
9479 int i;
9481 for (i = 0; i < 6; i++)
9482 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9484 return GEN_INT (regval);
9488 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9489 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9490 this is the reference to the linkage pointer value, 0 if this is the
9491 reference to the function entry value. RFLAG is 1 if this a reduced
9492 reference (code address only), 0 if this is a full reference. */
9495 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9497 struct alpha_links *al = NULL;
9498 const char *name = XSTR (func, 0);
9500 if (cfun->machine->links)
9502 splay_tree_node lnode;
9504 /* Is this name already defined? */
9505 lnode = splay_tree_lookup (cfun->machine->links, (splay_tree_key) name);
9506 if (lnode)
9507 al = (struct alpha_links *) lnode->value;
9509 else
9510 cfun->machine->links = splay_tree_new_ggc
9511 ((splay_tree_compare_fn) strcmp,
9512 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9513 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9515 if (al == NULL)
9517 size_t buf_len;
9518 char *linksym;
9519 tree id;
9521 if (name[0] == '*')
9522 name++;
9524 /* Follow transparent alias, as this is used for CRTL translations. */
9525 id = maybe_get_identifier (name);
9526 if (id)
9528 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9529 id = TREE_CHAIN (id);
9530 name = IDENTIFIER_POINTER (id);
9533 buf_len = strlen (name) + 8 + 9;
9534 linksym = (char *) alloca (buf_len);
9535 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
9537 al = ggc_alloc_alpha_links ();
9538 al->func = func;
9539 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9541 splay_tree_insert (cfun->machine->links,
9542 (splay_tree_key) ggc_strdup (name),
9543 (splay_tree_value) al);
9546 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9548 if (lflag)
9549 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
9550 else
9551 return al->linkage;
9554 static int
9555 alpha_write_one_linkage (splay_tree_node node, void *data)
9557 const char *const name = (const char *) node->key;
9558 struct alpha_links *link = (struct alpha_links *) node->value;
9559 FILE *stream = (FILE *) data;
9561 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9562 if (link->rkind == KIND_CODEADDR)
9564 /* External and used, request code address. */
9565 fprintf (stream, "\t.code_address ");
9567 else
9569 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9570 && SYMBOL_REF_LOCAL_P (link->func))
9572 /* Locally defined, build linkage pair. */
9573 fprintf (stream, "\t.quad %s..en\n", name);
9574 fprintf (stream, "\t.quad ");
9576 else
9578 /* External, request linkage pair. */
9579 fprintf (stream, "\t.linkage ");
9582 assemble_name (stream, name);
9583 fputs ("\n", stream);
9585 return 0;
9588 static void
9589 alpha_write_linkage (FILE *stream, const char *funname)
9591 fprintf (stream, "\t.link\n");
9592 fprintf (stream, "\t.align 3\n");
9593 in_section = NULL;
9595 #ifdef TARGET_VMS_CRASH_DEBUG
9596 fputs ("\t.name ", stream);
9597 assemble_name (stream, funname);
9598 fputs ("..na\n", stream);
9599 #endif
9601 ASM_OUTPUT_LABEL (stream, funname);
9602 fprintf (stream, "\t.pdesc ");
9603 assemble_name (stream, funname);
9604 fprintf (stream, "..en,%s\n",
9605 alpha_procedure_type == PT_STACK ? "stack"
9606 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9608 if (cfun->machine->links)
9610 splay_tree_foreach (cfun->machine->links, alpha_write_one_linkage, stream);
9611 /* splay_tree_delete (func->links); */
9615 /* Switch to an arbitrary section NAME with attributes as specified
9616 by FLAGS. ALIGN specifies any known alignment requirements for
9617 the section; 0 if the default should be used. */
9619 static void
9620 vms_asm_named_section (const char *name, unsigned int flags,
9621 tree decl ATTRIBUTE_UNUSED)
9623 fputc ('\n', asm_out_file);
9624 fprintf (asm_out_file, ".section\t%s", name);
9626 if (flags & SECTION_DEBUG)
9627 fprintf (asm_out_file, ",NOWRT");
9629 fputc ('\n', asm_out_file);
9632 /* Record an element in the table of global constructors. SYMBOL is
9633 a SYMBOL_REF of the function to be called; PRIORITY is a number
9634 between 0 and MAX_INIT_PRIORITY.
9636 Differs from default_ctors_section_asm_out_constructor in that the
9637 width of the .ctors entry is always 64 bits, rather than the 32 bits
9638 used by a normal pointer. */
9640 static void
9641 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9643 switch_to_section (ctors_section);
9644 assemble_align (BITS_PER_WORD);
9645 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9648 static void
9649 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9651 switch_to_section (dtors_section);
9652 assemble_align (BITS_PER_WORD);
9653 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9655 #else
9657 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9658 bool lflag ATTRIBUTE_UNUSED,
9659 bool rflag ATTRIBUTE_UNUSED)
9661 return NULL_RTX;
9664 #endif /* TARGET_ABI_OPEN_VMS */
9666 static void
9667 alpha_init_libfuncs (void)
9669 if (TARGET_ABI_OPEN_VMS)
9671 /* Use the VMS runtime library functions for division and
9672 remainder. */
9673 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9674 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9675 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9676 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9677 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9678 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9679 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9680 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9681 abort_libfunc = init_one_libfunc ("decc$abort");
9682 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
9683 #ifdef MEM_LIBFUNCS_INIT
9684 MEM_LIBFUNCS_INIT;
9685 #endif
9689 /* On the Alpha, we use this to disable the floating-point registers
9690 when they don't exist. */
9692 static void
9693 alpha_conditional_register_usage (void)
9695 int i;
9696 if (! TARGET_FPREGS)
9697 for (i = 32; i < 63; i++)
9698 fixed_regs[i] = call_used_regs[i] = 1;
9701 /* Initialize the GCC target structure. */
9702 #if TARGET_ABI_OPEN_VMS
9703 # undef TARGET_ATTRIBUTE_TABLE
9704 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9705 # undef TARGET_CAN_ELIMINATE
9706 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
9707 #endif
9709 #undef TARGET_IN_SMALL_DATA_P
9710 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9712 #undef TARGET_ASM_ALIGNED_HI_OP
9713 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9714 #undef TARGET_ASM_ALIGNED_DI_OP
9715 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9717 /* Default unaligned ops are provided for ELF systems. To get unaligned
9718 data for non-ELF systems, we have to turn off auto alignment. */
9719 #if TARGET_ABI_OPEN_VMS
9720 #undef TARGET_ASM_UNALIGNED_HI_OP
9721 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9722 #undef TARGET_ASM_UNALIGNED_SI_OP
9723 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9724 #undef TARGET_ASM_UNALIGNED_DI_OP
9725 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9726 #endif
9728 #undef TARGET_ASM_RELOC_RW_MASK
9729 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
9730 #undef TARGET_ASM_SELECT_RTX_SECTION
9731 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9732 #undef TARGET_SECTION_TYPE_FLAGS
9733 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
9735 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9736 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9738 #undef TARGET_INIT_LIBFUNCS
9739 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9741 #undef TARGET_LEGITIMIZE_ADDRESS
9742 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
9743 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
9744 #define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p
9746 #undef TARGET_ASM_FILE_START
9747 #define TARGET_ASM_FILE_START alpha_file_start
9749 #undef TARGET_SCHED_ADJUST_COST
9750 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9751 #undef TARGET_SCHED_ISSUE_RATE
9752 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
9753 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9754 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9755 alpha_multipass_dfa_lookahead
9757 #undef TARGET_HAVE_TLS
9758 #define TARGET_HAVE_TLS HAVE_AS_TLS
9760 #undef TARGET_BUILTIN_DECL
9761 #define TARGET_BUILTIN_DECL alpha_builtin_decl
9762 #undef TARGET_INIT_BUILTINS
9763 #define TARGET_INIT_BUILTINS alpha_init_builtins
9764 #undef TARGET_EXPAND_BUILTIN
9765 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
9766 #undef TARGET_FOLD_BUILTIN
9767 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
9769 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
9770 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
9771 #undef TARGET_CANNOT_COPY_INSN_P
9772 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
9773 #undef TARGET_LEGITIMATE_CONSTANT_P
9774 #define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
9775 #undef TARGET_CANNOT_FORCE_CONST_MEM
9776 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
9778 #if TARGET_ABI_OSF
9779 #undef TARGET_ASM_OUTPUT_MI_THUNK
9780 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
9781 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
9782 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
9783 #undef TARGET_STDARG_OPTIMIZE_HOOK
9784 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
9785 #endif
9787 /* Use 16-bits anchor. */
9788 #undef TARGET_MIN_ANCHOR_OFFSET
9789 #define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
9790 #undef TARGET_MAX_ANCHOR_OFFSET
9791 #define TARGET_MAX_ANCHOR_OFFSET 0x7fff
9792 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
9793 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
9795 #undef TARGET_RTX_COSTS
9796 #define TARGET_RTX_COSTS alpha_rtx_costs
9797 #undef TARGET_ADDRESS_COST
9798 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
9800 #undef TARGET_MACHINE_DEPENDENT_REORG
9801 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
9803 #undef TARGET_PROMOTE_FUNCTION_MODE
9804 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
9805 #undef TARGET_PROMOTE_PROTOTYPES
9806 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
9807 #undef TARGET_RETURN_IN_MEMORY
9808 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
9809 #undef TARGET_PASS_BY_REFERENCE
9810 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
9811 #undef TARGET_SETUP_INCOMING_VARARGS
9812 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
9813 #undef TARGET_STRICT_ARGUMENT_NAMING
9814 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
9815 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
9816 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
9817 #undef TARGET_SPLIT_COMPLEX_ARG
9818 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
9819 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
9820 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
9821 #undef TARGET_ARG_PARTIAL_BYTES
9822 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
9823 #undef TARGET_FUNCTION_ARG
9824 #define TARGET_FUNCTION_ARG alpha_function_arg
9825 #undef TARGET_FUNCTION_ARG_ADVANCE
9826 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
9827 #undef TARGET_TRAMPOLINE_INIT
9828 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
9830 #undef TARGET_INSTANTIATE_DECLS
9831 #define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
9833 #undef TARGET_SECONDARY_RELOAD
9834 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
9836 #undef TARGET_SCALAR_MODE_SUPPORTED_P
9837 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
9838 #undef TARGET_VECTOR_MODE_SUPPORTED_P
9839 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
9841 #undef TARGET_BUILD_BUILTIN_VA_LIST
9842 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
9844 #undef TARGET_EXPAND_BUILTIN_VA_START
9845 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
9847 /* The Alpha architecture does not require sequential consistency. See
9848 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
9849 for an example of how it can be violated in practice. */
9850 #undef TARGET_RELAXED_ORDERING
9851 #define TARGET_RELAXED_ORDERING true
9853 #undef TARGET_OPTION_OVERRIDE
9854 #define TARGET_OPTION_OVERRIDE alpha_option_override
9856 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9857 #undef TARGET_MANGLE_TYPE
9858 #define TARGET_MANGLE_TYPE alpha_mangle_type
9859 #endif
9861 #undef TARGET_LEGITIMATE_ADDRESS_P
9862 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
9864 #undef TARGET_CONDITIONAL_REGISTER_USAGE
9865 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
9867 struct gcc_target targetm = TARGET_INITIALIZER;
9870 #include "gt-alpha.h"