1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "diagnostic-core.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include "splay-tree.h"
54 #include "cfglayout.h"
55 #include "gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
59 #include "df.h"
60 #include "libfuncs.h"
62 /* Specify which cpu to schedule for. */
63 enum processor_type alpha_tune;
65 /* Which cpu we're generating code for. */
66 enum processor_type alpha_cpu;
68 static const char * const alpha_cpu_name[] =
70 "ev4", "ev5", "ev6"
73 /* Specify how accurate floating-point traps need to be. */
75 enum alpha_trap_precision alpha_tp;
77 /* Specify the floating-point rounding mode. */
79 enum alpha_fp_rounding_mode alpha_fprm;
81 /* Specify which things cause traps. */
83 enum alpha_fp_trap_mode alpha_fptm;
85 /* Nonzero if inside of a function, because the Alpha asm can't
86 handle .files inside of functions. */
88 static int inside_function = FALSE;
90 /* The number of cycles of latency we should assume on memory reads. */
92 int alpha_memory_latency = 3;
94 /* Whether the function needs the GP. */
96 static int alpha_function_needs_gp;
98 /* The alias set for prologue/epilogue register save/restore. */
100 static GTY(()) alias_set_type alpha_sr_alias_set;
102 /* The assembler name of the current function. */
104 static const char *alpha_fnname;
106 /* The next explicit relocation sequence number. */
107 extern GTY(()) int alpha_next_sequence_number;
108 int alpha_next_sequence_number = 1;
110 /* The literal and gpdisp sequence numbers for this insn, as printed
111 by %# and %* respectively. */
112 extern GTY(()) int alpha_this_literal_sequence_number;
113 extern GTY(()) int alpha_this_gpdisp_sequence_number;
114 int alpha_this_literal_sequence_number;
115 int alpha_this_gpdisp_sequence_number;
117 /* Costs of various operations on the different architectures. */
119 struct alpha_rtx_cost_data
121 unsigned char fp_add;
122 unsigned char fp_mult;
123 unsigned char fp_div_sf;
124 unsigned char fp_div_df;
125 unsigned char int_mult_si;
126 unsigned char int_mult_di;
127 unsigned char int_shift;
128 unsigned char int_cmov;
129 unsigned short int_div;
132 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
134 { /* EV4 */
135 COSTS_N_INSNS (6), /* fp_add */
136 COSTS_N_INSNS (6), /* fp_mult */
137 COSTS_N_INSNS (34), /* fp_div_sf */
138 COSTS_N_INSNS (63), /* fp_div_df */
139 COSTS_N_INSNS (23), /* int_mult_si */
140 COSTS_N_INSNS (23), /* int_mult_di */
141 COSTS_N_INSNS (2), /* int_shift */
142 COSTS_N_INSNS (2), /* int_cmov */
143 COSTS_N_INSNS (97), /* int_div */
145 { /* EV5 */
146 COSTS_N_INSNS (4), /* fp_add */
147 COSTS_N_INSNS (4), /* fp_mult */
148 COSTS_N_INSNS (15), /* fp_div_sf */
149 COSTS_N_INSNS (22), /* fp_div_df */
150 COSTS_N_INSNS (8), /* int_mult_si */
151 COSTS_N_INSNS (12), /* int_mult_di */
152 COSTS_N_INSNS (1) + 1, /* int_shift */
153 COSTS_N_INSNS (1), /* int_cmov */
154 COSTS_N_INSNS (83), /* int_div */
156 { /* EV6 */
157 COSTS_N_INSNS (4), /* fp_add */
158 COSTS_N_INSNS (4), /* fp_mult */
159 COSTS_N_INSNS (12), /* fp_div_sf */
160 COSTS_N_INSNS (15), /* fp_div_df */
161 COSTS_N_INSNS (7), /* int_mult_si */
162 COSTS_N_INSNS (7), /* int_mult_di */
163 COSTS_N_INSNS (1), /* int_shift */
164 COSTS_N_INSNS (2), /* int_cmov */
165 COSTS_N_INSNS (86), /* int_div */
169 /* Similar but tuned for code size instead of execution latency. The
170 extra +N is fractional cost tuning based on latency. It's used to
171 encourage use of cheaper insns like shift, but only if there's just
172 one of them. */
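/* A rough illustration (added note, not from the original sources): with
   COSTS_N_INSNS (1) being 4, the int_mult_di entry below costs 6, so
   replacing a multiply with a single shift (cost 4) still looks like a win
   when optimizing for size, while a two-insn shift/add sequence (cost 8)
   does not.  The fractional +N terms exist to draw exactly that line.  */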
174 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
176 COSTS_N_INSNS (1), /* fp_add */
177 COSTS_N_INSNS (1), /* fp_mult */
178 COSTS_N_INSNS (1), /* fp_div_sf */
179 COSTS_N_INSNS (1) + 1, /* fp_div_df */
180 COSTS_N_INSNS (1) + 1, /* int_mult_si */
181 COSTS_N_INSNS (1) + 2, /* int_mult_di */
182 COSTS_N_INSNS (1), /* int_shift */
183 COSTS_N_INSNS (1), /* int_cmov */
184 COSTS_N_INSNS (6), /* int_div */
187 /* Get the number of args of a function in one of two ways. */
188 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
189 #define NUM_ARGS crtl->args.info.num_args
190 #else
191 #define NUM_ARGS crtl->args.info
192 #endif
194 #define REG_PV 27
195 #define REG_RA 26
197 /* Declarations of static functions. */
198 static struct machine_function *alpha_init_machine_status (void);
199 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
201 #if TARGET_ABI_OPEN_VMS
202 static void alpha_write_linkage (FILE *, const char *, tree);
203 static bool vms_valid_pointer_mode (enum machine_mode);
204 #endif
206 static void unicosmk_output_deferred_case_vectors (FILE *);
207 static void unicosmk_gen_dsib (unsigned long *);
208 static void unicosmk_output_ssib (FILE *, const char *);
209 static int unicosmk_need_dex (rtx);
211 /* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
212 static const struct default_options alpha_option_optimization_table[] =
214 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
215 { OPT_LEVELS_NONE, 0, NULL, 0 }
218 /* Implement TARGET_HANDLE_OPTION. */
220 static bool
221 alpha_handle_option (size_t code, const char *arg, int value)
223 switch (code)
225 case OPT_mfp_regs:
226 if (value == 0)
227 target_flags |= MASK_SOFT_FP;
228 break;
230 case OPT_mieee:
231 case OPT_mieee_with_inexact:
232 target_flags |= MASK_IEEE_CONFORMANT;
233 break;
235 case OPT_mtls_size_:
236 if (value != 16 && value != 32 && value != 64)
237 error ("bad value %qs for -mtls-size switch", arg);
238 break;
241 return true;
244 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
245 /* Implement TARGET_MANGLE_TYPE. */
247 static const char *
248 alpha_mangle_type (const_tree type)
250 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
251 && TARGET_LONG_DOUBLE_128)
252 return "g";
254 /* For all other types, use normal C++ mangling. */
255 return NULL;
257 #endif
259 /* Parse target option strings. */
261 static void
262 alpha_option_override (void)
264 static const struct cpu_table {
265 const char *const name;
266 const enum processor_type processor;
267 const int flags;
268 } cpu_table[] = {
269 { "ev4", PROCESSOR_EV4, 0 },
270 { "ev45", PROCESSOR_EV4, 0 },
271 { "21064", PROCESSOR_EV4, 0 },
272 { "ev5", PROCESSOR_EV5, 0 },
273 { "21164", PROCESSOR_EV5, 0 },
274 { "ev56", PROCESSOR_EV5, MASK_BWX },
275 { "21164a", PROCESSOR_EV5, MASK_BWX },
276 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
277 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
278 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
279 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
280 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
281 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
282 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
285 int const ct_size = ARRAY_SIZE (cpu_table);
286 int i;
288 #ifdef SUBTARGET_OVERRIDE_OPTIONS
289 SUBTARGET_OVERRIDE_OPTIONS;
290 #endif
292 /* Unicos/Mk doesn't have shared libraries. */
293 if (TARGET_ABI_UNICOSMK && flag_pic)
295 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
296 (flag_pic > 1) ? "PIC" : "pic");
297 flag_pic = 0;
 300 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
301 floating-point instructions. Make that the default for this target. */
302 if (TARGET_ABI_UNICOSMK)
303 alpha_fprm = ALPHA_FPRM_DYN;
304 else
305 alpha_fprm = ALPHA_FPRM_NORM;
307 alpha_tp = ALPHA_TP_PROG;
308 alpha_fptm = ALPHA_FPTM_N;
310 /* We cannot use su and sui qualifiers for conversion instructions on
311 Unicos/Mk. I'm not sure if this is due to assembler or hardware
312 limitations. Right now, we issue a warning if -mieee is specified
313 and then ignore it; eventually, we should either get it right or
314 disable the option altogether. */
316 if (TARGET_IEEE)
318 if (TARGET_ABI_UNICOSMK)
319 warning (0, "-mieee not supported on Unicos/Mk");
320 else
322 alpha_tp = ALPHA_TP_INSN;
323 alpha_fptm = ALPHA_FPTM_SU;
327 if (TARGET_IEEE_WITH_INEXACT)
329 if (TARGET_ABI_UNICOSMK)
330 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
331 else
333 alpha_tp = ALPHA_TP_INSN;
334 alpha_fptm = ALPHA_FPTM_SUI;
338 if (alpha_tp_string)
340 if (! strcmp (alpha_tp_string, "p"))
341 alpha_tp = ALPHA_TP_PROG;
342 else if (! strcmp (alpha_tp_string, "f"))
343 alpha_tp = ALPHA_TP_FUNC;
344 else if (! strcmp (alpha_tp_string, "i"))
345 alpha_tp = ALPHA_TP_INSN;
346 else
347 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
350 if (alpha_fprm_string)
352 if (! strcmp (alpha_fprm_string, "n"))
353 alpha_fprm = ALPHA_FPRM_NORM;
354 else if (! strcmp (alpha_fprm_string, "m"))
355 alpha_fprm = ALPHA_FPRM_MINF;
356 else if (! strcmp (alpha_fprm_string, "c"))
357 alpha_fprm = ALPHA_FPRM_CHOP;
358 else if (! strcmp (alpha_fprm_string,"d"))
359 alpha_fprm = ALPHA_FPRM_DYN;
360 else
361 error ("bad value %qs for -mfp-rounding-mode switch",
362 alpha_fprm_string);
365 if (alpha_fptm_string)
367 if (strcmp (alpha_fptm_string, "n") == 0)
368 alpha_fptm = ALPHA_FPTM_N;
369 else if (strcmp (alpha_fptm_string, "u") == 0)
370 alpha_fptm = ALPHA_FPTM_U;
371 else if (strcmp (alpha_fptm_string, "su") == 0)
372 alpha_fptm = ALPHA_FPTM_SU;
373 else if (strcmp (alpha_fptm_string, "sui") == 0)
374 alpha_fptm = ALPHA_FPTM_SUI;
375 else
376 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
379 if (alpha_cpu_string)
381 for (i = 0; i < ct_size; i++)
382 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
384 alpha_tune = alpha_cpu = cpu_table [i].processor;
385 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
386 target_flags |= cpu_table [i].flags;
387 break;
389 if (i == ct_size)
390 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
393 if (alpha_tune_string)
395 for (i = 0; i < ct_size; i++)
396 if (! strcmp (alpha_tune_string, cpu_table [i].name))
398 alpha_tune = cpu_table [i].processor;
399 break;
401 if (i == ct_size)
402 error ("bad value %qs for -mtune switch", alpha_tune_string);
405 /* Do some sanity checks on the above options. */
407 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
409 warning (0, "trap mode not supported on Unicos/Mk");
410 alpha_fptm = ALPHA_FPTM_N;
413 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
414 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
416 warning (0, "fp software completion requires -mtrap-precision=i");
417 alpha_tp = ALPHA_TP_INSN;
420 if (alpha_cpu == PROCESSOR_EV6)
422 /* Except for EV6 pass 1 (not released), we always have precise
423 arithmetic traps. Which means we can do software completion
424 without minding trap shadows. */
425 alpha_tp = ALPHA_TP_PROG;
428 if (TARGET_FLOAT_VAX)
430 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
432 warning (0, "rounding mode not supported for VAX floats");
433 alpha_fprm = ALPHA_FPRM_NORM;
435 if (alpha_fptm == ALPHA_FPTM_SUI)
437 warning (0, "trap mode not supported for VAX floats");
438 alpha_fptm = ALPHA_FPTM_SU;
440 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
441 warning (0, "128-bit long double not supported for VAX floats");
442 target_flags &= ~MASK_LONG_DOUBLE_128;
446 char *end;
447 int lat;
449 if (!alpha_mlat_string)
450 alpha_mlat_string = "L1";
452 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
453 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
455 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
456 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
457 && alpha_mlat_string[2] == '\0')
459 static int const cache_latency[][4] =
461 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
462 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
463 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
466 lat = alpha_mlat_string[1] - '0';
467 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
469 warning (0, "L%d cache latency unknown for %s",
470 lat, alpha_cpu_name[alpha_tune]);
471 lat = 3;
473 else
474 lat = cache_latency[alpha_tune][lat-1];
476 else if (! strcmp (alpha_mlat_string, "main"))
478 /* Most current memories have about 370ns latency. This is
479 a reasonable guess for a fast cpu. */
480 lat = 150;
482 else
484 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
485 lat = 3;
488 alpha_memory_latency = lat;
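  /* Illustrative examples of the cases above: with -mtune=ev5 and
     -mmemory-latency=L2, lat is cache_latency[PROCESSOR_EV5][1] == 12
     cycles; -mmemory-latency=25 uses the number directly; "main"
     assumes 150 cycles.  */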
491 /* Default the definition of "small data" to 8 bytes. */
492 if (!global_options_set.x_g_switch_value)
493 g_switch_value = 8;
495 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
496 if (flag_pic == 1)
497 target_flags |= MASK_SMALL_DATA;
498 else if (flag_pic == 2)
499 target_flags &= ~MASK_SMALL_DATA;
501 /* Align labels and loops for optimal branching. */
502 /* ??? Kludge these by not doing anything if we don't optimize and also if
503 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
504 if (optimize > 0 && write_symbols != SDB_DEBUG)
506 if (align_loops <= 0)
507 align_loops = 16;
508 if (align_jumps <= 0)
509 align_jumps = 16;
511 if (align_functions <= 0)
512 align_functions = 16;
514 /* Acquire a unique set number for our register saves and restores. */
515 alpha_sr_alias_set = new_alias_set ();
517 /* Register variables and functions with the garbage collector. */
519 /* Set up function hooks. */
520 init_machine_status = alpha_init_machine_status;
522 /* Tell the compiler when we're using VAX floating point. */
523 if (TARGET_FLOAT_VAX)
525 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
526 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
527 REAL_MODE_FORMAT (TFmode) = NULL;
530 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
531 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
532 target_flags |= MASK_LONG_DOUBLE_128;
533 #endif
535 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
536 can be optimized to ap = __builtin_next_arg (0). */
537 if (TARGET_ABI_UNICOSMK)
538 targetm.expand_builtin_va_start = NULL;
541 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
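/* For example (illustrative only): 0xffffffff00000000 and
   0x0000ff00ff000000 consist solely of 0x00 and 0xff bytes, so they are
   zap masks; 0x0000000000123400 contains the bytes 0x12 and 0x34, so it
   is not.  */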
544 zap_mask (HOST_WIDE_INT value)
546 int i;
548 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
549 i++, value >>= 8)
550 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
551 return 0;
553 return 1;
556 /* Return true if OP is valid for a particular TLS relocation.
557 We are already guaranteed that OP is a CONST. */
560 tls_symbolic_operand_1 (rtx op, int size, int unspec)
562 op = XEXP (op, 0);
564 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
565 return 0;
566 op = XVECEXP (op, 0, 0);
568 if (GET_CODE (op) != SYMBOL_REF)
569 return 0;
571 switch (SYMBOL_REF_TLS_MODEL (op))
573 case TLS_MODEL_LOCAL_DYNAMIC:
574 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
575 case TLS_MODEL_INITIAL_EXEC:
576 return unspec == UNSPEC_TPREL && size == 64;
577 case TLS_MODEL_LOCAL_EXEC:
578 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
579 default:
580 gcc_unreachable ();
584 /* Used by aligned_memory_operand and unaligned_memory_operand to
585 resolve what reload is going to do with OP if it's a register. */
588 resolve_reload_operand (rtx op)
590 if (reload_in_progress)
592 rtx tmp = op;
593 if (GET_CODE (tmp) == SUBREG)
594 tmp = SUBREG_REG (tmp);
595 if (REG_P (tmp)
596 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
598 op = reg_equiv_memory_loc[REGNO (tmp)];
599 if (op == 0)
600 return 0;
603 return op;
 606 /* The scalar modes supported differ from the default check-what-c-supports
607 version in that sometimes TFmode is available even when long double
608 indicates only DFmode. On unicosmk, we have the situation that HImode
609 doesn't map to any C type, but of course we still support that. */
611 static bool
612 alpha_scalar_mode_supported_p (enum machine_mode mode)
614 switch (mode)
616 case QImode:
617 case HImode:
618 case SImode:
619 case DImode:
620 case TImode: /* via optabs.c */
621 return true;
623 case SFmode:
624 case DFmode:
625 return true;
627 case TFmode:
628 return TARGET_HAS_XFLOATING_LIBS;
630 default:
631 return false;
635 /* Alpha implements a couple of integer vector mode operations when
636 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
637 which allows the vectorizer to operate on e.g. move instructions,
638 or when expand_vector_operations can do something useful. */
640 static bool
641 alpha_vector_mode_supported_p (enum machine_mode mode)
643 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
646 /* Return 1 if this function can directly return via $26. */
649 direct_return (void)
651 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
652 && reload_completed
653 && alpha_sa_size () == 0
654 && get_frame_size () == 0
655 && crtl->outgoing_args_size == 0
656 && crtl->args.pretend_args_size == 0);
659 /* Return the ADDR_VEC associated with a tablejump insn. */
662 alpha_tablejump_addr_vec (rtx insn)
664 rtx tmp;
666 tmp = JUMP_LABEL (insn);
667 if (!tmp)
668 return NULL_RTX;
669 tmp = NEXT_INSN (tmp);
670 if (!tmp)
671 return NULL_RTX;
672 if (JUMP_P (tmp)
673 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
674 return PATTERN (tmp);
675 return NULL_RTX;
678 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
681 alpha_tablejump_best_label (rtx insn)
683 rtx jump_table = alpha_tablejump_addr_vec (insn);
684 rtx best_label = NULL_RTX;
686 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
687 there for edge frequency counts from profile data. */
689 if (jump_table)
691 int n_labels = XVECLEN (jump_table, 1);
692 int best_count = -1;
693 int i, j;
695 for (i = 0; i < n_labels; i++)
697 int count = 1;
699 for (j = i + 1; j < n_labels; j++)
700 if (XEXP (XVECEXP (jump_table, 1, i), 0)
701 == XEXP (XVECEXP (jump_table, 1, j), 0))
702 count++;
704 if (count > best_count)
705 best_count = count, best_label = XVECEXP (jump_table, 1, i);
709 return best_label ? best_label : const0_rtx;
712 /* Return the TLS model to use for SYMBOL. */
714 static enum tls_model
715 tls_symbolic_operand_type (rtx symbol)
717 enum tls_model model;
719 if (GET_CODE (symbol) != SYMBOL_REF)
720 return TLS_MODEL_NONE;
721 model = SYMBOL_REF_TLS_MODEL (symbol);
723 /* Local-exec with a 64-bit size is the same code as initial-exec. */
724 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
725 model = TLS_MODEL_INITIAL_EXEC;
727 return model;
730 /* Return true if the function DECL will share the same GP as any
731 function in the current unit of translation. */
733 static bool
734 decl_has_samegp (const_tree decl)
736 /* Functions that are not local can be overridden, and thus may
737 not share the same gp. */
738 if (!(*targetm.binds_local_p) (decl))
739 return false;
741 /* If -msmall-data is in effect, assume that there is only one GP
742 for the module, and so any local symbol has this property. We
743 need explicit relocations to be able to enforce this for symbols
744 not defined in this unit of translation, however. */
745 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
746 return true;
748 /* Functions that are not external are defined in this UoT. */
749 /* ??? Irritatingly, static functions not yet emitted are still
750 marked "external". Apply this to non-static functions only. */
751 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
754 /* Return true if EXP should be placed in the small data section. */
756 static bool
757 alpha_in_small_data_p (const_tree exp)
759 /* We want to merge strings, so we never consider them small data. */
760 if (TREE_CODE (exp) == STRING_CST)
761 return false;
763 /* Functions are never in the small data area. Duh. */
764 if (TREE_CODE (exp) == FUNCTION_DECL)
765 return false;
767 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
769 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
770 if (strcmp (section, ".sdata") == 0
771 || strcmp (section, ".sbss") == 0)
772 return true;
774 else
776 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
778 /* If this is an incomplete type with size 0, then we can't put it
779 in sdata because it might be too big when completed. */
780 if (size > 0 && size <= g_switch_value)
781 return true;
784 return false;
787 #if TARGET_ABI_OPEN_VMS
788 static bool
789 vms_valid_pointer_mode (enum machine_mode mode)
791 return (mode == SImode || mode == DImode);
794 static bool
795 alpha_linkage_symbol_p (const char *symname)
797 int symlen = strlen (symname);
799 if (symlen > 4)
800 return strcmp (&symname [symlen - 4], "..lk") == 0;
802 return false;
805 #define LINKAGE_SYMBOL_REF_P(X) \
806 ((GET_CODE (X) == SYMBOL_REF \
807 && alpha_linkage_symbol_p (XSTR (X, 0))) \
808 || (GET_CODE (X) == CONST \
809 && GET_CODE (XEXP (X, 0)) == PLUS \
810 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
811 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
812 #endif
814 /* legitimate_address_p recognizes an RTL expression that is a valid
815 memory address for an instruction. The MODE argument is the
816 machine mode for the MEM expression that wants to use this address.
818 For Alpha, we have either a constant address or the sum of a
819 register and a constant address, or just a register. For DImode,
 820 any of those forms can be surrounded with an AND that clears the
821 low-order three bits; this is an "unaligned" access. */
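/* Illustrative examples of the accepted forms (not exhaustive):

     (reg $1)                                     plain base register
     (plus (reg $1) (const_int 64))               base plus small constant
     (and (plus (reg $1) (const_int 5)) (const_int -8))
                                                  DImode ldq_u-style access

   plus the LO_SUM and small-data symbol forms handled below when explicit
   relocations are in use.  */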
823 static bool
824 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
826 /* If this is an ldq_u type address, discard the outer AND. */
827 if (mode == DImode
828 && GET_CODE (x) == AND
829 && CONST_INT_P (XEXP (x, 1))
830 && INTVAL (XEXP (x, 1)) == -8)
831 x = XEXP (x, 0);
833 /* Discard non-paradoxical subregs. */
834 if (GET_CODE (x) == SUBREG
835 && (GET_MODE_SIZE (GET_MODE (x))
836 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
837 x = SUBREG_REG (x);
839 /* Unadorned general registers are valid. */
840 if (REG_P (x)
841 && (strict
842 ? STRICT_REG_OK_FOR_BASE_P (x)
843 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
844 return true;
846 /* Constant addresses (i.e. +/- 32k) are valid. */
847 if (CONSTANT_ADDRESS_P (x))
848 return true;
850 #if TARGET_ABI_OPEN_VMS
851 if (LINKAGE_SYMBOL_REF_P (x))
852 return true;
853 #endif
855 /* Register plus a small constant offset is valid. */
856 if (GET_CODE (x) == PLUS)
858 rtx ofs = XEXP (x, 1);
859 x = XEXP (x, 0);
861 /* Discard non-paradoxical subregs. */
862 if (GET_CODE (x) == SUBREG
863 && (GET_MODE_SIZE (GET_MODE (x))
864 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
865 x = SUBREG_REG (x);
867 if (REG_P (x))
869 if (! strict
870 && NONSTRICT_REG_OK_FP_BASE_P (x)
871 && CONST_INT_P (ofs))
872 return true;
873 if ((strict
874 ? STRICT_REG_OK_FOR_BASE_P (x)
875 : NONSTRICT_REG_OK_FOR_BASE_P (x))
876 && CONSTANT_ADDRESS_P (ofs))
877 return true;
881 /* If we're managing explicit relocations, LO_SUM is valid, as are small
882 data symbols. Avoid explicit relocations of modes larger than word
 883 mode since, e.g., $LC0+8($1) can fold around +/- 32k offset. */
884 else if (TARGET_EXPLICIT_RELOCS
885 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
887 if (small_symbolic_operand (x, Pmode))
888 return true;
890 if (GET_CODE (x) == LO_SUM)
892 rtx ofs = XEXP (x, 1);
893 x = XEXP (x, 0);
895 /* Discard non-paradoxical subregs. */
896 if (GET_CODE (x) == SUBREG
897 && (GET_MODE_SIZE (GET_MODE (x))
898 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
899 x = SUBREG_REG (x);
901 /* Must have a valid base register. */
902 if (! (REG_P (x)
903 && (strict
904 ? STRICT_REG_OK_FOR_BASE_P (x)
905 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
906 return false;
908 /* The symbol must be local. */
909 if (local_symbolic_operand (ofs, Pmode)
910 || dtp32_symbolic_operand (ofs, Pmode)
911 || tp32_symbolic_operand (ofs, Pmode))
912 return true;
916 return false;
919 /* Build the SYMBOL_REF for __tls_get_addr. */
921 static GTY(()) rtx tls_get_addr_libfunc;
923 static rtx
924 get_tls_get_addr (void)
926 if (!tls_get_addr_libfunc)
927 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
928 return tls_get_addr_libfunc;
931 /* Try machine-dependent ways of modifying an illegitimate address
932 to be legitimate. If we find one, return the new, valid address. */
934 static rtx
935 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
937 HOST_WIDE_INT addend;
939 /* If the address is (plus reg const_int) and the CONST_INT is not a
940 valid offset, compute the high part of the constant and add it to
941 the register. Then our address is (plus temp low-part-const). */
942 if (GET_CODE (x) == PLUS
943 && REG_P (XEXP (x, 0))
944 && CONST_INT_P (XEXP (x, 1))
945 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
947 addend = INTVAL (XEXP (x, 1));
948 x = XEXP (x, 0);
949 goto split_addend;
952 /* If the address is (const (plus FOO const_int)), find the low-order
953 part of the CONST_INT. Then load FOO plus any high-order part of the
954 CONST_INT into a register. Our address is (plus reg low-part-const).
955 This is done to reduce the number of GOT entries. */
956 if (can_create_pseudo_p ()
957 && GET_CODE (x) == CONST
958 && GET_CODE (XEXP (x, 0)) == PLUS
959 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
961 addend = INTVAL (XEXP (XEXP (x, 0), 1));
962 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
963 goto split_addend;
966 /* If we have a (plus reg const), emit the load as in (2), then add
967 the two registers, and finally generate (plus reg low-part-const) as
968 our address. */
969 if (can_create_pseudo_p ()
970 && GET_CODE (x) == PLUS
971 && REG_P (XEXP (x, 0))
972 && GET_CODE (XEXP (x, 1)) == CONST
973 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
974 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
976 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
977 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
978 XEXP (XEXP (XEXP (x, 1), 0), 0),
979 NULL_RTX, 1, OPTAB_LIB_WIDEN);
980 goto split_addend;
983 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
 984 Avoid modes larger than word mode since, e.g., $LC0+8($1) can fold
985 around +/- 32k offset. */
986 if (TARGET_EXPLICIT_RELOCS
987 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
988 && symbolic_operand (x, Pmode))
990 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
992 switch (tls_symbolic_operand_type (x))
994 case TLS_MODEL_NONE:
995 break;
997 case TLS_MODEL_GLOBAL_DYNAMIC:
998 start_sequence ();
1000 r0 = gen_rtx_REG (Pmode, 0);
1001 r16 = gen_rtx_REG (Pmode, 16);
1002 tga = get_tls_get_addr ();
1003 dest = gen_reg_rtx (Pmode);
1004 seq = GEN_INT (alpha_next_sequence_number++);
1006 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1007 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1008 insn = emit_call_insn (insn);
1009 RTL_CONST_CALL_P (insn) = 1;
1010 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1012 insn = get_insns ();
1013 end_sequence ();
1015 emit_libcall_block (insn, dest, r0, x);
1016 return dest;
1018 case TLS_MODEL_LOCAL_DYNAMIC:
1019 start_sequence ();
1021 r0 = gen_rtx_REG (Pmode, 0);
1022 r16 = gen_rtx_REG (Pmode, 16);
1023 tga = get_tls_get_addr ();
1024 scratch = gen_reg_rtx (Pmode);
1025 seq = GEN_INT (alpha_next_sequence_number++);
1027 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1028 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1029 insn = emit_call_insn (insn);
1030 RTL_CONST_CALL_P (insn) = 1;
1031 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1033 insn = get_insns ();
1034 end_sequence ();
1036 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1037 UNSPEC_TLSLDM_CALL);
1038 emit_libcall_block (insn, scratch, r0, eqv);
1040 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1041 eqv = gen_rtx_CONST (Pmode, eqv);
1043 if (alpha_tls_size == 64)
1045 dest = gen_reg_rtx (Pmode);
1046 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1047 emit_insn (gen_adddi3 (dest, dest, scratch));
1048 return dest;
1050 if (alpha_tls_size == 32)
1052 insn = gen_rtx_HIGH (Pmode, eqv);
1053 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1054 scratch = gen_reg_rtx (Pmode);
1055 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1057 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1059 case TLS_MODEL_INITIAL_EXEC:
1060 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1061 eqv = gen_rtx_CONST (Pmode, eqv);
1062 tp = gen_reg_rtx (Pmode);
1063 scratch = gen_reg_rtx (Pmode);
1064 dest = gen_reg_rtx (Pmode);
1066 emit_insn (gen_load_tp (tp));
1067 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1068 emit_insn (gen_adddi3 (dest, tp, scratch));
1069 return dest;
1071 case TLS_MODEL_LOCAL_EXEC:
1072 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1073 eqv = gen_rtx_CONST (Pmode, eqv);
1074 tp = gen_reg_rtx (Pmode);
1076 emit_insn (gen_load_tp (tp));
1077 if (alpha_tls_size == 32)
1079 insn = gen_rtx_HIGH (Pmode, eqv);
1080 insn = gen_rtx_PLUS (Pmode, tp, insn);
1081 tp = gen_reg_rtx (Pmode);
1082 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1084 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1086 default:
1087 gcc_unreachable ();
1090 if (local_symbolic_operand (x, Pmode))
1092 if (small_symbolic_operand (x, Pmode))
1093 return x;
1094 else
1096 if (can_create_pseudo_p ())
1097 scratch = gen_reg_rtx (Pmode);
1098 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1099 gen_rtx_HIGH (Pmode, x)));
1100 return gen_rtx_LO_SUM (Pmode, scratch, x);
1105 return NULL;
1107 split_addend:
1109 HOST_WIDE_INT low, high;
1111 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1112 addend -= low;
1113 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1114 addend -= high;
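    /* Worked example (illustrative): for an incoming address
       (plus (reg $1) (const_int 0x48000)), low becomes -0x8000 and high
       becomes 0x50000, so we add 0x50000 (an ldah-style addend) into a
       temporary and return (plus (reg tmp) (const_int -0x8000)).  */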
1116 if (addend)
1117 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1118 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1119 1, OPTAB_LIB_WIDEN);
1120 if (high)
1121 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1122 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1123 1, OPTAB_LIB_WIDEN);
1125 return plus_constant (x, low);
1130 /* Try machine-dependent ways of modifying an illegitimate address
1131 to be legitimate. Return X or the new, valid address. */
1133 static rtx
1134 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1135 enum machine_mode mode)
1137 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1138 return new_x ? new_x : x;
1141 /* Primarily this is required for TLS symbols, but given that our move
1142 patterns *ought* to be able to handle any symbol at any time, we
1143 should never be spilling symbolic operands to the constant pool, ever. */
1145 static bool
1146 alpha_cannot_force_const_mem (rtx x)
1148 enum rtx_code code = GET_CODE (x);
1149 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1152 /* We do not allow indirect calls to be optimized into sibling calls, nor
1153 can we allow a call to a function with a different GP to be optimized
1154 into a sibcall. */
1156 static bool
1157 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1159 /* Can't do indirect tail calls, since we don't know if the target
1160 uses the same GP. */
1161 if (!decl)
1162 return false;
1164 /* Otherwise, we can make a tail call if the target function shares
1165 the same GP. */
1166 return decl_has_samegp (decl);
1170 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1172 rtx x = *px;
1174 /* Don't re-split. */
1175 if (GET_CODE (x) == LO_SUM)
1176 return -1;
1178 return small_symbolic_operand (x, Pmode) != 0;
1181 static int
1182 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1184 rtx x = *px;
1186 /* Don't re-split. */
1187 if (GET_CODE (x) == LO_SUM)
1188 return -1;
1190 if (small_symbolic_operand (x, Pmode))
1192 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1193 *px = x;
1194 return -1;
1197 return 0;
1201 split_small_symbolic_operand (rtx x)
1203 x = copy_insn (x);
1204 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1205 return x;
1208 /* Indicate that INSN cannot be duplicated. This is true for any insn
1209 that we've marked with gpdisp relocs, since those have to stay in
1210 1-1 correspondence with one another.
1212 Technically we could copy them if we could set up a mapping from one
1213 sequence number to another, across the set of insns to be duplicated.
1214 This seems overly complicated and error-prone since interblock motion
1215 from sched-ebb could move one of the pair of insns to a different block.
1217 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1218 then they'll be in a different block from their ldgp. Which could lead
1219 the bb reorder code to think that it would be ok to copy just the block
1220 containing the call and branch to the block containing the ldgp. */
1222 static bool
1223 alpha_cannot_copy_insn_p (rtx insn)
1225 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1226 return false;
1227 if (recog_memoized (insn) >= 0)
1228 return get_attr_cannot_copy (insn);
1229 else
1230 return false;
1234 /* Try a machine-dependent way of reloading an illegitimate address
1235 operand. If we find one, push the reload and return the new rtx. */
1238 alpha_legitimize_reload_address (rtx x,
1239 enum machine_mode mode ATTRIBUTE_UNUSED,
1240 int opnum, int type,
1241 int ind_levels ATTRIBUTE_UNUSED)
1243 /* We must recognize output that we have already generated ourselves. */
1244 if (GET_CODE (x) == PLUS
1245 && GET_CODE (XEXP (x, 0)) == PLUS
1246 && REG_P (XEXP (XEXP (x, 0), 0))
1247 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1248 && CONST_INT_P (XEXP (x, 1)))
1250 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1251 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1252 opnum, (enum reload_type) type);
1253 return x;
1256 /* We wish to handle large displacements off a base register by
1257 splitting the addend across an ldah and the mem insn. This
 1258 cuts the number of extra insns needed from 3 to 1. */
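/* Illustrative example: (plus (reg $9) (const_int 0x12340)) is rewritten
   as (plus (plus (reg $9) (const_int 0x10000)) (const_int 0x2340)); the
   inner PLUS is reloaded into a base register with a single ldah and
   0x2340 remains as the memory displacement.  */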
1259 if (GET_CODE (x) == PLUS
1260 && REG_P (XEXP (x, 0))
1261 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1262 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1263 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1265 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1266 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1267 HOST_WIDE_INT high
1268 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1270 /* Check for 32-bit overflow. */
1271 if (high + low != val)
1272 return NULL_RTX;
1274 /* Reload the high part into a base reg; leave the low part
1275 in the mem directly. */
1276 x = gen_rtx_PLUS (GET_MODE (x),
1277 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1278 GEN_INT (high)),
1279 GEN_INT (low));
1281 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1282 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1283 opnum, (enum reload_type) type);
1284 return x;
1287 return NULL_RTX;
1290 /* Compute a (partial) cost for rtx X. Return true if the complete
1291 cost has been computed, and false if subexpressions should be
1292 scanned. In either case, *TOTAL contains the cost result. */
1294 static bool
1295 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1296 bool speed)
1298 enum machine_mode mode = GET_MODE (x);
1299 bool float_mode_p = FLOAT_MODE_P (mode);
1300 const struct alpha_rtx_cost_data *cost_data;
1302 if (!speed)
1303 cost_data = &alpha_rtx_cost_size;
1304 else
1305 cost_data = &alpha_rtx_cost_data[alpha_tune];
1307 switch (code)
1309 case CONST_INT:
1310 /* If this is an 8-bit constant, return zero since it can be used
1311 nearly anywhere with no cost. If it is a valid operand for an
1312 ADD or AND, likewise return 0 if we know it will be used in that
1313 context. Otherwise, return 2 since it might be used there later.
1314 All other constants take at least two insns. */
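      /* Examples (illustrative): 200 costs 0; 0x7fff0000 costs 0 when it
         appears directly under a PLUS (it is an ldah-style add_operand)
         and 2 otherwise; constants needing a multi-insn sequence fall
         through to COSTS_N_INSNS (2) below.  */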
1315 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1317 *total = 0;
1318 return true;
1320 /* FALLTHRU */
1322 case CONST_DOUBLE:
1323 if (x == CONST0_RTX (mode))
1324 *total = 0;
1325 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1326 || (outer_code == AND && and_operand (x, VOIDmode)))
1327 *total = 0;
1328 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1329 *total = 2;
1330 else
1331 *total = COSTS_N_INSNS (2);
1332 return true;
1334 case CONST:
1335 case SYMBOL_REF:
1336 case LABEL_REF:
1337 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1338 *total = COSTS_N_INSNS (outer_code != MEM);
1339 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1340 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1341 else if (tls_symbolic_operand_type (x))
1342 /* Estimate of cost for call_pal rduniq. */
1343 /* ??? How many insns do we emit here? More than one... */
1344 *total = COSTS_N_INSNS (15);
1345 else
1346 /* Otherwise we do a load from the GOT. */
1347 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1348 return true;
1350 case HIGH:
1351 /* This is effectively an add_operand. */
1352 *total = 2;
1353 return true;
1355 case PLUS:
1356 case MINUS:
1357 if (float_mode_p)
1358 *total = cost_data->fp_add;
1359 else if (GET_CODE (XEXP (x, 0)) == MULT
1360 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1362 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1363 (enum rtx_code) outer_code, speed)
1364 + rtx_cost (XEXP (x, 1),
1365 (enum rtx_code) outer_code, speed)
1366 + COSTS_N_INSNS (1));
1367 return true;
1369 return false;
1371 case MULT:
1372 if (float_mode_p)
1373 *total = cost_data->fp_mult;
1374 else if (mode == DImode)
1375 *total = cost_data->int_mult_di;
1376 else
1377 *total = cost_data->int_mult_si;
1378 return false;
1380 case ASHIFT:
1381 if (CONST_INT_P (XEXP (x, 1))
1382 && INTVAL (XEXP (x, 1)) <= 3)
1384 *total = COSTS_N_INSNS (1);
1385 return false;
1387 /* FALLTHRU */
1389 case ASHIFTRT:
1390 case LSHIFTRT:
1391 *total = cost_data->int_shift;
1392 return false;
1394 case IF_THEN_ELSE:
1395 if (float_mode_p)
1396 *total = cost_data->fp_add;
1397 else
1398 *total = cost_data->int_cmov;
1399 return false;
1401 case DIV:
1402 case UDIV:
1403 case MOD:
1404 case UMOD:
1405 if (!float_mode_p)
1406 *total = cost_data->int_div;
1407 else if (mode == SFmode)
1408 *total = cost_data->fp_div_sf;
1409 else
1410 *total = cost_data->fp_div_df;
1411 return false;
1413 case MEM:
1414 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1415 return true;
1417 case NEG:
1418 if (! float_mode_p)
1420 *total = COSTS_N_INSNS (1);
1421 return false;
1423 /* FALLTHRU */
1425 case ABS:
1426 if (! float_mode_p)
1428 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1429 return false;
1431 /* FALLTHRU */
1433 case FLOAT:
1434 case UNSIGNED_FLOAT:
1435 case FIX:
1436 case UNSIGNED_FIX:
1437 case FLOAT_TRUNCATE:
1438 *total = cost_data->fp_add;
1439 return false;
1441 case FLOAT_EXTEND:
1442 if (MEM_P (XEXP (x, 0)))
1443 *total = 0;
1444 else
1445 *total = cost_data->fp_add;
1446 return false;
1448 default:
1449 return false;
1453 /* REF is an alignable memory location. Place an aligned SImode
1454 reference into *PALIGNED_MEM and the number of bits to shift into
 1455 *PBITNUM. */
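/* Example (illustrative): for a QImode reference at $1+5 with no known
   alignment, disp is 5 and offset is 1, so *PALIGNED_MEM becomes the
   SImode word at $1+4 and *PBITNUM is 8, i.e. the byte sits 8 bits up
   within the aligned longword (little-endian).  */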
1458 void
1459 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1461 rtx base;
1462 HOST_WIDE_INT disp, offset;
1464 gcc_assert (MEM_P (ref));
1466 if (reload_in_progress
1467 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1469 base = find_replacement (&XEXP (ref, 0));
1470 gcc_assert (memory_address_p (GET_MODE (ref), base));
1472 else
1473 base = XEXP (ref, 0);
1475 if (GET_CODE (base) == PLUS)
1476 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1477 else
1478 disp = 0;
1480 /* Find the byte offset within an aligned word. If the memory itself is
1481 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1482 will have examined the base register and determined it is aligned, and
1483 thus displacements from it are naturally alignable. */
1484 if (MEM_ALIGN (ref) >= 32)
1485 offset = 0;
1486 else
1487 offset = disp & 3;
1489 /* The location should not cross aligned word boundary. */
1490 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1491 <= GET_MODE_SIZE (SImode));
1493 /* Access the entire aligned word. */
1494 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1496 /* Convert the byte offset within the word to a bit offset. */
1497 if (WORDS_BIG_ENDIAN)
1498 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1499 else
1500 offset *= 8;
1501 *pbitnum = GEN_INT (offset);
 1504 /* Similar, but just get the address. Handle the two reload cases. */
1508 get_unaligned_address (rtx ref)
1510 rtx base;
1511 HOST_WIDE_INT offset = 0;
1513 gcc_assert (MEM_P (ref));
1515 if (reload_in_progress
1516 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1518 base = find_replacement (&XEXP (ref, 0));
1520 gcc_assert (memory_address_p (GET_MODE (ref), base));
1522 else
1523 base = XEXP (ref, 0);
1525 if (GET_CODE (base) == PLUS)
1526 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1528 return plus_constant (base, offset);
1531 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1532 X is always returned in a register. */
1535 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1537 if (GET_CODE (addr) == PLUS)
1539 ofs += INTVAL (XEXP (addr, 1));
1540 addr = XEXP (addr, 0);
1543 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1544 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1547 /* On the Alpha, all (non-symbolic) constants except zero go into
1548 a floating-point register via memory. Note that we cannot
1549 return anything that is not a subset of RCLASS, and that some
1550 symbolic constants cannot be dropped to memory. */
1552 enum reg_class
1553 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1555 /* Zero is present in any register class. */
1556 if (x == CONST0_RTX (GET_MODE (x)))
1557 return rclass;
1559 /* These sorts of constants we can easily drop to memory. */
1560 if (CONST_INT_P (x)
1561 || GET_CODE (x) == CONST_DOUBLE
1562 || GET_CODE (x) == CONST_VECTOR)
1564 if (rclass == FLOAT_REGS)
1565 return NO_REGS;
1566 if (rclass == ALL_REGS)
1567 return GENERAL_REGS;
1568 return rclass;
1571 /* All other kinds of constants should not (and in the case of HIGH
1572 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1573 secondary reload. */
1574 if (CONSTANT_P (x))
1575 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1577 return rclass;
1580 /* Inform reload about cases where moving X with a mode MODE to a register in
1581 RCLASS requires an extra scratch or immediate register. Return the class
1582 needed for the immediate register. */
1584 static reg_class_t
1585 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1586 enum machine_mode mode, secondary_reload_info *sri)
1588 enum reg_class rclass = (enum reg_class) rclass_i;
1590 /* Loading and storing HImode or QImode values to and from memory
1591 usually requires a scratch register. */
1592 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1594 if (any_memory_operand (x, mode))
1596 if (in_p)
1598 if (!aligned_memory_operand (x, mode))
1599 sri->icode = direct_optab_handler (reload_in_optab, mode);
1601 else
1602 sri->icode = direct_optab_handler (reload_out_optab, mode);
1603 return NO_REGS;
1607 /* We also cannot do integral arithmetic into FP regs, as might result
1608 from register elimination into a DImode fp register. */
1609 if (rclass == FLOAT_REGS)
1611 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1612 return GENERAL_REGS;
1613 if (in_p && INTEGRAL_MODE_P (mode)
1614 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1615 return GENERAL_REGS;
1618 return NO_REGS;
1621 /* Subfunction of the following function. Update the flags of any MEM
1622 found in part of X. */
1624 static int
1625 alpha_set_memflags_1 (rtx *xp, void *data)
1627 rtx x = *xp, orig = (rtx) data;
1629 if (!MEM_P (x))
1630 return 0;
1632 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1633 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1634 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1635 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1636 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1638 /* Sadly, we cannot use alias sets because the extra aliasing
1639 produced by the AND interferes. Given that two-byte quantities
1640 are the only thing we would be able to differentiate anyway,
1641 there does not seem to be any point in convoluting the early
1642 out of the alias check. */
1644 return -1;
1647 /* Given SEQ, which is an INSN list, look for any MEMs in either
1648 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1649 volatile flags from REF into each of the MEMs found. If REF is not
1650 a MEM, don't do anything. */
1652 void
1653 alpha_set_memflags (rtx seq, rtx ref)
1655 rtx insn;
1657 if (!MEM_P (ref))
1658 return;
1660 /* This is only called from alpha.md, after having had something
1661 generated from one of the insn patterns. So if everything is
1662 zero, the pattern is already up-to-date. */
1663 if (!MEM_VOLATILE_P (ref)
1664 && !MEM_IN_STRUCT_P (ref)
1665 && !MEM_SCALAR_P (ref)
1666 && !MEM_NOTRAP_P (ref)
1667 && !MEM_READONLY_P (ref))
1668 return;
1670 for (insn = seq; insn; insn = NEXT_INSN (insn))
1671 if (INSN_P (insn))
1672 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1673 else
1674 gcc_unreachable ();
1677 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1678 int, bool);
1680 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1681 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1682 and return pc_rtx if successful. */
1684 static rtx
1685 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1686 HOST_WIDE_INT c, int n, bool no_output)
1688 HOST_WIDE_INT new_const;
1689 int i, bits;
1690 /* Use a pseudo if highly optimizing and still generating RTL. */
1691 rtx subtarget
1692 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1693 rtx temp, insn;
1695 /* If this is a sign-extended 32-bit constant, we can do this in at most
1696 three insns, so do it if we have enough insns left. We always have
1697 a sign-extended 32-bit constant when compiling on a narrow machine. */
1699 if (HOST_BITS_PER_WIDE_INT != 64
1700 || c >> 31 == -1 || c >> 31 == 0)
1702 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1703 HOST_WIDE_INT tmp1 = c - low;
1704 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1705 HOST_WIDE_INT extra = 0;
1707 /* If HIGH will be interpreted as negative but the constant is
 1708 positive, we must adjust it to do two ldah insns. */
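	 /* Illustrative example: c = 0x7fff8000 gives low = -0x8000 and an
	    initial high of -0x8000; since c is positive we set extra = 0x4000
	    and recompute high = 0x4000, so the value is built as two ldah
	    additions of 0x4000 plus an lda of -0x8000:
	    0x40000000 + 0x40000000 - 0x8000 == 0x7fff8000.  */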
1710 if ((high & 0x8000) != 0 && c >= 0)
1712 extra = 0x4000;
1713 tmp1 -= 0x40000000;
1714 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1717 if (c == low || (low == 0 && extra == 0))
1719 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1720 but that meant that we can't handle INT_MIN on 32-bit machines
1721 (like NT/Alpha), because we recurse indefinitely through
1722 emit_move_insn to gen_movdi. So instead, since we know exactly
1723 what we want, create it explicitly. */
1725 if (no_output)
1726 return pc_rtx;
1727 if (target == NULL)
1728 target = gen_reg_rtx (mode);
1729 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1730 return target;
1732 else if (n >= 2 + (extra != 0))
1734 if (no_output)
1735 return pc_rtx;
1736 if (!can_create_pseudo_p ())
1738 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1739 temp = target;
1741 else
1742 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1743 subtarget, mode);
1745 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1746 This means that if we go through expand_binop, we'll try to
1747 generate extensions, etc, which will require new pseudos, which
1748 will fail during some split phases. The SImode add patterns
1749 still exist, but are not named. So build the insns by hand. */
1751 if (extra != 0)
1753 if (! subtarget)
1754 subtarget = gen_reg_rtx (mode);
1755 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1756 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1757 emit_insn (insn);
1758 temp = subtarget;
1761 if (target == NULL)
1762 target = gen_reg_rtx (mode);
1763 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1764 insn = gen_rtx_SET (VOIDmode, target, insn);
1765 emit_insn (insn);
1766 return target;
1770 /* If we couldn't do it that way, try some other methods. But if we have
1771 no instructions left, don't bother. Likewise, if this is SImode and
1772 we can't make pseudos, we can't do anything since the expand_binop
1773 and expand_unop calls will widen and try to make pseudos. */
1775 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1776 return 0;
1778 /* Next, see if we can load a related constant and then shift and possibly
1779 negate it to get the constant we want. Try this once each increasing
1780 numbers of insns. */
1782 for (i = 1; i < n; i++)
1784 /* First, see if minus some low bits, we've an easy load of
1785 high bits. */
1787 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1788 if (new_const != 0)
1790 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1791 if (temp)
1793 if (no_output)
1794 return temp;
1795 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1796 target, 0, OPTAB_WIDEN);
1800 /* Next try complementing. */
1801 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1802 if (temp)
1804 if (no_output)
1805 return temp;
1806 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1809 /* Next try to form a constant and do a left shift. We can do this
1810 if some low-order bits are zero; the exact_log2 call below tells
1811 us that information. The bits we are shifting out could be any
1812 value, but here we'll just try the 0- and sign-extended forms of
1813 the constant. To try to increase the chance of having the same
1814 constant in more than one insn, start at the highest number of
1815 bits to shift, but try all possibilities in case a ZAPNOT will
1816 be useful. */
1818 bits = exact_log2 (c & -c);
1819 if (bits > 0)
1820 for (; bits > 0; bits--)
1822 new_const = c >> bits;
1823 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1824 if (!temp && c < 0)
1826 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1827 temp = alpha_emit_set_const (subtarget, mode, new_const,
1828 i, no_output);
1830 if (temp)
1832 if (no_output)
1833 return temp;
1834 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1835 target, 0, OPTAB_WIDEN);
1839 /* Now try high-order zero bits. Here we try the shifted-in bits as
1840 all zero and all ones. Be careful to avoid shifting outside the
1841 mode and to avoid shifting outside the host wide int size. */
1842 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1843 confuse the recursive call and set all of the high 32 bits. */
1845 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1846 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1847 if (bits > 0)
1848 for (; bits > 0; bits--)
1850 new_const = c << bits;
1851 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1852 if (!temp)
1854 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1855 temp = alpha_emit_set_const (subtarget, mode, new_const,
1856 i, no_output);
1858 if (temp)
1860 if (no_output)
1861 return temp;
1862 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1863 target, 1, OPTAB_WIDEN);
1867 /* Now try high-order 1 bits. We get that with a sign-extension.
1868 But one bit isn't enough here. Be careful to avoid shifting outside
1869 the mode and to avoid shifting outside the host wide int size. */
1871 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1872 - floor_log2 (~ c) - 2);
1873 if (bits > 0)
1874 for (; bits > 0; bits--)
1876 new_const = c << bits;
1877 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1878 if (!temp)
1880 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1881 temp = alpha_emit_set_const (subtarget, mode, new_const,
1882 i, no_output);
1884 if (temp)
1886 if (no_output)
1887 return temp;
1888 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1889 target, 0, OPTAB_WIDEN);
1894 #if HOST_BITS_PER_WIDE_INT == 64
1895 /* Finally, see if can load a value into the target that is the same as the
1896 constant except that all bytes that are 0 are changed to be 0xff. If we
1897 can, then we can do a ZAPNOT to obtain the desired constant. */
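  /* Illustrative example: for c = 0x00ff00ff00ff00ff, new_const becomes
     all-ones (-1), which loads in a single insn; the AND below then uses
     the mask (c | ~new_const) == c, itself a zap mask, which the and
     patterns emit as one ZAPNOT clearing the 0x00 bytes.  */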
1899 new_const = c;
1900 for (i = 0; i < 64; i += 8)
1901 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1902 new_const |= (HOST_WIDE_INT) 0xff << i;
1904 /* We are only called for SImode and DImode. If this is SImode, ensure that
1905 we are sign extended to a full word. */
1907 if (mode == SImode)
1908 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1910 if (new_const != c)
1912 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1913 if (temp)
1915 if (no_output)
1916 return temp;
1917 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1918 target, 0, OPTAB_WIDEN);
1921 #endif
1923 return 0;
1926 /* Try to output insns to set TARGET equal to the constant C if it can be
1927 done in less than N insns. Do all computations in MODE. Returns the place
1928 where the output has been placed if it can be done and the insns have been
1929 emitted. If it would take more than N insns, zero is returned and no
 1930 insns are emitted. */
1932 static rtx
1933 alpha_emit_set_const (rtx target, enum machine_mode mode,
1934 HOST_WIDE_INT c, int n, bool no_output)
1936 enum machine_mode orig_mode = mode;
1937 rtx orig_target = target;
1938 rtx result = 0;
1939 int i;
1941 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1942 can't load this constant in one insn, do this in DImode. */
1943 if (!can_create_pseudo_p () && mode == SImode
1944 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1946 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1947 if (result)
1948 return result;
1950 target = no_output ? NULL : gen_lowpart (DImode, target);
1951 mode = DImode;
1953 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1955 target = no_output ? NULL : gen_lowpart (DImode, target);
1956 mode = DImode;
1959 /* Try 1 insn, then 2, then up to N. */
1960 for (i = 1; i <= n; i++)
1962 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1963 if (result)
1965 rtx insn, set;
1967 if (no_output)
1968 return result;
1970 insn = get_last_insn ();
1971 set = single_set (insn);
1972 if (! CONSTANT_P (SET_SRC (set)))
1973 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1974 break;
1978 /* Allow for the case where we changed the mode of TARGET. */
1979 if (result)
1981 if (result == target)
1982 result = orig_target;
1983 else if (mode != orig_mode)
1984 result = gen_lowpart (orig_mode, result);
1987 return result;
1990 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
 1991 fall back to a straightforward decomposition. We do this to avoid
1992 exponential run times encountered when looking for longer sequences
1993 with alpha_emit_set_const. */
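/* Illustrative example: c1 = 0x123456789 decomposes into d1 = 0x6789,
   d2 = 0x23450000, d3 = 0x1 and d4 = 0; the emitted sequence loads 1,
   shifts it left by 32, then adds 0x23450000 (an ldah-style addend) and
   0x6789 (an lda-style addend), reconstructing 0x123456789.  */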
1995 static rtx
1996 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1998 HOST_WIDE_INT d1, d2, d3, d4;
2000 /* Decompose the entire word */
2001 #if HOST_BITS_PER_WIDE_INT >= 64
2002 gcc_assert (c2 == -(c1 < 0));
2003 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2004 c1 -= d1;
2005 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2006 c1 = (c1 - d2) >> 32;
2007 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2008 c1 -= d3;
2009 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2010 gcc_assert (c1 == d4);
2011 #else
2012 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2013 c1 -= d1;
2014 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2015 gcc_assert (c1 == d2);
2016 c2 += (d2 < 0);
2017 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2018 c2 -= d3;
2019 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2020 gcc_assert (c2 == d4);
2021 #endif
2023 /* Construct the high word */
2024 if (d4)
2026 emit_move_insn (target, GEN_INT (d4));
2027 if (d3)
2028 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2030 else
2031 emit_move_insn (target, GEN_INT (d3));
2033 /* Shift it into place */
2034 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2036 /* Add in the low bits. */
2037 if (d2)
2038 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2039 if (d1)
2040 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2042 return target;
2045 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2046 the low 64 bits. */
2048 static void
2049 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2051 HOST_WIDE_INT i0, i1;
2053 if (GET_CODE (x) == CONST_VECTOR)
2054 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2057 if (CONST_INT_P (x))
2059 i0 = INTVAL (x);
2060 i1 = -(i0 < 0);
2062 else if (HOST_BITS_PER_WIDE_INT >= 64)
2064 i0 = CONST_DOUBLE_LOW (x);
2065 i1 = -(i0 < 0);
2067 else
2069 i0 = CONST_DOUBLE_LOW (x);
2070 i1 = CONST_DOUBLE_HIGH (x);
2073 *p0 = i0;
2074 *p1 = i1;
2077 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2078 are willing to load the value into a register via a move pattern.
2079 Normally this is all symbolic constants, integral constants that
2080 take three or fewer instructions, and floating-point zero. */
2082 bool
2083 alpha_legitimate_constant_p (rtx x)
2085 enum machine_mode mode = GET_MODE (x);
2086 HOST_WIDE_INT i0, i1;
2088 switch (GET_CODE (x))
2090 case LABEL_REF:
2091 case HIGH:
2092 return true;
2094 case CONST:
2095 if (GET_CODE (XEXP (x, 0)) == PLUS
2096 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2097 x = XEXP (XEXP (x, 0), 0);
2098 else
2099 return true;
2101 if (GET_CODE (x) != SYMBOL_REF)
2102 return true;
2104 /* FALLTHRU */
2106 case SYMBOL_REF:
2107 /* TLS symbols are never valid. */
2108 return SYMBOL_REF_TLS_MODEL (x) == 0;
2110 case CONST_DOUBLE:
2111 if (x == CONST0_RTX (mode))
2112 return true;
2113 if (FLOAT_MODE_P (mode))
2114 return false;
2115 goto do_integer;
2117 case CONST_VECTOR:
2118 if (x == CONST0_RTX (mode))
2119 return true;
2120 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2121 return false;
2122 if (GET_MODE_SIZE (mode) != 8)
2123 return false;
2124 goto do_integer;
2126 case CONST_INT:
2127 do_integer:
2128 if (TARGET_BUILD_CONSTANTS)
2129 return true;
2130 alpha_extract_integer (x, &i0, &i1);
2131 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2132 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2133 return false;
2135 default:
2136 return false;
2140 /* Operand 1 is known to be a constant, and should require more than one
2141 instruction to load. Emit that multi-part load. */
2143 bool
2144 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2146 HOST_WIDE_INT i0, i1;
2147 rtx temp = NULL_RTX;
2149 alpha_extract_integer (operands[1], &i0, &i1);
2151 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2152 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2154 if (!temp && TARGET_BUILD_CONSTANTS)
2155 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2157 if (temp)
2159 if (!rtx_equal_p (operands[0], temp))
2160 emit_move_insn (operands[0], temp);
2161 return true;
2164 return false;
2167 /* Expand a move instruction; return true if all work is done.
2168 We don't handle non-bwx subword loads here. */
2170 bool
2171 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2173 rtx tmp;
2175 /* If the output is not a register, the input must be. */
2176 if (MEM_P (operands[0])
2177 && ! reg_or_0_operand (operands[1], mode))
2178 operands[1] = force_reg (mode, operands[1]);
2180 /* Allow legitimize_address to perform some simplifications. */
2181 if (mode == Pmode && symbolic_operand (operands[1], mode))
2183 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2184 if (tmp)
2186 if (tmp == operands[0])
2187 return true;
2188 operands[1] = tmp;
2189 return false;
2193 /* Early out for non-constants and valid constants. */
2194 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2195 return false;
2197 /* Split large integers. */
2198 if (CONST_INT_P (operands[1])
2199 || GET_CODE (operands[1]) == CONST_DOUBLE
2200 || GET_CODE (operands[1]) == CONST_VECTOR)
2202 if (alpha_split_const_mov (mode, operands))
2203 return true;
2206 /* Otherwise we've nothing left but to drop the thing to memory. */
2207 tmp = force_const_mem (mode, operands[1]);
2209 if (tmp == NULL_RTX)
2210 return false;
2212 if (reload_in_progress)
2214 emit_move_insn (operands[0], XEXP (tmp, 0));
2215 operands[1] = replace_equiv_address (tmp, operands[0]);
2217 else
2218 operands[1] = validize_mem (tmp);
2219 return false;
2222 /* Expand a non-bwx QImode or HImode move instruction;
2223 return true if all work is done. */
2225 bool
2226 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2228 rtx seq;
2230 /* If the output is not a register, the input must be. */
2231 if (MEM_P (operands[0]))
2232 operands[1] = force_reg (mode, operands[1]);
2234 /* Handle four memory cases, unaligned and aligned for either the input
2235 or the output. The only case where we can be called during reload is
2236 for aligned loads; all other cases require temporaries. */
2238 if (any_memory_operand (operands[1], mode))
2240 if (aligned_memory_operand (operands[1], mode))
2242 if (reload_in_progress)
2244 if (mode == QImode)
2245 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2246 else
2247 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2248 emit_insn (seq);
2250 else
2252 rtx aligned_mem, bitnum;
2253 rtx scratch = gen_reg_rtx (SImode);
2254 rtx subtarget;
2255 bool copyout;
2257 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2259 subtarget = operands[0];
2260 if (REG_P (subtarget))
2261 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2262 else
2263 subtarget = gen_reg_rtx (DImode), copyout = true;
2265 if (mode == QImode)
2266 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2267 bitnum, scratch);
2268 else
2269 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2270 bitnum, scratch);
2271 emit_insn (seq);
2273 if (copyout)
2274 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2277 else
2279 /* Don't pass these as parameters since that makes the generated
2280 code depend on parameter evaluation order which will cause
2281 bootstrap failures. */
2283 rtx temp1, temp2, subtarget, ua;
2284 bool copyout;
2286 temp1 = gen_reg_rtx (DImode);
2287 temp2 = gen_reg_rtx (DImode);
2289 subtarget = operands[0];
2290 if (REG_P (subtarget))
2291 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2292 else
2293 subtarget = gen_reg_rtx (DImode), copyout = true;
2295 ua = get_unaligned_address (operands[1]);
2296 if (mode == QImode)
2297 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2298 else
2299 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2301 alpha_set_memflags (seq, operands[1]);
2302 emit_insn (seq);
2304 if (copyout)
2305 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2307 return true;
2310 if (any_memory_operand (operands[0], mode))
2312 if (aligned_memory_operand (operands[0], mode))
2314 rtx aligned_mem, bitnum;
2315 rtx temp1 = gen_reg_rtx (SImode);
2316 rtx temp2 = gen_reg_rtx (SImode);
2318 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2320 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2321 temp1, temp2));
2323 else
2325 rtx temp1 = gen_reg_rtx (DImode);
2326 rtx temp2 = gen_reg_rtx (DImode);
2327 rtx temp3 = gen_reg_rtx (DImode);
2328 rtx ua = get_unaligned_address (operands[0]);
2330 if (mode == QImode)
2331 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2332 else
2333 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2335 alpha_set_memflags (seq, operands[0]);
2336 emit_insn (seq);
2338 return true;
2341 return false;
2344 /* Implement the movmisalign patterns. One of the operands is a memory
2345 that is not naturally aligned. Emit instructions to load it. */
2347 void
2348 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2350 /* Honor misaligned loads, for those we promised to do so. */
2351 if (MEM_P (operands[1]))
2353 rtx tmp;
2355 if (register_operand (operands[0], mode))
2356 tmp = operands[0];
2357 else
2358 tmp = gen_reg_rtx (mode);
2360 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2361 if (tmp != operands[0])
2362 emit_move_insn (operands[0], tmp);
2364 else if (MEM_P (operands[0]))
2366 if (!reg_or_0_operand (operands[1], mode))
2367 operands[1] = force_reg (mode, operands[1]);
2368 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2370 else
2371 gcc_unreachable ();
2374 /* Generate an unsigned DImode to FP conversion. This is the same code
2375 optabs would emit if we didn't have TFmode patterns.
2377 For SFmode, this is the only construction I've found that can pass
2378 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2379 intermediates will work, because you'll get intermediate rounding
2380 that ruins the end result. Some of this could be fixed by turning
2381 on round-to-positive-infinity, but that requires diddling the fpsr,
2382 which kills performance. I tried turning this around and converting
2383 to a negative number, so that I could turn on /m, but either I did
2384 it wrong or there's something else going on, because I wound up with the exact
2385 same single-bit error. There is a branch-less form of this same code:
2387 srl $16,1,$1
2388 and $16,1,$2
2389 cmplt $16,0,$3
2390 or $1,$2,$2
2391 cmovge $16,$16,$2
2392 itoft $3,$f10
2393 itoft $2,$f11
2394 cvtqs $f11,$f11
2395 adds $f11,$f11,$f0
2396 fcmoveq $f10,$f11,$f0
2398 I'm not using it because it's the same number of instructions as
2399 this branch-full form, and it has more serialized long latency
2400 instructions on the critical path.
2402 For DFmode, we can avoid rounding errors by breaking up the word
2403 into two pieces, converting them separately, and adding them back:
2405 LC0: .long 0,0x5f800000
2407 itoft $16,$f11
2408 lda $2,LC0
2409 cmplt $16,0,$1
2410 cpyse $f11,$f31,$f10
2411 cpyse $f31,$f11,$f11
2412 s4addq $1,$2,$1
2413 lds $f12,0($1)
2414 cvtqt $f10,$f10
2415 cvtqt $f11,$f11
2416 addt $f12,$f10,$f0
2417 addt $f0,$f11,$f0
2419 This doesn't seem to be a clear-cut win over the optabs form.
2420 It probably all depends on the distribution of numbers being
2421 converted -- in the optabs form, all but high-bit-set values have a
2422 much lower minimum execution time. */
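/* Roughly, in C (illustration only; the output mode may be SFmode or
   DFmode):

       if ((long) x >= 0)
         return (FP_TYPE) x;
       long y = (x >> 1) | (x & 1);     ... halve, but keep a sticky low bit
       return (FP_TYPE) y + (FP_TYPE) y;

   Or-ing the low bit back in after the logical shift preserves correct
   round-to-nearest behavior for odd values that the shift alone would
   lose.  */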
2424 void
2425 alpha_emit_floatuns (rtx operands[2])
2427 rtx neglab, donelab, i0, i1, f0, in, out;
2428 enum machine_mode mode;
2430 out = operands[0];
2431 in = force_reg (DImode, operands[1]);
2432 mode = GET_MODE (out);
2433 neglab = gen_label_rtx ();
2434 donelab = gen_label_rtx ();
2435 i0 = gen_reg_rtx (DImode);
2436 i1 = gen_reg_rtx (DImode);
2437 f0 = gen_reg_rtx (mode);
2439 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2441 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2442 emit_jump_insn (gen_jump (donelab));
2443 emit_barrier ();
2445 emit_label (neglab);
2447 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2448 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2449 emit_insn (gen_iordi3 (i0, i0, i1));
2450 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2451 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2453 emit_label (donelab);
2456 /* Generate the comparison for a conditional branch. */
2458 void
2459 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2461 enum rtx_code cmp_code, branch_code;
2462 enum machine_mode branch_mode = VOIDmode;
2463 enum rtx_code code = GET_CODE (operands[0]);
2464 rtx op0 = operands[1], op1 = operands[2];
2465 rtx tem;
2467 if (cmp_mode == TFmode)
2469 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2470 op1 = const0_rtx;
2471 cmp_mode = DImode;
2474 /* The general case: fold the comparison code to the types of compares
2475 that we have, choosing the branch as necessary. */
2476 switch (code)
2478 case EQ: case LE: case LT: case LEU: case LTU:
2479 case UNORDERED:
2480 /* We have these compares: */
2481 cmp_code = code, branch_code = NE;
2482 break;
2484 case NE:
2485 case ORDERED:
2486 /* These must be reversed. */
2487 cmp_code = reverse_condition (code), branch_code = EQ;
2488 break;
2490 case GE: case GT: case GEU: case GTU:
2491 /* For FP, we swap them, for INT, we reverse them. */
2492 if (cmp_mode == DFmode)
2494 cmp_code = swap_condition (code);
2495 branch_code = NE;
2496 tem = op0, op0 = op1, op1 = tem;
2498 else
2500 cmp_code = reverse_condition (code);
2501 branch_code = EQ;
2503 break;
2505 default:
2506 gcc_unreachable ();
2509 if (cmp_mode == DFmode)
2511 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2513 /* When we are not as concerned about non-finite values, and we
2514 are comparing against zero, we can branch directly. */
2515 if (op1 == CONST0_RTX (DFmode))
2516 cmp_code = UNKNOWN, branch_code = code;
2517 else if (op0 == CONST0_RTX (DFmode))
2519 /* Undo the swap we probably did just above. */
2520 tem = op0, op0 = op1, op1 = tem;
2521 branch_code = swap_condition (cmp_code);
2522 cmp_code = UNKNOWN;
2525 else
2527 /* ??? We mark the branch mode to be CCmode to prevent the
2528 compare and branch from being combined, since the compare
2529 insn follows IEEE rules that the branch does not. */
2530 branch_mode = CCmode;
2533 else
2535 /* The following optimizations are only for signed compares. */
2536 if (code != LEU && code != LTU && code != GEU && code != GTU)
2538 /* Whee. Compare and branch against 0 directly. */
2539 if (op1 == const0_rtx)
2540 cmp_code = UNKNOWN, branch_code = code;
2542 /* If the constant doesn't fit into an immediate, but can
2543 be generated by lda/ldah, we adjust the argument and
2544 compare against zero, so we can use beq/bne directly. */
2545 /* ??? Don't do this when comparing against symbols, otherwise
2546 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2547 be declared false out of hand (at least for non-weak). */
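/* Illustrative example: for (x == 0x100), 0x100 does not satisfy the
   8-bit "I" constraint of the compare insns, but -0x100 satisfies "K",
   so we emit roughly "lda t,-256(x); beq t,L" rather than first loading
   0x100 into a register.  */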
2548 else if (CONST_INT_P (op1)
2549 && (code == EQ || code == NE)
2550 && !(symbolic_operand (op0, VOIDmode)
2551 || (REG_P (op0) && REG_POINTER (op0))))
2553 rtx n_op1 = GEN_INT (-INTVAL (op1));
2555 if (! satisfies_constraint_I (op1)
2556 && (satisfies_constraint_K (n_op1)
2557 || satisfies_constraint_L (n_op1)))
2558 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2562 if (!reg_or_0_operand (op0, DImode))
2563 op0 = force_reg (DImode, op0);
2564 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2565 op1 = force_reg (DImode, op1);
2568 /* Emit an initial compare instruction, if necessary. */
2569 tem = op0;
2570 if (cmp_code != UNKNOWN)
2572 tem = gen_reg_rtx (cmp_mode);
2573 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2576 /* Emit the branch instruction. */
2577 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2578 gen_rtx_IF_THEN_ELSE (VOIDmode,
2579 gen_rtx_fmt_ee (branch_code,
2580 branch_mode, tem,
2581 CONST0_RTX (cmp_mode)),
2582 gen_rtx_LABEL_REF (VOIDmode,
2583 operands[3]),
2584 pc_rtx));
2585 emit_jump_insn (tem);
2588 /* Certain simplifications can be done to make invalid setcc operations
2589 valid. Return true if we emitted the comparison, false if we can't work. */
2591 bool
2592 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2594 enum rtx_code cmp_code;
2595 enum rtx_code code = GET_CODE (operands[1]);
2596 rtx op0 = operands[2], op1 = operands[3];
2597 rtx tmp;
2599 if (cmp_mode == TFmode)
2601 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2602 op1 = const0_rtx;
2603 cmp_mode = DImode;
2606 if (cmp_mode == DFmode && !TARGET_FIX)
2607 return 0;
2609 /* The general case: fold the comparison code to the types of compares
2610 that we have, choosing the branch as necessary. */
2612 cmp_code = UNKNOWN;
2613 switch (code)
2615 case EQ: case LE: case LT: case LEU: case LTU:
2616 case UNORDERED:
2617 /* We have these compares. */
2618 if (cmp_mode == DFmode)
2619 cmp_code = code, code = NE;
2620 break;
2622 case NE:
2623 if (cmp_mode == DImode && op1 == const0_rtx)
2624 break;
2625 /* FALLTHRU */
2627 case ORDERED:
2628 cmp_code = reverse_condition (code);
2629 code = EQ;
2630 break;
2632 case GE: case GT: case GEU: case GTU:
2633 /* These normally need swapping, but for integer zero we have
2634 special patterns that recognize swapped operands. */
2635 if (cmp_mode == DImode && op1 == const0_rtx)
2636 break;
2637 code = swap_condition (code);
2638 if (cmp_mode == DFmode)
2639 cmp_code = code, code = NE;
2640 tmp = op0, op0 = op1, op1 = tmp;
2641 break;
2643 default:
2644 gcc_unreachable ();
2647 if (cmp_mode == DImode)
2649 if (!register_operand (op0, DImode))
2650 op0 = force_reg (DImode, op0);
2651 if (!reg_or_8bit_operand (op1, DImode))
2652 op1 = force_reg (DImode, op1);
2655 /* Emit an initial compare instruction, if necessary. */
2656 if (cmp_code != UNKNOWN)
2658 tmp = gen_reg_rtx (cmp_mode);
2659 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2660 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2662 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2663 op1 = const0_rtx;
2666 /* Emit the setcc instruction. */
2667 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2668 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2669 return true;
2673 /* Rewrite a comparison against zero CMP of the form
2674 (CODE (cc0) (const_int 0)) so it can be written validly in
2675 a conditional move (if_then_else CMP ...).
2676 If both of the operands that set cc0 are nonzero we must emit
2677 an insn to perform the compare (it can't be done within
2678 the conditional move). */
2681 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2683 enum rtx_code code = GET_CODE (cmp);
2684 enum rtx_code cmov_code = NE;
2685 rtx op0 = XEXP (cmp, 0);
2686 rtx op1 = XEXP (cmp, 1);
2687 enum machine_mode cmp_mode
2688 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2689 enum machine_mode cmov_mode = VOIDmode;
2690 int local_fast_math = flag_unsafe_math_optimizations;
2691 rtx tem;
2693 if (cmp_mode == TFmode)
2695 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2696 op1 = const0_rtx;
2697 cmp_mode = DImode;
2700 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2702 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2704 enum rtx_code cmp_code;
2706 if (! TARGET_FIX)
2707 return 0;
2709 /* If we have fp<->int register move instructions, do a cmov by
2710 performing the comparison in fp registers, and move the
2711 zero/nonzero value to integer registers, where we can then
2712 use a normal cmov, or vice-versa. */
2714 switch (code)
2716 case EQ: case LE: case LT: case LEU: case LTU:
2717 /* We have these compares. */
2718 cmp_code = code, code = NE;
2719 break;
2721 case NE:
2722 /* This must be reversed. */
2723 cmp_code = EQ, code = EQ;
2724 break;
2726 case GE: case GT: case GEU: case GTU:
2727 /* These normally need swapping, but for integer zero we have
2728 special patterns that recognize swapped operands. */
2729 if (cmp_mode == DImode && op1 == const0_rtx)
2730 cmp_code = code, code = NE;
2731 else
2733 cmp_code = swap_condition (code);
2734 code = NE;
2735 tem = op0, op0 = op1, op1 = tem;
2737 break;
2739 default:
2740 gcc_unreachable ();
2743 tem = gen_reg_rtx (cmp_mode);
2744 emit_insn (gen_rtx_SET (VOIDmode, tem,
2745 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2746 op0, op1)));
2748 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2749 op0 = gen_lowpart (cmp_mode, tem);
2750 op1 = CONST0_RTX (cmp_mode);
2751 local_fast_math = 1;
2754 /* We may be able to use a conditional move directly.
2755 This avoids emitting spurious compares. */
2756 if (signed_comparison_operator (cmp, VOIDmode)
2757 && (cmp_mode == DImode || local_fast_math)
2758 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2759 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2761 /* We can't put the comparison inside the conditional move;
2762 emit a compare instruction and put that inside the
2763 conditional move. Make sure we emit only comparisons we have;
2764 swap or reverse as necessary. */
2766 if (!can_create_pseudo_p ())
2767 return NULL_RTX;
2769 switch (code)
2771 case EQ: case LE: case LT: case LEU: case LTU:
2772 /* We have these compares: */
2773 break;
2775 case NE:
2776 /* This must be reversed. */
2777 code = reverse_condition (code);
2778 cmov_code = EQ;
2779 break;
2781 case GE: case GT: case GEU: case GTU:
2782 /* These must be swapped. */
2783 if (op1 != CONST0_RTX (cmp_mode))
2785 code = swap_condition (code);
2786 tem = op0, op0 = op1, op1 = tem;
2788 break;
2790 default:
2791 gcc_unreachable ();
2794 if (cmp_mode == DImode)
2796 if (!reg_or_0_operand (op0, DImode))
2797 op0 = force_reg (DImode, op0);
2798 if (!reg_or_8bit_operand (op1, DImode))
2799 op1 = force_reg (DImode, op1);
2802 /* ??? We mark the branch mode to be CCmode to prevent the compare
2803 and cmov from being combined, since the compare insn follows IEEE
2804 rules that the cmov does not. */
2805 if (cmp_mode == DFmode && !local_fast_math)
2806 cmov_mode = CCmode;
2808 tem = gen_reg_rtx (cmp_mode);
2809 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2810 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2813 /* Simplify a conditional move of two constants into a setcc with
2814 arithmetic. This is done with a splitter since combine would
2815 just undo the work if done during code generation. It also catches
2816 cases we wouldn't have before cse. */
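/* For illustration (values not from the original source):
   (x == 0 ? 4 : 0) becomes roughly "cmpeq x,0,t; sll t,2,t",
   (x < 0 ? -1 : 0) becomes "cmplt x,0,t; negq t,t", and
   (x == 0 ? 9 : 1) becomes "cmpeq x,0,t; s8addq t,1,t".  */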
2819 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2820 rtx t_rtx, rtx f_rtx)
2822 HOST_WIDE_INT t, f, diff;
2823 enum machine_mode mode;
2824 rtx target, subtarget, tmp;
2826 mode = GET_MODE (dest);
2827 t = INTVAL (t_rtx);
2828 f = INTVAL (f_rtx);
2829 diff = t - f;
2831 if (((code == NE || code == EQ) && diff < 0)
2832 || (code == GE || code == GT))
2834 code = reverse_condition (code);
2835 diff = t, t = f, f = diff;
2836 diff = t - f;
2839 subtarget = target = dest;
2840 if (mode != DImode)
2842 target = gen_lowpart (DImode, dest);
2843 if (can_create_pseudo_p ())
2844 subtarget = gen_reg_rtx (DImode);
2845 else
2846 subtarget = target;
2848 /* Below, we must be careful to use copy_rtx on target and subtarget
2849 in intermediate insns, as they may be a subreg rtx, which may not
2850 be shared. */
2852 if (f == 0 && exact_log2 (diff) > 0
2853 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2854 viable over a longer latency cmove. On EV5, the E0 slot is a
2855 scarce resource, and on EV4 shift has the same latency as a cmove. */
2856 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2858 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2859 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2861 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2862 GEN_INT (exact_log2 (t)));
2863 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2865 else if (f == 0 && t == -1)
2867 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2868 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2870 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2872 else if (diff == 1 || diff == 4 || diff == 8)
2874 rtx add_op;
2876 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2877 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2879 if (diff == 1)
2880 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2881 else
2883 add_op = GEN_INT (f);
2884 if (sext_add_operand (add_op, mode))
2886 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2887 GEN_INT (diff));
2888 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2889 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2891 else
2892 return 0;
2895 else
2896 return 0;
2898 return 1;
2901 /* Look up the function X_floating library function name for the
2902 given operation. */
2904 struct GTY(()) xfloating_op
2906 const enum rtx_code code;
2907 const char *const GTY((skip)) osf_func;
2908 const char *const GTY((skip)) vms_func;
2909 rtx libcall;
2912 static GTY(()) struct xfloating_op xfloating_ops[] =
2914 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2915 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2916 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2917 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2918 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2919 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2920 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2921 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2922 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2923 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2924 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2925 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2926 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2927 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2928 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2931 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2933 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2934 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2937 static rtx
2938 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2940 struct xfloating_op *ops = xfloating_ops;
2941 long n = ARRAY_SIZE (xfloating_ops);
2942 long i;
2944 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2946 /* How irritating. Nothing to key off for the main table. */
2947 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2949 ops = vax_cvt_ops;
2950 n = ARRAY_SIZE (vax_cvt_ops);
2953 for (i = 0; i < n; ++i, ++ops)
2954 if (ops->code == code)
2956 rtx func = ops->libcall;
2957 if (!func)
2959 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2960 ? ops->vms_func : ops->osf_func);
2961 ops->libcall = func;
2963 return func;
2966 gcc_unreachable ();
2969 /* Most X_floating operations take the rounding mode as an argument.
2970 Compute that here. */
2972 static int
2973 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2974 enum alpha_fp_rounding_mode round)
2976 int mode;
2978 switch (round)
2980 case ALPHA_FPRM_NORM:
2981 mode = 2;
2982 break;
2983 case ALPHA_FPRM_MINF:
2984 mode = 1;
2985 break;
2986 case ALPHA_FPRM_CHOP:
2987 mode = 0;
2988 break;
2989 case ALPHA_FPRM_DYN:
2990 mode = 4;
2991 break;
2992 default:
2993 gcc_unreachable ();
2995 /* XXX For reference, round to +inf is mode = 3. */
2998 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2999 mode |= 0x10000;
3001 return mode;
3004 /* Emit an X_floating library function call.
3006 Note that these functions do not follow normal calling conventions:
3007 TFmode arguments are passed in two integer registers (as opposed to
3008 indirect); TFmode return values appear in R16+R17.
3010 FUNC is the function to call.
3011 TARGET is where the output belongs.
3012 OPERANDS are the inputs.
3013 NOPERANDS is the count of inputs.
3014 EQUIV is the expression equivalent for the function.
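/* For example (illustrative), an _OtsAddX/OTS$ADD_X call emitted through
   this helper passes the first TFmode operand in $16/$17, the second in
   $18/$19 and the rounding-mode integer in $20, with the TFmode result
   coming back in $16/$17.  */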
3017 static void
3018 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3019 int noperands, rtx equiv)
3021 rtx usage = NULL_RTX, tmp, reg;
3022 int regno = 16, i;
3024 start_sequence ();
3026 for (i = 0; i < noperands; ++i)
3028 switch (GET_MODE (operands[i]))
3030 case TFmode:
3031 reg = gen_rtx_REG (TFmode, regno);
3032 regno += 2;
3033 break;
3035 case DFmode:
3036 reg = gen_rtx_REG (DFmode, regno + 32);
3037 regno += 1;
3038 break;
3040 case VOIDmode:
3041 gcc_assert (CONST_INT_P (operands[i]));
3042 /* FALLTHRU */
3043 case DImode:
3044 reg = gen_rtx_REG (DImode, regno);
3045 regno += 1;
3046 break;
3048 default:
3049 gcc_unreachable ();
3052 emit_move_insn (reg, operands[i]);
3053 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3056 switch (GET_MODE (target))
3058 case TFmode:
3059 reg = gen_rtx_REG (TFmode, 16);
3060 break;
3061 case DFmode:
3062 reg = gen_rtx_REG (DFmode, 32);
3063 break;
3064 case DImode:
3065 reg = gen_rtx_REG (DImode, 0);
3066 break;
3067 default:
3068 gcc_unreachable ();
3071 tmp = gen_rtx_MEM (QImode, func);
3072 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3073 const0_rtx, const0_rtx));
3074 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3075 RTL_CONST_CALL_P (tmp) = 1;
3077 tmp = get_insns ();
3078 end_sequence ();
3080 emit_libcall_block (tmp, target, reg, equiv);
3083 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3085 void
3086 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3088 rtx func;
3089 int mode;
3090 rtx out_operands[3];
3092 func = alpha_lookup_xfloating_lib_func (code);
3093 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3095 out_operands[0] = operands[1];
3096 out_operands[1] = operands[2];
3097 out_operands[2] = GEN_INT (mode);
3098 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3099 gen_rtx_fmt_ee (code, TFmode, operands[1],
3100 operands[2]));
3103 /* Emit an X_floating library function call for a comparison. */
3105 static rtx
3106 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3108 enum rtx_code cmp_code, res_code;
3109 rtx func, out, operands[2], note;
3111 /* X_floating library comparison functions return
3112 -1 unordered
3113 0 false
3114 1 true
3115 Convert the compare against the raw return value. */
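/* E.g. (illustrative): a LE comparison calls _OtsLeqX and tests the raw
   result with GT (result > 0 means true), while UNORDERED calls _OtsEqlX
   and tests with LT, since only an unordered input produces -1.  */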
3117 cmp_code = *pcode;
3118 switch (cmp_code)
3120 case UNORDERED:
3121 cmp_code = EQ;
3122 res_code = LT;
3123 break;
3124 case ORDERED:
3125 cmp_code = EQ;
3126 res_code = GE;
3127 break;
3128 case NE:
3129 res_code = NE;
3130 break;
3131 case EQ:
3132 case LT:
3133 case GT:
3134 case LE:
3135 case GE:
3136 res_code = GT;
3137 break;
3138 default:
3139 gcc_unreachable ();
3141 *pcode = res_code;
3143 func = alpha_lookup_xfloating_lib_func (cmp_code);
3145 operands[0] = op0;
3146 operands[1] = op1;
3147 out = gen_reg_rtx (DImode);
3149 /* What's actually returned is -1,0,1, not a proper boolean value,
3150 so use an EXPR_LIST as with a generic libcall instead of a
3151 comparison type expression. */
3152 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3153 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3154 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3155 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3157 return out;
3160 /* Emit an X_floating library function call for a conversion. */
3162 void
3163 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3165 int noperands = 1, mode;
3166 rtx out_operands[2];
3167 rtx func;
3168 enum rtx_code code = orig_code;
3170 if (code == UNSIGNED_FIX)
3171 code = FIX;
3173 func = alpha_lookup_xfloating_lib_func (code);
3175 out_operands[0] = operands[1];
3177 switch (code)
3179 case FIX:
3180 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3181 out_operands[1] = GEN_INT (mode);
3182 noperands = 2;
3183 break;
3184 case FLOAT_TRUNCATE:
3185 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3186 out_operands[1] = GEN_INT (mode);
3187 noperands = 2;
3188 break;
3189 default:
3190 break;
3193 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3194 gen_rtx_fmt_e (orig_code,
3195 GET_MODE (operands[0]),
3196 operands[1]));
3199 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3200 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3201 guarantee that the sequence
3202 set (OP[0] OP[2])
3203 set (OP[1] OP[3])
3204 is valid. Naturally, output operand ordering is little-endian.
3205 This is used by *movtf_internal and *movti_internal. */
3207 void
3208 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3209 bool fixup_overlap)
3211 switch (GET_CODE (operands[1]))
3213 case REG:
3214 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3215 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3216 break;
3218 case MEM:
3219 operands[3] = adjust_address (operands[1], DImode, 8);
3220 operands[2] = adjust_address (operands[1], DImode, 0);
3221 break;
3223 case CONST_INT:
3224 case CONST_DOUBLE:
3225 gcc_assert (operands[1] == CONST0_RTX (mode));
3226 operands[2] = operands[3] = const0_rtx;
3227 break;
3229 default:
3230 gcc_unreachable ();
3233 switch (GET_CODE (operands[0]))
3235 case REG:
3236 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3237 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3238 break;
3240 case MEM:
3241 operands[1] = adjust_address (operands[0], DImode, 8);
3242 operands[0] = adjust_address (operands[0], DImode, 0);
3243 break;
3245 default:
3246 gcc_unreachable ();
3249 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3251 rtx tmp;
3252 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3253 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3257 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3258 op2 is a register containing the sign bit, operation is the
3259 logical operation to be performed. */
3261 void
3262 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3264 rtx high_bit = operands[2];
3265 rtx scratch;
3266 int move;
3268 alpha_split_tmode_pair (operands, TFmode, false);
3270 /* Detect three flavors of operand overlap. */
3271 move = 1;
3272 if (rtx_equal_p (operands[0], operands[2]))
3273 move = 0;
3274 else if (rtx_equal_p (operands[1], operands[2]))
3276 if (rtx_equal_p (operands[0], high_bit))
3277 move = 2;
3278 else
3279 move = -1;
3282 if (move < 0)
3283 emit_move_insn (operands[0], operands[2]);
3285 /* ??? If the destination overlaps both source tf and high_bit, then
3286 assume source tf is dead in its entirety and use the other half
3287 for a scratch register. Otherwise "scratch" is just the proper
3288 destination register. */
3289 scratch = operands[move < 2 ? 1 : 3];
3291 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3293 if (move > 0)
3295 emit_move_insn (operands[0], operands[2]);
3296 if (move > 1)
3297 emit_move_insn (operands[1], scratch);
3301 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3302 unaligned data:
3304 unsigned: signed:
3305 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3306 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3307 lda r3,X(r11) lda r3,X+2(r11)
3308 extwl r1,r3,r1 extql r1,r3,r1
3309 extwh r2,r3,r2 extqh r2,r3,r2
3310 or r1,r2,r1 or r1,r2,r1
3311 sra r1,48,r1
3313 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3314 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3315 lda r3,X(r11) lda r3,X(r11)
3316 extll r1,r3,r1 extll r1,r3,r1
3317 extlh r2,r3,r2 extlh r2,r3,r2
3318 or r1,r2,r1 addl r1,r2,r1
3320 quad: ldq_u r1,X(r11)
3321 ldq_u r2,X+7(r11)
3322 lda r3,X(r11)
3323 extql r1,r3,r1
3324 extqh r2,r3,r2
3325 or r1,r2,r1
3328 void
3329 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3330 HOST_WIDE_INT ofs, int sign)
3332 rtx meml, memh, addr, extl, exth, tmp, mema;
3333 enum machine_mode mode;
3335 if (TARGET_BWX && size == 2)
3337 meml = adjust_address (mem, QImode, ofs);
3338 memh = adjust_address (mem, QImode, ofs+1);
3339 if (BYTES_BIG_ENDIAN)
3340 tmp = meml, meml = memh, memh = tmp;
3341 extl = gen_reg_rtx (DImode);
3342 exth = gen_reg_rtx (DImode);
3343 emit_insn (gen_zero_extendqidi2 (extl, meml));
3344 emit_insn (gen_zero_extendqidi2 (exth, memh));
3345 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3346 NULL, 1, OPTAB_LIB_WIDEN);
3347 addr = expand_simple_binop (DImode, IOR, extl, exth,
3348 NULL, 1, OPTAB_LIB_WIDEN);
3350 if (sign && GET_MODE (tgt) != HImode)
3352 addr = gen_lowpart (HImode, addr);
3353 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3355 else
3357 if (GET_MODE (tgt) != DImode)
3358 addr = gen_lowpart (GET_MODE (tgt), addr);
3359 emit_move_insn (tgt, addr);
3361 return;
3364 meml = gen_reg_rtx (DImode);
3365 memh = gen_reg_rtx (DImode);
3366 addr = gen_reg_rtx (DImode);
3367 extl = gen_reg_rtx (DImode);
3368 exth = gen_reg_rtx (DImode);
3370 mema = XEXP (mem, 0);
3371 if (GET_CODE (mema) == LO_SUM)
3372 mema = force_reg (Pmode, mema);
3374 /* AND addresses cannot be in any alias set, since they may implicitly
3375 alias surrounding code. Ideally we'd have some alias set that
3376 covered all types except those with alignment 8 or higher. */
3378 tmp = change_address (mem, DImode,
3379 gen_rtx_AND (DImode,
3380 plus_constant (mema, ofs),
3381 GEN_INT (-8)));
3382 set_mem_alias_set (tmp, 0);
3383 emit_move_insn (meml, tmp);
3385 tmp = change_address (mem, DImode,
3386 gen_rtx_AND (DImode,
3387 plus_constant (mema, ofs + size - 1),
3388 GEN_INT (-8)));
3389 set_mem_alias_set (tmp, 0);
3390 emit_move_insn (memh, tmp);
3392 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3394 emit_move_insn (addr, plus_constant (mema, -1));
3396 emit_insn (gen_extqh_be (extl, meml, addr));
3397 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3399 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3400 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3401 addr, 1, OPTAB_WIDEN);
3403 else if (sign && size == 2)
3405 emit_move_insn (addr, plus_constant (mema, ofs+2));
3407 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3408 emit_insn (gen_extqh_le (exth, memh, addr));
3410 /* We must use tgt here for the target. Alpha-vms port fails if we use
3411 addr for the target, because addr is marked as a pointer and combine
3412 knows that pointers are always sign-extended 32-bit values. */
3413 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3414 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3415 addr, 1, OPTAB_WIDEN);
3417 else
3419 if (WORDS_BIG_ENDIAN)
3421 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3422 switch ((int) size)
3424 case 2:
3425 emit_insn (gen_extwh_be (extl, meml, addr));
3426 mode = HImode;
3427 break;
3429 case 4:
3430 emit_insn (gen_extlh_be (extl, meml, addr));
3431 mode = SImode;
3432 break;
3434 case 8:
3435 emit_insn (gen_extqh_be (extl, meml, addr));
3436 mode = DImode;
3437 break;
3439 default:
3440 gcc_unreachable ();
3442 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3444 else
3446 emit_move_insn (addr, plus_constant (mema, ofs));
3447 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3448 switch ((int) size)
3450 case 2:
3451 emit_insn (gen_extwh_le (exth, memh, addr));
3452 mode = HImode;
3453 break;
3455 case 4:
3456 emit_insn (gen_extlh_le (exth, memh, addr));
3457 mode = SImode;
3458 break;
3460 case 8:
3461 emit_insn (gen_extqh_le (exth, memh, addr));
3462 mode = DImode;
3463 break;
3465 default:
3466 gcc_unreachable ();
3470 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3471 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3472 sign, OPTAB_WIDEN);
3475 if (addr != tgt)
3476 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3479 /* Similarly, use ins and msk instructions to perform unaligned stores. */
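/* Roughly (illustrative), storing an unaligned longword becomes: load
   the two quadwords that contain it with ldq_u, shift the new bytes into
   position with insll/inslh, clear the corresponding bytes of the old
   data with mskll/msklh, OR the halves together, and write the high
   quadword back before the low one with stq_u.  */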
3481 void
3482 alpha_expand_unaligned_store (rtx dst, rtx src,
3483 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3485 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3487 if (TARGET_BWX && size == 2)
3489 if (src != const0_rtx)
3491 dstl = gen_lowpart (QImode, src);
3492 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3493 NULL, 1, OPTAB_LIB_WIDEN);
3494 dsth = gen_lowpart (QImode, dsth);
3496 else
3497 dstl = dsth = const0_rtx;
3499 meml = adjust_address (dst, QImode, ofs);
3500 memh = adjust_address (dst, QImode, ofs+1);
3501 if (BYTES_BIG_ENDIAN)
3502 addr = meml, meml = memh, memh = addr;
3504 emit_move_insn (meml, dstl);
3505 emit_move_insn (memh, dsth);
3506 return;
3509 dstl = gen_reg_rtx (DImode);
3510 dsth = gen_reg_rtx (DImode);
3511 insl = gen_reg_rtx (DImode);
3512 insh = gen_reg_rtx (DImode);
3514 dsta = XEXP (dst, 0);
3515 if (GET_CODE (dsta) == LO_SUM)
3516 dsta = force_reg (Pmode, dsta);
3518 /* AND addresses cannot be in any alias set, since they may implicitly
3519 alias surrounding code. Ideally we'd have some alias set that
3520 covered all types except those with alignment 8 or higher. */
3522 meml = change_address (dst, DImode,
3523 gen_rtx_AND (DImode,
3524 plus_constant (dsta, ofs),
3525 GEN_INT (-8)));
3526 set_mem_alias_set (meml, 0);
3528 memh = change_address (dst, DImode,
3529 gen_rtx_AND (DImode,
3530 plus_constant (dsta, ofs + size - 1),
3531 GEN_INT (-8)));
3532 set_mem_alias_set (memh, 0);
3534 emit_move_insn (dsth, memh);
3535 emit_move_insn (dstl, meml);
3536 if (WORDS_BIG_ENDIAN)
3538 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3540 if (src != const0_rtx)
3542 switch ((int) size)
3544 case 2:
3545 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3546 break;
3547 case 4:
3548 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3549 break;
3550 case 8:
3551 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3552 break;
3554 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3555 GEN_INT (size*8), addr));
3558 switch ((int) size)
3560 case 2:
3561 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3562 break;
3563 case 4:
3565 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3566 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3567 break;
3569 case 8:
3570 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3571 break;
3574 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3576 else
3578 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3580 if (src != CONST0_RTX (GET_MODE (src)))
3582 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3583 GEN_INT (size*8), addr));
3585 switch ((int) size)
3587 case 2:
3588 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3589 break;
3590 case 4:
3591 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3592 break;
3593 case 8:
3594 emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
3595 break;
3599 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3601 switch ((int) size)
3603 case 2:
3604 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3605 break;
3606 case 4:
3608 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3609 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3610 break;
3612 case 8:
3613 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3614 break;
3618 if (src != CONST0_RTX (GET_MODE (src)))
3620 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3621 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3624 if (WORDS_BIG_ENDIAN)
3626 emit_move_insn (meml, dstl);
3627 emit_move_insn (memh, dsth);
3629 else
3631 /* Must store high before low for degenerate case of aligned. */
3632 emit_move_insn (memh, dsth);
3633 emit_move_insn (meml, dstl);
3637 /* The block move code tries to maximize speed by separating loads and
3638 stores at the expense of register pressure: we load all of the data
3639 before we store it back out. There are two secondary effects worth
3640 mentioning, that this speeds copying to/from aligned and unaligned
3641 buffers, and that it makes the code significantly easier to write. */
3643 #define MAX_MOVE_WORDS 8
3645 /* Load an integral number of consecutive unaligned quadwords. */
3647 static void
3648 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3649 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3651 rtx const im8 = GEN_INT (-8);
3652 rtx const i64 = GEN_INT (64);
3653 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3654 rtx sreg, areg, tmp, smema;
3655 HOST_WIDE_INT i;
3657 smema = XEXP (smem, 0);
3658 if (GET_CODE (smema) == LO_SUM)
3659 smema = force_reg (Pmode, smema);
3661 /* Generate all the tmp registers we need. */
3662 for (i = 0; i < words; ++i)
3664 data_regs[i] = out_regs[i];
3665 ext_tmps[i] = gen_reg_rtx (DImode);
3667 data_regs[words] = gen_reg_rtx (DImode);
3669 if (ofs != 0)
3670 smem = adjust_address (smem, GET_MODE (smem), ofs);
3672 /* Load up all of the source data. */
3673 for (i = 0; i < words; ++i)
3675 tmp = change_address (smem, DImode,
3676 gen_rtx_AND (DImode,
3677 plus_constant (smema, 8*i),
3678 im8));
3679 set_mem_alias_set (tmp, 0);
3680 emit_move_insn (data_regs[i], tmp);
3683 tmp = change_address (smem, DImode,
3684 gen_rtx_AND (DImode,
3685 plus_constant (smema, 8*words - 1),
3686 im8));
3687 set_mem_alias_set (tmp, 0);
3688 emit_move_insn (data_regs[words], tmp);
3690 /* Extract the half-word fragments. Unfortunately DEC decided to make
3691 extxh with offset zero a noop instead of zeroing the register, so
3692 we must take care of that edge condition ourselves with cmov. */
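/* That is, when the source is already 8-byte aligned (addr & 7 == 0) the
   high-half extract below is a no-op and would leave the next source
   quadword in the register instead of zero; AREG holds addr & 7 and the
   cmov forces each high fragment to zero in that case.  */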
3694 sreg = copy_addr_to_reg (smema);
3695 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3696 1, OPTAB_WIDEN);
3697 if (WORDS_BIG_ENDIAN)
3698 emit_move_insn (sreg, plus_constant (sreg, 7));
3699 for (i = 0; i < words; ++i)
3701 if (WORDS_BIG_ENDIAN)
3703 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3704 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3706 else
3708 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3709 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3711 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3712 gen_rtx_IF_THEN_ELSE (DImode,
3713 gen_rtx_EQ (DImode, areg,
3714 const0_rtx),
3715 const0_rtx, ext_tmps[i])));
3718 /* Merge the half-words into whole words. */
3719 for (i = 0; i < words; ++i)
3721 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3722 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3726 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3727 may be NULL to store zeros. */
3729 static void
3730 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3731 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3733 rtx const im8 = GEN_INT (-8);
3734 rtx const i64 = GEN_INT (64);
3735 rtx ins_tmps[MAX_MOVE_WORDS];
3736 rtx st_tmp_1, st_tmp_2, dreg;
3737 rtx st_addr_1, st_addr_2, dmema;
3738 HOST_WIDE_INT i;
3740 dmema = XEXP (dmem, 0);
3741 if (GET_CODE (dmema) == LO_SUM)
3742 dmema = force_reg (Pmode, dmema);
3744 /* Generate all the tmp registers we need. */
3745 if (data_regs != NULL)
3746 for (i = 0; i < words; ++i)
3747 ins_tmps[i] = gen_reg_rtx(DImode);
3748 st_tmp_1 = gen_reg_rtx(DImode);
3749 st_tmp_2 = gen_reg_rtx(DImode);
3751 if (ofs != 0)
3752 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3754 st_addr_2 = change_address (dmem, DImode,
3755 gen_rtx_AND (DImode,
3756 plus_constant (dmema, words*8 - 1),
3757 im8));
3758 set_mem_alias_set (st_addr_2, 0);
3760 st_addr_1 = change_address (dmem, DImode,
3761 gen_rtx_AND (DImode, dmema, im8));
3762 set_mem_alias_set (st_addr_1, 0);
3764 /* Load up the destination end bits. */
3765 emit_move_insn (st_tmp_2, st_addr_2);
3766 emit_move_insn (st_tmp_1, st_addr_1);
3768 /* Shift the input data into place. */
3769 dreg = copy_addr_to_reg (dmema);
3770 if (WORDS_BIG_ENDIAN)
3771 emit_move_insn (dreg, plus_constant (dreg, 7));
3772 if (data_regs != NULL)
3774 for (i = words-1; i >= 0; --i)
3776 if (WORDS_BIG_ENDIAN)
3778 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3779 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3781 else
3783 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3784 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3787 for (i = words-1; i > 0; --i)
3789 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3790 ins_tmps[i-1], ins_tmps[i-1], 1,
3791 OPTAB_WIDEN);
3795 /* Split and merge the ends with the destination data. */
3796 if (WORDS_BIG_ENDIAN)
3798 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3799 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3801 else
3803 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3804 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3807 if (data_regs != NULL)
3809 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3810 st_tmp_2, 1, OPTAB_WIDEN);
3811 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3812 st_tmp_1, 1, OPTAB_WIDEN);
3815 /* Store it all. */
3816 if (WORDS_BIG_ENDIAN)
3817 emit_move_insn (st_addr_1, st_tmp_1);
3818 else
3819 emit_move_insn (st_addr_2, st_tmp_2);
3820 for (i = words-1; i > 0; --i)
3822 rtx tmp = change_address (dmem, DImode,
3823 gen_rtx_AND (DImode,
3824 plus_constant(dmema,
3825 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3826 im8));
3827 set_mem_alias_set (tmp, 0);
3828 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3830 if (WORDS_BIG_ENDIAN)
3831 emit_move_insn (st_addr_2, st_tmp_2);
3832 else
3833 emit_move_insn (st_addr_1, st_tmp_1);
3837 /* Expand string/block move operations.
3839 operands[0] is the pointer to the destination.
3840 operands[1] is the pointer to the source.
3841 operands[2] is the number of bytes to move.
3842 operands[3] is the alignment. */
3845 alpha_expand_block_move (rtx operands[])
3847 rtx bytes_rtx = operands[2];
3848 rtx align_rtx = operands[3];
3849 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3850 HOST_WIDE_INT bytes = orig_bytes;
3851 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3852 HOST_WIDE_INT dst_align = src_align;
3853 rtx orig_src = operands[1];
3854 rtx orig_dst = operands[0];
3855 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3856 rtx tmp;
3857 unsigned int i, words, ofs, nregs = 0;
3859 if (orig_bytes <= 0)
3860 return 1;
3861 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3862 return 0;
3864 /* Look for additional alignment information from recorded register info. */
3866 tmp = XEXP (orig_src, 0);
3867 if (REG_P (tmp))
3868 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3869 else if (GET_CODE (tmp) == PLUS
3870 && REG_P (XEXP (tmp, 0))
3871 && CONST_INT_P (XEXP (tmp, 1)))
3873 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3874 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3876 if (a > src_align)
3878 if (a >= 64 && c % 8 == 0)
3879 src_align = 64;
3880 else if (a >= 32 && c % 4 == 0)
3881 src_align = 32;
3882 else if (a >= 16 && c % 2 == 0)
3883 src_align = 16;
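/* Example (illustrative): a source address of the form (plus reg 4) where
   REG is known to be 64-bit aligned yields src_align = 32, since the
   offset is a multiple of 4 but not of 8.  The same refinement is applied
   to the destination just below.  */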
3887 tmp = XEXP (orig_dst, 0);
3888 if (REG_P (tmp))
3889 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3890 else if (GET_CODE (tmp) == PLUS
3891 && REG_P (XEXP (tmp, 0))
3892 && CONST_INT_P (XEXP (tmp, 1)))
3894 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3895 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3897 if (a > dst_align)
3899 if (a >= 64 && c % 8 == 0)
3900 dst_align = 64;
3901 else if (a >= 32 && c % 4 == 0)
3902 dst_align = 32;
3903 else if (a >= 16 && c % 2 == 0)
3904 dst_align = 16;
3908 ofs = 0;
3909 if (src_align >= 64 && bytes >= 8)
3911 words = bytes / 8;
3913 for (i = 0; i < words; ++i)
3914 data_regs[nregs + i] = gen_reg_rtx (DImode);
3916 for (i = 0; i < words; ++i)
3917 emit_move_insn (data_regs[nregs + i],
3918 adjust_address (orig_src, DImode, ofs + i * 8));
3920 nregs += words;
3921 bytes -= words * 8;
3922 ofs += words * 8;
3925 if (src_align >= 32 && bytes >= 4)
3927 words = bytes / 4;
3929 for (i = 0; i < words; ++i)
3930 data_regs[nregs + i] = gen_reg_rtx (SImode);
3932 for (i = 0; i < words; ++i)
3933 emit_move_insn (data_regs[nregs + i],
3934 adjust_address (orig_src, SImode, ofs + i * 4));
3936 nregs += words;
3937 bytes -= words * 4;
3938 ofs += words * 4;
3941 if (bytes >= 8)
3943 words = bytes / 8;
3945 for (i = 0; i < words+1; ++i)
3946 data_regs[nregs + i] = gen_reg_rtx (DImode);
3948 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3949 words, ofs);
3951 nregs += words;
3952 bytes -= words * 8;
3953 ofs += words * 8;
3956 if (! TARGET_BWX && bytes >= 4)
3958 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3959 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3960 bytes -= 4;
3961 ofs += 4;
3964 if (bytes >= 2)
3966 if (src_align >= 16)
3968 do {
3969 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3970 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3971 bytes -= 2;
3972 ofs += 2;
3973 } while (bytes >= 2);
3975 else if (! TARGET_BWX)
3977 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3978 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3979 bytes -= 2;
3980 ofs += 2;
3984 while (bytes > 0)
3986 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3987 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3988 bytes -= 1;
3989 ofs += 1;
3992 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3994 /* Now save it back out again. */
3996 i = 0, ofs = 0;
3998 /* Write out the data in whatever chunks reading the source allowed. */
3999 if (dst_align >= 64)
4001 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4003 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4004 data_regs[i]);
4005 ofs += 8;
4006 i++;
4010 if (dst_align >= 32)
4012 /* If the source has remaining DImode regs, write them out in
4013 two pieces. */
4014 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4016 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4017 NULL_RTX, 1, OPTAB_WIDEN);
4019 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4020 gen_lowpart (SImode, data_regs[i]));
4021 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4022 gen_lowpart (SImode, tmp));
4023 ofs += 8;
4024 i++;
4027 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4029 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4030 data_regs[i]);
4031 ofs += 4;
4032 i++;
4036 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4038 /* Write out a remaining block of words using unaligned methods. */
4040 for (words = 1; i + words < nregs; words++)
4041 if (GET_MODE (data_regs[i + words]) != DImode)
4042 break;
4044 if (words == 1)
4045 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4046 else
4047 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4048 words, ofs);
4050 i += words;
4051 ofs += words * 8;
4054 /* Due to the above, this won't be aligned. */
4055 /* ??? If we have more than one of these, consider constructing full
4056 words in registers and using alpha_expand_unaligned_store_words. */
4057 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4059 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4060 ofs += 4;
4061 i++;
4064 if (dst_align >= 16)
4065 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4067 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4068 i++;
4069 ofs += 2;
4071 else
4072 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4074 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4075 i++;
4076 ofs += 2;
4079 /* The remainder must be byte copies. */
4080 while (i < nregs)
4082 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4083 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4084 i++;
4085 ofs += 1;
4088 return 1;
4092 alpha_expand_block_clear (rtx operands[])
4094 rtx bytes_rtx = operands[1];
4095 rtx align_rtx = operands[3];
4096 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4097 HOST_WIDE_INT bytes = orig_bytes;
4098 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4099 HOST_WIDE_INT alignofs = 0;
4100 rtx orig_dst = operands[0];
4101 rtx tmp;
4102 int i, words, ofs = 0;
4104 if (orig_bytes <= 0)
4105 return 1;
4106 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4107 return 0;
4109 /* Look for stricter alignment. */
4110 tmp = XEXP (orig_dst, 0);
4111 if (REG_P (tmp))
4112 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4113 else if (GET_CODE (tmp) == PLUS
4114 && REG_P (XEXP (tmp, 0))
4115 && CONST_INT_P (XEXP (tmp, 1)))
4117 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4118 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4120 if (a > align)
4122 if (a >= 64)
4123 align = a, alignofs = 8 - c % 8;
4124 else if (a >= 32)
4125 align = a, alignofs = 4 - c % 4;
4126 else if (a >= 16)
4127 align = a, alignofs = 2 - c % 2;
4131 /* Handle an unaligned prefix first. */
4133 if (alignofs > 0)
4135 #if HOST_BITS_PER_WIDE_INT >= 64
4136 /* Given that alignofs is bounded by align, the only time BWX could
4137 generate three stores is for a 7 byte fill. Prefer two individual
4138 stores over a load/mask/store sequence. */
4139 if ((!TARGET_BWX || alignofs == 7)
4140 && align >= 32
4141 && !(alignofs == 4 && bytes >= 4))
4143 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4144 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4145 rtx mem, tmp;
4146 HOST_WIDE_INT mask;
4148 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4149 set_mem_alias_set (mem, 0);
4151 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4152 if (bytes < alignofs)
4154 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4155 ofs += bytes;
4156 bytes = 0;
4158 else
4160 bytes -= alignofs;
4161 ofs += alignofs;
4163 alignofs = 0;
4165 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4166 NULL_RTX, 1, OPTAB_WIDEN);
4168 emit_move_insn (mem, tmp);
4170 #endif
4172 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4174 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4175 bytes -= 1;
4176 ofs += 1;
4177 alignofs -= 1;
4179 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4181 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4182 bytes -= 2;
4183 ofs += 2;
4184 alignofs -= 2;
4186 if (alignofs == 4 && bytes >= 4)
4188 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4189 bytes -= 4;
4190 ofs += 4;
4191 alignofs = 0;
4194 /* If we've not used the extra lead alignment information by now,
4195 we won't be able to. Downgrade align to match what's left over. */
4196 if (alignofs > 0)
4198 alignofs = alignofs & -alignofs;
4199 align = MIN (align, alignofs * BITS_PER_UNIT);
4203 /* Handle a block of contiguous long-words. */
4205 if (align >= 64 && bytes >= 8)
4207 words = bytes / 8;
4209 for (i = 0; i < words; ++i)
4210 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4211 const0_rtx);
4213 bytes -= words * 8;
4214 ofs += words * 8;
4217 /* If the block is large and appropriately aligned, emit a single
4218 store followed by a sequence of stq_u insns. */
4220 if (align >= 32 && bytes > 16)
4222 rtx orig_dsta;
4224 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4225 bytes -= 4;
4226 ofs += 4;
4228 orig_dsta = XEXP (orig_dst, 0);
4229 if (GET_CODE (orig_dsta) == LO_SUM)
4230 orig_dsta = force_reg (Pmode, orig_dsta);
4232 words = bytes / 8;
4233 for (i = 0; i < words; ++i)
4235 rtx mem
4236 = change_address (orig_dst, DImode,
4237 gen_rtx_AND (DImode,
4238 plus_constant (orig_dsta, ofs + i*8),
4239 GEN_INT (-8)));
4240 set_mem_alias_set (mem, 0);
4241 emit_move_insn (mem, const0_rtx);
4244 /* Depending on the alignment, the first stq_u may have overlapped
4245 with the initial stl, which means that the last stq_u didn't
4246 write as much as it would appear. Leave those questionable bytes
4247 unaccounted for. */
4248 bytes -= words * 8 - 4;
4249 ofs += words * 8 - 4;
4252 /* Handle a smaller block of aligned words. */
4254 if ((align >= 64 && bytes == 4)
4255 || (align == 32 && bytes >= 4))
4257 words = bytes / 4;
4259 for (i = 0; i < words; ++i)
4260 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4261 const0_rtx);
4263 bytes -= words * 4;
4264 ofs += words * 4;
4267 /* An unaligned block uses stq_u stores for as many as possible. */
4269 if (bytes >= 8)
4271 words = bytes / 8;
4273 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4275 bytes -= words * 8;
4276 ofs += words * 8;
4279 /* Next clean up any trailing pieces. */
4281 #if HOST_BITS_PER_WIDE_INT >= 64
4282 /* Count the number of bits in BYTES for which aligned stores could
4283 be emitted. */
4284 words = 0;
4285 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4286 if (bytes & i)
4287 words += 1;
4289 /* If we have appropriate alignment (and it wouldn't take too many
4290 instructions otherwise), mask out the bytes we need. */
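/* Editorial worked example (not in the original source): with TARGET_BWX,
   align >= 64 and bytes == 7, bits 1, 2 and 4 of BYTES are set, so WORDS
   becomes 3; since 3 > 2, the single DImode load/mask/store sequence below
   is preferred over three separate byte, word and longword stores.  */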
4291 if (TARGET_BWX ? words > 2 : bytes > 0)
4293 if (align >= 64)
4295 rtx mem, tmp;
4296 HOST_WIDE_INT mask;
4298 mem = adjust_address (orig_dst, DImode, ofs);
4299 set_mem_alias_set (mem, 0);
4301 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4303 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4304 NULL_RTX, 1, OPTAB_WIDEN);
4306 emit_move_insn (mem, tmp);
4307 return 1;
4309 else if (align >= 32 && bytes < 4)
4311 rtx mem, tmp;
4312 HOST_WIDE_INT mask;
4314 mem = adjust_address (orig_dst, SImode, ofs);
4315 set_mem_alias_set (mem, 0);
4317 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4319 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4320 NULL_RTX, 1, OPTAB_WIDEN);
4322 emit_move_insn (mem, tmp);
4323 return 1;
4326 #endif
4328 if (!TARGET_BWX && bytes >= 4)
4330 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4331 bytes -= 4;
4332 ofs += 4;
4335 if (bytes >= 2)
4337 if (align >= 16)
4339 do {
4340 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4341 const0_rtx);
4342 bytes -= 2;
4343 ofs += 2;
4344 } while (bytes >= 2);
4346 else if (! TARGET_BWX)
4348 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4349 bytes -= 2;
4350 ofs += 2;
4354 while (bytes > 0)
4356 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4357 bytes -= 1;
4358 ofs += 1;
4361 return 1;
4364 /* Returns a mask so that zap(x, value) == x & mask. */
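/* Editorial worked example: bit I of VALUE selects byte I for zapping, so
   for VALUE == 0x0f the low four bytes are cleared and the mask computed
   below is 0xffffffff00000000, i.e. zap (x, 0x0f) == x & 0xffffffff00000000.  */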
4366 rtx
4367 alpha_expand_zap_mask (HOST_WIDE_INT value)
4369 rtx result;
4370 int i;
4372 if (HOST_BITS_PER_WIDE_INT >= 64)
4374 HOST_WIDE_INT mask = 0;
4376 for (i = 7; i >= 0; --i)
4378 mask <<= 8;
4379 if (!((value >> i) & 1))
4380 mask |= 0xff;
4383 result = gen_int_mode (mask, DImode);
4385 else
4387 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4389 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4391 for (i = 7; i >= 4; --i)
4393 mask_hi <<= 8;
4394 if (!((value >> i) & 1))
4395 mask_hi |= 0xff;
4398 for (i = 3; i >= 0; --i)
4400 mask_lo <<= 8;
4401 if (!((value >> i) & 1))
4402 mask_lo |= 0xff;
4405 result = immed_double_const (mask_lo, mask_hi, DImode);
4408 return result;
4411 void
4412 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4413 enum machine_mode mode,
4414 rtx op0, rtx op1, rtx op2)
4416 op0 = gen_lowpart (mode, op0);
4418 if (op1 == const0_rtx)
4419 op1 = CONST0_RTX (mode);
4420 else
4421 op1 = gen_lowpart (mode, op1);
4423 if (op2 == const0_rtx)
4424 op2 = CONST0_RTX (mode);
4425 else
4426 op2 = gen_lowpart (mode, op2);
4428 emit_insn ((*gen) (op0, op1, op2));
4431 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4432 COND is true. Mark the jump as unlikely to be taken. */
4434 static void
4435 emit_unlikely_jump (rtx cond, rtx label)
4437 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4438 rtx x;
4440 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4441 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4442 add_reg_note (x, REG_BR_PROB, very_unlikely);
4445 /* A subroutine of the atomic operation splitters. Emit a load-locked
4446 instruction in MODE. */
4448 static void
4449 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4451 rtx (*fn) (rtx, rtx) = NULL;
4452 if (mode == SImode)
4453 fn = gen_load_locked_si;
4454 else if (mode == DImode)
4455 fn = gen_load_locked_di;
4456 emit_insn (fn (reg, mem));
4459 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4460 instruction in MODE. */
4462 static void
4463 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4465 rtx (*fn) (rtx, rtx, rtx) = NULL;
4466 if (mode == SImode)
4467 fn = gen_store_conditional_si;
4468 else if (mode == DImode)
4469 fn = gen_store_conditional_di;
4470 emit_insn (fn (res, mem, val));
4473 /* A subroutine of the atomic operation splitters. Emit an insxl
4474 instruction in MODE. */
4476 static rtx
4477 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4479 rtx ret = gen_reg_rtx (DImode);
4480 rtx (*fn) (rtx, rtx, rtx);
4482 if (WORDS_BIG_ENDIAN)
4484 if (mode == QImode)
4485 fn = gen_insbl_be;
4486 else
4487 fn = gen_inswl_be;
4489 else
4491 if (mode == QImode)
4492 fn = gen_insbl_le;
4493 else
4494 fn = gen_inswl_le;
4496 /* The insbl and inswl patterns require a register operand. */
4497 op1 = force_reg (mode, op1);
4498 emit_insn (fn (ret, op1, op2));
4500 return ret;
4503 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4504 to perform. MEM is the memory on which to operate. VAL is the second
4505 operand of the binary operator. BEFORE and AFTER are optional locations to
4506 return the value of MEM either before or after the operation. SCRATCH is
4507 a scratch register. */
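/* Editorial sketch of the emitted sequence, assuming SImode and CODE == PLUS
   (illustrative only; the real insns come from the load-locked,
   store-conditional and memory-barrier patterns used below):

	mb
   1:	ldl_l	before,(mem)
	addl	before,val,scratch
	stl_c	scratch,(mem)
	beq	scratch,1b		# branch marked very unlikely
	mb
*/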
4509 void
4510 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4511 rtx before, rtx after, rtx scratch)
4513 enum machine_mode mode = GET_MODE (mem);
4514 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4516 emit_insn (gen_memory_barrier ());
4518 label = gen_label_rtx ();
4519 emit_label (label);
4520 label = gen_rtx_LABEL_REF (DImode, label);
4522 if (before == NULL)
4523 before = scratch;
4524 emit_load_locked (mode, before, mem);
4526 if (code == NOT)
4528 x = gen_rtx_AND (mode, before, val);
4529 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4531 x = gen_rtx_NOT (mode, val);
4533 else
4534 x = gen_rtx_fmt_ee (code, mode, before, val);
4535 if (after)
4536 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4537 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4539 emit_store_conditional (mode, cond, mem, scratch);
4541 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4542 emit_unlikely_jump (x, label);
4544 emit_insn (gen_memory_barrier ());
4547 /* Expand a compare and swap operation. */
4549 void
4550 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4551 rtx scratch)
4553 enum machine_mode mode = GET_MODE (mem);
4554 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4556 emit_insn (gen_memory_barrier ());
4558 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4559 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4560 emit_label (XEXP (label1, 0));
4562 emit_load_locked (mode, retval, mem);
4564 x = gen_lowpart (DImode, retval);
4565 if (oldval == const0_rtx)
4566 x = gen_rtx_NE (DImode, x, const0_rtx);
4567 else
4569 x = gen_rtx_EQ (DImode, x, oldval);
4570 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4571 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4573 emit_unlikely_jump (x, label2);
4575 emit_move_insn (scratch, newval);
4576 emit_store_conditional (mode, cond, mem, scratch);
4578 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4579 emit_unlikely_jump (x, label1);
4581 emit_insn (gen_memory_barrier ());
4582 emit_label (XEXP (label2, 0));
4585 void
4586 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4588 enum machine_mode mode = GET_MODE (mem);
4589 rtx addr, align, wdst;
4590 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4592 addr = force_reg (DImode, XEXP (mem, 0));
4593 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4594 NULL_RTX, 1, OPTAB_DIRECT);
4596 oldval = convert_modes (DImode, mode, oldval, 1);
4597 newval = emit_insxl (mode, newval, addr);
4599 wdst = gen_reg_rtx (DImode);
4600 if (mode == QImode)
4601 fn5 = gen_sync_compare_and_swapqi_1;
4602 else
4603 fn5 = gen_sync_compare_and_swaphi_1;
4604 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4606 emit_move_insn (dst, gen_lowpart (mode, wdst));
4609 void
4610 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4611 rtx oldval, rtx newval, rtx align,
4612 rtx scratch, rtx cond)
4614 rtx label1, label2, mem, width, mask, x;
4616 mem = gen_rtx_MEM (DImode, align);
4617 MEM_VOLATILE_P (mem) = 1;
4619 emit_insn (gen_memory_barrier ());
4620 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4621 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4622 emit_label (XEXP (label1, 0));
4624 emit_load_locked (DImode, scratch, mem);
4626 width = GEN_INT (GET_MODE_BITSIZE (mode));
4627 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4628 if (WORDS_BIG_ENDIAN)
4629 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4630 else
4631 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4633 if (oldval == const0_rtx)
4634 x = gen_rtx_NE (DImode, dest, const0_rtx);
4635 else
4637 x = gen_rtx_EQ (DImode, dest, oldval);
4638 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4639 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4641 emit_unlikely_jump (x, label2);
4643 if (WORDS_BIG_ENDIAN)
4644 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4645 else
4646 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4647 emit_insn (gen_iordi3 (scratch, scratch, newval));
4649 emit_store_conditional (DImode, scratch, mem, scratch);
4651 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4652 emit_unlikely_jump (x, label1);
4654 emit_insn (gen_memory_barrier ());
4655 emit_label (XEXP (label2, 0));
4658 /* Expand an atomic exchange operation. */
4660 void
4661 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4663 enum machine_mode mode = GET_MODE (mem);
4664 rtx label, x, cond = gen_lowpart (DImode, scratch);
4666 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4667 emit_label (XEXP (label, 0));
4669 emit_load_locked (mode, retval, mem);
4670 emit_move_insn (scratch, val);
4671 emit_store_conditional (mode, cond, mem, scratch);
4673 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4674 emit_unlikely_jump (x, label);
4676 emit_insn (gen_memory_barrier ());
4679 void
4680 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4682 enum machine_mode mode = GET_MODE (mem);
4683 rtx addr, align, wdst;
4684 rtx (*fn4) (rtx, rtx, rtx, rtx);
4686 /* Force the address into a register. */
4687 addr = force_reg (DImode, XEXP (mem, 0));
4689 /* Align it to a multiple of 8. */
4690 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4691 NULL_RTX, 1, OPTAB_DIRECT);
4693 /* Insert val into the correct byte location within the word. */
4694 val = emit_insxl (mode, val, addr);
4696 wdst = gen_reg_rtx (DImode);
4697 if (mode == QImode)
4698 fn4 = gen_sync_lock_test_and_setqi_1;
4699 else
4700 fn4 = gen_sync_lock_test_and_sethi_1;
4701 emit_insn (fn4 (wdst, addr, val, align));
4703 emit_move_insn (dst, gen_lowpart (mode, wdst));
4706 void
4707 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4708 rtx val, rtx align, rtx scratch)
4710 rtx label, mem, width, mask, x;
4712 mem = gen_rtx_MEM (DImode, align);
4713 MEM_VOLATILE_P (mem) = 1;
4715 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4716 emit_label (XEXP (label, 0));
4718 emit_load_locked (DImode, scratch, mem);
4720 width = GEN_INT (GET_MODE_BITSIZE (mode));
4721 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4722 if (WORDS_BIG_ENDIAN)
4724 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4725 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4727 else
4729 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4730 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4732 emit_insn (gen_iordi3 (scratch, scratch, val));
4734 emit_store_conditional (DImode, scratch, mem, scratch);
4736 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4737 emit_unlikely_jump (x, label);
4739 emit_insn (gen_memory_barrier ());
4742 /* Adjust the cost of a scheduling dependency. Return the new cost of
4743 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4745 static int
4746 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4748 enum attr_type dep_insn_type;
4750 /* If the dependence is an anti-dependence, there is no cost. For an
4751 output dependence, there is sometimes a cost, but it doesn't seem
4752 worth handling those few cases. */
4753 if (REG_NOTE_KIND (link) != 0)
4754 return cost;
4756 /* If we can't recognize the insns, we can't really do anything. */
4757 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4758 return cost;
4760 dep_insn_type = get_attr_type (dep_insn);
4762 /* Bring in the user-defined memory latency. */
4763 if (dep_insn_type == TYPE_ILD
4764 || dep_insn_type == TYPE_FLD
4765 || dep_insn_type == TYPE_LDSYM)
4766 cost += alpha_memory_latency-1;
4768 /* Everything else handled in DFA bypasses now. */
4770 return cost;
4773 /* The number of instructions that can be issued per cycle. */
4775 static int
4776 alpha_issue_rate (void)
4778 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4781 /* How many alternative schedules to try. This should be as wide as the
4782 scheduling freedom in the DFA, but no wider. Making this value too
4783 large results in extra work for the scheduler.
4785 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4786 alternative schedules. For EV5, we can choose between E0/E1 and
4787 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4789 static int
4790 alpha_multipass_dfa_lookahead (void)
4792 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4795 /* Machine-specific function data. */
4797 struct GTY(()) machine_function
4799 /* For unicosmk. */
4800 /* List of call information words for calls from this function. */
4801 struct rtx_def *first_ciw;
4802 struct rtx_def *last_ciw;
4803 int ciw_count;
4805 /* List of deferred case vectors. */
4806 struct rtx_def *addr_list;
4808 /* For OSF. */
4809 const char *some_ld_name;
4811 /* For TARGET_LD_BUGGY_LDGP. */
4812 struct rtx_def *gp_save_rtx;
4814 /* For VMS condition handlers. */
4815 bool uses_condition_handler;
4818 /* How to allocate a 'struct machine_function'. */
4820 static struct machine_function *
4821 alpha_init_machine_status (void)
4823 return ggc_alloc_cleared_machine_function ();
4826 /* Support for frame based VMS condition handlers. */
4828 /* A VMS condition handler may be established for a function with a call to
4829 __builtin_establish_vms_condition_handler, and cancelled with a call to
4830 __builtin_revert_vms_condition_handler.
4832 The VMS Condition Handling Facility knows about the existence of a handler
4833 from the procedure descriptor .handler field. Like the VMS native compilers,
4834 we store the user specified handler's address at a fixed location in the
4835 stack frame and point the procedure descriptor at a common wrapper which
4836 fetches the real handler's address and issues an indirect call.
4838 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4840 We force the procedure kind to PT_STACK, and the fixed frame location is
4841 fp+8, just before the register save area. We use the handler_data field in
4842 the procedure descriptor to state the fp offset at which the installed
4843 handler address can be found. */
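/* Editorial usage sketch (hypothetical user code; the exact prototypes are
   assumptions inferred from the expanders below, which return the handler
   previously in effect):

     old = __builtin_establish_vms_condition_handler (my_handler);
     ...
     __builtin_revert_vms_condition_handler ();
*/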
4845 #define VMS_COND_HANDLER_FP_OFFSET 8
4847 /* Expand code to store the currently installed user VMS condition handler
4848 into TARGET and install HANDLER as the new condition handler. */
4850 void
4851 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4853 rtx handler_slot_address
4854 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4856 rtx handler_slot
4857 = gen_rtx_MEM (DImode, handler_slot_address);
4859 emit_move_insn (target, handler_slot);
4860 emit_move_insn (handler_slot, handler);
4862 /* Notify the start/prologue/epilogue emitters that the condition handler
4863 slot is needed. In addition to reserving the slot space, this will force
4864 the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4865 use above is correct. */
4866 cfun->machine->uses_condition_handler = true;
4869 /* Expand code to store the current VMS condition handler into TARGET and
4870 nullify it. */
4872 void
4873 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4875 /* We implement this by establishing a null condition handler, with the tiny
4876 side effect of setting uses_condition_handler. This is a little bit
4877 pessimistic if no actual builtin_establish call is ever issued, which is
4878 not a real problem and is expected never to happen anyway. */
4880 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4883 /* Functions to save and restore alpha_return_addr_rtx. */
4885 /* Start the ball rolling with RETURN_ADDR_RTX. */
4887 rtx
4888 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4890 if (count != 0)
4891 return const0_rtx;
4893 return get_hard_reg_initial_val (Pmode, REG_RA);
4896 /* Return or create a memory slot containing the gp value for the current
4897 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4899 rtx
4900 alpha_gp_save_rtx (void)
4902 rtx seq, m = cfun->machine->gp_save_rtx;
4904 if (m == NULL)
4906 start_sequence ();
4908 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4909 m = validize_mem (m);
4910 emit_move_insn (m, pic_offset_table_rtx);
4912 seq = get_insns ();
4913 end_sequence ();
4915 /* We used to simply emit the sequence after entry_of_function.
4916 However, this breaks the CFG if the first instruction in the
4917 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4918 label. Emit the sequence properly on the edge. We are only
4919 invoked from dw2_build_landing_pads and finish_eh_generation
4920 will call commit_edge_insertions thanks to a kludge. */
4921 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4923 cfun->machine->gp_save_rtx = m;
4926 return m;
4929 static int
4930 alpha_ra_ever_killed (void)
4932 rtx top;
4934 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4935 return (int)df_regs_ever_live_p (REG_RA);
4937 push_topmost_sequence ();
4938 top = get_insns ();
4939 pop_topmost_sequence ();
4941 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4945 /* Return the trap mode suffix applicable to the current
4946 instruction, or NULL. */
4948 static const char *
4949 get_trap_mode_suffix (void)
4951 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4953 switch (s)
4955 case TRAP_SUFFIX_NONE:
4956 return NULL;
4958 case TRAP_SUFFIX_SU:
4959 if (alpha_fptm >= ALPHA_FPTM_SU)
4960 return "su";
4961 return NULL;
4963 case TRAP_SUFFIX_SUI:
4964 if (alpha_fptm >= ALPHA_FPTM_SUI)
4965 return "sui";
4966 return NULL;
4968 case TRAP_SUFFIX_V_SV:
4969 switch (alpha_fptm)
4971 case ALPHA_FPTM_N:
4972 return NULL;
4973 case ALPHA_FPTM_U:
4974 return "v";
4975 case ALPHA_FPTM_SU:
4976 case ALPHA_FPTM_SUI:
4977 return "sv";
4978 default:
4979 gcc_unreachable ();
4982 case TRAP_SUFFIX_V_SV_SVI:
4983 switch (alpha_fptm)
4985 case ALPHA_FPTM_N:
4986 return NULL;
4987 case ALPHA_FPTM_U:
4988 return "v";
4989 case ALPHA_FPTM_SU:
4990 return "sv";
4991 case ALPHA_FPTM_SUI:
4992 return "svi";
4993 default:
4994 gcc_unreachable ();
4996 break;
4998 case TRAP_SUFFIX_U_SU_SUI:
4999 switch (alpha_fptm)
5001 case ALPHA_FPTM_N:
5002 return NULL;
5003 case ALPHA_FPTM_U:
5004 return "u";
5005 case ALPHA_FPTM_SU:
5006 return "su";
5007 case ALPHA_FPTM_SUI:
5008 return "sui";
5009 default:
5010 gcc_unreachable ();
5012 break;
5014 default:
5015 gcc_unreachable ();
5017 gcc_unreachable ();
5020 /* Return the rounding mode suffix applicable to the current
5021 instruction, or NULL. */
5023 static const char *
5024 get_round_mode_suffix (void)
5026 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5028 switch (s)
5030 case ROUND_SUFFIX_NONE:
5031 return NULL;
5032 case ROUND_SUFFIX_NORMAL:
5033 switch (alpha_fprm)
5035 case ALPHA_FPRM_NORM:
5036 return NULL;
5037 case ALPHA_FPRM_MINF:
5038 return "m";
5039 case ALPHA_FPRM_CHOP:
5040 return "c";
5041 case ALPHA_FPRM_DYN:
5042 return "d";
5043 default:
5044 gcc_unreachable ();
5046 break;
5048 case ROUND_SUFFIX_C:
5049 return "c";
5051 default:
5052 gcc_unreachable ();
5054 gcc_unreachable ();
5057 /* Locate some local-dynamic symbol still in use by this function
5058 so that we can print its name in some movdi_er_tlsldm pattern. */
5060 static int
5061 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5063 rtx x = *px;
5065 if (GET_CODE (x) == SYMBOL_REF
5066 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5068 cfun->machine->some_ld_name = XSTR (x, 0);
5069 return 1;
5072 return 0;
5075 static const char *
5076 get_some_local_dynamic_name (void)
5078 rtx insn;
5080 if (cfun->machine->some_ld_name)
5081 return cfun->machine->some_ld_name;
5083 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5084 if (INSN_P (insn)
5085 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5086 return cfun->machine->some_ld_name;
5088 gcc_unreachable ();
5091 /* Print an operand. Recognize special options, documented below. */
5093 void
5094 print_operand (FILE *file, rtx x, int code)
5096 int i;
5098 switch (code)
5100 case '~':
5101 /* Print the assembler name of the current function. */
5102 assemble_name (file, alpha_fnname);
5103 break;
5105 case '&':
5106 assemble_name (file, get_some_local_dynamic_name ());
5107 break;
5109 case '/':
5111 const char *trap = get_trap_mode_suffix ();
5112 const char *round = get_round_mode_suffix ();
5114 if (trap || round)
5115 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5116 (trap ? trap : ""), (round ? round : ""));
5117 break;
5120 case ',':
5121 /* Generates single precision instruction suffix. */
5122 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5123 break;
5125 case '-':
5126 /* Generates double precision instruction suffix. */
5127 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5128 break;
5130 case '#':
5131 if (alpha_this_literal_sequence_number == 0)
5132 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5133 fprintf (file, "%d", alpha_this_literal_sequence_number);
5134 break;
5136 case '*':
5137 if (alpha_this_gpdisp_sequence_number == 0)
5138 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5139 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5140 break;
5142 case 'H':
5143 if (GET_CODE (x) == HIGH)
5144 output_addr_const (file, XEXP (x, 0));
5145 else
5146 output_operand_lossage ("invalid %%H value");
5147 break;
5149 case 'J':
5151 const char *lituse;
5153 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5155 x = XVECEXP (x, 0, 0);
5156 lituse = "lituse_tlsgd";
5158 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5160 x = XVECEXP (x, 0, 0);
5161 lituse = "lituse_tlsldm";
5163 else if (CONST_INT_P (x))
5164 lituse = "lituse_jsr";
5165 else
5167 output_operand_lossage ("invalid %%J value");
5168 break;
5171 if (x != const0_rtx)
5172 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5174 break;
5176 case 'j':
5178 const char *lituse;
5180 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5181 lituse = "lituse_jsrdirect";
5182 #else
5183 lituse = "lituse_jsr";
5184 #endif
5186 gcc_assert (INTVAL (x) != 0);
5187 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5189 break;
5190 case 'r':
5191 /* If this operand is the constant zero, write it as "$31". */
5192 if (REG_P (x))
5193 fprintf (file, "%s", reg_names[REGNO (x)]);
5194 else if (x == CONST0_RTX (GET_MODE (x)))
5195 fprintf (file, "$31");
5196 else
5197 output_operand_lossage ("invalid %%r value");
5198 break;
5200 case 'R':
5201 /* Similar, but for floating-point. */
5202 if (REG_P (x))
5203 fprintf (file, "%s", reg_names[REGNO (x)]);
5204 else if (x == CONST0_RTX (GET_MODE (x)))
5205 fprintf (file, "$f31");
5206 else
5207 output_operand_lossage ("invalid %%R value");
5208 break;
5210 case 'N':
5211 /* Write the 1's complement of a constant. */
5212 if (!CONST_INT_P (x))
5213 output_operand_lossage ("invalid %%N value");
5215 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5216 break;
5218 case 'P':
5219 /* Write 1 << C, for a constant C. */
5220 if (!CONST_INT_P (x))
5221 output_operand_lossage ("invalid %%P value");
5223 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5224 break;
5226 case 'h':
5227 /* Write the high-order 16 bits of a constant, sign-extended. */
5228 if (!CONST_INT_P (x))
5229 output_operand_lossage ("invalid %%h value");
5231 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5232 break;
5234 case 'L':
5235 /* Write the low-order 16 bits of a constant, sign-extended. */
5236 if (!CONST_INT_P (x))
5237 output_operand_lossage ("invalid %%L value");
5239 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5240 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5241 break;
5243 case 'm':
5244 /* Write mask for ZAP insn. */
5245 if (GET_CODE (x) == CONST_DOUBLE)
5247 HOST_WIDE_INT mask = 0;
5248 HOST_WIDE_INT value;
5250 value = CONST_DOUBLE_LOW (x);
5251 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5252 i++, value >>= 8)
5253 if (value & 0xff)
5254 mask |= (1 << i);
5256 value = CONST_DOUBLE_HIGH (x);
5257 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5258 i++, value >>= 8)
5259 if (value & 0xff)
5260 mask |= (1 << (i + sizeof (int)));
5262 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5265 else if (CONST_INT_P (x))
5267 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5269 for (i = 0; i < 8; i++, value >>= 8)
5270 if (value & 0xff)
5271 mask |= (1 << i);
5273 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5275 else
5276 output_operand_lossage ("invalid %%m value");
5277 break;
5279 case 'M':
5280 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5281 if (!CONST_INT_P (x)
5282 || (INTVAL (x) != 8 && INTVAL (x) != 16
5283 && INTVAL (x) != 32 && INTVAL (x) != 64))
5284 output_operand_lossage ("invalid %%M value");
5286 fprintf (file, "%s",
5287 (INTVAL (x) == 8 ? "b"
5288 : INTVAL (x) == 16 ? "w"
5289 : INTVAL (x) == 32 ? "l"
5290 : "q"));
5291 break;
5293 case 'U':
5294 /* Similar, except do it from the mask. */
5295 if (CONST_INT_P (x))
5297 HOST_WIDE_INT value = INTVAL (x);
5299 if (value == 0xff)
5301 fputc ('b', file);
5302 break;
5304 if (value == 0xffff)
5306 fputc ('w', file);
5307 break;
5309 if (value == 0xffffffff)
5311 fputc ('l', file);
5312 break;
5314 if (value == -1)
5316 fputc ('q', file);
5317 break;
5320 else if (HOST_BITS_PER_WIDE_INT == 32
5321 && GET_CODE (x) == CONST_DOUBLE
5322 && CONST_DOUBLE_LOW (x) == 0xffffffff
5323 && CONST_DOUBLE_HIGH (x) == 0)
5325 fputc ('l', file);
5326 break;
5328 output_operand_lossage ("invalid %%U value");
5329 break;
5331 case 's':
5332 /* Write the constant value divided by 8 for little-endian mode or
5333 (56 - value) / 8 for big-endian mode. */
5335 if (!CONST_INT_P (x)
5336 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5337 ? 56
5338 : 64)
5339 || (INTVAL (x) & 7) != 0)
5340 output_operand_lossage ("invalid %%s value");
5342 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5343 WORDS_BIG_ENDIAN
5344 ? (56 - INTVAL (x)) / 8
5345 : INTVAL (x) / 8);
5346 break;
5348 case 'S':
5349 /* Same, except compute (64 - c) / 8. */
5351 if (!CONST_INT_P (x)
5352 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5353 || (INTVAL (x) & 7) != 0)
5354 output_operand_lossage ("invalid %%S value");
5356 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5357 break;
5359 case 't':
5361 /* On Unicos/Mk systems: use a DEX expression if the symbol
5362 clashes with a register name. */
5363 int dex = unicosmk_need_dex (x);
5364 if (dex)
5365 fprintf (file, "DEX(%d)", dex);
5366 else
5367 output_addr_const (file, x);
5369 break;
5371 case 'C': case 'D': case 'c': case 'd':
5372 /* Write out comparison name. */
5374 enum rtx_code c = GET_CODE (x);
5376 if (!COMPARISON_P (x))
5377 output_operand_lossage ("invalid %%C value");
5379 else if (code == 'D')
5380 c = reverse_condition (c);
5381 else if (code == 'c')
5382 c = swap_condition (c);
5383 else if (code == 'd')
5384 c = swap_condition (reverse_condition (c));
5386 if (c == LEU)
5387 fprintf (file, "ule");
5388 else if (c == LTU)
5389 fprintf (file, "ult");
5390 else if (c == UNORDERED)
5391 fprintf (file, "un");
5392 else
5393 fprintf (file, "%s", GET_RTX_NAME (c));
5395 break;
5397 case 'E':
5398 /* Write the divide or modulus operator. */
5399 switch (GET_CODE (x))
5401 case DIV:
5402 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5403 break;
5404 case UDIV:
5405 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5406 break;
5407 case MOD:
5408 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5409 break;
5410 case UMOD:
5411 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5412 break;
5413 default:
5414 output_operand_lossage ("invalid %%E value");
5415 break;
5417 break;
5419 case 'A':
5420 /* Write "_u" for unaligned access. */
5421 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5422 fprintf (file, "_u");
5423 break;
5425 case 0:
5426 if (REG_P (x))
5427 fprintf (file, "%s", reg_names[REGNO (x)]);
5428 else if (MEM_P (x))
5429 output_address (XEXP (x, 0));
5430 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5432 switch (XINT (XEXP (x, 0), 1))
5434 case UNSPEC_DTPREL:
5435 case UNSPEC_TPREL:
5436 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5437 break;
5438 default:
5439 output_operand_lossage ("unknown relocation unspec");
5440 break;
5443 else
5444 output_addr_const (file, x);
5445 break;
5447 default:
5448 output_operand_lossage ("invalid %%xn code");
5452 void
5453 print_operand_address (FILE *file, rtx addr)
5455 int basereg = 31;
5456 HOST_WIDE_INT offset = 0;
5458 if (GET_CODE (addr) == AND)
5459 addr = XEXP (addr, 0);
5461 if (GET_CODE (addr) == PLUS
5462 && CONST_INT_P (XEXP (addr, 1)))
5464 offset = INTVAL (XEXP (addr, 1));
5465 addr = XEXP (addr, 0);
5468 if (GET_CODE (addr) == LO_SUM)
5470 const char *reloc16, *reloclo;
5471 rtx op1 = XEXP (addr, 1);
5473 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5475 op1 = XEXP (op1, 0);
5476 switch (XINT (op1, 1))
5478 case UNSPEC_DTPREL:
5479 reloc16 = NULL;
5480 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5481 break;
5482 case UNSPEC_TPREL:
5483 reloc16 = NULL;
5484 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5485 break;
5486 default:
5487 output_operand_lossage ("unknown relocation unspec");
5488 return;
5491 output_addr_const (file, XVECEXP (op1, 0, 0));
5493 else
5495 reloc16 = "gprel";
5496 reloclo = "gprellow";
5497 output_addr_const (file, op1);
5500 if (offset)
5501 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5503 addr = XEXP (addr, 0);
5504 switch (GET_CODE (addr))
5506 case REG:
5507 basereg = REGNO (addr);
5508 break;
5510 case SUBREG:
5511 basereg = subreg_regno (addr);
5512 break;
5514 default:
5515 gcc_unreachable ();
5518 fprintf (file, "($%d)\t\t!%s", basereg,
5519 (basereg == 29 ? reloc16 : reloclo));
5520 return;
5523 switch (GET_CODE (addr))
5525 case REG:
5526 basereg = REGNO (addr);
5527 break;
5529 case SUBREG:
5530 basereg = subreg_regno (addr);
5531 break;
5533 case CONST_INT:
5534 offset = INTVAL (addr);
5535 break;
5537 #if TARGET_ABI_OPEN_VMS
5538 case SYMBOL_REF:
5539 fprintf (file, "%s", XSTR (addr, 0));
5540 return;
5542 case CONST:
5543 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5544 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5545 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5546 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5547 INTVAL (XEXP (XEXP (addr, 0), 1)));
5548 return;
5550 #endif
5551 default:
5552 gcc_unreachable ();
5555 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5558 /* Emit RTL insns to initialize the variable parts of a trampoline at
5559 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5560 for the static chain value for the function. */
5562 static void
5563 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5565 rtx fnaddr, mem, word1, word2;
5567 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5569 #ifdef POINTERS_EXTEND_UNSIGNED
5570 fnaddr = convert_memory_address (Pmode, fnaddr);
5571 chain_value = convert_memory_address (Pmode, chain_value);
5572 #endif
5574 if (TARGET_ABI_OPEN_VMS)
5576 const char *fnname;
5577 char *trname;
5579 /* Construct the name of the trampoline entry point. */
5580 fnname = XSTR (fnaddr, 0);
5581 trname = (char *) alloca (strlen (fnname) + 5);
5582 strcpy (trname, fnname);
5583 strcat (trname, "..tr");
5584 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5585 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5587 /* Trampoline (or "bounded") procedure descriptor is constructed from
5588 the function's procedure descriptor with certain fields zeroed IAW
5589 the VMS calling standard. This is stored in the first quadword. */
5590 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5591 word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
5593 else
5595 /* These 4 instructions are:
5596 ldq $1,24($27)
5597 ldq $27,16($27)
5598 jmp $31,($27),0
5599 nop
5600 We don't bother setting the HINT field of the jump; the nop
5601 is merely there for padding. */
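/* Editorial note: on the little-endian instruction layout each 64-bit
   constant below packs two instructions, the earlier one in the low half:
   0xa43b0018 is the ldq $1,24($27), 0xa77b0010 the ldq $27,16($27),
   0x6bfb0000 the jmp and 0x47ff041f the canonical nop.  */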
5602 word1 = GEN_INT (0xa77b0010a43b0018);
5603 word2 = GEN_INT (0x47ff041f6bfb0000);
5606 /* Store the first two words, as computed above. */
5607 mem = adjust_address (m_tramp, DImode, 0);
5608 emit_move_insn (mem, word1);
5609 mem = adjust_address (m_tramp, DImode, 8);
5610 emit_move_insn (mem, word2);
5612 /* Store function address and static chain value. */
5613 mem = adjust_address (m_tramp, Pmode, 16);
5614 emit_move_insn (mem, fnaddr);
5615 mem = adjust_address (m_tramp, Pmode, 24);
5616 emit_move_insn (mem, chain_value);
5618 if (!TARGET_ABI_OPEN_VMS)
5620 emit_insn (gen_imb ());
5621 #ifdef ENABLE_EXECUTE_STACK
5622 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5623 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5624 #endif
5628 /* Determine where to put an argument to a function.
5629 Value is zero to push the argument on the stack,
5630 or a hard register in which to store the argument.
5632 MODE is the argument's machine mode.
5633 TYPE is the data type of the argument (as a tree).
5634 This is null for libcalls where that information may
5635 not be available.
5636 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5637 the preceding args and about the function being called.
5638 NAMED is nonzero if this argument is a named parameter
5639 (otherwise it is an extra parameter matching an ellipsis).
5641 On Alpha the first 6 words of args are normally in registers
5642 and the rest are pushed. */
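/* Editorial example for the OSF ABI (hypothetical prototype): for
   f (int a, double b, long c), the code below yields $16 for A
   (basereg 16, num_args 0), $f17 for B (basereg 32 + 16, num_args 1)
   and $18 for C (num_args 2); a seventh word-sized argument gets
   NULL_RTX and is pushed on the stack.  */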
5644 static rtx
5645 alpha_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5646 const_tree type, bool named ATTRIBUTE_UNUSED)
5648 int basereg;
5649 int num_args;
5651 /* Don't get confused and pass small structures in FP registers. */
5652 if (type && AGGREGATE_TYPE_P (type))
5653 basereg = 16;
5654 else
5656 #ifdef ENABLE_CHECKING
5657 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5658 values here. */
5659 gcc_assert (!COMPLEX_MODE_P (mode));
5660 #endif
5662 /* Set up defaults for FP operands passed in FP registers, and
5663 integral operands passed in integer registers. */
5664 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5665 basereg = 32 + 16;
5666 else
5667 basereg = 16;
5670 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5671 the two platforms, so we can't avoid conditional compilation. */
5672 #if TARGET_ABI_OPEN_VMS
5674 if (mode == VOIDmode)
5675 return alpha_arg_info_reg_val (*cum);
5677 num_args = cum->num_args;
5678 if (num_args >= 6
5679 || targetm.calls.must_pass_in_stack (mode, type))
5680 return NULL_RTX;
5682 #elif TARGET_ABI_OSF
5684 if (*cum >= 6)
5685 return NULL_RTX;
5686 num_args = *cum;
5688 /* VOID is passed as a special flag for "last argument". */
5689 if (type == void_type_node)
5690 basereg = 16;
5691 else if (targetm.calls.must_pass_in_stack (mode, type))
5692 return NULL_RTX;
5694 #else
5695 #error Unhandled ABI
5696 #endif
5698 return gen_rtx_REG (mode, num_args + basereg);
5701 /* Update the data in CUM to advance over an argument
5702 of mode MODE and data type TYPE.
5703 (TYPE is null for libcalls where that information may not be available.) */
5705 static void
5706 alpha_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5707 const_tree type, bool named ATTRIBUTE_UNUSED)
5709 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5710 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5712 #if TARGET_ABI_OSF
5713 *cum += increment;
5714 #else
5715 if (!onstack && cum->num_args < 6)
5716 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5717 cum->num_args += increment;
5718 #endif
5721 static int
5722 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5723 enum machine_mode mode ATTRIBUTE_UNUSED,
5724 tree type ATTRIBUTE_UNUSED,
5725 bool named ATTRIBUTE_UNUSED)
5727 int words = 0;
5729 #if TARGET_ABI_OPEN_VMS
5730 if (cum->num_args < 6
5731 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5732 words = 6 - cum->num_args;
5733 #elif TARGET_ABI_UNICOSMK
5734 /* Never any split arguments. */
5735 #elif TARGET_ABI_OSF
5736 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5737 words = 6 - *cum;
5738 #else
5739 #error Unhandled ABI
5740 #endif
5742 return words * UNITS_PER_WORD;
5746 /* Return true if TYPE must be returned in memory, instead of in registers. */
5748 static bool
5749 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5751 enum machine_mode mode = VOIDmode;
5752 int size;
5754 if (type)
5756 mode = TYPE_MODE (type);
5758 /* All aggregates are returned in memory, except on OpenVMS where
5759 records that fit 64 bits should be returned by immediate value
5760 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5761 if (TARGET_ABI_OPEN_VMS
5762 && TREE_CODE (type) != ARRAY_TYPE
5763 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5764 return false;
5766 if (AGGREGATE_TYPE_P (type))
5767 return true;
5770 size = GET_MODE_SIZE (mode);
5771 switch (GET_MODE_CLASS (mode))
5773 case MODE_VECTOR_FLOAT:
5774 /* Pass all float vectors in memory, like an aggregate. */
5775 return true;
5777 case MODE_COMPLEX_FLOAT:
5778 /* We judge complex floats on the size of their element,
5779 not the size of the whole type. */
5780 size = GET_MODE_UNIT_SIZE (mode);
5781 break;
5783 case MODE_INT:
5784 case MODE_FLOAT:
5785 case MODE_COMPLEX_INT:
5786 case MODE_VECTOR_INT:
5787 break;
5789 default:
5790 /* ??? We get called on all sorts of random stuff from
5791 aggregate_value_p. We must return something, but it's not
5792 clear what's safe to return. Pretend it's a struct I
5793 guess. */
5794 return true;
5797 /* Otherwise types must fit in one register. */
5798 return size > UNITS_PER_WORD;
5801 /* Return true if TYPE should be passed by invisible reference. */
5803 static bool
5804 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5805 enum machine_mode mode,
5806 const_tree type ATTRIBUTE_UNUSED,
5807 bool named ATTRIBUTE_UNUSED)
5809 return mode == TFmode || mode == TCmode;
5812 /* Define how to find the value returned by a function. VALTYPE is the
5813 data type of the value (as a tree). If the precise function being
5814 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5815 MODE is set instead of VALTYPE for libcalls.
5817 On Alpha the value is found in $0 for integer functions and
5818 $f0 for floating-point functions. */
5820 rtx
5821 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5822 enum machine_mode mode)
5824 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5825 enum mode_class mclass;
5827 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5829 if (valtype)
5830 mode = TYPE_MODE (valtype);
5832 mclass = GET_MODE_CLASS (mode);
5833 switch (mclass)
5835 case MODE_INT:
5836 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5837 where we have them returning both SImode and DImode. */
5838 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5839 PROMOTE_MODE (mode, dummy, valtype);
5840 /* FALLTHRU */
5842 case MODE_COMPLEX_INT:
5843 case MODE_VECTOR_INT:
5844 regnum = 0;
5845 break;
5847 case MODE_FLOAT:
5848 regnum = 32;
5849 break;
5851 case MODE_COMPLEX_FLOAT:
5853 enum machine_mode cmode = GET_MODE_INNER (mode);
5855 return gen_rtx_PARALLEL
5856 (VOIDmode,
5857 gen_rtvec (2,
5858 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5859 const0_rtx),
5860 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5861 GEN_INT (GET_MODE_SIZE (cmode)))));
5864 case MODE_RANDOM:
5865 /* We should only reach here for BLKmode on VMS. */
5866 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5867 regnum = 0;
5868 break;
5870 default:
5871 gcc_unreachable ();
5874 return gen_rtx_REG (mode, regnum);
5877 /* TCmode complex values are passed by invisible reference. We
5878 should not split these values. */
5880 static bool
5881 alpha_split_complex_arg (const_tree type)
5883 return TYPE_MODE (type) != TCmode;
5886 static tree
5887 alpha_build_builtin_va_list (void)
5889 tree base, ofs, space, record, type_decl;
5891 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5892 return ptr_type_node;
5894 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5895 type_decl = build_decl (BUILTINS_LOCATION,
5896 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5897 TYPE_STUB_DECL (record) = type_decl;
5898 TYPE_NAME (record) = type_decl;
5900 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5902 /* Dummy field to prevent alignment warnings. */
5903 space = build_decl (BUILTINS_LOCATION,
5904 FIELD_DECL, NULL_TREE, integer_type_node);
5905 DECL_FIELD_CONTEXT (space) = record;
5906 DECL_ARTIFICIAL (space) = 1;
5907 DECL_IGNORED_P (space) = 1;
5909 ofs = build_decl (BUILTINS_LOCATION,
5910 FIELD_DECL, get_identifier ("__offset"),
5911 integer_type_node);
5912 DECL_FIELD_CONTEXT (ofs) = record;
5913 DECL_CHAIN (ofs) = space;
5914 /* ??? This is a hack, __offset is marked volatile to prevent
5915 DCE that confuses stdarg optimization and results in
5916 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5917 TREE_THIS_VOLATILE (ofs) = 1;
5919 base = build_decl (BUILTINS_LOCATION,
5920 FIELD_DECL, get_identifier ("__base"),
5921 ptr_type_node);
5922 DECL_FIELD_CONTEXT (base) = record;
5923 DECL_CHAIN (base) = ofs;
5925 TYPE_FIELDS (record) = base;
5926 layout_type (record);
5928 va_list_gpr_counter_field = ofs;
5929 return record;
5932 #if TARGET_ABI_OSF
5933 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5934 and constant additions. */
5936 static gimple
5937 va_list_skip_additions (tree lhs)
5939 gimple stmt;
5941 for (;;)
5943 enum tree_code code;
5945 stmt = SSA_NAME_DEF_STMT (lhs);
5947 if (gimple_code (stmt) == GIMPLE_PHI)
5948 return stmt;
5950 if (!is_gimple_assign (stmt)
5951 || gimple_assign_lhs (stmt) != lhs)
5952 return NULL;
5954 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5955 return stmt;
5956 code = gimple_assign_rhs_code (stmt);
5957 if (!CONVERT_EXPR_CODE_P (code)
5958 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5959 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5960 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5961 return stmt;
5963 lhs = gimple_assign_rhs1 (stmt);
5967 /* Check if LHS = RHS statement is
5968 LHS = *(ap.__base + ap.__offset + cst)
5970 LHS = *(ap.__base
5971 + ((ap.__offset + cst <= 47)
5972 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5973 If the former, indicate that GPR registers are needed,
5974 if the latter, indicate that FPR registers are needed.
5976 Also look for LHS = (*ptr).field, where ptr is one of the forms
5977 listed above.
5979 On alpha, cfun->va_list_gpr_size is used as size of the needed
5980 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5981 registers are needed and bit 1 set if FPR registers are needed.
5982 Return true if va_list references should not be scanned for the
5983 current statement. */
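/* Editorial example (an assumption about typical gimplified input): a use
   such as `x = va_arg (ap, int)' matches the first form above and sets
   bit 0 of cfun->va_list_fpr_size, while `d = va_arg (ap, double)'
   produces the conditional form with the "- 48" adjustment (compare
   alpha_gimplify_va_arg_1 below) and sets bit 1.  */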
5985 static bool
5986 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5988 tree base, offset, rhs;
5989 int offset_arg = 1;
5990 gimple base_stmt;
5992 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5993 != GIMPLE_SINGLE_RHS)
5994 return false;
5996 rhs = gimple_assign_rhs1 (stmt);
5997 while (handled_component_p (rhs))
5998 rhs = TREE_OPERAND (rhs, 0);
5999 if (TREE_CODE (rhs) != MEM_REF
6000 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
6001 return false;
6003 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
6004 if (stmt == NULL
6005 || !is_gimple_assign (stmt)
6006 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6007 return false;
6009 base = gimple_assign_rhs1 (stmt);
6010 if (TREE_CODE (base) == SSA_NAME)
6012 base_stmt = va_list_skip_additions (base);
6013 if (base_stmt
6014 && is_gimple_assign (base_stmt)
6015 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6016 base = gimple_assign_rhs1 (base_stmt);
6019 if (TREE_CODE (base) != COMPONENT_REF
6020 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6022 base = gimple_assign_rhs2 (stmt);
6023 if (TREE_CODE (base) == SSA_NAME)
6025 base_stmt = va_list_skip_additions (base);
6026 if (base_stmt
6027 && is_gimple_assign (base_stmt)
6028 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6029 base = gimple_assign_rhs1 (base_stmt);
6032 if (TREE_CODE (base) != COMPONENT_REF
6033 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6034 return false;
6036 offset_arg = 0;
6039 base = get_base_address (base);
6040 if (TREE_CODE (base) != VAR_DECL
6041 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
6042 return false;
6044 offset = gimple_op (stmt, 1 + offset_arg);
6045 if (TREE_CODE (offset) == SSA_NAME)
6047 gimple offset_stmt = va_list_skip_additions (offset);
6049 if (offset_stmt
6050 && gimple_code (offset_stmt) == GIMPLE_PHI)
6052 HOST_WIDE_INT sub;
6053 gimple arg1_stmt, arg2_stmt;
6054 tree arg1, arg2;
6055 enum tree_code code1, code2;
6057 if (gimple_phi_num_args (offset_stmt) != 2)
6058 goto escapes;
6060 arg1_stmt
6061 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6062 arg2_stmt
6063 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6064 if (arg1_stmt == NULL
6065 || !is_gimple_assign (arg1_stmt)
6066 || arg2_stmt == NULL
6067 || !is_gimple_assign (arg2_stmt))
6068 goto escapes;
6070 code1 = gimple_assign_rhs_code (arg1_stmt);
6071 code2 = gimple_assign_rhs_code (arg2_stmt);
6072 if (code1 == COMPONENT_REF
6073 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6074 /* Do nothing. */;
6075 else if (code2 == COMPONENT_REF
6076 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6078 gimple tem = arg1_stmt;
6079 code2 = code1;
6080 arg1_stmt = arg2_stmt;
6081 arg2_stmt = tem;
6083 else
6084 goto escapes;
6086 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
6087 goto escapes;
6089 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
6090 if (code2 == MINUS_EXPR)
6091 sub = -sub;
6092 if (sub < -48 || sub > -32)
6093 goto escapes;
6095 arg1 = gimple_assign_rhs1 (arg1_stmt);
6096 arg2 = gimple_assign_rhs1 (arg2_stmt);
6097 if (TREE_CODE (arg2) == SSA_NAME)
6099 arg2_stmt = va_list_skip_additions (arg2);
6100 if (arg2_stmt == NULL
6101 || !is_gimple_assign (arg2_stmt)
6102 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6103 goto escapes;
6104 arg2 = gimple_assign_rhs1 (arg2_stmt);
6106 if (arg1 != arg2)
6107 goto escapes;
6109 if (TREE_CODE (arg1) != COMPONENT_REF
6110 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6111 || get_base_address (arg1) != base)
6112 goto escapes;
6114 /* Need floating point regs. */
6115 cfun->va_list_fpr_size |= 2;
6116 return false;
6118 if (offset_stmt
6119 && is_gimple_assign (offset_stmt)
6120 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6121 offset = gimple_assign_rhs1 (offset_stmt);
6123 if (TREE_CODE (offset) != COMPONENT_REF
6124 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6125 || get_base_address (offset) != base)
6126 goto escapes;
6127 else
6128 /* Need general regs. */
6129 cfun->va_list_fpr_size |= 1;
6130 return false;
6132 escapes:
6133 si->va_list_escapes = true;
6134 return false;
6136 #endif
6138 /* Perform any actions needed for a function that is receiving a
6139 variable number of arguments. */
6141 static void
6142 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6143 tree type, int *pretend_size, int no_rtl)
6145 CUMULATIVE_ARGS cum = *pcum;
6147 /* Skip the current argument. */
6148 targetm.calls.function_arg_advance (&cum, mode, type, true);
6150 #if TARGET_ABI_UNICOSMK
6151 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6152 arguments on the stack. Unfortunately, it doesn't always store the first
6153 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6154 with stdargs as we always have at least one named argument there. */
6155 if (cum.num_reg_words < 6)
6157 if (!no_rtl)
6159 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6160 emit_insn (gen_arg_home_umk ());
6162 *pretend_size = 0;
6164 #elif TARGET_ABI_OPEN_VMS
6165 /* For VMS, we allocate space for all 6 arg registers plus a count.
6167 However, if NO registers need to be saved, don't allocate any space.
6168 This is not only because we won't need the space, but because AP
6169 includes the current_pretend_args_size and we don't want to mess up
6170 any ap-relative addresses already made. */
6171 if (cum.num_args < 6)
6173 if (!no_rtl)
6175 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6176 emit_insn (gen_arg_home ());
6178 *pretend_size = 7 * UNITS_PER_WORD;
6180 #else
6181 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6182 only push those that are remaining. However, if NO registers need to
6183 be saved, don't allocate any space. This is not only because we won't
6184 need the space, but because AP includes the current_pretend_args_size
6185 and we don't want to mess up any ap-relative addresses already made.
6187 If we are not to use the floating-point registers, save the integer
6188 registers where we would put the floating-point registers. This is
6189 not the most efficient way to implement varargs with just one register
6190 class, but it isn't worth doing anything more efficient in this rare
6191 case. */
6192 if (cum >= 6)
6193 return;
6195 if (!no_rtl)
6197 int count;
6198 alias_set_type set = get_varargs_alias_set ();
6199 rtx tmp;
6201 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6202 if (count > 6 - cum)
6203 count = 6 - cum;
6205 /* Detect whether integer registers or floating-point registers
6206 are needed by the detected va_arg statements. See above for
6207 how these values are computed. Note that the "escape" value
6208 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6209 these bits set. */
6210 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6212 if (cfun->va_list_fpr_size & 1)
6214 tmp = gen_rtx_MEM (BLKmode,
6215 plus_constant (virtual_incoming_args_rtx,
6216 (cum + 6) * UNITS_PER_WORD));
6217 MEM_NOTRAP_P (tmp) = 1;
6218 set_mem_alias_set (tmp, set);
6219 move_block_from_reg (16 + cum, tmp, count);
6222 if (cfun->va_list_fpr_size & 2)
6224 tmp = gen_rtx_MEM (BLKmode,
6225 plus_constant (virtual_incoming_args_rtx,
6226 cum * UNITS_PER_WORD));
6227 MEM_NOTRAP_P (tmp) = 1;
6228 set_mem_alias_set (tmp, set);
6229 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6232 *pretend_size = 12 * UNITS_PER_WORD;
6233 #endif
6236 static void
6237 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6239 HOST_WIDE_INT offset;
6240 tree t, offset_field, base_field;
6242 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6243 return;
6245 if (TARGET_ABI_UNICOSMK)
6246 std_expand_builtin_va_start (valist, nextarg);
6248 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6249 up by 48, storing fp arg registers in the first 48 bytes, and the
6250 integer arg registers in the next 48 bytes. This is only done,
6251 however, if any integer registers need to be stored.
6253 If no integer registers need be stored, then we must subtract 48
6254 in order to account for the integer arg registers which are counted
6255 in argsize above, but which are not actually stored on the stack.
6256 Must further be careful here about structures straddling the last
6257 integer argument register; that futzes with pretend_args_size,
6258 which changes the meaning of AP. */
6260 if (NUM_ARGS < 6)
6261 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6262 else
6263 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6265 if (TARGET_ABI_OPEN_VMS)
6267 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6268 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6269 size_int (offset + NUM_ARGS * UNITS_PER_WORD));
6270 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6271 TREE_SIDE_EFFECTS (t) = 1;
6272 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6274 else
6276 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6277 offset_field = DECL_CHAIN (base_field);
6279 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6280 valist, base_field, NULL_TREE);
6281 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6282 valist, offset_field, NULL_TREE);
6284 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6285 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6286 size_int (offset));
6287 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6288 TREE_SIDE_EFFECTS (t) = 1;
6289 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6291 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6292 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6293 TREE_SIDE_EFFECTS (t) = 1;
6294 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6298 static tree
6299 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6300 gimple_seq *pre_p)
6302 tree type_size, ptr_type, addend, t, addr;
6303 gimple_seq internal_post;
6305 /* If the type could not be passed in registers, skip the block
6306 reserved for the registers. */
6307 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6309 t = build_int_cst (TREE_TYPE (offset), 6*8);
6310 gimplify_assign (offset,
6311 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6312 pre_p);
6315 addend = offset;
6316 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6318 if (TREE_CODE (type) == COMPLEX_TYPE)
6320 tree real_part, imag_part, real_temp;
6322 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6323 offset, pre_p);
6325 /* Copy the value into a new temporary, lest the formal temporary
6326 be reused out from under us. */
6327 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6329 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6330 offset, pre_p);
6332 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6334 else if (TREE_CODE (type) == REAL_TYPE)
6336 tree fpaddend, cond, fourtyeight;
6338 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6339 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6340 addend, fourtyeight);
6341 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6342 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6343 fpaddend, addend);
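/* I.e. while the offset is still inside the first 48 bytes the argument was
   passed in an FP register, so we address it 48 bytes lower, in the FP
   register save area; once past 48 bytes it lives in an ordinary stack slot
   and the plain offset is used.  */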
6346 /* Build the final address and force that value into a temporary. */
6347 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6348 fold_convert (sizetype, addend));
6349 internal_post = NULL;
6350 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6351 gimple_seq_add_seq (pre_p, internal_post);
6353 /* Update the offset field. */
6354 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6355 if (type_size == NULL || TREE_OVERFLOW (type_size))
6356 t = size_zero_node;
6357 else
6359 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6360 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6361 t = size_binop (MULT_EXPR, t, size_int (8));
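/* The three steps above round the argument size up to a whole number of
   8-byte argument slots before the offset is advanced.  */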
6363 t = fold_convert (TREE_TYPE (offset), t);
6364 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6365 pre_p);
6367 return build_va_arg_indirect_ref (addr);
6370 static tree
6371 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6372 gimple_seq *post_p)
6374 tree offset_field, base_field, offset, base, t, r;
6375 bool indirect;
6377 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6378 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6380 base_field = TYPE_FIELDS (va_list_type_node);
6381 offset_field = DECL_CHAIN (base_field);
6382 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6383 valist, base_field, NULL_TREE);
6384 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6385 valist, offset_field, NULL_TREE);
6387 /* Pull the fields of the structure out into temporaries. Since we never
6388 modify the base field, we can use a formal temporary. Sign-extend the
6389 offset field so that it's the proper width for pointer arithmetic. */
6390 base = get_formal_tmp_var (base_field, pre_p);
6392 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6393 offset = get_initialized_tmp_var (t, pre_p, NULL);
6395 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6396 if (indirect)
6397 type = build_pointer_type_for_mode (type, ptr_mode, true);
6399 /* Find the value. Note that this will be a stable indirection, or
6400 a composite of stable indirections in the case of complex. */
6401 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6403 /* Stuff the offset temporary back into its field. */
6404 gimplify_assign (unshare_expr (offset_field),
6405 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6407 if (indirect)
6408 r = build_va_arg_indirect_ref (r);
6410 return r;
6413 /* Builtins. */
6415 enum alpha_builtin
6417 ALPHA_BUILTIN_CMPBGE,
6418 ALPHA_BUILTIN_EXTBL,
6419 ALPHA_BUILTIN_EXTWL,
6420 ALPHA_BUILTIN_EXTLL,
6421 ALPHA_BUILTIN_EXTQL,
6422 ALPHA_BUILTIN_EXTWH,
6423 ALPHA_BUILTIN_EXTLH,
6424 ALPHA_BUILTIN_EXTQH,
6425 ALPHA_BUILTIN_INSBL,
6426 ALPHA_BUILTIN_INSWL,
6427 ALPHA_BUILTIN_INSLL,
6428 ALPHA_BUILTIN_INSQL,
6429 ALPHA_BUILTIN_INSWH,
6430 ALPHA_BUILTIN_INSLH,
6431 ALPHA_BUILTIN_INSQH,
6432 ALPHA_BUILTIN_MSKBL,
6433 ALPHA_BUILTIN_MSKWL,
6434 ALPHA_BUILTIN_MSKLL,
6435 ALPHA_BUILTIN_MSKQL,
6436 ALPHA_BUILTIN_MSKWH,
6437 ALPHA_BUILTIN_MSKLH,
6438 ALPHA_BUILTIN_MSKQH,
6439 ALPHA_BUILTIN_UMULH,
6440 ALPHA_BUILTIN_ZAP,
6441 ALPHA_BUILTIN_ZAPNOT,
6442 ALPHA_BUILTIN_AMASK,
6443 ALPHA_BUILTIN_IMPLVER,
6444 ALPHA_BUILTIN_RPCC,
6445 ALPHA_BUILTIN_THREAD_POINTER,
6446 ALPHA_BUILTIN_SET_THREAD_POINTER,
6447 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6448 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6450 /* TARGET_MAX */
6451 ALPHA_BUILTIN_MINUB8,
6452 ALPHA_BUILTIN_MINSB8,
6453 ALPHA_BUILTIN_MINUW4,
6454 ALPHA_BUILTIN_MINSW4,
6455 ALPHA_BUILTIN_MAXUB8,
6456 ALPHA_BUILTIN_MAXSB8,
6457 ALPHA_BUILTIN_MAXUW4,
6458 ALPHA_BUILTIN_MAXSW4,
6459 ALPHA_BUILTIN_PERR,
6460 ALPHA_BUILTIN_PKLB,
6461 ALPHA_BUILTIN_PKWB,
6462 ALPHA_BUILTIN_UNPKBL,
6463 ALPHA_BUILTIN_UNPKBW,
6465 /* TARGET_CIX */
6466 ALPHA_BUILTIN_CTTZ,
6467 ALPHA_BUILTIN_CTLZ,
6468 ALPHA_BUILTIN_CTPOP,
6470 ALPHA_BUILTIN_max
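/* The code_for_builtin table below is indexed directly by this enumeration,
   so the two must be kept in the same order.  */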
6473 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6474 CODE_FOR_builtin_cmpbge,
6475 CODE_FOR_builtin_extbl,
6476 CODE_FOR_builtin_extwl,
6477 CODE_FOR_builtin_extll,
6478 CODE_FOR_builtin_extql,
6479 CODE_FOR_builtin_extwh,
6480 CODE_FOR_builtin_extlh,
6481 CODE_FOR_builtin_extqh,
6482 CODE_FOR_builtin_insbl,
6483 CODE_FOR_builtin_inswl,
6484 CODE_FOR_builtin_insll,
6485 CODE_FOR_builtin_insql,
6486 CODE_FOR_builtin_inswh,
6487 CODE_FOR_builtin_inslh,
6488 CODE_FOR_builtin_insqh,
6489 CODE_FOR_builtin_mskbl,
6490 CODE_FOR_builtin_mskwl,
6491 CODE_FOR_builtin_mskll,
6492 CODE_FOR_builtin_mskql,
6493 CODE_FOR_builtin_mskwh,
6494 CODE_FOR_builtin_msklh,
6495 CODE_FOR_builtin_mskqh,
6496 CODE_FOR_umuldi3_highpart,
6497 CODE_FOR_builtin_zap,
6498 CODE_FOR_builtin_zapnot,
6499 CODE_FOR_builtin_amask,
6500 CODE_FOR_builtin_implver,
6501 CODE_FOR_builtin_rpcc,
6502 CODE_FOR_load_tp,
6503 CODE_FOR_set_tp,
6504 CODE_FOR_builtin_establish_vms_condition_handler,
6505 CODE_FOR_builtin_revert_vms_condition_handler,
6507 /* TARGET_MAX */
6508 CODE_FOR_builtin_minub8,
6509 CODE_FOR_builtin_minsb8,
6510 CODE_FOR_builtin_minuw4,
6511 CODE_FOR_builtin_minsw4,
6512 CODE_FOR_builtin_maxub8,
6513 CODE_FOR_builtin_maxsb8,
6514 CODE_FOR_builtin_maxuw4,
6515 CODE_FOR_builtin_maxsw4,
6516 CODE_FOR_builtin_perr,
6517 CODE_FOR_builtin_pklb,
6518 CODE_FOR_builtin_pkwb,
6519 CODE_FOR_builtin_unpkbl,
6520 CODE_FOR_builtin_unpkbw,
6522 /* TARGET_CIX */
6523 CODE_FOR_ctzdi2,
6524 CODE_FOR_clzdi2,
6525 CODE_FOR_popcountdi2
6528 struct alpha_builtin_def
6530 const char *name;
6531 enum alpha_builtin code;
6532 unsigned int target_mask;
6533 bool is_const;
6536 static struct alpha_builtin_def const zero_arg_builtins[] = {
6537 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6538 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6541 static struct alpha_builtin_def const one_arg_builtins[] = {
6542 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6543 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6544 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6545 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6546 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6547 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6548 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6549 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6552 static struct alpha_builtin_def const two_arg_builtins[] = {
6553 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6554 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6555 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6556 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6557 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6558 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6559 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6560 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6561 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6562 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6563 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6564 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6565 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6566 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6567 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6568 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6569 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6570 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6571 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6572 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6573 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6574 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6575 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6576 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6577 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6578 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6579 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6580 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6581 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6582 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6583 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6584 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6585 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6586 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6589 static GTY(()) tree alpha_v8qi_u;
6590 static GTY(()) tree alpha_v8qi_s;
6591 static GTY(()) tree alpha_v4hi_u;
6592 static GTY(()) tree alpha_v4hi_s;
6594 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6596 /* Return the alpha builtin for CODE. */
6598 static tree
6599 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6601 if (code >= ALPHA_BUILTIN_max)
6602 return error_mark_node;
6603 return alpha_builtins[code];
6606 /* Helper function of alpha_init_builtins. Add the built-in specified
6607 by NAME, TYPE, CODE, and ECF. */
6609 static void
6610 alpha_builtin_function (const char *name, tree ftype,
6611 enum alpha_builtin code, unsigned ecf)
6613 tree decl = add_builtin_function (name, ftype, (int) code,
6614 BUILT_IN_MD, NULL, NULL_TREE);
6616 if (ecf & ECF_CONST)
6617 TREE_READONLY (decl) = 1;
6618 if (ecf & ECF_NOTHROW)
6619 TREE_NOTHROW (decl) = 1;
6621 alpha_builtins [(int) code] = decl;
6624 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6625 functions pointed to by P, with function type FTYPE. */
6627 static void
6628 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6629 tree ftype)
6631 size_t i;
6633 for (i = 0; i < count; ++i, ++p)
6634 if ((target_flags & p->target_mask) == p->target_mask)
6635 alpha_builtin_function (p->name, ftype, p->code,
6636 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6639 static void
6640 alpha_init_builtins (void)
6642 tree dimode_integer_type_node;
6643 tree ftype;
6645 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6647 /* Fwrite on VMS is non-standard. */
6648 #if TARGET_ABI_OPEN_VMS
6649 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
6650 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
6651 #endif
6653 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6654 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6655 ftype);
6657 ftype = build_function_type_list (dimode_integer_type_node,
6658 dimode_integer_type_node, NULL_TREE);
6659 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6660 ftype);
6662 ftype = build_function_type_list (dimode_integer_type_node,
6663 dimode_integer_type_node,
6664 dimode_integer_type_node, NULL_TREE);
6665 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6666 ftype);
6668 ftype = build_function_type (ptr_type_node, void_list_node);
6669 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6670 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6672 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6673 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6674 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6676 if (TARGET_ABI_OPEN_VMS)
6678 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6679 NULL_TREE);
6680 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6681 ftype,
6682 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6685 ftype = build_function_type_list (ptr_type_node, void_type_node,
6686 NULL_TREE);
6687 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6688 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6691 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6692 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6693 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6694 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6697 /* Expand an expression EXP that calls a built-in function,
6698 with result going to TARGET if that's convenient
6699 (and in mode MODE if that's convenient).
6700 SUBTARGET may be used as the target for computing one of EXP's operands.
6701 IGNORE is nonzero if the value is to be ignored. */
6703 static rtx
6704 alpha_expand_builtin (tree exp, rtx target,
6705 rtx subtarget ATTRIBUTE_UNUSED,
6706 enum machine_mode mode ATTRIBUTE_UNUSED,
6707 int ignore ATTRIBUTE_UNUSED)
6709 #define MAX_ARGS 2
6711 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6712 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6713 tree arg;
6714 call_expr_arg_iterator iter;
6715 enum insn_code icode;
6716 rtx op[MAX_ARGS], pat;
6717 int arity;
6718 bool nonvoid;
6720 if (fcode >= ALPHA_BUILTIN_max)
6721 internal_error ("bad builtin fcode");
6722 icode = code_for_builtin[fcode];
6723 if (icode == 0)
6724 internal_error ("bad builtin fcode");
6726 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
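/* For patterns that produce a value, operand 0 is the output, so the Nth
   call argument maps to insn operand N + 1; for void patterns the arguments
   line up with the operands directly.  */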
6728 arity = 0;
6729 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6731 const struct insn_operand_data *insn_op;
6733 if (arg == error_mark_node)
6734 return NULL_RTX;
6735 if (arity > MAX_ARGS)
6736 return NULL_RTX;
6738 insn_op = &insn_data[icode].operand[arity + nonvoid];
6740 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6742 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6743 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6744 arity++;
6747 if (nonvoid)
6749 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6750 if (!target
6751 || GET_MODE (target) != tmode
6752 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6753 target = gen_reg_rtx (tmode);
6756 switch (arity)
6758 case 0:
6759 pat = GEN_FCN (icode) (target);
6760 break;
6761 case 1:
6762 if (nonvoid)
6763 pat = GEN_FCN (icode) (target, op[0]);
6764 else
6765 pat = GEN_FCN (icode) (op[0]);
6766 break;
6767 case 2:
6768 pat = GEN_FCN (icode) (target, op[0], op[1]);
6769 break;
6770 default:
6771 gcc_unreachable ();
6773 if (!pat)
6774 return NULL_RTX;
6775 emit_insn (pat);
6777 if (nonvoid)
6778 return target;
6779 else
6780 return const0_rtx;
6784 /* Several bits below assume HWI >= 64 bits. This should be enforced
6785 by config.gcc. */
6786 #if HOST_BITS_PER_WIDE_INT < 64
6787 # error "HOST_WIDE_INT too small"
6788 #endif
6790 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6791 with an 8-bit output vector. OPINT contains the integer operands; bit N
6792 of OP_CONST is set if OPINT[N] is valid. */
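/* For example, __builtin_alpha_cmpbge (0x1122334455667788, 0x1122334455667789)
   folds to 0xfe: byte 0 (0x88 vs 0x89) fails the >= test, while the seven
   higher bytes are all equal.  Comparing against a constant 0 folds to 0xff
   even when the first operand is unknown, since every unsigned byte is >= 0.  */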
6794 static tree
6795 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6797 if (op_const == 3)
6799 int i, val;
6800 for (i = 0, val = 0; i < 8; ++i)
6802 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6803 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6804 if (c0 >= c1)
6805 val |= 1 << i;
6807 return build_int_cst (long_integer_type_node, val);
6809 else if (op_const == 2 && opint[1] == 0)
6810 return build_int_cst (long_integer_type_node, 0xff);
6811 return NULL;
6814 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6815 specialized form of an AND operation. Other byte manipulation instructions
6816 are defined in terms of this instruction, so this is also used as a
6817 subroutine for other builtins.
6819 OP contains the tree operands; OPINT contains the extracted integer values.
6820 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6821 OPINT may be considered. */
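/* For example, a byte selector of 0x0f expands to the mask
   0x00000000ffffffff, so __builtin_alpha_zapnot (x, 0x0f) folds to
   x & 0xffffffff when x is constant, or to a BIT_AND_EXPR with that mask
   when only the selector is constant.  */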
6823 static tree
6824 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6825 long op_const)
6827 if (op_const & 2)
6829 unsigned HOST_WIDE_INT mask = 0;
6830 int i;
6832 for (i = 0; i < 8; ++i)
6833 if ((opint[1] >> i) & 1)
6834 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6836 if (op_const & 1)
6837 return build_int_cst (long_integer_type_node, opint[0] & mask);
6839 if (op)
6840 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6841 build_int_cst (long_integer_type_node, mask));
6843 else if ((op_const & 1) && opint[0] == 0)
6844 return build_int_cst (long_integer_type_node, 0);
6845 return NULL;
6848 /* Fold the builtins for the EXT family of instructions. */
6850 static tree
6851 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6852 long op_const, unsigned HOST_WIDE_INT bytemask,
6853 bool is_high)
6855 long zap_const = 2;
6856 tree *zap_op = NULL;
6858 if (op_const & 2)
6860 unsigned HOST_WIDE_INT loc;
6862 loc = opint[1] & 7;
6863 if (BYTES_BIG_ENDIAN)
6864 loc ^= 7;
6865 loc *= 8;
6867 if (loc != 0)
6869 if (op_const & 1)
6871 unsigned HOST_WIDE_INT temp = opint[0];
6872 if (is_high)
6873 temp <<= loc;
6874 else
6875 temp >>= loc;
6876 opint[0] = temp;
6877 zap_const = 3;
6880 else
6881 zap_op = op;
6884 opint[1] = bytemask;
6885 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
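/* For example, on a little-endian target __builtin_alpha_extwl (x, 2) with
   both operands constant folds as (x >> 16) & 0xffff: the address operand
   selects a shift of 8 * (addr & 7) bits and the byte mask (0x03 for a
   word) is then applied via the zapnot fold above.  */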
6888 /* Fold the builtins for the INS family of instructions. */
6890 static tree
6891 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6892 long op_const, unsigned HOST_WIDE_INT bytemask,
6893 bool is_high)
6895 if ((op_const & 1) && opint[0] == 0)
6896 return build_int_cst (long_integer_type_node, 0);
6898 if (op_const & 2)
6900 unsigned HOST_WIDE_INT temp, loc, byteloc;
6901 tree *zap_op = NULL;
6903 loc = opint[1] & 7;
6904 if (BYTES_BIG_ENDIAN)
6905 loc ^= 7;
6906 bytemask <<= loc;
6908 temp = opint[0];
6909 if (is_high)
6911 byteloc = (64 - (loc * 8)) & 0x3f;
6912 if (byteloc == 0)
6913 zap_op = op;
6914 else
6915 temp >>= byteloc;
6916 bytemask >>= 8;
6918 else
6920 byteloc = loc * 8;
6921 if (byteloc == 0)
6922 zap_op = op;
6923 else
6924 temp <<= byteloc;
6927 opint[0] = temp;
6928 opint[1] = bytemask;
6929 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6932 return NULL;
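/* For example, with both operands constant on a little-endian target,
   __builtin_alpha_inswl (0x1234, 2) folds to 0x12340000: the byte mask 0x03
   is shifted to 0x0c, the value is shifted left by 16 bits, and zapnot
   keeps only bytes 2 and 3.  */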
6935 static tree
6936 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6937 long op_const, unsigned HOST_WIDE_INT bytemask,
6938 bool is_high)
6940 if (op_const & 2)
6942 unsigned HOST_WIDE_INT loc;
6944 loc = opint[1] & 7;
6945 if (BYTES_BIG_ENDIAN)
6946 loc ^= 7;
6947 bytemask <<= loc;
6949 if (is_high)
6950 bytemask >>= 8;
6952 opint[1] = bytemask ^ 0xff;
6955 return alpha_fold_builtin_zapnot (op, opint, op_const);
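/* MSK clears the selected bytes: e.g. __builtin_alpha_mskwl (x, 2) becomes
   zapnot (x, 0xf3), i.e. x & 0xffffffff0000ffff, zeroing bytes 2 and 3 in
   little-endian numbering.  */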
6958 static tree
6959 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6961 switch (op_const)
6963 case 3:
6965 unsigned HOST_WIDE_INT l;
6966 HOST_WIDE_INT h;
6968 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6970 #if HOST_BITS_PER_WIDE_INT > 64
6971 # error fixme
6972 #endif
6974 return build_int_cst (long_integer_type_node, h);
6977 case 1:
6978 opint[1] = opint[0];
6979 /* FALLTHRU */
6980 case 2:
6981 /* Note that (X*1) >> 64 == 0. */
6982 if (opint[1] == 0 || opint[1] == 1)
6983 return build_int_cst (long_integer_type_node, 0);
6984 break;
6986 return NULL;
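/* For example, __builtin_alpha_umulh (1ULL << 32, 1ULL << 32) folds to 1:
   the full 128-bit product is 2^64 and only the high half is kept.
   Multiplying by 0 or 1 folds to 0 even when the other operand is unknown,
   since the high 64 bits of such a product are always zero.  */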
6989 static tree
6990 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6992 tree op0 = fold_convert (vtype, op[0]);
6993 tree op1 = fold_convert (vtype, op[1]);
6994 tree val = fold_build2 (code, vtype, op0, op1);
6995 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6998 static tree
6999 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
7001 unsigned HOST_WIDE_INT temp = 0;
7002 int i;
7004 if (op_const != 3)
7005 return NULL;
7007 for (i = 0; i < 8; ++i)
7009 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
7010 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
7011 if (a >= b)
7012 temp += a - b;
7013 else
7014 temp += b - a;
7017 return build_int_cst (long_integer_type_node, temp);
7020 static tree
7021 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
7023 unsigned HOST_WIDE_INT temp;
7025 if (op_const == 0)
7026 return NULL;
7028 temp = opint[0] & 0xff;
7029 temp |= (opint[0] >> 24) & 0xff00;
7031 return build_int_cst (long_integer_type_node, temp);
7034 static tree
7035 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
7037 unsigned HOST_WIDE_INT temp;
7039 if (op_const == 0)
7040 return NULL;
7042 temp = opint[0] & 0xff;
7043 temp |= (opint[0] >> 8) & 0xff00;
7044 temp |= (opint[0] >> 16) & 0xff0000;
7045 temp |= (opint[0] >> 24) & 0xff000000;
7047 return build_int_cst (long_integer_type_node, temp);
7050 static tree
7051 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
7053 unsigned HOST_WIDE_INT temp;
7055 if (op_const == 0)
7056 return NULL;
7058 temp = opint[0] & 0xff;
7059 temp |= (opint[0] & 0xff00) << 24;
7061 return build_int_cst (long_integer_type_node, temp);
7064 static tree
7065 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
7067 unsigned HOST_WIDE_INT temp;
7069 if (op_const == 0)
7070 return NULL;
7072 temp = opint[0] & 0xff;
7073 temp |= (opint[0] & 0x0000ff00) << 8;
7074 temp |= (opint[0] & 0x00ff0000) << 16;
7075 temp |= (opint[0] & 0xff000000) << 24;
7077 return build_int_cst (long_integer_type_node, temp);
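/* PKWB packs the low byte of each 16-bit lane into the low 32 bits,
   e.g. 0x00dd00cc00bb00aa folds to 0xddccbbaa; PKLB does the same for the
   two 32-bit lanes.  UNPKBL and UNPKBW perform the inverse zero-extending
   unpack.  */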
7080 static tree
7081 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7083 unsigned HOST_WIDE_INT temp;
7085 if (op_const == 0)
7086 return NULL;
7088 if (opint[0] == 0)
7089 temp = 64;
7090 else
7091 temp = exact_log2 (opint[0] & -opint[0]);
7093 return build_int_cst (long_integer_type_node, temp);
7096 static tree
7097 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7099 unsigned HOST_WIDE_INT temp;
7101 if (op_const == 0)
7102 return NULL;
7104 if (opint[0] == 0)
7105 temp = 64;
7106 else
7107 temp = 64 - floor_log2 (opint[0]) - 1;
7109 return build_int_cst (long_integer_type_node, temp);
7112 static tree
7113 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7115 unsigned HOST_WIDE_INT temp, op;
7117 if (op_const == 0)
7118 return NULL;
7120 op = opint[0];
7121 temp = 0;
7122 while (op)
7123 temp++, op &= op - 1;
7125 return build_int_cst (long_integer_type_node, temp);
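/* The folds above use standard bit tricks: x & -x isolates the lowest set
   bit for CTTZ, CTLZ is 63 - floor_log2 (x), and CTPOP clears the lowest
   set bit on each loop iteration.  A zero input folds to 64 for CTTZ and
   CTLZ, and to 0 for CTPOP.  */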
7128 /* Fold one of our builtin functions. */
7130 static tree
7131 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7132 bool ignore ATTRIBUTE_UNUSED)
7134 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7135 long op_const = 0;
7136 int i;
7138 if (n_args >= MAX_ARGS)
7139 return NULL;
7141 for (i = 0; i < n_args; i++)
7143 tree arg = op[i];
7144 if (arg == error_mark_node)
7145 return NULL;
7147 opint[i] = 0;
7148 if (TREE_CODE (arg) == INTEGER_CST)
7150 op_const |= 1L << i;
7151 opint[i] = int_cst_value (arg);
7155 switch (DECL_FUNCTION_CODE (fndecl))
7157 case ALPHA_BUILTIN_CMPBGE:
7158 return alpha_fold_builtin_cmpbge (opint, op_const);
7160 case ALPHA_BUILTIN_EXTBL:
7161 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7162 case ALPHA_BUILTIN_EXTWL:
7163 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7164 case ALPHA_BUILTIN_EXTLL:
7165 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7166 case ALPHA_BUILTIN_EXTQL:
7167 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7168 case ALPHA_BUILTIN_EXTWH:
7169 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7170 case ALPHA_BUILTIN_EXTLH:
7171 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7172 case ALPHA_BUILTIN_EXTQH:
7173 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7175 case ALPHA_BUILTIN_INSBL:
7176 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7177 case ALPHA_BUILTIN_INSWL:
7178 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7179 case ALPHA_BUILTIN_INSLL:
7180 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7181 case ALPHA_BUILTIN_INSQL:
7182 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7183 case ALPHA_BUILTIN_INSWH:
7184 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7185 case ALPHA_BUILTIN_INSLH:
7186 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7187 case ALPHA_BUILTIN_INSQH:
7188 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7190 case ALPHA_BUILTIN_MSKBL:
7191 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7192 case ALPHA_BUILTIN_MSKWL:
7193 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7194 case ALPHA_BUILTIN_MSKLL:
7195 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7196 case ALPHA_BUILTIN_MSKQL:
7197 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7198 case ALPHA_BUILTIN_MSKWH:
7199 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7200 case ALPHA_BUILTIN_MSKLH:
7201 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7202 case ALPHA_BUILTIN_MSKQH:
7203 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7205 case ALPHA_BUILTIN_UMULH:
7206 return alpha_fold_builtin_umulh (opint, op_const);
7208 case ALPHA_BUILTIN_ZAP:
7209 opint[1] ^= 0xff;
7210 /* FALLTHRU */
7211 case ALPHA_BUILTIN_ZAPNOT:
7212 return alpha_fold_builtin_zapnot (op, opint, op_const);
7214 case ALPHA_BUILTIN_MINUB8:
7215 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7216 case ALPHA_BUILTIN_MINSB8:
7217 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7218 case ALPHA_BUILTIN_MINUW4:
7219 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7220 case ALPHA_BUILTIN_MINSW4:
7221 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7222 case ALPHA_BUILTIN_MAXUB8:
7223 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7224 case ALPHA_BUILTIN_MAXSB8:
7225 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7226 case ALPHA_BUILTIN_MAXUW4:
7227 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7228 case ALPHA_BUILTIN_MAXSW4:
7229 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7231 case ALPHA_BUILTIN_PERR:
7232 return alpha_fold_builtin_perr (opint, op_const);
7233 case ALPHA_BUILTIN_PKLB:
7234 return alpha_fold_builtin_pklb (opint, op_const);
7235 case ALPHA_BUILTIN_PKWB:
7236 return alpha_fold_builtin_pkwb (opint, op_const);
7237 case ALPHA_BUILTIN_UNPKBL:
7238 return alpha_fold_builtin_unpkbl (opint, op_const);
7239 case ALPHA_BUILTIN_UNPKBW:
7240 return alpha_fold_builtin_unpkbw (opint, op_const);
7242 case ALPHA_BUILTIN_CTTZ:
7243 return alpha_fold_builtin_cttz (opint, op_const);
7244 case ALPHA_BUILTIN_CTLZ:
7245 return alpha_fold_builtin_ctlz (opint, op_const);
7246 case ALPHA_BUILTIN_CTPOP:
7247 return alpha_fold_builtin_ctpop (opint, op_const);
7249 case ALPHA_BUILTIN_AMASK:
7250 case ALPHA_BUILTIN_IMPLVER:
7251 case ALPHA_BUILTIN_RPCC:
7252 case ALPHA_BUILTIN_THREAD_POINTER:
7253 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7254 /* None of these are foldable at compile-time. */
7255 default:
7256 return NULL;
7260 /* This page contains routines that are used to determine what the function
7261 prologue and epilogue code will do and write them out. */
7263 /* Compute the size of the save area in the stack. */
7265 /* These variables are used for communication between the following functions.
7266 They indicate various things about the current function being compiled
7267 that are used to tell what kind of prologue, epilogue and procedure
7268 descriptor to generate. */
7270 /* Nonzero if we need a stack procedure. */
7271 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7272 static enum alpha_procedure_types alpha_procedure_type;
7274 /* Register number (either FP or SP) that is used to unwind the frame. */
7275 static int vms_unwind_regno;
7277 /* Register number used to save FP. We need not have one for RA since
7278 we don't modify it for register procedures. This is only defined
7279 for register frame procedures. */
7280 static int vms_save_fp_regno;
7282 /* Register number used to reference objects off our PV. */
7283 static int vms_base_regno;
7285 /* Compute register masks for saved registers. */
7287 static void
7288 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7290 unsigned long imask = 0;
7291 unsigned long fmask = 0;
7292 unsigned int i;
7294 /* When outputting a thunk, we don't have valid register life info,
7295 but assemble_start_function wants to output .frame and .mask
7296 directives. */
7297 if (cfun->is_thunk)
7299 *imaskP = 0;
7300 *fmaskP = 0;
7301 return;
7304 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7305 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7307 /* One for every register we have to save. */
7308 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7309 if (! fixed_regs[i] && ! call_used_regs[i]
7310 && df_regs_ever_live_p (i) && i != REG_RA
7311 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7313 if (i < 32)
7314 imask |= (1UL << i);
7315 else
7316 fmask |= (1UL << (i - 32));
7319 /* We need to restore these for the handler. */
7320 if (crtl->calls_eh_return)
7322 for (i = 0; ; ++i)
7324 unsigned regno = EH_RETURN_DATA_REGNO (i);
7325 if (regno == INVALID_REGNUM)
7326 break;
7327 imask |= 1UL << regno;
7331 /* If any register spilled, then spill the return address also. */
7332 /* ??? This is required by the Digital stack unwind specification
7333 and isn't needed if we're doing Dwarf2 unwinding. */
7334 if (imask || fmask || alpha_ra_ever_killed ())
7335 imask |= (1UL << REG_RA);
7337 *imaskP = imask;
7338 *fmaskP = fmask;
7342 alpha_sa_size (void)
7344 unsigned long mask[2];
7345 int sa_size = 0;
7346 int i, j;
7348 alpha_sa_mask (&mask[0], &mask[1]);
7350 if (TARGET_ABI_UNICOSMK)
7352 if (mask[0] || mask[1])
7353 sa_size = 14;
7355 else
7357 for (j = 0; j < 2; ++j)
7358 for (i = 0; i < 32; ++i)
7359 if ((mask[j] >> i) & 1)
7360 sa_size++;
7363 if (TARGET_ABI_UNICOSMK)
7365 /* We might not need to generate a frame if we don't make any calls
7366 (including calls to __T3E_MISMATCH if this is a vararg function),
7367 don't have any local variables which require stack slots, don't
7368 use alloca and have not determined that we need a frame for other
7369 reasons. */
7371 alpha_procedure_type
7372 = (sa_size || get_frame_size() != 0
7373 || crtl->outgoing_args_size
7374 || cfun->stdarg || cfun->calls_alloca
7375 || frame_pointer_needed)
7376 ? PT_STACK : PT_REGISTER;
7378 /* Always reserve space for saving callee-saved registers if we
7379 need a frame as required by the calling convention. */
7380 if (alpha_procedure_type == PT_STACK)
7381 sa_size = 14;
7383 else if (TARGET_ABI_OPEN_VMS)
7385 /* Start with a stack procedure if we make any calls (REG_RA used), or
7386 need a frame pointer, with a register procedure if we otherwise need
7387 at least a slot, and with a null procedure in other cases. */
7388 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7389 alpha_procedure_type = PT_STACK;
7390 else if (get_frame_size() != 0)
7391 alpha_procedure_type = PT_REGISTER;
7392 else
7393 alpha_procedure_type = PT_NULL;
7395 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7396 made the final decision on stack procedure vs register procedure. */
7397 if (alpha_procedure_type == PT_STACK)
7398 sa_size -= 2;
7400 /* Decide whether to refer to objects off our PV via FP or PV.
7401 If we need FP for something else or if we receive a nonlocal
7402 goto (which expects PV to contain the value), we must use PV.
7403 Otherwise, start by assuming we can use FP. */
7405 vms_base_regno
7406 = (frame_pointer_needed
7407 || cfun->has_nonlocal_label
7408 || alpha_procedure_type == PT_STACK
7409 || crtl->outgoing_args_size)
7410 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7412 /* If we want to copy PV into FP, we need to find some register
7413 in which to save FP. */
7415 vms_save_fp_regno = -1;
7416 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7417 for (i = 0; i < 32; i++)
7418 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7419 vms_save_fp_regno = i;
7421 /* A VMS condition handler requires a stack procedure in our
7422 implementation (it is not required by the calling standard). */
7423 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7424 || cfun->machine->uses_condition_handler)
7425 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7426 else if (alpha_procedure_type == PT_NULL)
7427 vms_base_regno = REG_PV;
7429 /* Stack unwinding should be done via FP unless we use it for PV. */
7430 vms_unwind_regno = (vms_base_regno == REG_PV
7431 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7433 /* If this is a stack procedure, allow space for saving FP, RA and
7434 a condition handler slot if needed. */
7435 if (alpha_procedure_type == PT_STACK)
7436 sa_size += 2 + cfun->machine->uses_condition_handler;
7438 else
7440 /* Our size must be even (multiple of 16 bytes). */
7441 if (sa_size & 1)
7442 sa_size++;
7445 return sa_size * 8;
7448 /* Define the offset between two registers, one to be eliminated,
7449 and the other its replacement, at the start of a routine. */
7451 HOST_WIDE_INT
7452 alpha_initial_elimination_offset (unsigned int from,
7453 unsigned int to ATTRIBUTE_UNUSED)
7455 HOST_WIDE_INT ret;
7457 ret = alpha_sa_size ();
7458 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7460 switch (from)
7462 case FRAME_POINTER_REGNUM:
7463 break;
7465 case ARG_POINTER_REGNUM:
7466 ret += (ALPHA_ROUND (get_frame_size ()
7467 + crtl->args.pretend_args_size)
7468 - crtl->args.pretend_args_size);
7469 break;
7471 default:
7472 gcc_unreachable ();
7475 return ret;
7478 #if TARGET_ABI_OPEN_VMS
7480 /* Worker function for TARGET_CAN_ELIMINATE. */
7482 static bool
7483 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7485 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7486 alpha_sa_size ();
7488 switch (alpha_procedure_type)
7490 case PT_NULL:
7491 /* NULL procedures have no frame of their own and we only
7492 know how to resolve from the current stack pointer. */
7493 return to == STACK_POINTER_REGNUM;
7495 case PT_REGISTER:
7496 case PT_STACK:
7497 /* We always eliminate except to the stack pointer if there is no
7498 usable frame pointer at hand. */
7499 return (to != STACK_POINTER_REGNUM
7500 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7503 gcc_unreachable ();
7506 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7507 designates the same location as FROM. */
7509 HOST_WIDE_INT
7510 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7512 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7513 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7514 on the proper computations and will need the register save area size
7515 in most cases. */
7517 HOST_WIDE_INT sa_size = alpha_sa_size ();
7519 /* PT_NULL procedures have no frame of their own and we only allow
7520 elimination to the stack pointer. This is the argument pointer and we
7521 resolve the soft frame pointer to that as well. */
7523 if (alpha_procedure_type == PT_NULL)
7524 return 0;
7526 /* For a PT_STACK procedure the frame layout looks as follows
7528                       -----> decreasing addresses
7530                <    size rounded up to 16     |        likewise        >
7531 --------------#------------------------------+++--------------+++-------#
7532 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7533 --------------#---------------------------------------------------------#
7534               ^                               ^                ^        ^
7535            ARG_PTR                        FRAME_PTR     HARD_FRAME_PTR STACK_PTR
7538 PT_REGISTER procedures are similar in that they may have a frame of their
7539 own. They have no regs-sa/pv/outgoing-args area.
7541 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7542 to STACK_PTR if need be. */
7545 HOST_WIDE_INT offset;
7546 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7548 switch (from)
7550 case FRAME_POINTER_REGNUM:
7551 offset = ALPHA_ROUND (sa_size + pv_save_size);
7552 break;
7553 case ARG_POINTER_REGNUM:
7554 offset = (ALPHA_ROUND (sa_size + pv_save_size
7555 + get_frame_size ()
7556 + crtl->args.pretend_args_size)
7557 - crtl->args.pretend_args_size);
7558 break;
7559 default:
7560 gcc_unreachable ();
7563 if (to == STACK_POINTER_REGNUM)
7564 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7566 return offset;
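/* For instance, assuming ALPHA_ROUND rounds up to the 16-byte stack
   alignment: a PT_STACK procedure with sa_size == 16, an 8-byte PV save
   slot, a 32-byte frame and no pretend args gives a FRAME_POINTER offset of
   ALPHA_ROUND (24) == 32 and an ARG_POINTER offset of ALPHA_ROUND (56) == 64,
   plus the rounded outgoing-args size when eliminating to the stack
   pointer.  */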
7570 #define COMMON_OBJECT "common_object"
7572 static tree
7573 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7574 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7575 bool *no_add_attrs ATTRIBUTE_UNUSED)
7577 tree decl = *node;
7578 gcc_assert (DECL_P (decl));
7580 DECL_COMMON (decl) = 1;
7581 return NULL_TREE;
7584 static const struct attribute_spec vms_attribute_table[] =
7586 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7587 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler },
7588 { NULL, 0, 0, false, false, false, NULL }
7591 void
7592 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7593 unsigned HOST_WIDE_INT size,
7594 unsigned int align)
7596 tree attr = DECL_ATTRIBUTES (decl);
7597 fprintf (file, "%s", COMMON_ASM_OP);
7598 assemble_name (file, name);
7599 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7600 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7601 fprintf (file, ",%u", align / BITS_PER_UNIT);
7602 if (attr)
7604 attr = lookup_attribute (COMMON_OBJECT, attr);
7605 if (attr)
7606 fprintf (file, ",%s",
7607 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7609 fputc ('\n', file);
7612 #undef COMMON_OBJECT
7614 #endif
7616 static int
7617 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7619 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7623 alpha_find_lo_sum_using_gp (rtx insn)
7625 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7628 static int
7629 alpha_does_function_need_gp (void)
7631 rtx insn;
7633 /* The GP being variable is an OSF abi thing. */
7634 if (! TARGET_ABI_OSF)
7635 return 0;
7637 /* We need the gp to load the address of __mcount. */
7638 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7639 return 1;
7641 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7642 if (cfun->is_thunk)
7643 return 1;
7645 /* The nonlocal receiver pattern assumes that the gp is valid for
7646 the nested function. Reasonable because it's almost always set
7647 correctly already. For the cases where that's wrong, make sure
7648 the nested function loads its gp on entry. */
7649 if (crtl->has_nonlocal_goto)
7650 return 1;
7652 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7653 Even if we are a static function, we still need to do this in case
7654 our address is taken and passed to something like qsort. */
7656 push_topmost_sequence ();
7657 insn = get_insns ();
7658 pop_topmost_sequence ();
7660 for (; insn; insn = NEXT_INSN (insn))
7661 if (NONDEBUG_INSN_P (insn)
7662 && ! JUMP_TABLE_DATA_P (insn)
7663 && GET_CODE (PATTERN (insn)) != USE
7664 && GET_CODE (PATTERN (insn)) != CLOBBER
7665 && get_attr_usegp (insn))
7666 return 1;
7668 return 0;
7672 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7673 sequences. */
7675 static rtx
7676 set_frame_related_p (void)
7678 rtx seq = get_insns ();
7679 rtx insn;
7681 end_sequence ();
7683 if (!seq)
7684 return NULL_RTX;
7686 if (INSN_P (seq))
7688 insn = seq;
7689 while (insn != NULL_RTX)
7691 RTX_FRAME_RELATED_P (insn) = 1;
7692 insn = NEXT_INSN (insn);
7694 seq = emit_insn (seq);
7696 else
7698 seq = emit_insn (seq);
7699 RTX_FRAME_RELATED_P (seq) = 1;
7701 return seq;
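/* FRP (EXP) below evaluates EXP inside a fresh insn sequence and then emits
   that sequence with RTX_FRAME_RELATED_P set on every insn (via
   set_frame_related_p above), so the prologue stack adjustments and register
   saves it wraps are visible to the unwinder.  */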
7704 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7706 /* Generates a store with the proper unwind info attached. VALUE is
7707 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7708 contains SP+FRAME_BIAS, and that is the unwind info that should be
7709 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7710 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7712 static void
7713 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7714 HOST_WIDE_INT base_ofs, rtx frame_reg)
7716 rtx addr, mem, insn;
7718 addr = plus_constant (base_reg, base_ofs);
7719 mem = gen_rtx_MEM (DImode, addr);
7720 set_mem_alias_set (mem, alpha_sr_alias_set);
7722 insn = emit_move_insn (mem, value);
7723 RTX_FRAME_RELATED_P (insn) = 1;
7725 if (frame_bias || value != frame_reg)
7727 if (frame_bias)
7729 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7730 mem = gen_rtx_MEM (DImode, addr);
7733 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7734 gen_rtx_SET (VOIDmode, mem, frame_reg));
7738 static void
7739 emit_frame_store (unsigned int regno, rtx base_reg,
7740 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7742 rtx reg = gen_rtx_REG (DImode, regno);
7743 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7746 /* Compute the frame size. SIZE is the size of the "naked" frame
7747 and SA_SIZE is the size of the register save area. */
7749 static HOST_WIDE_INT
7750 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7752 if (TARGET_ABI_OPEN_VMS)
7753 return ALPHA_ROUND (sa_size
7754 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7755 + size
7756 + crtl->args.pretend_args_size);
7757 else if (TARGET_ABI_UNICOSMK)
7758 /* We have to allocate space for the DSIB if we generate a frame. */
7759 return ALPHA_ROUND (sa_size
7760 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7761 + ALPHA_ROUND (size
7762 + crtl->outgoing_args_size);
7763 else
7764 return ALPHA_ROUND (crtl->outgoing_args_size)
7765 + sa_size
7766 + ALPHA_ROUND (size
7767 + crtl->args.pretend_args_size);
7770 /* Write function prologue. */
7772 /* On VMS we have two kinds of functions:
7774 - stack frame (PROC_STACK)
7775 these are 'normal' functions with local vars and that call
7776 other functions
7777 - register frame (PROC_REGISTER)
7778 keeps all data in registers, needs no stack
7780 We must pass this to the assembler so it can generate the
7781 proper pdsc (procedure descriptor).
7782 This is done with the '.pdesc' directive.
7784 On non-VMS targets, we don't really differentiate between the two, as
7785 we can simply allocate stack without saving registers. */
7787 void
7788 alpha_expand_prologue (void)
7790 /* Registers to save. */
7791 unsigned long imask = 0;
7792 unsigned long fmask = 0;
7793 /* Stack space needed for pushing registers clobbered by us. */
7794 HOST_WIDE_INT sa_size;
7795 /* Complete stack size needed. */
7796 HOST_WIDE_INT frame_size;
7797 /* Probed stack size; it additionally includes the size of
7798 the "reserve region" if any. */
7799 HOST_WIDE_INT probed_size;
7800 /* Offset from base reg to register save area. */
7801 HOST_WIDE_INT reg_offset;
7802 rtx sa_reg;
7803 int i;
7805 sa_size = alpha_sa_size ();
7806 frame_size = compute_frame_size (get_frame_size (), sa_size);
7808 if (flag_stack_usage)
7809 current_function_static_stack_size = frame_size;
7811 if (TARGET_ABI_OPEN_VMS)
7812 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7813 else
7814 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7816 alpha_sa_mask (&imask, &fmask);
7818 /* Emit an insn to reload GP, if needed. */
7819 if (TARGET_ABI_OSF)
7821 alpha_function_needs_gp = alpha_does_function_need_gp ();
7822 if (alpha_function_needs_gp)
7823 emit_insn (gen_prologue_ldgp ());
7826 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7827 the call to mcount ourselves, rather than having the linker do it
7828 magically in response to -pg. Since _mcount has special linkage,
7829 don't represent the call as a call. */
7830 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7831 emit_insn (gen_prologue_mcount ());
7833 if (TARGET_ABI_UNICOSMK)
7834 unicosmk_gen_dsib (&imask);
7836 /* Adjust the stack by the frame size. If the frame size is > 4096
7837 bytes, we need to be sure we probe somewhere in the first and last
7838 4096 bytes (we can probably get away without the latter test) and
7839 every 8192 bytes in between. If the frame size is > 32768, we
7840 do this in a loop. Otherwise, we generate the explicit probe
7841 instructions.
7843 Note that we are only allowed to adjust sp once in the prologue. */
7845 probed_size = frame_size;
7846 if (flag_stack_check)
7847 probed_size += STACK_CHECK_PROTECT;
7849 if (probed_size <= 32768)
7851 if (probed_size > 4096)
7853 int probed;
7855 for (probed = 4096; probed < probed_size; probed += 8192)
7856 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7857 ? -probed + 64
7858 : -probed)));
7860 /* We only have to do this probe if we aren't saving registers or
7861 if we are probing beyond the frame because of -fstack-check. */
7862 if ((sa_size == 0 && probed_size > probed - 4096)
7863 || flag_stack_check)
7864 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7867 if (frame_size != 0)
7868 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7869 GEN_INT (TARGET_ABI_UNICOSMK
7870 ? -frame_size + 64
7871 : -frame_size))));
7873 else
7875 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7876 number of 8192 byte blocks to probe. We then probe each block
7877 in the loop and then set SP to the proper location. If the
7878 amount remaining is > 4096, we have to do one more probe if we
7879 are not saving any registers or if we are probing beyond the
7880 frame because of -fstack-check. */
7882 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7883 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7884 rtx ptr = gen_rtx_REG (DImode, 22);
7885 rtx count = gen_rtx_REG (DImode, 23);
7886 rtx seq;
7888 emit_move_insn (count, GEN_INT (blocks));
7889 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7890 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7892 /* Because of the difficulty in emitting a new basic block this
7893 late in the compilation, generate the loop as a single insn. */
7894 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7896 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7898 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7899 MEM_VOLATILE_P (last) = 1;
7900 emit_move_insn (last, const0_rtx);
7903 if (TARGET_ABI_WINDOWS_NT || flag_stack_check)
7905 /* For NT stack unwind (done by 'reverse execution'), it's
7906 not OK to take the result of a loop, even though the value
7907 is already in ptr, so we reload it via a single operation
7908 and subtract it from sp.
7910 Same if -fstack-check is specified, because the probed stack
7911 size is not equal to the frame size.
7913 Yes, that's correct -- we have to reload the whole constant
7914 into a temporary via ldah+lda then subtract from sp. */
7916 HOST_WIDE_INT lo, hi;
7917 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7918 hi = frame_size - lo;
7920 emit_move_insn (ptr, GEN_INT (hi));
7921 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7922 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7923 ptr));
7925 else
7927 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7928 GEN_INT (-leftover)));
7931 /* This alternative is special, because the DWARF code cannot
7932 possibly intuit through the loop above. So we invent this
7933 note it looks at instead. */
7934 RTX_FRAME_RELATED_P (seq) = 1;
7935 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7936 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7937 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7938 GEN_INT (TARGET_ABI_UNICOSMK
7939 ? -frame_size + 64
7940 : -frame_size))));
7943 if (!TARGET_ABI_UNICOSMK)
7945 HOST_WIDE_INT sa_bias = 0;
7947 /* Cope with very large offsets to the register save area. */
7948 sa_reg = stack_pointer_rtx;
7949 if (reg_offset + sa_size > 0x8000)
7951 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7952 rtx sa_bias_rtx;
7954 if (low + sa_size <= 0x8000)
7955 sa_bias = reg_offset - low, reg_offset = low;
7956 else
7957 sa_bias = reg_offset, reg_offset = 0;
7959 sa_reg = gen_rtx_REG (DImode, 24);
7960 sa_bias_rtx = GEN_INT (sa_bias);
7962 if (add_operand (sa_bias_rtx, DImode))
7963 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7964 else
7966 emit_move_insn (sa_reg, sa_bias_rtx);
7967 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7971 /* Save regs in stack order. Beginning with VMS PV. */
7972 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7973 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7975 /* Save register RA next. */
7976 if (imask & (1UL << REG_RA))
7978 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7979 imask &= ~(1UL << REG_RA);
7980 reg_offset += 8;
7983 /* Now save any other registers required to be saved. */
7984 for (i = 0; i < 31; i++)
7985 if (imask & (1UL << i))
7987 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7988 reg_offset += 8;
7991 for (i = 0; i < 31; i++)
7992 if (fmask & (1UL << i))
7994 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7995 reg_offset += 8;
7998 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8000 /* The standard frame on the T3E includes space for saving registers.
8001 We just have to use it. We don't have to save the return address and
8002 the old frame pointer here - they are saved in the DSIB. */
8004 reg_offset = -56;
8005 for (i = 9; i < 15; i++)
8006 if (imask & (1UL << i))
8008 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
8009 reg_offset -= 8;
8011 for (i = 2; i < 10; i++)
8012 if (fmask & (1UL << i))
8014 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
8015 reg_offset -= 8;
8019 if (TARGET_ABI_OPEN_VMS)
8021 /* Register frame procedures save the fp. */
8022 if (alpha_procedure_type == PT_REGISTER)
8024 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
8025 hard_frame_pointer_rtx);
8026 add_reg_note (insn, REG_CFA_REGISTER, NULL);
8027 RTX_FRAME_RELATED_P (insn) = 1;
8030 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
8031 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
8032 gen_rtx_REG (DImode, REG_PV)));
8034 if (alpha_procedure_type != PT_NULL
8035 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8036 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8038 /* If we have to allocate space for outgoing args, do it now. */
8039 if (crtl->outgoing_args_size != 0)
8041 rtx seq
8042 = emit_move_insn (stack_pointer_rtx,
8043 plus_constant
8044 (hard_frame_pointer_rtx,
8045 - (ALPHA_ROUND
8046 (crtl->outgoing_args_size))));
8048 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
8049 if ! frame_pointer_needed. Setting the bit will change the CFA
8050 computation rule to use sp again, which would be wrong if we had
8051 frame_pointer_needed, as this means sp might move unpredictably
8052 later on.
8054 Also, note that
8055 frame_pointer_needed
8056 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8058 crtl->outgoing_args_size != 0
8059 => alpha_procedure_type != PT_NULL,
8061 so when we are not setting the bit here, we are guaranteed to
8062 have emitted an FRP frame pointer update just before. */
8063 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
8066 else if (!TARGET_ABI_UNICOSMK)
8068 /* If we need a frame pointer, set it from the stack pointer. */
8069 if (frame_pointer_needed)
8071 if (TARGET_CAN_FAULT_IN_PROLOGUE)
8072 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8073 else
8074 /* This must always be the last instruction in the
8075 prologue, thus we emit a special move + clobber. */
8076 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
8077 stack_pointer_rtx, sa_reg)));
8081 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
8082 the prologue, for exception handling reasons, we cannot do this for
8083 any insn that might fault. We could prevent this for mems with a
8084 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
8085 have to prevent all such scheduling with a blockage.
8087 Linux, on the other hand, never bothered to implement OSF/1's
8088 exception handling, and so doesn't care about such things. Anyone
8089 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8091 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
8092 emit_insn (gen_blockage ());
8095 /* Count the number of .file directives, so that .loc is up to date. */
8096 int num_source_filenames = 0;
8098 /* Output the textual info surrounding the prologue. */
8100 void
8101 alpha_start_function (FILE *file, const char *fnname,
8102 tree decl ATTRIBUTE_UNUSED)
8104 unsigned long imask = 0;
8105 unsigned long fmask = 0;
8106 /* Stack space needed for pushing registers clobbered by us. */
8107 HOST_WIDE_INT sa_size;
8108 /* Complete stack size needed. */
8109 unsigned HOST_WIDE_INT frame_size;
8110 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
8111 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
8112 ? 524288
8113 : 1UL << 31;
8114 /* Offset from base reg to register save area. */
8115 HOST_WIDE_INT reg_offset;
8116 char *entry_label = (char *) alloca (strlen (fnname) + 6);
8117 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8118 int i;
8120 /* Don't emit an extern directive for functions defined in the same file. */
8121 if (TARGET_ABI_UNICOSMK)
8123 tree name_tree;
8124 name_tree = get_identifier (fnname);
8125 TREE_ASM_WRITTEN (name_tree) = 1;
8128 #if TARGET_ABI_OPEN_VMS
8129 if (vms_debug_main
8130 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
8132 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
8133 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
8134 switch_to_section (text_section);
8135 vms_debug_main = NULL;
8137 #endif
8139 alpha_fnname = fnname;
8140 sa_size = alpha_sa_size ();
8141 frame_size = compute_frame_size (get_frame_size (), sa_size);
8143 if (TARGET_ABI_OPEN_VMS)
8144 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8145 else
8146 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8148 alpha_sa_mask (&imask, &fmask);
8150 /* Ecoff can handle multiple .file directives, so put out file and lineno.
8151 We have to do that before the .ent directive as we cannot switch
8152 files within procedures with native ecoff because line numbers are
8153 linked to procedure descriptors.
8154 Outputting the lineno helps debugging of one line functions as they
8155 would otherwise get no line number at all. Please note that we would
8156 like to put out last_linenum from final.c, but it is not accessible. */
8158 if (write_symbols == SDB_DEBUG)
8160 #ifdef ASM_OUTPUT_SOURCE_FILENAME
8161 ASM_OUTPUT_SOURCE_FILENAME (file,
8162 DECL_SOURCE_FILE (current_function_decl));
8163 #endif
8164 #ifdef SDB_OUTPUT_SOURCE_LINE
8165 if (debug_info_level != DINFO_LEVEL_TERSE)
8166 SDB_OUTPUT_SOURCE_LINE (file,
8167 DECL_SOURCE_LINE (current_function_decl));
8168 #endif
8171 /* Issue function start and label. */
8172 if (TARGET_ABI_OPEN_VMS
8173 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
8175 fputs ("\t.ent ", file);
8176 assemble_name (file, fnname);
8177 putc ('\n', file);
8179 /* If the function needs GP, we'll write the "..ng" label there.
8180 Otherwise, do it here. */
8181 if (TARGET_ABI_OSF
8182 && ! alpha_function_needs_gp
8183 && ! cfun->is_thunk)
8185 putc ('$', file);
8186 assemble_name (file, fnname);
8187 fputs ("..ng:\n", file);
8190 /* Nested functions on VMS that are potentially called via trampoline
8191    get a special transfer entry point that loads the called function's
8192 procedure descriptor and static chain. */
8193 if (TARGET_ABI_OPEN_VMS
8194 && !TREE_PUBLIC (decl)
8195 && DECL_CONTEXT (decl)
8196 && !TYPE_P (DECL_CONTEXT (decl)))
8198 strcpy (tramp_label, fnname);
8199 strcat (tramp_label, "..tr");
8200 ASM_OUTPUT_LABEL (file, tramp_label);
8201 fprintf (file, "\tldq $1,24($27)\n");
8202 fprintf (file, "\tldq $27,16($27)\n");
8205 strcpy (entry_label, fnname);
8206 if (TARGET_ABI_OPEN_VMS)
8207 strcat (entry_label, "..en");
8209 /* For public functions, the label must be globalized by appending an
8210 additional colon. */
8211 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
8212 strcat (entry_label, ":");
8214 ASM_OUTPUT_LABEL (file, entry_label);
8215 inside_function = TRUE;
8217 if (TARGET_ABI_OPEN_VMS)
8218 fprintf (file, "\t.base $%d\n", vms_base_regno);
8220 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
8221 && !flag_inhibit_size_directive)
8223 /* Set flags in procedure descriptor to request IEEE-conformant
8224 math-library routines. The value we set it to is PDSC_EXC_IEEE
8225 (/usr/include/pdsc.h). */
8226 fputs ("\t.eflag 48\n", file);
8229 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8230 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8231 alpha_arg_offset = -frame_size + 48;
8233  /* Describe our frame.  If the frame size is larger than the assembler
8234     can handle, print it as zero to avoid an assembler error.  We won't
8235     be properly describing such a frame, but that's the best we can do. */
8236 if (TARGET_ABI_UNICOSMK)
8238 else if (TARGET_ABI_OPEN_VMS)
8239 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8240 HOST_WIDE_INT_PRINT_DEC "\n",
8241 vms_unwind_regno,
8242 frame_size >= (1UL << 31) ? 0 : frame_size,
8243 reg_offset);
8244 else if (!flag_inhibit_size_directive)
8245 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8246 (frame_pointer_needed
8247 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8248 frame_size >= max_frame_size ? 0 : frame_size,
8249 crtl->args.pretend_args_size);
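  /* For illustration only (the numbers are hypothetical, not taken from
     real compiler output): an OSF function with a 96-byte frame, no frame
     pointer and no pretend args would get

         .frame $30,96,$26,0

     i.e. the frame is addressed off $sp ($30), $26 holds the return
     address, and no bytes were pushed for anonymous arguments.  */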
8251 /* Describe which registers were spilled. */
8252 if (TARGET_ABI_UNICOSMK)
8254 else if (TARGET_ABI_OPEN_VMS)
8256 if (imask)
8257 /* ??? Does VMS care if mask contains ra? The old code didn't
8258 set it, so I don't here. */
8259 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8260 if (fmask)
8261 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8262 if (alpha_procedure_type == PT_REGISTER)
8263 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8265 else if (!flag_inhibit_size_directive)
8267 if (imask)
8269 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8270 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8272 for (i = 0; i < 32; ++i)
8273 if (imask & (1UL << i))
8274 reg_offset += 8;
8277 if (fmask)
8278 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8279 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8282 #if TARGET_ABI_OPEN_VMS
8283 /* If a user condition handler has been installed at some point, emit
8284 the procedure descriptor bits to point the Condition Handling Facility
8285 at the indirection wrapper, and state the fp offset at which the user
8286 handler may be found. */
8287 if (cfun->machine->uses_condition_handler)
8289 fprintf (file, "\t.handler __gcc_shell_handler\n");
8290 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8293  /* Ifdef'ed because link_section is only available then.  */
8294 switch_to_section (readonly_data_section);
8295 fprintf (file, "\t.align 3\n");
8296 assemble_name (file, fnname); fputs ("..na:\n", file);
8297 fputs ("\t.ascii \"", file);
8298 assemble_name (file, fnname);
8299 fputs ("\\0\"\n", file);
8300 alpha_need_linkage (fnname, 1);
8301 switch_to_section (text_section);
8302 #endif
8305 /* Emit the .prologue note at the scheduled end of the prologue. */
8307 static void
8308 alpha_output_function_end_prologue (FILE *file)
8310 if (TARGET_ABI_UNICOSMK)
8312 else if (TARGET_ABI_OPEN_VMS)
8313 fputs ("\t.prologue\n", file);
8314 else if (TARGET_ABI_WINDOWS_NT)
8315 fputs ("\t.prologue 0\n", file);
8316 else if (!flag_inhibit_size_directive)
8317 fprintf (file, "\t.prologue %d\n",
8318 alpha_function_needs_gp || cfun->is_thunk);
8321 /* Write function epilogue. */
8323 void
8324 alpha_expand_epilogue (void)
8326 /* Registers to save. */
8327 unsigned long imask = 0;
8328 unsigned long fmask = 0;
8329 /* Stack space needed for pushing registers clobbered by us. */
8330 HOST_WIDE_INT sa_size;
8331 /* Complete stack size needed. */
8332 HOST_WIDE_INT frame_size;
8333 /* Offset from base reg to register save area. */
8334 HOST_WIDE_INT reg_offset;
8335 int fp_is_frame_pointer, fp_offset;
8336 rtx sa_reg, sa_reg_exp = NULL;
8337 rtx sp_adj1, sp_adj2, mem, reg, insn;
8338 rtx eh_ofs;
8339 rtx cfa_restores = NULL_RTX;
8340 int i;
8342 sa_size = alpha_sa_size ();
8343 frame_size = compute_frame_size (get_frame_size (), sa_size);
8345 if (TARGET_ABI_OPEN_VMS)
8347 if (alpha_procedure_type == PT_STACK)
8348 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8349 else
8350 reg_offset = 0;
8352 else
8353 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8355 alpha_sa_mask (&imask, &fmask);
8357 fp_is_frame_pointer
8358 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8359 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8360 fp_offset = 0;
8361 sa_reg = stack_pointer_rtx;
8363 if (crtl->calls_eh_return)
8364 eh_ofs = EH_RETURN_STACKADJ_RTX;
8365 else
8366 eh_ofs = NULL_RTX;
8368 if (!TARGET_ABI_UNICOSMK && sa_size)
8370 /* If we have a frame pointer, restore SP from it. */
8371 if ((TARGET_ABI_OPEN_VMS
8372 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8373 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8374 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8376 /* Cope with very large offsets to the register save area. */
8377 if (reg_offset + sa_size > 0x8000)
8379 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8380 HOST_WIDE_INT bias;
8382 if (low + sa_size <= 0x8000)
8383 bias = reg_offset - low, reg_offset = low;
8384 else
8385 bias = reg_offset, reg_offset = 0;
8387 sa_reg = gen_rtx_REG (DImode, 22);
8388 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8390 emit_move_insn (sa_reg, sa_reg_exp);
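      /* A worked example with made-up numbers: if reg_offset is 0x12340 and
	 sa_size is 0x40, the sum exceeds 0x8000, so low becomes 0x2340 (the
	 sign-extended bottom 16 bits), low + sa_size still fits in a 16-bit
	 displacement, and bias is set to 0x10000.  $22 then holds
	 sp + 0x10000 and every restore below uses a small displacement
	 from $22.  */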
8393 /* Restore registers in order, excepting a true frame pointer. */
8395 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8396 if (! eh_ofs)
8397 set_mem_alias_set (mem, alpha_sr_alias_set);
8398 reg = gen_rtx_REG (DImode, REG_RA);
8399 emit_move_insn (reg, mem);
8400 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8402 reg_offset += 8;
8403 imask &= ~(1UL << REG_RA);
8405 for (i = 0; i < 31; ++i)
8406 if (imask & (1UL << i))
8408 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8409 fp_offset = reg_offset;
8410 else
8412 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8413 set_mem_alias_set (mem, alpha_sr_alias_set);
8414 reg = gen_rtx_REG (DImode, i);
8415 emit_move_insn (reg, mem);
8416 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8417 cfa_restores);
8419 reg_offset += 8;
8422 for (i = 0; i < 31; ++i)
8423 if (fmask & (1UL << i))
8425 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8426 set_mem_alias_set (mem, alpha_sr_alias_set);
8427 reg = gen_rtx_REG (DFmode, i+32);
8428 emit_move_insn (reg, mem);
8429 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8430 reg_offset += 8;
8433 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8435 /* Restore callee-saved general-purpose registers. */
8437 reg_offset = -56;
8439 for (i = 9; i < 15; i++)
8440 if (imask & (1UL << i))
8442 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8443 reg_offset));
8444 set_mem_alias_set (mem, alpha_sr_alias_set);
8445 reg = gen_rtx_REG (DImode, i);
8446 emit_move_insn (reg, mem);
8447 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8448 reg_offset -= 8;
8451 for (i = 2; i < 10; i++)
8452 if (fmask & (1UL << i))
8454 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8455 reg_offset));
8456 set_mem_alias_set (mem, alpha_sr_alias_set);
8457 reg = gen_rtx_REG (DFmode, i+32);
8458 emit_move_insn (reg, mem);
8459 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8460 reg_offset -= 8;
8463 /* Restore the return address from the DSIB. */
8464 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
8465 set_mem_alias_set (mem, alpha_sr_alias_set);
8466 reg = gen_rtx_REG (DImode, REG_RA);
8467 emit_move_insn (reg, mem);
8468 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8471 if (frame_size || eh_ofs)
8473 sp_adj1 = stack_pointer_rtx;
8475 if (eh_ofs)
8477 sp_adj1 = gen_rtx_REG (DImode, 23);
8478 emit_move_insn (sp_adj1,
8479 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8482 /* If the stack size is large, begin computation into a temporary
8483 register so as not to interfere with a potential fp restore,
8484 which must be consecutive with an SP restore. */
8485 if (frame_size < 32768
8486 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8487 sp_adj2 = GEN_INT (frame_size);
8488 else if (TARGET_ABI_UNICOSMK)
8490 sp_adj1 = gen_rtx_REG (DImode, 23);
8491 emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
8492 sp_adj2 = const0_rtx;
8494 else if (frame_size < 0x40007fffL)
8496 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8498 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8499 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8500 sp_adj1 = sa_reg;
8501 else
8503 sp_adj1 = gen_rtx_REG (DImode, 23);
8504 emit_move_insn (sp_adj1, sp_adj2);
8506 sp_adj2 = GEN_INT (low);
8508 else
8510 rtx tmp = gen_rtx_REG (DImode, 23);
8511 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8512 if (!sp_adj2)
8514 /* We can't drop new things to memory this late, afaik,
8515 so build it up by pieces. */
8516 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8517 -(frame_size < 0));
8518 gcc_assert (sp_adj2);
8522 /* From now on, things must be in order. So emit blockages. */
8524 /* Restore the frame pointer. */
8525 if (TARGET_ABI_UNICOSMK)
8527 emit_insn (gen_blockage ());
8528 mem = gen_rtx_MEM (DImode,
8529 plus_constant (hard_frame_pointer_rtx, -16));
8530 set_mem_alias_set (mem, alpha_sr_alias_set);
8531 emit_move_insn (hard_frame_pointer_rtx, mem);
8532 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8533 hard_frame_pointer_rtx, cfa_restores);
8535 else if (fp_is_frame_pointer)
8537 emit_insn (gen_blockage ());
8538 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8539 set_mem_alias_set (mem, alpha_sr_alias_set);
8540 emit_move_insn (hard_frame_pointer_rtx, mem);
8541 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8542 hard_frame_pointer_rtx, cfa_restores);
8544 else if (TARGET_ABI_OPEN_VMS)
8546 emit_insn (gen_blockage ());
8547 emit_move_insn (hard_frame_pointer_rtx,
8548 gen_rtx_REG (DImode, vms_save_fp_regno));
8549 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8550 hard_frame_pointer_rtx, cfa_restores);
8553 /* Restore the stack pointer. */
8554 emit_insn (gen_blockage ());
8555 if (sp_adj2 == const0_rtx)
8556 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8557 else
8558 insn = emit_move_insn (stack_pointer_rtx,
8559 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8560 REG_NOTES (insn) = cfa_restores;
8561 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8562 RTX_FRAME_RELATED_P (insn) = 1;
8564 else
8566 gcc_assert (cfa_restores == NULL);
8568 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8570 emit_insn (gen_blockage ());
8571 insn = emit_move_insn (hard_frame_pointer_rtx,
8572 gen_rtx_REG (DImode, vms_save_fp_regno));
8573 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8574 RTX_FRAME_RELATED_P (insn) = 1;
8576 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8578 /* Decrement the frame pointer if the function does not have a
8579 frame. */
8580 emit_insn (gen_blockage ());
8581 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8582 hard_frame_pointer_rtx, constm1_rtx));
8587 /* Output the rest of the textual info surrounding the epilogue. */
8589 void
8590 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8592 rtx insn;
8594 /* We output a nop after noreturn calls at the very end of the function to
8595 ensure that the return address always remains in the caller's code range,
8596 as not doing so might confuse unwinding engines. */
8597 insn = get_last_insn ();
8598 if (!INSN_P (insn))
8599 insn = prev_active_insn (insn);
8600 if (insn && CALL_P (insn))
8601 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8603 #if TARGET_ABI_OPEN_VMS
8604 alpha_write_linkage (file, fnname, decl);
8605 #endif
8607 /* End the function. */
8608 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8610 fputs ("\t.end ", file);
8611 assemble_name (file, fnname);
8612 putc ('\n', file);
8614 inside_function = FALSE;
8616 /* Output jump tables and the static subroutine information block. */
8617 if (TARGET_ABI_UNICOSMK)
8619 unicosmk_output_ssib (file, fnname);
8620 unicosmk_output_deferred_case_vectors (file);
8624 #if TARGET_ABI_OPEN_VMS
8625 void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
8627 #ifdef DO_CRTL_NAMES
8628 DO_CRTL_NAMES;
8629 #endif
8631 #endif
8633 #if TARGET_ABI_OSF
8634 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8636 In order to avoid the hordes of differences between generated code
8637 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8638 lots of code loading up large constants, generate rtl and emit it
8639 instead of going straight to text.
8641 Not sure why this idea hasn't been explored before... */
8643 static void
8644 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8645 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8646 tree function)
8648 HOST_WIDE_INT hi, lo;
8649 rtx this_rtx, insn, funexp;
8651 /* We always require a valid GP. */
8652 emit_insn (gen_prologue_ldgp ());
8653 emit_note (NOTE_INSN_PROLOGUE_END);
8655 /* Find the "this" pointer. If the function returns a structure,
8656 the structure return pointer is in $16. */
8657 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8658 this_rtx = gen_rtx_REG (Pmode, 17);
8659 else
8660 this_rtx = gen_rtx_REG (Pmode, 16);
8662 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8663 entire constant for the add. */
8664 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8665 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8666 if (hi + lo == delta)
8668 if (hi)
8669 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8670 if (lo)
8671 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8673 else
8675 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8676 delta, -(delta < 0));
8677 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
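  /* Illustrative arithmetic (the delta value is invented): for
     delta = 0x12348, lo sign-extends the bottom 16 bits to 0x2348 and hi
     becomes 0x10000, so hi + lo == delta and the addition can be done as
     an ldah adding 1 << 16 followed by an lda adding 0x2348.  Only when
     the split cannot reproduce delta do we fall back to building the full
     constant in $0.  */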
8680 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8681 if (vcall_offset)
8683 rtx tmp, tmp2;
8685 tmp = gen_rtx_REG (Pmode, 0);
8686 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8688 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8689 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8690 if (hi + lo == vcall_offset)
8692 if (hi)
8693 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8695 else
8697 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8698 vcall_offset, -(vcall_offset < 0));
8699 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8700 lo = 0;
8702 if (lo)
8703 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8704 else
8705 tmp2 = tmp;
8706 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8708 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8711 /* Generate a tail call to the target function. */
8712 if (! TREE_USED (function))
8714 assemble_external (function);
8715 TREE_USED (function) = 1;
8717 funexp = XEXP (DECL_RTL (function), 0);
8718 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8719 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8720 SIBLING_CALL_P (insn) = 1;
8722 /* Run just enough of rest_of_compilation to get the insns emitted.
8723 There's not really enough bulk here to make other passes such as
8724     instruction scheduling worthwhile.  Note that use_thunk calls
8725 assemble_start_function and assemble_end_function. */
8726 insn = get_insns ();
8727 insn_locators_alloc ();
8728 shorten_branches (insn);
8729 final_start_function (insn, file, 1);
8730 final (insn, file, 1);
8731 final_end_function ();
8733 #endif /* TARGET_ABI_OSF */
8735 /* Debugging support. */
8737 #include "gstab.h"
8739 /* Count the number of sdb-related labels generated (to find block
8740    start and end boundaries).  */
8742 int sdb_label_count = 0;
8744 /* Name of the file containing the current function. */
8746 static const char *current_function_file = "";
8748 /* Offsets to alpha virtual arg/local debugging pointers. */
8750 long alpha_arg_offset;
8751 long alpha_auto_offset;
8753 /* Emit a new filename to a stream. */
8755 void
8756 alpha_output_filename (FILE *stream, const char *name)
8758 static int first_time = TRUE;
8760 if (first_time)
8762 first_time = FALSE;
8763 ++num_source_filenames;
8764 current_function_file = name;
8765 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8766 output_quoted_string (stream, name);
8767 fprintf (stream, "\n");
8768 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8769 fprintf (stream, "\t#@stabs\n");
8772 else if (write_symbols == DBX_DEBUG)
8773 /* dbxout.c will emit an appropriate .stabs directive. */
8774 return;
8776 else if (name != current_function_file
8777 && strcmp (name, current_function_file) != 0)
8779 if (inside_function && ! TARGET_GAS)
8780 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8781 else
8783 ++num_source_filenames;
8784 current_function_file = name;
8785 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8788 output_quoted_string (stream, name);
8789 fprintf (stream, "\n");
8793 /* Structure to show the current status of registers and memory. */
8795 struct shadow_summary
8797 struct {
8798 unsigned int i : 31; /* Mask of int regs */
8799 unsigned int fp : 31; /* Mask of fp regs */
8800 unsigned int mem : 1; /* mem == imem | fpmem */
8801 } used, defd;
8804 /* Summarize the effects of expression X on the machine.  Update SUM, a pointer
8805 to the summary structure. SET is nonzero if the insn is setting the
8806 object, otherwise zero. */
8808 static void
8809 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8811 const char *format_ptr;
8812 int i, j;
8814 if (x == 0)
8815 return;
8817 switch (GET_CODE (x))
8819 /* ??? Note that this case would be incorrect if the Alpha had a
8820 ZERO_EXTRACT in SET_DEST. */
8821 case SET:
8822 summarize_insn (SET_SRC (x), sum, 0);
8823 summarize_insn (SET_DEST (x), sum, 1);
8824 break;
8826 case CLOBBER:
8827 summarize_insn (XEXP (x, 0), sum, 1);
8828 break;
8830 case USE:
8831 summarize_insn (XEXP (x, 0), sum, 0);
8832 break;
8834 case ASM_OPERANDS:
8835 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8836 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8837 break;
8839 case PARALLEL:
8840 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8841 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8842 break;
8844 case SUBREG:
8845 summarize_insn (SUBREG_REG (x), sum, 0);
8846 break;
8848 case REG:
8850 int regno = REGNO (x);
8851 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8853 if (regno == 31 || regno == 63)
8854 break;
8856 if (set)
8858 if (regno < 32)
8859 sum->defd.i |= mask;
8860 else
8861 sum->defd.fp |= mask;
8863 else
8865 if (regno < 32)
8866 sum->used.i |= mask;
8867 else
8868 sum->used.fp |= mask;
8871 break;
8873 case MEM:
8874 if (set)
8875 sum->defd.mem = 1;
8876 else
8877 sum->used.mem = 1;
8879 /* Find the regs used in memory address computation: */
8880 summarize_insn (XEXP (x, 0), sum, 0);
8881 break;
8883 case CONST_INT: case CONST_DOUBLE:
8884 case SYMBOL_REF: case LABEL_REF: case CONST:
8885 case SCRATCH: case ASM_INPUT:
8886 break;
8888 /* Handle common unary and binary ops for efficiency. */
8889 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8890 case MOD: case UDIV: case UMOD: case AND: case IOR:
8891 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8892 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8893 case NE: case EQ: case GE: case GT: case LE:
8894 case LT: case GEU: case GTU: case LEU: case LTU:
8895 summarize_insn (XEXP (x, 0), sum, 0);
8896 summarize_insn (XEXP (x, 1), sum, 0);
8897 break;
8899 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8900 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8901 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8902 case SQRT: case FFS:
8903 summarize_insn (XEXP (x, 0), sum, 0);
8904 break;
8906 default:
8907 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8908 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8909 switch (format_ptr[i])
8911 case 'e':
8912 summarize_insn (XEXP (x, i), sum, 0);
8913 break;
8915 case 'E':
8916 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8917 summarize_insn (XVECEXP (x, i, j), sum, 0);
8918 break;
8920 case 'i':
8921 break;
8923 default:
8924 gcc_unreachable ();
8929 /* Ensure a sufficient number of `trapb' insns are in the code when
8930 the user requests code with a trap precision of functions or
8931 instructions.
8933 In naive mode, when the user requests a trap-precision of
8934 "instruction", a trapb is needed after every instruction that may
8935 generate a trap. This ensures that the code is resumption safe but
8936 it is also slow.
8938 When optimizations are turned on, we delay issuing a trapb as long
8939 as possible. In this context, a trap shadow is the sequence of
8940 instructions that starts with a (potentially) trap generating
8941 instruction and extends to the next trapb or call_pal instruction
8942 (but GCC never generates call_pal by itself). We can delay (and
8943 therefore sometimes omit) a trapb subject to the following
8944 conditions:
8946 (a) On entry to the trap shadow, if any Alpha register or memory
8947 location contains a value that is used as an operand value by some
8948 instruction in the trap shadow (live on entry), then no instruction
8949 in the trap shadow may modify the register or memory location.
8951 (b) Within the trap shadow, the computation of the base register
8952 for a memory load or store instruction may not involve using the
8953 result of an instruction that might generate an UNPREDICTABLE
8954 result.
8956 (c) Within the trap shadow, no register may be used more than once
8957 as a destination register. (This is to make life easier for the
8958 trap-handler.)
8960 (d) The trap shadow may not include any branch instructions. */
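/* A sketch of the idea (the instruction sequence is illustrative, not
   taken from real output): with -mtrap-precision=i and optimization on,

       divt $f1,$f2,$f3     # may trap; opens the shadow
       addt $f3,$f10,$f11   # ok: new destination, inputs preserved
       mult $f4,$f5,$f11    # would violate (c): $f11 written twice,
                            # so a trapb must be emitted before it

   Overwriting $f1 or $f2 inside the shadow would likewise violate (a).  */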
8962 static void
8963 alpha_handle_trap_shadows (void)
8965 struct shadow_summary shadow;
8966 int trap_pending, exception_nesting;
8967 rtx i, n;
8969 trap_pending = 0;
8970 exception_nesting = 0;
8971 shadow.used.i = 0;
8972 shadow.used.fp = 0;
8973 shadow.used.mem = 0;
8974 shadow.defd = shadow.used;
8976 for (i = get_insns (); i ; i = NEXT_INSN (i))
8978 if (NOTE_P (i))
8980 switch (NOTE_KIND (i))
8982 case NOTE_INSN_EH_REGION_BEG:
8983 exception_nesting++;
8984 if (trap_pending)
8985 goto close_shadow;
8986 break;
8988 case NOTE_INSN_EH_REGION_END:
8989 exception_nesting--;
8990 if (trap_pending)
8991 goto close_shadow;
8992 break;
8994 case NOTE_INSN_EPILOGUE_BEG:
8995 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8996 goto close_shadow;
8997 break;
9000 else if (trap_pending)
9002 if (alpha_tp == ALPHA_TP_FUNC)
9004 if (JUMP_P (i)
9005 && GET_CODE (PATTERN (i)) == RETURN)
9006 goto close_shadow;
9008 else if (alpha_tp == ALPHA_TP_INSN)
9010 if (optimize > 0)
9012 struct shadow_summary sum;
9014 sum.used.i = 0;
9015 sum.used.fp = 0;
9016 sum.used.mem = 0;
9017 sum.defd = sum.used;
9019 switch (GET_CODE (i))
9021 case INSN:
9022 /* Annoyingly, get_attr_trap will die on these. */
9023 if (GET_CODE (PATTERN (i)) == USE
9024 || GET_CODE (PATTERN (i)) == CLOBBER)
9025 break;
9027 summarize_insn (PATTERN (i), &sum, 0);
9029 if ((sum.defd.i & shadow.defd.i)
9030 || (sum.defd.fp & shadow.defd.fp))
9032 /* (c) would be violated */
9033 goto close_shadow;
9036 /* Combine shadow with summary of current insn: */
9037 shadow.used.i |= sum.used.i;
9038 shadow.used.fp |= sum.used.fp;
9039 shadow.used.mem |= sum.used.mem;
9040 shadow.defd.i |= sum.defd.i;
9041 shadow.defd.fp |= sum.defd.fp;
9042 shadow.defd.mem |= sum.defd.mem;
9044 if ((sum.defd.i & shadow.used.i)
9045 || (sum.defd.fp & shadow.used.fp)
9046 || (sum.defd.mem & shadow.used.mem))
9048 /* (a) would be violated (also takes care of (b)) */
9049 gcc_assert (get_attr_trap (i) != TRAP_YES
9050 || (!(sum.defd.i & sum.used.i)
9051 && !(sum.defd.fp & sum.used.fp)));
9053 goto close_shadow;
9055 break;
9057 case JUMP_INSN:
9058 case CALL_INSN:
9059 case CODE_LABEL:
9060 goto close_shadow;
9062 default:
9063 gcc_unreachable ();
9066 else
9068 close_shadow:
9069 n = emit_insn_before (gen_trapb (), i);
9070 PUT_MODE (n, TImode);
9071 PUT_MODE (i, TImode);
9072 trap_pending = 0;
9073 shadow.used.i = 0;
9074 shadow.used.fp = 0;
9075 shadow.used.mem = 0;
9076 shadow.defd = shadow.used;
9081 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
9082 && NONJUMP_INSN_P (i)
9083 && GET_CODE (PATTERN (i)) != USE
9084 && GET_CODE (PATTERN (i)) != CLOBBER
9085 && get_attr_trap (i) == TRAP_YES)
9087 if (optimize && !trap_pending)
9088 summarize_insn (PATTERN (i), &shadow, 0);
9089 trap_pending = 1;
9094 /* Alpha can only issue instruction groups simultaneously if they are
9095 suitably aligned. This is very processor-specific. */
9096 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
9097 that are marked "fake". These instructions do not exist on that target,
9098 but it is possible to see these insns with deranged combinations of
9099 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
9100 choose a result at random. */
9102 enum alphaev4_pipe {
9103 EV4_STOP = 0,
9104 EV4_IB0 = 1,
9105 EV4_IB1 = 2,
9106 EV4_IBX = 4
9109 enum alphaev5_pipe {
9110 EV5_STOP = 0,
9111 EV5_NONE = 1,
9112 EV5_E01 = 2,
9113 EV5_E0 = 4,
9114 EV5_E1 = 8,
9115 EV5_FAM = 16,
9116 EV5_FA = 32,
9117 EV5_FM = 64
9120 static enum alphaev4_pipe
9121 alphaev4_insn_pipe (rtx insn)
9123 if (recog_memoized (insn) < 0)
9124 return EV4_STOP;
9125 if (get_attr_length (insn) != 4)
9126 return EV4_STOP;
9128 switch (get_attr_type (insn))
9130 case TYPE_ILD:
9131 case TYPE_LDSYM:
9132 case TYPE_FLD:
9133 case TYPE_LD_L:
9134 return EV4_IBX;
9136 case TYPE_IADD:
9137 case TYPE_ILOG:
9138 case TYPE_ICMOV:
9139 case TYPE_ICMP:
9140 case TYPE_FST:
9141 case TYPE_SHIFT:
9142 case TYPE_IMUL:
9143 case TYPE_FBR:
9144 case TYPE_MVI: /* fake */
9145 return EV4_IB0;
9147 case TYPE_IST:
9148 case TYPE_MISC:
9149 case TYPE_IBR:
9150 case TYPE_JSR:
9151 case TYPE_CALLPAL:
9152 case TYPE_FCPYS:
9153 case TYPE_FCMOV:
9154 case TYPE_FADD:
9155 case TYPE_FDIV:
9156 case TYPE_FMUL:
9157 case TYPE_ST_C:
9158 case TYPE_MB:
9159 case TYPE_FSQRT: /* fake */
9160 case TYPE_FTOI: /* fake */
9161 case TYPE_ITOF: /* fake */
9162 return EV4_IB1;
9164 default:
9165 gcc_unreachable ();
9169 static enum alphaev5_pipe
9170 alphaev5_insn_pipe (rtx insn)
9172 if (recog_memoized (insn) < 0)
9173 return EV5_STOP;
9174 if (get_attr_length (insn) != 4)
9175 return EV5_STOP;
9177 switch (get_attr_type (insn))
9179 case TYPE_ILD:
9180 case TYPE_FLD:
9181 case TYPE_LDSYM:
9182 case TYPE_IADD:
9183 case TYPE_ILOG:
9184 case TYPE_ICMOV:
9185 case TYPE_ICMP:
9186 return EV5_E01;
9188 case TYPE_IST:
9189 case TYPE_FST:
9190 case TYPE_SHIFT:
9191 case TYPE_IMUL:
9192 case TYPE_MISC:
9193 case TYPE_MVI:
9194 case TYPE_LD_L:
9195 case TYPE_ST_C:
9196 case TYPE_MB:
9197 case TYPE_FTOI: /* fake */
9198 case TYPE_ITOF: /* fake */
9199 return EV5_E0;
9201 case TYPE_IBR:
9202 case TYPE_JSR:
9203 case TYPE_CALLPAL:
9204 return EV5_E1;
9206 case TYPE_FCPYS:
9207 return EV5_FAM;
9209 case TYPE_FBR:
9210 case TYPE_FCMOV:
9211 case TYPE_FADD:
9212 case TYPE_FDIV:
9213 case TYPE_FSQRT: /* fake */
9214 return EV5_FA;
9216 case TYPE_FMUL:
9217 return EV5_FM;
9219 default:
9220 gcc_unreachable ();
9224 /* IN_USE is a mask of the slots currently filled within the insn group.
9225 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9226 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9228 LEN is, of course, the length of the group in bytes. */
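/* To illustrate the IBX swap with a hypothetical group: an ILD is classed
   EV4_IBX, so it sets both EV4_IB0 and EV4_IBX.  A following IADD wants
   EV4_IB0; because IBX is set and IB1 is still free, the hardware can move
   the load over to IB1, so the add is accepted and the group grows to
   8 bytes.  A second IADD then finds both slots taken and starts a new
   group.  */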
9230 static rtx
9231 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
9233 int len, in_use;
9235 len = in_use = 0;
9237 if (! INSN_P (insn)
9238 || GET_CODE (PATTERN (insn)) == CLOBBER
9239 || GET_CODE (PATTERN (insn)) == USE)
9240 goto next_and_done;
9242 while (1)
9244 enum alphaev4_pipe pipe;
9246 pipe = alphaev4_insn_pipe (insn);
9247 switch (pipe)
9249 case EV4_STOP:
9250 /* Force complex instructions to start new groups. */
9251 if (in_use)
9252 goto done;
9254 /* If this is a completely unrecognized insn, it's an asm.
9255 We don't know how long it is, so record length as -1 to
9256 signal a needed realignment. */
9257 if (recog_memoized (insn) < 0)
9258 len = -1;
9259 else
9260 len = get_attr_length (insn);
9261 goto next_and_done;
9263 case EV4_IBX:
9264 if (in_use & EV4_IB0)
9266 if (in_use & EV4_IB1)
9267 goto done;
9268 in_use |= EV4_IB1;
9270 else
9271 in_use |= EV4_IB0 | EV4_IBX;
9272 break;
9274 case EV4_IB0:
9275 if (in_use & EV4_IB0)
9277 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9278 goto done;
9279 in_use |= EV4_IB1;
9281 in_use |= EV4_IB0;
9282 break;
9284 case EV4_IB1:
9285 if (in_use & EV4_IB1)
9286 goto done;
9287 in_use |= EV4_IB1;
9288 break;
9290 default:
9291 gcc_unreachable ();
9293 len += 4;
9295 /* Haifa doesn't do well scheduling branches. */
9296 if (JUMP_P (insn))
9297 goto next_and_done;
9299 next:
9300 insn = next_nonnote_insn (insn);
9302 if (!insn || ! INSN_P (insn))
9303 goto done;
9305 /* Let Haifa tell us where it thinks insn group boundaries are. */
9306 if (GET_MODE (insn) == TImode)
9307 goto done;
9309 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9310 goto next;
9313 next_and_done:
9314 insn = next_nonnote_insn (insn);
9316 done:
9317 *plen = len;
9318 *pin_use = in_use;
9319 return insn;
9322 /* IN_USE is a mask of the slots currently filled within the insn group.
9323 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9324 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9326 LEN is, of course, the length of the group in bytes. */
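/* A hypothetical full EV5 group, for illustration: an IADD and an ICMP
   both map to EV5_E01, so the first claims E0 and the second slides into
   E1; an FADD then takes FA and an FMUL takes FM.  That is 16 bytes with
   all four issue slots used, and the next integer instruction is forced
   into a fresh group.  */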
9328 static rtx
9329 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9331 int len, in_use;
9333 len = in_use = 0;
9335 if (! INSN_P (insn)
9336 || GET_CODE (PATTERN (insn)) == CLOBBER
9337 || GET_CODE (PATTERN (insn)) == USE)
9338 goto next_and_done;
9340 while (1)
9342 enum alphaev5_pipe pipe;
9344 pipe = alphaev5_insn_pipe (insn);
9345 switch (pipe)
9347 case EV5_STOP:
9348 /* Force complex instructions to start new groups. */
9349 if (in_use)
9350 goto done;
9352 /* If this is a completely unrecognized insn, it's an asm.
9353 We don't know how long it is, so record length as -1 to
9354 signal a needed realignment. */
9355 if (recog_memoized (insn) < 0)
9356 len = -1;
9357 else
9358 len = get_attr_length (insn);
9359 goto next_and_done;
9361		  /* ??? In most of the places below, we would like to assert
9362		     that these cases never happen, as that would indicate an
9363		     error either in Haifa or in the scheduling description.
9364		     Unfortunately, Haifa never schedules the last instruction
9365		     of the BB, so we don't have an accurate TI bit to go off of.  */
9366 case EV5_E01:
9367 if (in_use & EV5_E0)
9369 if (in_use & EV5_E1)
9370 goto done;
9371 in_use |= EV5_E1;
9373 else
9374 in_use |= EV5_E0 | EV5_E01;
9375 break;
9377 case EV5_E0:
9378 if (in_use & EV5_E0)
9380 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9381 goto done;
9382 in_use |= EV5_E1;
9384 in_use |= EV5_E0;
9385 break;
9387 case EV5_E1:
9388 if (in_use & EV5_E1)
9389 goto done;
9390 in_use |= EV5_E1;
9391 break;
9393 case EV5_FAM:
9394 if (in_use & EV5_FA)
9396 if (in_use & EV5_FM)
9397 goto done;
9398 in_use |= EV5_FM;
9400 else
9401 in_use |= EV5_FA | EV5_FAM;
9402 break;
9404 case EV5_FA:
9405 if (in_use & EV5_FA)
9406 goto done;
9407 in_use |= EV5_FA;
9408 break;
9410 case EV5_FM:
9411 if (in_use & EV5_FM)
9412 goto done;
9413 in_use |= EV5_FM;
9414 break;
9416 case EV5_NONE:
9417 break;
9419 default:
9420 gcc_unreachable ();
9422 len += 4;
9424 /* Haifa doesn't do well scheduling branches. */
9425 /* ??? If this is predicted not-taken, slotting continues, except
9426 that no more IBR, FBR, or JSR insns may be slotted. */
9427 if (JUMP_P (insn))
9428 goto next_and_done;
9430 next:
9431 insn = next_nonnote_insn (insn);
9433 if (!insn || ! INSN_P (insn))
9434 goto done;
9436 /* Let Haifa tell us where it thinks insn group boundaries are. */
9437 if (GET_MODE (insn) == TImode)
9438 goto done;
9440 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9441 goto next;
9444 next_and_done:
9445 insn = next_nonnote_insn (insn);
9447 done:
9448 *plen = len;
9449 *pin_use = in_use;
9450 return insn;
9453 static rtx
9454 alphaev4_next_nop (int *pin_use)
9456 int in_use = *pin_use;
9457 rtx nop;
9459 if (!(in_use & EV4_IB0))
9461 in_use |= EV4_IB0;
9462 nop = gen_nop ();
9464 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9466 in_use |= EV4_IB1;
9467 nop = gen_nop ();
9469 else if (TARGET_FP && !(in_use & EV4_IB1))
9471 in_use |= EV4_IB1;
9472 nop = gen_fnop ();
9474 else
9475 nop = gen_unop ();
9477 *pin_use = in_use;
9478 return nop;
9481 static rtx
9482 alphaev5_next_nop (int *pin_use)
9484 int in_use = *pin_use;
9485 rtx nop;
9487 if (!(in_use & EV5_E1))
9489 in_use |= EV5_E1;
9490 nop = gen_nop ();
9492 else if (TARGET_FP && !(in_use & EV5_FA))
9494 in_use |= EV5_FA;
9495 nop = gen_fnop ();
9497 else if (TARGET_FP && !(in_use & EV5_FM))
9499 in_use |= EV5_FM;
9500 nop = gen_fnop ();
9502 else
9503 nop = gen_unop ();
9505 *pin_use = in_use;
9506 return nop;
9509 /* The instruction group alignment main loop. */
9511 static void
9512 alpha_align_insns (unsigned int max_align,
9513 rtx (*next_group) (rtx, int *, int *),
9514 rtx (*next_nop) (int *))
9516 /* ALIGN is the known alignment for the insn group. */
9517 unsigned int align;
9518 /* OFS is the offset of the current insn in the insn group. */
9519 int ofs;
9520 int prev_in_use, in_use, len, ldgp;
9521 rtx i, next;
9523   /* Let shorten_branches take care of assigning alignments to code labels.  */
9524 shorten_branches (get_insns ());
9526 if (align_functions < 4)
9527 align = 4;
9528 else if ((unsigned int) align_functions < max_align)
9529 align = align_functions;
9530 else
9531 align = max_align;
9533 ofs = prev_in_use = 0;
9534 i = get_insns ();
9535 if (NOTE_P (i))
9536 i = next_nonnote_insn (i);
9538 ldgp = alpha_function_needs_gp ? 8 : 0;
9540 while (i)
9542 next = (*next_group) (i, &in_use, &len);
9544 /* When we see a label, resync alignment etc. */
9545 if (LABEL_P (i))
9547 unsigned int new_align = 1 << label_to_alignment (i);
9549 if (new_align >= align)
9551 align = new_align < max_align ? new_align : max_align;
9552 ofs = 0;
9555 else if (ofs & (new_align-1))
9556 ofs = (ofs | (new_align-1)) + 1;
9557 gcc_assert (!len);
9560       /* Handle complex instructions specially.  */
9561 else if (in_use == 0)
9563 /* Asms will have length < 0. This is a signal that we have
9564 lost alignment knowledge. Assume, however, that the asm
9565 will not mis-align instructions. */
9566 if (len < 0)
9568 ofs = 0;
9569 align = 4;
9570 len = 0;
9574 /* If the known alignment is smaller than the recognized insn group,
9575 realign the output. */
9576 else if ((int) align < len)
9578 unsigned int new_log_align = len > 8 ? 4 : 3;
9579 rtx prev, where;
9581 where = prev = prev_nonnote_insn (i);
9582 if (!where || !LABEL_P (where))
9583 where = i;
9585 /* Can't realign between a call and its gp reload. */
9586 if (! (TARGET_EXPLICIT_RELOCS
9587 && prev && CALL_P (prev)))
9589 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9590 align = 1 << new_log_align;
9591 ofs = 0;
9595 /* We may not insert padding inside the initial ldgp sequence. */
9596 else if (ldgp > 0)
9597 ldgp -= len;
9599 /* If the group won't fit in the same INT16 as the previous,
9600 we need to add padding to keep the group together. Rather
9601 than simply leaving the insn filling to the assembler, we
9602 can make use of the knowledge of what sorts of instructions
9603 were issued in the previous group to make sure that all of
9604 the added nops are really free. */
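      /* For example (numbers invented): with align == 16, a group of len 8
	 starting at ofs 12 would straddle the 16-byte boundary, so
	 (align - ofs) / 4 == 1 nop is emitted first, chosen by *next_nop so
	 that it fills an issue slot the previous group left empty, and the
	 new group then starts at offset 0.  */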
9605 else if (ofs + len > (int) align)
9607 int nop_count = (align - ofs) / 4;
9608 rtx where;
9610 /* Insert nops before labels, branches, and calls to truly merge
9611 the execution of the nops with the previous instruction group. */
9612 where = prev_nonnote_insn (i);
9613 if (where)
9615 if (LABEL_P (where))
9617 rtx where2 = prev_nonnote_insn (where);
9618 if (where2 && JUMP_P (where2))
9619 where = where2;
9621 else if (NONJUMP_INSN_P (where))
9622 where = i;
9624 else
9625 where = i;
9628 emit_insn_before ((*next_nop)(&prev_in_use), where);
9629 while (--nop_count);
9630 ofs = 0;
9633 ofs = (ofs + len) & (align - 1);
9634 prev_in_use = in_use;
9635 i = next;
9639 /* Insert an unop between a noreturn function call and GP load. */
9641 static void
9642 alpha_pad_noreturn (void)
9644 rtx insn, next;
9646 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9648 if (!CALL_P (insn)
9649 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9650 continue;
9652 next = next_active_insn (insn);
9654 if (next)
9656 rtx pat = PATTERN (next);
9658 if (GET_CODE (pat) == SET
9659 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9660 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9661 emit_insn_after (gen_unop (), insn);
9666 /* Machine dependent reorg pass. */
9668 static void
9669 alpha_reorg (void)
9671 /* Workaround for a linker error that triggers when an
9672      exception handler immediately follows a noreturn function.
9674 The instruction stream from an object file:
9676 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9677 58: 00 00 ba 27 ldah gp,0(ra)
9678 5c: 00 00 bd 23 lda gp,0(gp)
9679 60: 00 00 7d a7 ldq t12,0(gp)
9680 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9682 was converted in the final link pass to:
9684 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9685 fdb28: 00 00 fe 2f unop
9686 fdb2c: 00 00 fe 2f unop
9687 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9688 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9690 GP load instructions were wrongly cleared by the linker relaxation
9691 pass. This workaround prevents removal of GP loads by inserting
9692 an unop instruction between a noreturn function call and
9693 exception handler prologue. */
9695 if (current_function_has_exception_handlers ())
9696 alpha_pad_noreturn ();
9698 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9699 alpha_handle_trap_shadows ();
9701 /* Due to the number of extra trapb insns, don't bother fixing up
9702 alignment when trap precision is instruction. Moreover, we can
9703 only do our job when sched2 is run. */
9704 if (optimize && !optimize_size
9705 && alpha_tp != ALPHA_TP_INSN
9706 && flag_schedule_insns_after_reload)
9708 if (alpha_tune == PROCESSOR_EV4)
9709 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9710 else if (alpha_tune == PROCESSOR_EV5)
9711 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9715 #if !TARGET_ABI_UNICOSMK
9717 #ifdef HAVE_STAMP_H
9718 #include <stamp.h>
9719 #endif
9721 static void
9722 alpha_file_start (void)
9724 #ifdef OBJECT_FORMAT_ELF
9725 /* If emitting dwarf2 debug information, we cannot generate a .file
9726 directive to start the file, as it will conflict with dwarf2out
9727 file numbers. So it's only useful when emitting mdebug output. */
9728 targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9729 #endif
9731 default_file_start ();
9732 #ifdef MS_STAMP
9733 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9734 #endif
9736 fputs ("\t.set noreorder\n", asm_out_file);
9737 fputs ("\t.set volatile\n", asm_out_file);
9738 if (!TARGET_ABI_OPEN_VMS)
9739 fputs ("\t.set noat\n", asm_out_file);
9740 if (TARGET_EXPLICIT_RELOCS)
9741 fputs ("\t.set nomacro\n", asm_out_file);
9742 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9744 const char *arch;
9746 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9747 arch = "ev6";
9748 else if (TARGET_MAX)
9749 arch = "pca56";
9750 else if (TARGET_BWX)
9751 arch = "ev56";
9752 else if (alpha_cpu == PROCESSOR_EV5)
9753 arch = "ev5";
9754 else
9755 arch = "ev4";
9757 fprintf (asm_out_file, "\t.arch %s\n", arch);
9760 #endif
9762 #ifdef OBJECT_FORMAT_ELF
9763 /* Since we don't have a .dynbss section, we should not allow global
9764 relocations in the .rodata section. */
9766 static int
9767 alpha_elf_reloc_rw_mask (void)
9769 return flag_pic ? 3 : 2;
9772 /* Return a section for X. The only special thing we do here is to
9773 honor small data. */
9775 static section *
9776 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9777 unsigned HOST_WIDE_INT align)
9779 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9780 /* ??? Consider using mergeable sdata sections. */
9781 return sdata_section;
9782 else
9783 return default_elf_select_rtx_section (mode, x, align);
9786 static unsigned int
9787 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9789 unsigned int flags = 0;
9791 if (strcmp (name, ".sdata") == 0
9792 || strncmp (name, ".sdata.", 7) == 0
9793 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9794 || strcmp (name, ".sbss") == 0
9795 || strncmp (name, ".sbss.", 6) == 0
9796 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9797 flags = SECTION_SMALL;
9799 flags |= default_section_type_flags (decl, name, reloc);
9800 return flags;
9802 #endif /* OBJECT_FORMAT_ELF */
9804 /* Structure to collect function names for final output in link section. */
9805 /* Note that items marked with GTY can't be ifdef'ed out. */
9807 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9808 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9810 struct GTY(()) alpha_links
9812 int num;
9813 const char *target;
9814 rtx linkage;
9815 enum links_kind lkind;
9816 enum reloc_kind rkind;
9819 struct GTY(()) alpha_funcs
9821 int num;
9822 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9823 links;
9826 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9827 splay_tree alpha_links_tree;
9828 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9829 splay_tree alpha_funcs_tree;
9831 static GTY(()) int alpha_funcs_num;
9833 #if TARGET_ABI_OPEN_VMS
9835 /* Return the VMS argument type corresponding to MODE. */
9837 enum avms_arg_type
9838 alpha_arg_type (enum machine_mode mode)
9840 switch (mode)
9842 case SFmode:
9843 return TARGET_FLOAT_VAX ? FF : FS;
9844 case DFmode:
9845 return TARGET_FLOAT_VAX ? FD : FT;
9846 default:
9847 return I64;
9851 /* Return an rtx for an integer representing the VMS Argument Information
9852 register value. */
9855 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9857 unsigned HOST_WIDE_INT regval = cum.num_args;
9858 int i;
9860 for (i = 0; i < 6; i++)
9861 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9863 return GEN_INT (regval);
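/* As an illustration (the argument list is invented): for a call passing
   two arguments, the low bits of the AI register hold the count 2, the
   3-bit type code of the first argument sits at bits 8-10 and that of the
   second at bits 11-13; up to six such type fields are packed this way.  */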
9866 /* Register the need for a (fake) .linkage entry for calls to function NAME.
9867 IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
9868 Return a SYMBOL_REF suited to the call instruction. */
9871 alpha_need_linkage (const char *name, int is_local)
9873 splay_tree_node node;
9874 struct alpha_links *al;
9875 const char *target;
9876 tree id;
9878 if (name[0] == '*')
9879 name++;
9881 if (is_local)
9883 struct alpha_funcs *cfaf;
9885 if (!alpha_funcs_tree)
9886 alpha_funcs_tree = splay_tree_new_ggc
9887 (splay_tree_compare_pointers,
9888 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9889 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9892 cfaf = ggc_alloc_alpha_funcs ();
9894 cfaf->links = 0;
9895 cfaf->num = ++alpha_funcs_num;
9897 splay_tree_insert (alpha_funcs_tree,
9898 (splay_tree_key) current_function_decl,
9899 (splay_tree_value) cfaf);
9902 if (alpha_links_tree)
9904 /* Is this name already defined? */
9906 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9907 if (node)
9909 al = (struct alpha_links *) node->value;
9910 if (is_local)
9912 /* Defined here but external assumed. */
9913 if (al->lkind == KIND_EXTERN)
9914 al->lkind = KIND_LOCAL;
9916 else
9918 /* Used here but unused assumed. */
9919 if (al->lkind == KIND_UNUSED)
9920 al->lkind = KIND_LOCAL;
9922 return al->linkage;
9925 else
9926 alpha_links_tree = splay_tree_new_ggc
9927 ((splay_tree_compare_fn) strcmp,
9928 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9929 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9931 al = ggc_alloc_alpha_links ();
9932 name = ggc_strdup (name);
9934 /* Assume external if no definition. */
9935 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9937 /* Ensure we have an IDENTIFIER so assemble_name can mark it used
9938 and find the ultimate alias target like assemble_name. */
9939 id = get_identifier (name);
9940 target = NULL;
9941 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9943 id = TREE_CHAIN (id);
9944 target = IDENTIFIER_POINTER (id);
9947 al->target = target ? target : name;
9948 al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
9950 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9951 (splay_tree_value) al);
9953 return al->linkage;
9956 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9957 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9958 this is the reference to the linkage pointer value, 0 if this is the
9959    reference to the function entry value.  RFLAG is 1 if this is a reduced
9960 reference (code address only), 0 if this is a full reference. */
9963 alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
9965 splay_tree_node cfunnode;
9966 struct alpha_funcs *cfaf;
9967 struct alpha_links *al;
9968 const char *name = XSTR (func, 0);
9970 cfaf = (struct alpha_funcs *) 0;
9971 al = (struct alpha_links *) 0;
9973 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9974 cfaf = (struct alpha_funcs *) cfunnode->value;
9976 if (cfaf->links)
9978 splay_tree_node lnode;
9980 /* Is this name already defined? */
9982 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9983 if (lnode)
9984 al = (struct alpha_links *) lnode->value;
9986 else
9987 cfaf->links = splay_tree_new_ggc
9988 ((splay_tree_compare_fn) strcmp,
9989 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9990 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9992 if (!al)
9994 size_t name_len;
9995 size_t buflen;
9996 char *linksym;
9997 splay_tree_node node = 0;
9998 struct alpha_links *anl;
10000 if (name[0] == '*')
10001 name++;
10003 name_len = strlen (name);
10004 linksym = (char *) alloca (name_len + 50);
10006 al = ggc_alloc_alpha_links ();
10007 al->num = cfaf->num;
10008 al->target = NULL;
10010 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
10011 if (node)
10013 anl = (struct alpha_links *) node->value;
10014 al->lkind = anl->lkind;
10015 name = anl->target;
10018 sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
10019 buflen = strlen (linksym);
10021 al->linkage = gen_rtx_SYMBOL_REF
10022 (Pmode, ggc_alloc_string (linksym, buflen + 1));
10024 splay_tree_insert (cfaf->links, (splay_tree_key) name,
10025 (splay_tree_value) al);
10028 if (rflag)
10029 al->rkind = KIND_CODEADDR;
10030 else
10031 al->rkind = KIND_LINKAGE;
10033 if (lflag)
10034 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
10035 else
10036 return al->linkage;
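/* An invented example: a call to foo made from the third function seen in
   this unit gets the linkage symbol $3..foo..lk.  With LFLAG set the
   reference is to that symbol plus 8, i.e. the second quadword of the
   linkage pair written out by alpha_write_one_linkage below; with RFLAG
   set only a code address is requested for it.  */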
10039 static int
10040 alpha_write_one_linkage (splay_tree_node node, void *data)
10042 const char *const name = (const char *) node->key;
10043 struct alpha_links *link = (struct alpha_links *) node->value;
10044 FILE *stream = (FILE *) data;
10046 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
10047 if (link->rkind == KIND_CODEADDR)
10049 if (link->lkind == KIND_LOCAL)
10051 /* Local and used */
10052 fprintf (stream, "\t.quad %s..en\n", name);
10054 else
10056 /* External and used, request code address. */
10057 fprintf (stream, "\t.code_address %s\n", name);
10060 else
10062 if (link->lkind == KIND_LOCAL)
10064 /* Local and used, build linkage pair. */
10065 fprintf (stream, "\t.quad %s..en\n", name);
10066 fprintf (stream, "\t.quad %s\n", name);
10068 else
10070 /* External and used, request linkage pair. */
10071 fprintf (stream, "\t.linkage %s\n", name);
10075 return 0;
10078 static void
10079 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
10081 splay_tree_node node;
10082 struct alpha_funcs *func;
10084 fprintf (stream, "\t.link\n");
10085 fprintf (stream, "\t.align 3\n");
10086 in_section = NULL;
10088 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
10089 func = (struct alpha_funcs *) node->value;
10091 fputs ("\t.name ", stream);
10092 assemble_name (stream, funname);
10093 fputs ("..na\n", stream);
10094 ASM_OUTPUT_LABEL (stream, funname);
10095 fprintf (stream, "\t.pdesc ");
10096 assemble_name (stream, funname);
10097 fprintf (stream, "..en,%s\n",
10098 alpha_procedure_type == PT_STACK ? "stack"
10099 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
10101 if (func->links)
10103 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
10104 /* splay_tree_delete (func->links); */
10108 /* Switch to an arbitrary section NAME with attributes as specified
10109 by FLAGS. ALIGN specifies any known alignment requirements for
10110 the section; 0 if the default should be used. */
10112 static void
10113 vms_asm_named_section (const char *name, unsigned int flags,
10114 tree decl ATTRIBUTE_UNUSED)
10116 fputc ('\n', asm_out_file);
10117 fprintf (asm_out_file, ".section\t%s", name);
10119 if (flags & SECTION_DEBUG)
10120 fprintf (asm_out_file, ",NOWRT");
10122 fputc ('\n', asm_out_file);
10125 /* Record an element in the table of global constructors. SYMBOL is
10126 a SYMBOL_REF of the function to be called; PRIORITY is a number
10127 between 0 and MAX_INIT_PRIORITY.
10129 Differs from default_ctors_section_asm_out_constructor in that the
10130 width of the .ctors entry is always 64 bits, rather than the 32 bits
10131 used by a normal pointer. */
10133 static void
10134 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10136 switch_to_section (ctors_section);
10137 assemble_align (BITS_PER_WORD);
10138 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10141 static void
10142 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10144 switch_to_section (dtors_section);
10145 assemble_align (BITS_PER_WORD);
10146 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10148 #else
10151 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
10152 int is_local ATTRIBUTE_UNUSED)
10154 return NULL_RTX;
10158 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
10159 tree cfundecl ATTRIBUTE_UNUSED,
10160 int lflag ATTRIBUTE_UNUSED,
10161 int rflag ATTRIBUTE_UNUSED)
10163 return NULL_RTX;
10166 #endif /* TARGET_ABI_OPEN_VMS */
10168 #if TARGET_ABI_UNICOSMK
10170 /* This evaluates to true if we do not know how to pass TYPE solely in
10171 registers. This is the case for all arguments that do not fit in two
10172 registers. */
10174 static bool
10175 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
10177 if (type == NULL)
10178 return false;
10180 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10181 return true;
10182 if (TREE_ADDRESSABLE (type))
10183 return true;
10185 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
10188 /* Define the offset between two registers, one to be eliminated, and the
10189 other its replacement, at the start of a routine. */
10192 unicosmk_initial_elimination_offset (int from, int to)
10194 int fixed_size;
10196 fixed_size = alpha_sa_size();
10197 if (fixed_size != 0)
10198 fixed_size += 48;
10200 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10201 return -fixed_size;
10202 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10203 return 0;
10204 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10205 return (ALPHA_ROUND (crtl->outgoing_args_size)
10206 + ALPHA_ROUND (get_frame_size()));
10207 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10208 return (ALPHA_ROUND (fixed_size)
10209 + ALPHA_ROUND (get_frame_size()
10210 + crtl->outgoing_args_size));
10211 else
10212 gcc_unreachable ();
10215 /* Output the module name for .ident and .end directives. We have to strip
10216    directories and make sure that the module name starts with a letter
10217 or '$'. */
10219 static void
10220 unicosmk_output_module_name (FILE *file)
10222 const char *name = lbasename (main_input_filename);
10223 unsigned len = strlen (name);
10224 char *clean_name = alloca (len + 2);
10225 char *ptr = clean_name;
10227 /* CAM only accepts module names that start with a letter or '$'. We
10228 prefix the module name with a '$' if necessary. */
10230 if (!ISALPHA (*name))
10231 *ptr++ = '$';
10232 memcpy (ptr, name, len + 1);
10233 clean_symbol_name (clean_name);
10234 fputs (clean_name, file);
10237 /* Output the definition of a common variable. */
10239 void
10240 unicosmk_output_common (FILE *file, const char *name, int size, int align)
10242 tree name_tree;
10243 printf ("T3E__: common %s\n", name);
10245 in_section = NULL;
10246 fputs("\t.endp\n\n\t.psect ", file);
10247 assemble_name(file, name);
10248 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
10249 fprintf(file, "\t.byte\t0:%d\n", size);
10251 /* Mark the symbol as defined in this module. */
10252 name_tree = get_identifier (name);
10253 TREE_ASM_WRITTEN (name_tree) = 1;
10256 #define SECTION_PUBLIC SECTION_MACH_DEP
10257 #define SECTION_MAIN (SECTION_PUBLIC << 1)
10258 static int current_section_align;
10260 /* A get_unnamed_section callback for switching to the text section. */
10262 static void
10263 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10265 static int count = 0;
10266 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
10269 /* A get_unnamed_section callback for switching to the data section. */
10271 static void
10272 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10274 static int count = 1;
10275 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
10278 /* Implement TARGET_ASM_INIT_SECTIONS.
10280 The Cray assembler is really weird with respect to sections. It has only
10281 named sections and you can't reopen a section once it has been closed.
10282 This means that we have to generate unique names whenever we want to
10283 reenter the text or the data section. */
10285 static void
10286 unicosmk_init_sections (void)
10288 text_section = get_unnamed_section (SECTION_CODE,
10289 unicosmk_output_text_section_asm_op,
10290 NULL);
10291 data_section = get_unnamed_section (SECTION_WRITE,
10292 unicosmk_output_data_section_asm_op,
10293 NULL);
10294 readonly_data_section = data_section;
10297 static unsigned int
10298 unicosmk_section_type_flags (tree decl, const char *name,
10299 int reloc ATTRIBUTE_UNUSED)
10301 unsigned int flags = default_section_type_flags (decl, name, reloc);
10303 if (!decl)
10304 return flags;
10306 if (TREE_CODE (decl) == FUNCTION_DECL)
10308 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10309 if (align_functions_log > current_section_align)
10310 current_section_align = align_functions_log;
10312 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10313 flags |= SECTION_MAIN;
10315 else
10316 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10318 if (TREE_PUBLIC (decl))
10319 flags |= SECTION_PUBLIC;
10321 return flags;
10324 /* Generate a section name for decl and associate it with the
10325 declaration. */
10327 static void
10328 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
10330 const char *name;
10331 int len;
10333 gcc_assert (decl);
10335 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10336 name = default_strip_name_encoding (name);
10337 len = strlen (name);
10339 if (TREE_CODE (decl) == FUNCTION_DECL)
10341 char *string;
10343 /* It is essential that we prefix the section name here because
10344 otherwise the section names generated for constructors and
10345 destructors confuse collect2. */
10347 string = alloca (len + 6);
10348 sprintf (string, "code@%s", name);
10349 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10351 else if (TREE_PUBLIC (decl))
10352 DECL_SECTION_NAME (decl) = build_string (len, name);
10353 else
10355 char *string;
10357 string = alloca (len + 6);
10358 sprintf (string, "data@%s", name);
10359 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
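/* For example (hypothetical declarations): a function "main" ends up in
   section "code@main", a public variable "counter" keeps the plain name
   "counter", and any other variable "tbl" goes into "data@tbl".  */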
10363 /* Switch to an arbitrary section NAME with attributes as specified
10364 by FLAGS. ALIGN specifies any known alignment requirements for
10365 the section; 0 if the default should be used. */
10367 static void
10368 unicosmk_asm_named_section (const char *name, unsigned int flags,
10369 tree decl ATTRIBUTE_UNUSED)
10371 const char *kind;
10373 /* Close the previous section. */
10375 fputs ("\t.endp\n\n", asm_out_file);
10377 /* Find out what kind of section we are opening. */
10379 if (flags & SECTION_MAIN)
10380 fputs ("\t.start\tmain\n", asm_out_file);
10382 if (flags & SECTION_CODE)
10383 kind = "code";
10384 else if (flags & SECTION_PUBLIC)
10385 kind = "common";
10386 else
10387 kind = "data";
10389 if (current_section_align != 0)
10390 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10391 current_section_align, kind);
10392 else
10393 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
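/* Continuing the hypothetical "main" example: with SECTION_MAIN and
   SECTION_CODE set and a current section alignment of 4, the output is
   approximately

        .endp

        .start  main
        .psect  code@main,4,code  */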
10396 static void
10397 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10399 if (DECL_P (decl)
10400 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10401 unicosmk_unique_section (decl, 0);
10404 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10405 in code sections because .align fills unused space with zeroes. */
10407 void
10408 unicosmk_output_align (FILE *file, int align)
10410 if (inside_function)
10411 fprintf (file, "\tgcc@code@align\t%d\n", align);
10412 else
10413 fprintf (file, "\t.align\t%d\n", align);
10416 /* Add a case vector to the current function's list of deferred case
10417 vectors. Case vectors have to be put into a separate section because CAM
10418 does not allow data definitions in code sections. */
10420 void
10421 unicosmk_defer_case_vector (rtx lab, rtx vec)
10423 struct machine_function *machine = cfun->machine;
10425 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10426 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10427 machine->addr_list);
10430 /* Output a case vector. */
10432 static void
10433 unicosmk_output_addr_vec (FILE *file, rtx vec)
10435 rtx lab = XEXP (vec, 0);
10436 rtx body = XEXP (vec, 1);
10437 int vlen = XVECLEN (body, 0);
10438 int idx;
10440 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10442 for (idx = 0; idx < vlen; idx++)
10444 ASM_OUTPUT_ADDR_VEC_ELT
10445 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10449 /* Output current function's deferred case vectors. */
10451 static void
10452 unicosmk_output_deferred_case_vectors (FILE *file)
10454 struct machine_function *machine = cfun->machine;
10455 rtx t;
10457 if (machine->addr_list == NULL_RTX)
10458 return;
10460 switch_to_section (data_section);
10461 for (t = machine->addr_list; t; t = XEXP (t, 1))
10462 unicosmk_output_addr_vec (file, XEXP (t, 0));
10465 /* Generate the name of the SSIB section for the current function. */
10467 #define SSIB_PREFIX "__SSIB_"
10468 #define SSIB_PREFIX_LEN 7
10470 static const char *
10471 unicosmk_ssib_name (void)
10473 /* This is ok since CAM won't be able to deal with names longer than that
10474 anyway. */
10476 static char name[256];
10478 rtx x;
10479 const char *fnname;
10480 int len;
10482 x = DECL_RTL (cfun->decl);
10483 gcc_assert (MEM_P (x));
10484 x = XEXP (x, 0);
10485 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10486 fnname = XSTR (x, 0);
10488 len = strlen (fnname);
10489 if (len + SSIB_PREFIX_LEN > 255)
10490 len = 255 - SSIB_PREFIX_LEN;
10492 strcpy (name, SSIB_PREFIX);
10493 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10494 name[len + SSIB_PREFIX_LEN] = 0;
10496 return name;
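/* For a hypothetical function "fetch_data" this yields "__SSIB_fetch_data";
   names longer than 255 - SSIB_PREFIX_LEN (= 248) characters are silently
   truncated to fit the static buffer.  */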
10499 /* Set up the dynamic subprogram information block (DSIB) and update the
10500 frame pointer register ($15) for subroutines which have a frame. If the
10501 subroutine doesn't have a frame, simply increment $15. */
10503 static void
10504 unicosmk_gen_dsib (unsigned long *imaskP)
10506 if (alpha_procedure_type == PT_STACK)
10508 const char *ssib_name;
10509 rtx mem;
10511 /* Allocate 64 bytes for the DSIB. */
10513 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10514 GEN_INT (-64))));
10515 emit_insn (gen_blockage ());
10517 /* Save the return address. */
10519 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10520 set_mem_alias_set (mem, alpha_sr_alias_set);
10521 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10522 (*imaskP) &= ~(1UL << REG_RA);
10524 /* Save the old frame pointer. */
10526 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10527 set_mem_alias_set (mem, alpha_sr_alias_set);
10528 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10529 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10531 emit_insn (gen_blockage ());
10533 /* Store the SSIB pointer. */
10535 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10536 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10537 set_mem_alias_set (mem, alpha_sr_alias_set);
10539 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10540 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10541 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10543 /* Save the CIW index. */
10545 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10546 set_mem_alias_set (mem, alpha_sr_alias_set);
10547 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10549 emit_insn (gen_blockage ());
10551 /* Set the new frame pointer. */
10552 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10553 stack_pointer_rtx, GEN_INT (64))));
10555 else
10557 /* Increment the frame pointer register to indicate that we do not
10558 have a frame. */
10559 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10560 hard_frame_pointer_rtx, const1_rtx));
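/* Sketch of the 64-byte DSIB laid out by the PT_STACK path above, relative
   to the decremented stack pointer (offsets taken from the code; the
   remaining slots are left untouched here):

        sp+56   return address ($26)
        sp+48   previous frame pointer ($15)
        sp+32   pointer to the SSIB
        sp+24   CIW index (register $25 at entry)

   The new frame pointer $15 is then set to sp+64, i.e. to the value the
   stack pointer had before the DSIB was allocated.  */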
10564 /* Output the static subroutine information block for the current
10565 function. */
10567 static void
10568 unicosmk_output_ssib (FILE *file, const char *fnname)
10570 int len;
10571 int i;
10572 rtx x;
10573 rtx ciw;
10574 struct machine_function *machine = cfun->machine;
10576 in_section = NULL;
10577 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10578 unicosmk_ssib_name ());
10580 /* Some required stuff and the function name length. */
10582 len = strlen (fnname);
10583 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10585 /* Saved registers
10586 ??? We don't do that yet. */
10588 fputs ("\t.quad\t0\n", file);
10590 /* Function address. */
10592 fputs ("\t.quad\t", file);
10593 assemble_name (file, fnname);
10594 putc ('\n', file);
10596 fputs ("\t.quad\t0\n", file);
10597 fputs ("\t.quad\t0\n", file);
10599 /* Function name.
10600 ??? We do it the same way Cray CC does it but this could be
10601 simplified. */
10603 for (i = 0; i < len; i++)
10604 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10605 if ((len % 8) == 0)
10606 fputs ("\t.quad\t0\n", file);
10607 else
10608 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10610 /* All call information words used in the function. */
10612 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10614 ciw = XEXP (x, 0);
10615 #if HOST_BITS_PER_WIDE_INT == 32
10616 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10617 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10618 #else
10619 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10620 #endif
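/* Rough shape of the output for a hypothetical function "foo" (assuming an
   empty user_label_prefix, no saved-register information and one CIW):

        .endp

        .psect  __SSIB_foo,data
        .quad   ^X200080328
        .quad   0
        .quad   foo
        .quad   0
        .quad   0
        .byte   102
        .byte   111
        .byte   111
        .bits   40 : 0
        .quad   <call information word>  */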
10624 /* Add a call information word (CIW) to the list of the current function's
10625 CIWs and return its index.
10627 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10630 unicosmk_add_call_info_word (rtx x)
10632 rtx node;
10633 struct machine_function *machine = cfun->machine;
10635 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10636 if (machine->first_ciw == NULL_RTX)
10637 machine->first_ciw = node;
10638 else
10639 XEXP (machine->last_ciw, 1) = node;
10641 machine->last_ciw = node;
10642 ++machine->ciw_count;
10644 return GEN_INT (machine->ciw_count
10645 + strlen (current_function_name ())/8 + 5);
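/* Worked example (hypothetical): in a function named "main" the first CIW
   added gets machine->ciw_count == 1 and strlen ("main") / 8 == 0, so the
   returned index is GEN_INT (1 + 0 + 5) == 6; the second CIW would get 7,
   and so on.  */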
10648 /* The Cray assembler doesn't accept extern declarations for symbols which
10649 are defined in the same file. We have to keep track of all global
10650 symbols which are referenced and/or defined in a source file and output
10651 extern declarations for those which are referenced but not defined at
10652 the end of the file. */
10654 /* List of identifiers for which an extern declaration might have to be
10655 emitted. */
10656 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10658 struct unicosmk_extern_list
10660 struct unicosmk_extern_list *next;
10661 const char *name;
10664 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10666 /* Output extern declarations which are required for every asm file. */
10668 static void
10669 unicosmk_output_default_externs (FILE *file)
10671 static const char *const externs[] =
10672 { "__T3E_MISMATCH" };
10674 int i;
10675 int n;
10677 n = ARRAY_SIZE (externs);
10679 for (i = 0; i < n; i++)
10680 fprintf (file, "\t.extern\t%s\n", externs[i]);
10683 /* Output extern declarations for global symbols which have been
10684 referenced but not defined. */
10686 static void
10687 unicosmk_output_externs (FILE *file)
10689 struct unicosmk_extern_list *p;
10690 const char *real_name;
10691 int len;
10692 tree name_tree;
10694 len = strlen (user_label_prefix);
10695 for (p = unicosmk_extern_head; p != 0; p = p->next)
10697 /* We have to strip the encoding and possibly remove user_label_prefix
10698 from the identifier in order to handle -fleading-underscore and
10699 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10700 real_name = default_strip_name_encoding (p->name);
10701 if (len && p->name[0] == '*'
10702 && !memcmp (real_name, user_label_prefix, len))
10703 real_name += len;
10705 name_tree = get_identifier (real_name);
10706 if (! TREE_ASM_WRITTEN (name_tree))
10708 TREE_ASM_WRITTEN (name_tree) = 1;
10709 fputs ("\t.extern\t", file);
10710 assemble_name (file, p->name);
10711 putc ('\n', file);
10716 /* Record an extern. */
10718 void
10719 unicosmk_add_extern (const char *name)
10721 struct unicosmk_extern_list *p;
10723 p = (struct unicosmk_extern_list *)
10724 xmalloc (sizeof (struct unicosmk_extern_list));
10725 p->next = unicosmk_extern_head;
10726 p->name = name;
10727 unicosmk_extern_head = p;
10730 /* The Cray assembler generates incorrect code if identifiers which
10731 conflict with register names are used as instruction operands. We have
10732 to replace such identifiers with DEX expressions. */
10734 /* Structure to collect identifiers which have been replaced by DEX
10735 expressions. */
10736 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10738 struct unicosmk_dex {
10739 struct unicosmk_dex *next;
10740 const char *name;
10743 /* List of identifiers which have been replaced by DEX expressions. The DEX
10744 number is determined by the position in the list. */
10746 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10748 /* The number of elements in the DEX list. */
10750 static int unicosmk_dex_count = 0;
10752 /* Check if NAME must be replaced by a DEX expression. */
10754 static int
10755 unicosmk_special_name (const char *name)
10757 if (name[0] == '*')
10758 ++name;
10760 if (name[0] == '$')
10761 ++name;
10763 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10764 return 0;
10766 switch (name[1])
10768 case '1': case '2':
10769 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10771 case '3':
10772 return (name[2] == '\0'
10773 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10775 default:
10776 return (ISDIGIT (name[1]) && name[2] == '\0');
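/* Under the rules above, names such as "r0" ... "r31", "f5", "R17", "$f10"
   or "*r2" collide with CAM register names and need a DEX expression, while
   e.g. "r32", "f311" or "reg" do not.  */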
10780 /* Return the DEX number if X must be replaced by a DEX expression and 0
10781 otherwise. */
10783 static int
10784 unicosmk_need_dex (rtx x)
10786 struct unicosmk_dex *dex;
10787 const char *name;
10788 int i;
10790 if (GET_CODE (x) != SYMBOL_REF)
10791 return 0;
10793 name = XSTR (x,0);
10794 if (! unicosmk_special_name (name))
10795 return 0;
10797 i = unicosmk_dex_count;
10798 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10800 if (! strcmp (name, dex->name))
10801 return i;
10802 --i;
10805 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10806 dex->name = name;
10807 dex->next = unicosmk_dex_list;
10808 unicosmk_dex_list = dex;
10810 ++unicosmk_dex_count;
10811 return unicosmk_dex_count;
10814 /* Output the DEX definitions for this file. */
10816 static void
10817 unicosmk_output_dex (FILE *file)
10819 struct unicosmk_dex *dex;
10820 int i;
10822 if (unicosmk_dex_list == NULL)
10823 return;
10825 fprintf (file, "\t.dexstart\n");
10827 i = unicosmk_dex_count;
10828 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10830 fprintf (file, "\tDEX (%d) = ", i);
10831 assemble_name (file, dex->name);
10832 putc ('\n', file);
10833 --i;
10836 fprintf (file, "\t.dexend\n");
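/* With two replaced identifiers, recorded in the order "f5" then "r10", the
   emitted block would look roughly like

        .dexstart
        DEX (2) = r10
        DEX (1) = f5
        .dexend

   i.e. the most recently recorded name carries the highest DEX number, and
   unicosmk_need_dex above returns that same number for later uses.  */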
10839 /* Output text to appear at the beginning of an assembler file. */
10841 static void
10842 unicosmk_file_start (void)
10844 int i;
10846 fputs ("\t.ident\t", asm_out_file);
10847 unicosmk_output_module_name (asm_out_file);
10848 fputs ("\n\n", asm_out_file);
10850 /* The Unicos/Mk assembler uses different register names. Instead of trying
10851 to support them, we simply use micro definitions. */
10853 /* CAM has different register names: rN for the integer register N and fN
10854 for the floating-point register N. Instead of trying to use these in
10855 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10856 register. */
10858 for (i = 0; i < 32; ++i)
10859 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10861 for (i = 0; i < 32; ++i)
10862 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10864 putc ('\n', asm_out_file);
10866 /* The .align directive fills unused space with zeroes, which does not work
10867 in code sections. We define the macro 'gcc@code@align' which uses nops
10868 instead. Note that it assumes that code sections always have the
10869 biggest possible alignment since . refers to the current offset from
10870 the beginning of the section. */
10872 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10873 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10874 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10875 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10876 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10877 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10878 fputs ("\t.endr\n", asm_out_file);
10879 fputs ("\t.endif\n", asm_out_file);
10880 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10882 /* Output extern declarations which should always be visible. */
10883 unicosmk_output_default_externs (asm_out_file);
10885 /* Open a dummy section. We always need to be inside a section for the
10886 section-switching code to work correctly.
10887 ??? This should be a module id or something like that. I still have to
10888 figure out what the rules for those are. */
10889 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10892 /* Output text to appear at the end of an assembler file. This includes all
10893 pending extern declarations and DEX expressions. */
10895 static void
10896 unicosmk_file_end (void)
10898 fputs ("\t.endp\n\n", asm_out_file);
10900 /* Output all pending externs. */
10902 unicosmk_output_externs (asm_out_file);
10904 /* Output dex definitions used for functions whose names conflict with
10905 register names. */
10907 unicosmk_output_dex (asm_out_file);
10909 fputs ("\t.end\t", asm_out_file);
10910 unicosmk_output_module_name (asm_out_file);
10911 putc ('\n', asm_out_file);
10914 #else
10916 static void
10917 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10920 static void
10921 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10924 static void
10925 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10926 const char * fnname ATTRIBUTE_UNUSED)
10930 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10932 return NULL_RTX;
10935 static int
10936 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10938 return 0;
10941 #endif /* TARGET_ABI_UNICOSMK */
10943 static void
10944 alpha_init_libfuncs (void)
10946 if (TARGET_ABI_UNICOSMK)
10948 /* Prevent gcc from generating calls to __divsi3. */
10949 set_optab_libfunc (sdiv_optab, SImode, 0);
10950 set_optab_libfunc (udiv_optab, SImode, 0);
10952 /* Use the functions provided by the system library
10953 for DImode integer division. */
10954 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10955 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10957 else if (TARGET_ABI_OPEN_VMS)
10959 /* Use the VMS runtime library functions for division and
10960 remainder. */
10961 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10962 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10963 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10964 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10965 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10966 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10967 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10968 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10969 abort_libfunc = init_one_libfunc ("decc$abort");
10970 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10971 #ifdef MEM_LIBFUNCS_INIT
10972 MEM_LIBFUNCS_INIT;
10973 #endif
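/* Sketch of the effect: on Unicos/Mk no __divsi3-style library call is
   emitted for SImode division (those libfuncs are cleared), and DImode
   division calls the system routines $sldiv / $uldiv; on VMS, division and
   remainder in both modes call the OTS$ routines named above.  */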
10977 /* On the Alpha, we use this to disable the floating-point registers
10978 when they don't exist. */
10980 static void
10981 alpha_conditional_register_usage (void)
10983 int i;
10984 if (! TARGET_FPREGS)
10985 for (i = 32; i < 63; i++)
10986 fixed_regs[i] = call_used_regs[i] = 1;
10989 /* Initialize the GCC target structure. */
10990 #if TARGET_ABI_OPEN_VMS
10991 # undef TARGET_ATTRIBUTE_TABLE
10992 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10993 # undef TARGET_CAN_ELIMINATE
10994 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
10995 #endif
10997 #undef TARGET_IN_SMALL_DATA_P
10998 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
11000 #if TARGET_ABI_UNICOSMK
11001 # undef TARGET_INSERT_ATTRIBUTES
11002 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
11003 # undef TARGET_SECTION_TYPE_FLAGS
11004 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
11005 # undef TARGET_ASM_UNIQUE_SECTION
11006 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
11007 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
11008 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
11009 # undef TARGET_ASM_GLOBALIZE_LABEL
11010 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
11011 # undef TARGET_MUST_PASS_IN_STACK
11012 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
11013 #endif
11015 #undef TARGET_ASM_ALIGNED_HI_OP
11016 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
11017 #undef TARGET_ASM_ALIGNED_DI_OP
11018 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
11020 /* Default unaligned ops are provided for ELF systems. To get unaligned
11021 data for non-ELF systems, we have to turn off auto alignment. */
11022 #if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
11023 #undef TARGET_ASM_UNALIGNED_HI_OP
11024 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
11025 #undef TARGET_ASM_UNALIGNED_SI_OP
11026 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
11027 #undef TARGET_ASM_UNALIGNED_DI_OP
11028 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
11029 #endif
11031 #ifdef OBJECT_FORMAT_ELF
11032 #undef TARGET_ASM_RELOC_RW_MASK
11033 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
11034 #undef TARGET_ASM_SELECT_RTX_SECTION
11035 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
11036 #undef TARGET_SECTION_TYPE_FLAGS
11037 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
11038 #endif
11040 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
11041 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
11043 #undef TARGET_INIT_LIBFUNCS
11044 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
11046 #undef TARGET_LEGITIMIZE_ADDRESS
11047 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
11049 #if TARGET_ABI_UNICOSMK
11050 #undef TARGET_ASM_FILE_START
11051 #define TARGET_ASM_FILE_START unicosmk_file_start
11052 #undef TARGET_ASM_FILE_END
11053 #define TARGET_ASM_FILE_END unicosmk_file_end
11054 #else
11055 #undef TARGET_ASM_FILE_START
11056 #define TARGET_ASM_FILE_START alpha_file_start
11057 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
11058 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
11059 #endif
11061 #undef TARGET_SCHED_ADJUST_COST
11062 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
11063 #undef TARGET_SCHED_ISSUE_RATE
11064 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
11065 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11066 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
11067 alpha_multipass_dfa_lookahead
11069 #undef TARGET_HAVE_TLS
11070 #define TARGET_HAVE_TLS HAVE_AS_TLS
11072 #undef TARGET_BUILTIN_DECL
11073 #define TARGET_BUILTIN_DECL alpha_builtin_decl
11074 #undef TARGET_INIT_BUILTINS
11075 #define TARGET_INIT_BUILTINS alpha_init_builtins
11076 #undef TARGET_EXPAND_BUILTIN
11077 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
11078 #undef TARGET_FOLD_BUILTIN
11079 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
11081 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11082 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
11083 #undef TARGET_CANNOT_COPY_INSN_P
11084 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
11085 #undef TARGET_CANNOT_FORCE_CONST_MEM
11086 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
11088 #if TARGET_ABI_OSF
11089 #undef TARGET_ASM_OUTPUT_MI_THUNK
11090 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
11091 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11092 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11093 #undef TARGET_STDARG_OPTIMIZE_HOOK
11094 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
11095 #endif
11097 #undef TARGET_RTX_COSTS
11098 #define TARGET_RTX_COSTS alpha_rtx_costs
11099 #undef TARGET_ADDRESS_COST
11100 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
11102 #undef TARGET_MACHINE_DEPENDENT_REORG
11103 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
11105 #undef TARGET_PROMOTE_FUNCTION_MODE
11106 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
11107 #undef TARGET_PROMOTE_PROTOTYPES
11108 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
11109 #undef TARGET_RETURN_IN_MEMORY
11110 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
11111 #undef TARGET_PASS_BY_REFERENCE
11112 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
11113 #undef TARGET_SETUP_INCOMING_VARARGS
11114 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
11115 #undef TARGET_STRICT_ARGUMENT_NAMING
11116 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
11117 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
11118 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
11119 #undef TARGET_SPLIT_COMPLEX_ARG
11120 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
11121 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11122 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
11123 #undef TARGET_ARG_PARTIAL_BYTES
11124 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
11125 #undef TARGET_FUNCTION_ARG
11126 #define TARGET_FUNCTION_ARG alpha_function_arg
11127 #undef TARGET_FUNCTION_ARG_ADVANCE
11128 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
11129 #undef TARGET_TRAMPOLINE_INIT
11130 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
11132 #undef TARGET_SECONDARY_RELOAD
11133 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
11135 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11136 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
11137 #undef TARGET_VECTOR_MODE_SUPPORTED_P
11138 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
11140 #undef TARGET_BUILD_BUILTIN_VA_LIST
11141 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
11143 #undef TARGET_EXPAND_BUILTIN_VA_START
11144 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
11146 /* The Alpha architecture does not require sequential consistency. See
11147 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
11148 for an example of how it can be violated in practice. */
11149 #undef TARGET_RELAXED_ORDERING
11150 #define TARGET_RELAXED_ORDERING true
11152 #undef TARGET_DEFAULT_TARGET_FLAGS
11153 #define TARGET_DEFAULT_TARGET_FLAGS \
11154 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
11155 #undef TARGET_HANDLE_OPTION
11156 #define TARGET_HANDLE_OPTION alpha_handle_option
11158 #undef TARGET_OPTION_OVERRIDE
11159 #define TARGET_OPTION_OVERRIDE alpha_option_override
11161 #undef TARGET_OPTION_OPTIMIZATION_TABLE
11162 #define TARGET_OPTION_OPTIMIZATION_TABLE alpha_option_optimization_table
11164 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11165 #undef TARGET_MANGLE_TYPE
11166 #define TARGET_MANGLE_TYPE alpha_mangle_type
11167 #endif
11169 #undef TARGET_LEGITIMATE_ADDRESS_P
11170 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
11172 #undef TARGET_CONDITIONAL_REGISTER_USAGE
11173 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
11175 struct gcc_target targetm = TARGET_INITIALIZER;
11178 #include "gt-alpha.h"