1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "diagnostic-core.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include "splay-tree.h"
53 #include "cfglayout.h"
54 #include "gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
58 #include "df.h"
59 #include "libfuncs.h"
61 /* Specify which cpu to schedule for. */
62 enum processor_type alpha_tune;
64 /* Which cpu we're generating code for. */
65 enum processor_type alpha_cpu;
67 static const char * const alpha_cpu_name[] =
69 "ev4", "ev5", "ev6"
72 /* Specify how accurate floating-point traps need to be. */
74 enum alpha_trap_precision alpha_tp;
76 /* Specify the floating-point rounding mode. */
78 enum alpha_fp_rounding_mode alpha_fprm;
80 /* Specify which things cause traps. */
82 enum alpha_fp_trap_mode alpha_fptm;
84 /* Nonzero if inside of a function, because the Alpha asm can't
85 handle .files inside of functions. */
87 static int inside_function = FALSE;
89 /* The number of cycles of latency we should assume on memory reads. */
91 int alpha_memory_latency = 3;
93 /* Whether the function needs the GP. */
95 static int alpha_function_needs_gp;
97 /* The alias set for prologue/epilogue register save/restore. */
99 static GTY(()) alias_set_type alpha_sr_alias_set;
101 /* The assembler name of the current function. */
103 static const char *alpha_fnname;
105 /* The next explicit relocation sequence number. */
106 extern GTY(()) int alpha_next_sequence_number;
107 int alpha_next_sequence_number = 1;
109 /* The literal and gpdisp sequence numbers for this insn, as printed
110 by %# and %* respectively. */
111 extern GTY(()) int alpha_this_literal_sequence_number;
112 extern GTY(()) int alpha_this_gpdisp_sequence_number;
113 int alpha_this_literal_sequence_number;
114 int alpha_this_gpdisp_sequence_number;
116 /* Costs of various operations on the different architectures. */
118 struct alpha_rtx_cost_data
120 unsigned char fp_add;
121 unsigned char fp_mult;
122 unsigned char fp_div_sf;
123 unsigned char fp_div_df;
124 unsigned char int_mult_si;
125 unsigned char int_mult_di;
126 unsigned char int_shift;
127 unsigned char int_cmov;
128 unsigned short int_div;
131 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
133 { /* EV4 */
134 COSTS_N_INSNS (6), /* fp_add */
135 COSTS_N_INSNS (6), /* fp_mult */
136 COSTS_N_INSNS (34), /* fp_div_sf */
137 COSTS_N_INSNS (63), /* fp_div_df */
138 COSTS_N_INSNS (23), /* int_mult_si */
139 COSTS_N_INSNS (23), /* int_mult_di */
140 COSTS_N_INSNS (2), /* int_shift */
141 COSTS_N_INSNS (2), /* int_cmov */
142 COSTS_N_INSNS (97), /* int_div */
144 { /* EV5 */
145 COSTS_N_INSNS (4), /* fp_add */
146 COSTS_N_INSNS (4), /* fp_mult */
147 COSTS_N_INSNS (15), /* fp_div_sf */
148 COSTS_N_INSNS (22), /* fp_div_df */
149 COSTS_N_INSNS (8), /* int_mult_si */
150 COSTS_N_INSNS (12), /* int_mult_di */
151 COSTS_N_INSNS (1) + 1, /* int_shift */
152 COSTS_N_INSNS (1), /* int_cmov */
153 COSTS_N_INSNS (83), /* int_div */
155 { /* EV6 */
156 COSTS_N_INSNS (4), /* fp_add */
157 COSTS_N_INSNS (4), /* fp_mult */
158 COSTS_N_INSNS (12), /* fp_div_sf */
159 COSTS_N_INSNS (15), /* fp_div_df */
160 COSTS_N_INSNS (7), /* int_mult_si */
161 COSTS_N_INSNS (7), /* int_mult_di */
162 COSTS_N_INSNS (1), /* int_shift */
163 COSTS_N_INSNS (2), /* int_cmov */
164 COSTS_N_INSNS (86), /* int_div */
168 /* Similar but tuned for code size instead of execution latency. The
169 extra +N is fractional cost tuning based on latency. It's used to
170 encourage use of cheaper insns like shift, but only if there's just
171 one of them. */
173 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
175 COSTS_N_INSNS (1), /* fp_add */
176 COSTS_N_INSNS (1), /* fp_mult */
177 COSTS_N_INSNS (1), /* fp_div_sf */
178 COSTS_N_INSNS (1) + 1, /* fp_div_df */
179 COSTS_N_INSNS (1) + 1, /* int_mult_si */
180 COSTS_N_INSNS (1) + 2, /* int_mult_di */
181 COSTS_N_INSNS (1), /* int_shift */
182 COSTS_N_INSNS (1), /* int_cmov */
183 COSTS_N_INSNS (6), /* int_div */
186 /* Get the number of args of a function in one of two ways. */
187 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
188 #define NUM_ARGS crtl->args.info.num_args
189 #else
190 #define NUM_ARGS crtl->args.info
191 #endif
193 #define REG_PV 27
194 #define REG_RA 26
196 /* Declarations of static functions. */
197 static struct machine_function *alpha_init_machine_status (void);
198 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
200 #if TARGET_ABI_OPEN_VMS
201 static void alpha_write_linkage (FILE *, const char *, tree);
202 static bool vms_valid_pointer_mode (enum machine_mode);
203 #endif
205 static void unicosmk_output_deferred_case_vectors (FILE *);
206 static void unicosmk_gen_dsib (unsigned long *);
207 static void unicosmk_output_ssib (FILE *, const char *);
208 static int unicosmk_need_dex (rtx);
210 /* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
211 static const struct default_options alpha_option_optimization_table[] =
213 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
214 { OPT_LEVELS_NONE, 0, NULL, 0 }
217 /* Implement TARGET_HANDLE_OPTION. */
219 static bool
220 alpha_handle_option (size_t code, const char *arg, int value)
222 switch (code)
224 case OPT_mfp_regs:
225 if (value == 0)
226 target_flags |= MASK_SOFT_FP;
227 break;
229 case OPT_mieee:
230 case OPT_mieee_with_inexact:
231 target_flags |= MASK_IEEE_CONFORMANT;
232 break;
234 case OPT_mtls_size_:
235 if (value != 16 && value != 32 && value != 64)
236 error ("bad value %qs for -mtls-size switch", arg);
237 break;
240 return true;
243 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
244 /* Implement TARGET_MANGLE_TYPE. */
246 static const char *
247 alpha_mangle_type (const_tree type)
249 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
250 && TARGET_LONG_DOUBLE_128)
251 return "g";
253 /* For all other types, use normal C++ mangling. */
254 return NULL;
256 #endif
258 /* Parse target option strings. */
260 static void
261 alpha_option_override (void)
263 static const struct cpu_table {
264 const char *const name;
265 const enum processor_type processor;
266 const int flags;
267 } cpu_table[] = {
268 { "ev4", PROCESSOR_EV4, 0 },
269 { "ev45", PROCESSOR_EV4, 0 },
270 { "21064", PROCESSOR_EV4, 0 },
271 { "ev5", PROCESSOR_EV5, 0 },
272 { "21164", PROCESSOR_EV5, 0 },
273 { "ev56", PROCESSOR_EV5, MASK_BWX },
274 { "21164a", PROCESSOR_EV5, MASK_BWX },
275 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
276 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
277 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
278 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
279 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
280 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
281 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
284 int const ct_size = ARRAY_SIZE (cpu_table);
285 int i;
287 #ifdef SUBTARGET_OVERRIDE_OPTIONS
288 SUBTARGET_OVERRIDE_OPTIONS;
289 #endif
291 /* Unicos/Mk doesn't have shared libraries. */
292 if (TARGET_ABI_UNICOSMK && flag_pic)
294 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
295 (flag_pic > 1) ? "PIC" : "pic");
296 flag_pic = 0;
299 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
300 floating-point instructions. Make that the default for this target. */
301 if (TARGET_ABI_UNICOSMK)
302 alpha_fprm = ALPHA_FPRM_DYN;
303 else
304 alpha_fprm = ALPHA_FPRM_NORM;
306 alpha_tp = ALPHA_TP_PROG;
307 alpha_fptm = ALPHA_FPTM_N;
309 /* We cannot use su and sui qualifiers for conversion instructions on
310 Unicos/Mk. I'm not sure if this is due to assembler or hardware
311 limitations. Right now, we issue a warning if -mieee is specified
312 and then ignore it; eventually, we should either get it right or
313 disable the option altogether. */
315 if (TARGET_IEEE)
317 if (TARGET_ABI_UNICOSMK)
318 warning (0, "-mieee not supported on Unicos/Mk");
319 else
321 alpha_tp = ALPHA_TP_INSN;
322 alpha_fptm = ALPHA_FPTM_SU;
326 if (TARGET_IEEE_WITH_INEXACT)
328 if (TARGET_ABI_UNICOSMK)
329 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
330 else
332 alpha_tp = ALPHA_TP_INSN;
333 alpha_fptm = ALPHA_FPTM_SUI;
337 if (alpha_tp_string)
339 if (! strcmp (alpha_tp_string, "p"))
340 alpha_tp = ALPHA_TP_PROG;
341 else if (! strcmp (alpha_tp_string, "f"))
342 alpha_tp = ALPHA_TP_FUNC;
343 else if (! strcmp (alpha_tp_string, "i"))
344 alpha_tp = ALPHA_TP_INSN;
345 else
346 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
349 if (alpha_fprm_string)
351 if (! strcmp (alpha_fprm_string, "n"))
352 alpha_fprm = ALPHA_FPRM_NORM;
353 else if (! strcmp (alpha_fprm_string, "m"))
354 alpha_fprm = ALPHA_FPRM_MINF;
355 else if (! strcmp (alpha_fprm_string, "c"))
356 alpha_fprm = ALPHA_FPRM_CHOP;
357 else if (! strcmp (alpha_fprm_string,"d"))
358 alpha_fprm = ALPHA_FPRM_DYN;
359 else
360 error ("bad value %qs for -mfp-rounding-mode switch",
361 alpha_fprm_string);
364 if (alpha_fptm_string)
366 if (strcmp (alpha_fptm_string, "n") == 0)
367 alpha_fptm = ALPHA_FPTM_N;
368 else if (strcmp (alpha_fptm_string, "u") == 0)
369 alpha_fptm = ALPHA_FPTM_U;
370 else if (strcmp (alpha_fptm_string, "su") == 0)
371 alpha_fptm = ALPHA_FPTM_SU;
372 else if (strcmp (alpha_fptm_string, "sui") == 0)
373 alpha_fptm = ALPHA_FPTM_SUI;
374 else
375 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
378 if (alpha_cpu_string)
380 for (i = 0; i < ct_size; i++)
381 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
383 alpha_tune = alpha_cpu = cpu_table [i].processor;
384 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
385 target_flags |= cpu_table [i].flags;
386 break;
388 if (i == ct_size)
389 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
392 if (alpha_tune_string)
394 for (i = 0; i < ct_size; i++)
395 if (! strcmp (alpha_tune_string, cpu_table [i].name))
397 alpha_tune = cpu_table [i].processor;
398 break;
400 if (i == ct_size)
401 error ("bad value %qs for -mtune switch", alpha_tune_string);
404 /* Do some sanity checks on the above options. */
406 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
408 warning (0, "trap mode not supported on Unicos/Mk");
409 alpha_fptm = ALPHA_FPTM_N;
412 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
413 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
415 warning (0, "fp software completion requires -mtrap-precision=i");
416 alpha_tp = ALPHA_TP_INSN;
419 if (alpha_cpu == PROCESSOR_EV6)
421 /* Except for EV6 pass 1 (not released), we always have precise
422 arithmetic traps, which means we can do software completion
423 without minding trap shadows. */
424 alpha_tp = ALPHA_TP_PROG;
427 if (TARGET_FLOAT_VAX)
429 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
431 warning (0, "rounding mode not supported for VAX floats");
432 alpha_fprm = ALPHA_FPRM_NORM;
434 if (alpha_fptm == ALPHA_FPTM_SUI)
436 warning (0, "trap mode not supported for VAX floats");
437 alpha_fptm = ALPHA_FPTM_SU;
439 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
440 warning (0, "128-bit long double not supported for VAX floats");
441 target_flags &= ~MASK_LONG_DOUBLE_128;
445 char *end;
446 int lat;
448 if (!alpha_mlat_string)
449 alpha_mlat_string = "L1";
451 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
452 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
454 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
455 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
456 && alpha_mlat_string[2] == '\0')
458 static int const cache_latency[][4] =
460 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
461 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
462 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
465 lat = alpha_mlat_string[1] - '0';
466 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
468 warning (0, "L%d cache latency unknown for %s",
469 lat, alpha_cpu_name[alpha_tune]);
470 lat = 3;
472 else
473 lat = cache_latency[alpha_tune][lat-1];
475 else if (! strcmp (alpha_mlat_string, "main"))
477 /* Most current memories have about 370ns latency. This is
478 a reasonable guess for a fast cpu. */
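/* (At a clock of roughly 400 MHz, 150 cycles is about 370 ns.)  */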
479 lat = 150;
481 else
483 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
484 lat = 3;
487 alpha_memory_latency = lat;
490 /* Default the definition of "small data" to 8 bytes. */
491 if (!global_options_set.x_g_switch_value)
492 g_switch_value = 8;
494 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
495 if (flag_pic == 1)
496 target_flags |= MASK_SMALL_DATA;
497 else if (flag_pic == 2)
498 target_flags &= ~MASK_SMALL_DATA;
500 /* Align labels and loops for optimal branching. */
501 /* ??? Kludge these by not doing anything if we don't optimize and also if
502 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
503 if (optimize > 0 && write_symbols != SDB_DEBUG)
505 if (align_loops <= 0)
506 align_loops = 16;
507 if (align_jumps <= 0)
508 align_jumps = 16;
510 if (align_functions <= 0)
511 align_functions = 16;
513 /* Acquire a unique set number for our register saves and restores. */
514 alpha_sr_alias_set = new_alias_set ();
516 /* Register variables and functions with the garbage collector. */
518 /* Set up function hooks. */
519 init_machine_status = alpha_init_machine_status;
521 /* Tell the compiler when we're using VAX floating point. */
522 if (TARGET_FLOAT_VAX)
524 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
525 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
526 REAL_MODE_FORMAT (TFmode) = NULL;
529 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
530 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
531 target_flags |= MASK_LONG_DOUBLE_128;
532 #endif
534 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
535 can be optimized to ap = __builtin_next_arg (0). */
536 if (TARGET_ABI_UNICOSMK)
537 targetm.expand_builtin_va_start = NULL;
540 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
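/* For example, 0xff00ffffff0000ff yields 1 (every byte is 0x00 or 0xff),
   while 0x00000000000000f0 yields 0 (the low byte is neither).  */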
543 zap_mask (HOST_WIDE_INT value)
545 int i;
547 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
548 i++, value >>= 8)
549 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
550 return 0;
552 return 1;
555 /* Return true if OP is valid for a particular TLS relocation.
556 We are already guaranteed that OP is a CONST. */
559 tls_symbolic_operand_1 (rtx op, int size, int unspec)
561 op = XEXP (op, 0);
563 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
564 return 0;
565 op = XVECEXP (op, 0, 0);
567 if (GET_CODE (op) != SYMBOL_REF)
568 return 0;
570 switch (SYMBOL_REF_TLS_MODEL (op))
572 case TLS_MODEL_LOCAL_DYNAMIC:
573 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
574 case TLS_MODEL_INITIAL_EXEC:
575 return unspec == UNSPEC_TPREL && size == 64;
576 case TLS_MODEL_LOCAL_EXEC:
577 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
578 default:
579 gcc_unreachable ();
583 /* Used by aligned_memory_operand and unaligned_memory_operand to
584 resolve what reload is going to do with OP if it's a register. */
587 resolve_reload_operand (rtx op)
589 if (reload_in_progress)
591 rtx tmp = op;
592 if (GET_CODE (tmp) == SUBREG)
593 tmp = SUBREG_REG (tmp);
594 if (REG_P (tmp)
595 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
597 op = reg_equiv_memory_loc[REGNO (tmp)];
598 if (op == 0)
599 return 0;
602 return op;
605 /* The scalar modes supported differ from the default check-what-c-supports
606 version in that sometimes TFmode is available even when long double
607 indicates only DFmode. On Unicos/Mk, we have the situation that HImode
608 doesn't map to any C type, but of course we still support that. */
610 static bool
611 alpha_scalar_mode_supported_p (enum machine_mode mode)
613 switch (mode)
615 case QImode:
616 case HImode:
617 case SImode:
618 case DImode:
619 case TImode: /* via optabs.c */
620 return true;
622 case SFmode:
623 case DFmode:
624 return true;
626 case TFmode:
627 return TARGET_HAS_XFLOATING_LIBS;
629 default:
630 return false;
634 /* Alpha implements a couple of integer vector mode operations when
635 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
636 which allows the vectorizer to operate on e.g. move instructions,
637 or when expand_vector_operations can do something useful. */
639 static bool
640 alpha_vector_mode_supported_p (enum machine_mode mode)
642 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
645 /* Return 1 if this function can directly return via $26. */
648 direct_return (void)
650 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
651 && reload_completed
652 && alpha_sa_size () == 0
653 && get_frame_size () == 0
654 && crtl->outgoing_args_size == 0
655 && crtl->args.pretend_args_size == 0);
658 /* Return the ADDR_VEC associated with a tablejump insn. */
661 alpha_tablejump_addr_vec (rtx insn)
663 rtx tmp;
665 tmp = JUMP_LABEL (insn);
666 if (!tmp)
667 return NULL_RTX;
668 tmp = NEXT_INSN (tmp);
669 if (!tmp)
670 return NULL_RTX;
671 if (JUMP_P (tmp)
672 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
673 return PATTERN (tmp);
674 return NULL_RTX;
677 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
680 alpha_tablejump_best_label (rtx insn)
682 rtx jump_table = alpha_tablejump_addr_vec (insn);
683 rtx best_label = NULL_RTX;
685 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
686 there for edge frequency counts from profile data. */
688 if (jump_table)
690 int n_labels = XVECLEN (jump_table, 1);
691 int best_count = -1;
692 int i, j;
694 for (i = 0; i < n_labels; i++)
696 int count = 1;
698 for (j = i + 1; j < n_labels; j++)
699 if (XEXP (XVECEXP (jump_table, 1, i), 0)
700 == XEXP (XVECEXP (jump_table, 1, j), 0))
701 count++;
703 if (count > best_count)
704 best_count = count, best_label = XVECEXP (jump_table, 1, i);
708 return best_label ? best_label : const0_rtx;
711 /* Return the TLS model to use for SYMBOL. */
713 static enum tls_model
714 tls_symbolic_operand_type (rtx symbol)
716 enum tls_model model;
718 if (GET_CODE (symbol) != SYMBOL_REF)
719 return TLS_MODEL_NONE;
720 model = SYMBOL_REF_TLS_MODEL (symbol);
722 /* Local-exec with a 64-bit size is the same code as initial-exec. */
723 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
724 model = TLS_MODEL_INITIAL_EXEC;
726 return model;
729 /* Return true if the function DECL will share the same GP as any
730 function in the current unit of translation. */
732 static bool
733 decl_has_samegp (const_tree decl)
735 /* Functions that are not local can be overridden, and thus may
736 not share the same gp. */
737 if (!(*targetm.binds_local_p) (decl))
738 return false;
740 /* If -msmall-data is in effect, assume that there is only one GP
741 for the module, and so any local symbol has this property. We
742 need explicit relocations to be able to enforce this for symbols
743 not defined in this unit of translation, however. */
744 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
745 return true;
747 /* Functions that are not external are defined in this UoT. */
748 /* ??? Irritatingly, static functions not yet emitted are still
749 marked "external". Apply this to non-static functions only. */
750 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
753 /* Return true if EXP should be placed in the small data section. */
755 static bool
756 alpha_in_small_data_p (const_tree exp)
758 /* We want to merge strings, so we never consider them small data. */
759 if (TREE_CODE (exp) == STRING_CST)
760 return false;
762 /* Functions are never in the small data area. Duh. */
763 if (TREE_CODE (exp) == FUNCTION_DECL)
764 return false;
766 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
768 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
769 if (strcmp (section, ".sdata") == 0
770 || strcmp (section, ".sbss") == 0)
771 return true;
773 else
775 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
777 /* If this is an incomplete type with size 0, then we can't put it
778 in sdata because it might be too big when completed. */
779 if (size > 0 && size <= g_switch_value)
780 return true;
783 return false;
786 #if TARGET_ABI_OPEN_VMS
787 static bool
788 vms_valid_pointer_mode (enum machine_mode mode)
790 return (mode == SImode || mode == DImode);
793 static bool
794 alpha_linkage_symbol_p (const char *symname)
796 int symlen = strlen (symname);
798 if (symlen > 4)
799 return strcmp (&symname [symlen - 4], "..lk") == 0;
801 return false;
804 #define LINKAGE_SYMBOL_REF_P(X) \
805 ((GET_CODE (X) == SYMBOL_REF \
806 && alpha_linkage_symbol_p (XSTR (X, 0))) \
807 || (GET_CODE (X) == CONST \
808 && GET_CODE (XEXP (X, 0)) == PLUS \
809 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
810 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
811 #endif
813 /* legitimate_address_p recognizes an RTL expression that is a valid
814 memory address for an instruction. The MODE argument is the
815 machine mode for the MEM expression that wants to use this address.
817 For Alpha, we have either a constant address or the sum of a
818 register and a constant address, or just a register. For DImode,
819 any of those forms can be surrounded with an AND that clears the
820 low-order three bits; this is an "unaligned" access. */
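/* For example, (and (plus (reg) (const_int 11)) (const_int -8)) is the
   ldq_u form of such an address; the AND with -8 rounds it down to the
   containing aligned quadword.  */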
822 static bool
823 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
825 /* If this is an ldq_u type address, discard the outer AND. */
826 if (mode == DImode
827 && GET_CODE (x) == AND
828 && CONST_INT_P (XEXP (x, 1))
829 && INTVAL (XEXP (x, 1)) == -8)
830 x = XEXP (x, 0);
832 /* Discard non-paradoxical subregs. */
833 if (GET_CODE (x) == SUBREG
834 && (GET_MODE_SIZE (GET_MODE (x))
835 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
836 x = SUBREG_REG (x);
838 /* Unadorned general registers are valid. */
839 if (REG_P (x)
840 && (strict
841 ? STRICT_REG_OK_FOR_BASE_P (x)
842 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
843 return true;
845 /* Constant addresses (i.e. +/- 32k) are valid. */
846 if (CONSTANT_ADDRESS_P (x))
847 return true;
849 #if TARGET_ABI_OPEN_VMS
850 if (LINKAGE_SYMBOL_REF_P (x))
851 return true;
852 #endif
854 /* Register plus a small constant offset is valid. */
855 if (GET_CODE (x) == PLUS)
857 rtx ofs = XEXP (x, 1);
858 x = XEXP (x, 0);
860 /* Discard non-paradoxical subregs. */
861 if (GET_CODE (x) == SUBREG
862 && (GET_MODE_SIZE (GET_MODE (x))
863 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
864 x = SUBREG_REG (x);
866 if (REG_P (x))
868 if (! strict
869 && NONSTRICT_REG_OK_FP_BASE_P (x)
870 && CONST_INT_P (ofs))
871 return true;
872 if ((strict
873 ? STRICT_REG_OK_FOR_BASE_P (x)
874 : NONSTRICT_REG_OK_FOR_BASE_P (x))
875 && CONSTANT_ADDRESS_P (ofs))
876 return true;
880 /* If we're managing explicit relocations, LO_SUM is valid, as are small
881 data symbols. Avoid explicit relocations of modes larger than word
882 mode since e.g. $LC0+8($1) can fold around a +/- 32k offset. */
883 else if (TARGET_EXPLICIT_RELOCS
884 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
886 if (small_symbolic_operand (x, Pmode))
887 return true;
889 if (GET_CODE (x) == LO_SUM)
891 rtx ofs = XEXP (x, 1);
892 x = XEXP (x, 0);
894 /* Discard non-paradoxical subregs. */
895 if (GET_CODE (x) == SUBREG
896 && (GET_MODE_SIZE (GET_MODE (x))
897 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
898 x = SUBREG_REG (x);
900 /* Must have a valid base register. */
901 if (! (REG_P (x)
902 && (strict
903 ? STRICT_REG_OK_FOR_BASE_P (x)
904 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
905 return false;
907 /* The symbol must be local. */
908 if (local_symbolic_operand (ofs, Pmode)
909 || dtp32_symbolic_operand (ofs, Pmode)
910 || tp32_symbolic_operand (ofs, Pmode))
911 return true;
915 return false;
918 /* Build the SYMBOL_REF for __tls_get_addr. */
920 static GTY(()) rtx tls_get_addr_libfunc;
922 static rtx
923 get_tls_get_addr (void)
925 if (!tls_get_addr_libfunc)
926 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
927 return tls_get_addr_libfunc;
930 /* Try machine-dependent ways of modifying an illegitimate address
931 to be legitimate. If we find one, return the new, valid address. */
933 static rtx
934 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
936 HOST_WIDE_INT addend;
938 /* If the address is (plus reg const_int) and the CONST_INT is not a
939 valid offset, compute the high part of the constant and add it to
940 the register. Then our address is (plus temp low-part-const). */
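/* For example, an offset of 0x12348000 splits into a high part of
   0x12350000 added to the register and a low part of -0x8000 left in
   the address.  */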
941 if (GET_CODE (x) == PLUS
942 && REG_P (XEXP (x, 0))
943 && CONST_INT_P (XEXP (x, 1))
944 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
946 addend = INTVAL (XEXP (x, 1));
947 x = XEXP (x, 0);
948 goto split_addend;
951 /* If the address is (const (plus FOO const_int)), find the low-order
952 part of the CONST_INT. Then load FOO plus any high-order part of the
953 CONST_INT into a register. Our address is (plus reg low-part-const).
954 This is done to reduce the number of GOT entries. */
955 if (can_create_pseudo_p ()
956 && GET_CODE (x) == CONST
957 && GET_CODE (XEXP (x, 0)) == PLUS
958 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
960 addend = INTVAL (XEXP (XEXP (x, 0), 1));
961 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
962 goto split_addend;
965 /* If we have a (plus reg const), emit the load as in (2), then add
966 the two registers, and finally generate (plus reg low-part-const) as
967 our address. */
968 if (can_create_pseudo_p ()
969 && GET_CODE (x) == PLUS
970 && REG_P (XEXP (x, 0))
971 && GET_CODE (XEXP (x, 1)) == CONST
972 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
973 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
975 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
976 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
977 XEXP (XEXP (XEXP (x, 1), 0), 0),
978 NULL_RTX, 1, OPTAB_LIB_WIDEN);
979 goto split_addend;
982 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
983 Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
984 around a +/- 32k offset. */
985 if (TARGET_EXPLICIT_RELOCS
986 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
987 && symbolic_operand (x, Pmode))
989 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
991 switch (tls_symbolic_operand_type (x))
993 case TLS_MODEL_NONE:
994 break;
996 case TLS_MODEL_GLOBAL_DYNAMIC:
997 start_sequence ();
999 r0 = gen_rtx_REG (Pmode, 0);
1000 r16 = gen_rtx_REG (Pmode, 16);
1001 tga = get_tls_get_addr ();
1002 dest = gen_reg_rtx (Pmode);
1003 seq = GEN_INT (alpha_next_sequence_number++);
1005 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1006 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1007 insn = emit_call_insn (insn);
1008 RTL_CONST_CALL_P (insn) = 1;
1009 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1011 insn = get_insns ();
1012 end_sequence ();
1014 emit_libcall_block (insn, dest, r0, x);
1015 return dest;
1017 case TLS_MODEL_LOCAL_DYNAMIC:
1018 start_sequence ();
1020 r0 = gen_rtx_REG (Pmode, 0);
1021 r16 = gen_rtx_REG (Pmode, 16);
1022 tga = get_tls_get_addr ();
1023 scratch = gen_reg_rtx (Pmode);
1024 seq = GEN_INT (alpha_next_sequence_number++);
1026 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1027 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1028 insn = emit_call_insn (insn);
1029 RTL_CONST_CALL_P (insn) = 1;
1030 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1032 insn = get_insns ();
1033 end_sequence ();
1035 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1036 UNSPEC_TLSLDM_CALL);
1037 emit_libcall_block (insn, scratch, r0, eqv);
1039 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1040 eqv = gen_rtx_CONST (Pmode, eqv);
1042 if (alpha_tls_size == 64)
1044 dest = gen_reg_rtx (Pmode);
1045 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1046 emit_insn (gen_adddi3 (dest, dest, scratch));
1047 return dest;
1049 if (alpha_tls_size == 32)
1051 insn = gen_rtx_HIGH (Pmode, eqv);
1052 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1053 scratch = gen_reg_rtx (Pmode);
1054 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1056 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1058 case TLS_MODEL_INITIAL_EXEC:
1059 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1060 eqv = gen_rtx_CONST (Pmode, eqv);
1061 tp = gen_reg_rtx (Pmode);
1062 scratch = gen_reg_rtx (Pmode);
1063 dest = gen_reg_rtx (Pmode);
1065 emit_insn (gen_load_tp (tp));
1066 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1067 emit_insn (gen_adddi3 (dest, tp, scratch));
1068 return dest;
1070 case TLS_MODEL_LOCAL_EXEC:
1071 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1072 eqv = gen_rtx_CONST (Pmode, eqv);
1073 tp = gen_reg_rtx (Pmode);
1075 emit_insn (gen_load_tp (tp));
1076 if (alpha_tls_size == 32)
1078 insn = gen_rtx_HIGH (Pmode, eqv);
1079 insn = gen_rtx_PLUS (Pmode, tp, insn);
1080 tp = gen_reg_rtx (Pmode);
1081 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1083 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1085 default:
1086 gcc_unreachable ();
1089 if (local_symbolic_operand (x, Pmode))
1091 if (small_symbolic_operand (x, Pmode))
1092 return x;
1093 else
1095 if (can_create_pseudo_p ())
1096 scratch = gen_reg_rtx (Pmode);
1097 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1098 gen_rtx_HIGH (Pmode, x)));
1099 return gen_rtx_LO_SUM (Pmode, scratch, x);
1104 return NULL;
1106 split_addend:
1108 HOST_WIDE_INT low, high;
1110 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1111 addend -= low;
1112 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1113 addend -= high;
1115 if (addend)
1116 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1117 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1118 1, OPTAB_LIB_WIDEN);
1119 if (high)
1120 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1121 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1122 1, OPTAB_LIB_WIDEN);
1124 return plus_constant (x, low);
1129 /* Try machine-dependent ways of modifying an illegitimate address
1130 to be legitimate. Return X or the new, valid address. */
1132 static rtx
1133 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1134 enum machine_mode mode)
1136 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1137 return new_x ? new_x : x;
1140 /* Primarily this is required for TLS symbols, but given that our move
1141 patterns *ought* to be able to handle any symbol at any time, we
1142 should never be spilling symbolic operands to the constant pool, ever. */
1144 static bool
1145 alpha_cannot_force_const_mem (rtx x)
1147 enum rtx_code code = GET_CODE (x);
1148 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1151 /* We do not allow indirect calls to be optimized into sibling calls, nor
1152 can we allow a call to a function with a different GP to be optimized
1153 into a sibcall. */
1155 static bool
1156 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1158 /* Can't do indirect tail calls, since we don't know if the target
1159 uses the same GP. */
1160 if (!decl)
1161 return false;
1163 /* Otherwise, we can make a tail call if the target function shares
1164 the same GP. */
1165 return decl_has_samegp (decl);
1169 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1171 rtx x = *px;
1173 /* Don't re-split. */
1174 if (GET_CODE (x) == LO_SUM)
1175 return -1;
1177 return small_symbolic_operand (x, Pmode) != 0;
1180 static int
1181 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1183 rtx x = *px;
1185 /* Don't re-split. */
1186 if (GET_CODE (x) == LO_SUM)
1187 return -1;
1189 if (small_symbolic_operand (x, Pmode))
1191 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1192 *px = x;
1193 return -1;
1196 return 0;
1200 split_small_symbolic_operand (rtx x)
1202 x = copy_insn (x);
1203 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1204 return x;
1207 /* Indicate that INSN cannot be duplicated. This is true for any insn
1208 that we've marked with gpdisp relocs, since those have to stay in
1209 1-1 correspondence with one another.
1211 Technically we could copy them if we could set up a mapping from one
1212 sequence number to another, across the set of insns to be duplicated.
1213 This seems overly complicated and error-prone since interblock motion
1214 from sched-ebb could move one of the pair of insns to a different block.
1216 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1217 then they'll be in a different block from their ldgp. Which could lead
1218 the bb reorder code to think that it would be ok to copy just the block
1219 containing the call and branch to the block containing the ldgp. */
1221 static bool
1222 alpha_cannot_copy_insn_p (rtx insn)
1224 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1225 return false;
1226 if (recog_memoized (insn) >= 0)
1227 return get_attr_cannot_copy (insn);
1228 else
1229 return false;
1233 /* Try a machine-dependent way of reloading an illegitimate address
1234 operand. If we find one, push the reload and return the new rtx. */
1237 alpha_legitimize_reload_address (rtx x,
1238 enum machine_mode mode ATTRIBUTE_UNUSED,
1239 int opnum, int type,
1240 int ind_levels ATTRIBUTE_UNUSED)
1242 /* We must recognize output that we have already generated ourselves. */
1243 if (GET_CODE (x) == PLUS
1244 && GET_CODE (XEXP (x, 0)) == PLUS
1245 && REG_P (XEXP (XEXP (x, 0), 0))
1246 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1247 && CONST_INT_P (XEXP (x, 1)))
1249 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1250 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1251 opnum, (enum reload_type) type);
1252 return x;
1255 /* We wish to handle large displacements off a base register by
1256 splitting the addend across an ldah and the mem insn. This
1257 cuts the number of extra insns needed from 3 to 1. */
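/* For example, a displacement of 0x9000 is reloaded as an ldah adding
   0x10000 to the base register, leaving -0x7000 as the displacement in
   the mem insn itself.  */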
1258 if (GET_CODE (x) == PLUS
1259 && REG_P (XEXP (x, 0))
1260 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1261 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1262 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1264 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1265 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1266 HOST_WIDE_INT high
1267 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1269 /* Check for 32-bit overflow. */
1270 if (high + low != val)
1271 return NULL_RTX;
1273 /* Reload the high part into a base reg; leave the low part
1274 in the mem directly. */
1275 x = gen_rtx_PLUS (GET_MODE (x),
1276 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1277 GEN_INT (high)),
1278 GEN_INT (low));
1280 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1281 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1282 opnum, (enum reload_type) type);
1283 return x;
1286 return NULL_RTX;
1289 /* Compute a (partial) cost for rtx X. Return true if the complete
1290 cost has been computed, and false if subexpressions should be
1291 scanned. In either case, *TOTAL contains the cost result. */
1293 static bool
1294 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1295 bool speed)
1297 enum machine_mode mode = GET_MODE (x);
1298 bool float_mode_p = FLOAT_MODE_P (mode);
1299 const struct alpha_rtx_cost_data *cost_data;
1301 if (!speed)
1302 cost_data = &alpha_rtx_cost_size;
1303 else
1304 cost_data = &alpha_rtx_cost_data[alpha_tune];
1306 switch (code)
1308 case CONST_INT:
1309 /* If this is an 8-bit constant, return zero since it can be used
1310 nearly anywhere with no cost. If it is a valid operand for an
1311 ADD or AND, likewise return 0 if we know it will be used in that
1312 context. Otherwise, return 2 since it might be used there later.
1313 All other constants take at least two insns. */
1314 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1316 *total = 0;
1317 return true;
1319 /* FALLTHRU */
1321 case CONST_DOUBLE:
1322 if (x == CONST0_RTX (mode))
1323 *total = 0;
1324 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1325 || (outer_code == AND && and_operand (x, VOIDmode)))
1326 *total = 0;
1327 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1328 *total = 2;
1329 else
1330 *total = COSTS_N_INSNS (2);
1331 return true;
1333 case CONST:
1334 case SYMBOL_REF:
1335 case LABEL_REF:
1336 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1337 *total = COSTS_N_INSNS (outer_code != MEM);
1338 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1339 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1340 else if (tls_symbolic_operand_type (x))
1341 /* Estimate of cost for call_pal rduniq. */
1342 /* ??? How many insns do we emit here? More than one... */
1343 *total = COSTS_N_INSNS (15);
1344 else
1345 /* Otherwise we do a load from the GOT. */
1346 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1347 return true;
1349 case HIGH:
1350 /* This is effectively an add_operand. */
1351 *total = 2;
1352 return true;
1354 case PLUS:
1355 case MINUS:
1356 if (float_mode_p)
1357 *total = cost_data->fp_add;
1358 else if (GET_CODE (XEXP (x, 0)) == MULT
1359 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1361 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1362 (enum rtx_code) outer_code, speed)
1363 + rtx_cost (XEXP (x, 1),
1364 (enum rtx_code) outer_code, speed)
1365 + COSTS_N_INSNS (1));
1366 return true;
1368 return false;
1370 case MULT:
1371 if (float_mode_p)
1372 *total = cost_data->fp_mult;
1373 else if (mode == DImode)
1374 *total = cost_data->int_mult_di;
1375 else
1376 *total = cost_data->int_mult_si;
1377 return false;
1379 case ASHIFT:
1380 if (CONST_INT_P (XEXP (x, 1))
1381 && INTVAL (XEXP (x, 1)) <= 3)
1383 *total = COSTS_N_INSNS (1);
1384 return false;
1386 /* FALLTHRU */
1388 case ASHIFTRT:
1389 case LSHIFTRT:
1390 *total = cost_data->int_shift;
1391 return false;
1393 case IF_THEN_ELSE:
1394 if (float_mode_p)
1395 *total = cost_data->fp_add;
1396 else
1397 *total = cost_data->int_cmov;
1398 return false;
1400 case DIV:
1401 case UDIV:
1402 case MOD:
1403 case UMOD:
1404 if (!float_mode_p)
1405 *total = cost_data->int_div;
1406 else if (mode == SFmode)
1407 *total = cost_data->fp_div_sf;
1408 else
1409 *total = cost_data->fp_div_df;
1410 return false;
1412 case MEM:
1413 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1414 return true;
1416 case NEG:
1417 if (! float_mode_p)
1419 *total = COSTS_N_INSNS (1);
1420 return false;
1422 /* FALLTHRU */
1424 case ABS:
1425 if (! float_mode_p)
1427 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1428 return false;
1430 /* FALLTHRU */
1432 case FLOAT:
1433 case UNSIGNED_FLOAT:
1434 case FIX:
1435 case UNSIGNED_FIX:
1436 case FLOAT_TRUNCATE:
1437 *total = cost_data->fp_add;
1438 return false;
1440 case FLOAT_EXTEND:
1441 if (MEM_P (XEXP (x, 0)))
1442 *total = 0;
1443 else
1444 *total = cost_data->fp_add;
1445 return false;
1447 default:
1448 return false;
1452 /* REF is an alignable memory location. Place an aligned SImode
1453 reference into *PALIGNED_MEM and the number of bits to shift into
1454 *PBITNUM. */
1457 void
1458 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1460 rtx base;
1461 HOST_WIDE_INT disp, offset;
1463 gcc_assert (MEM_P (ref));
1465 if (reload_in_progress
1466 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1468 base = find_replacement (&XEXP (ref, 0));
1469 gcc_assert (memory_address_p (GET_MODE (ref), base));
1471 else
1472 base = XEXP (ref, 0);
1474 if (GET_CODE (base) == PLUS)
1475 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1476 else
1477 disp = 0;
1479 /* Find the byte offset within an aligned word. If the memory itself is
1480 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1481 will have examined the base register and determined it is aligned, and
1482 thus displacements from it are naturally alignable. */
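/* For example, a byte reference at displacement 7 from an aligned base
   uses the SImode word at displacement 4 with a bit offset of 24 on a
   little-endian target.  */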
1483 if (MEM_ALIGN (ref) >= 32)
1484 offset = 0;
1485 else
1486 offset = disp & 3;
1488 /* The location should not cross an aligned word boundary. */
1489 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1490 <= GET_MODE_SIZE (SImode));
1492 /* Access the entire aligned word. */
1493 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1495 /* Convert the byte offset within the word to a bit offset. */
1496 if (WORDS_BIG_ENDIAN)
1497 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1498 else
1499 offset *= 8;
1500 *pbitnum = GEN_INT (offset);
1503 /* Similar, but just get the address. Handle the two reload cases. */
1507 get_unaligned_address (rtx ref)
1509 rtx base;
1510 HOST_WIDE_INT offset = 0;
1512 gcc_assert (MEM_P (ref));
1514 if (reload_in_progress
1515 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1517 base = find_replacement (&XEXP (ref, 0));
1519 gcc_assert (memory_address_p (GET_MODE (ref), base));
1521 else
1522 base = XEXP (ref, 0);
1524 if (GET_CODE (base) == PLUS)
1525 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1527 return plus_constant (base, offset);
1530 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1531 X is always returned in a register. */
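/* For example, with OFS == 13 this returns ADDR plus 5 (13 & 7), which
   has the same low three bits as ADDR + 13.  */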
1534 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1536 if (GET_CODE (addr) == PLUS)
1538 ofs += INTVAL (XEXP (addr, 1));
1539 addr = XEXP (addr, 0);
1542 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1543 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1546 /* On the Alpha, all (non-symbolic) constants except zero go into
1547 a floating-point register via memory. Note that we cannot
1548 return anything that is not a subset of RCLASS, and that some
1549 symbolic constants cannot be dropped to memory. */
1551 enum reg_class
1552 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1554 /* Zero is present in any register class. */
1555 if (x == CONST0_RTX (GET_MODE (x)))
1556 return rclass;
1558 /* These sorts of constants we can easily drop to memory. */
1559 if (CONST_INT_P (x)
1560 || GET_CODE (x) == CONST_DOUBLE
1561 || GET_CODE (x) == CONST_VECTOR)
1563 if (rclass == FLOAT_REGS)
1564 return NO_REGS;
1565 if (rclass == ALL_REGS)
1566 return GENERAL_REGS;
1567 return rclass;
1570 /* All other kinds of constants should not (and in the case of HIGH
1571 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1572 secondary reload. */
1573 if (CONSTANT_P (x))
1574 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1576 return rclass;
1579 /* Inform reload about cases where moving X with a mode MODE to a register in
1580 RCLASS requires an extra scratch or immediate register. Return the class
1581 needed for the immediate register. */
1583 static reg_class_t
1584 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1585 enum machine_mode mode, secondary_reload_info *sri)
1587 enum reg_class rclass = (enum reg_class) rclass_i;
1589 /* Loading and storing HImode or QImode values to and from memory
1590 usually requires a scratch register. */
1591 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1593 if (any_memory_operand (x, mode))
1595 if (in_p)
1597 if (!aligned_memory_operand (x, mode))
1598 sri->icode = direct_optab_handler (reload_in_optab, mode);
1600 else
1601 sri->icode = direct_optab_handler (reload_out_optab, mode);
1602 return NO_REGS;
1606 /* We also cannot do integral arithmetic into FP regs, as might result
1607 from register elimination into a DImode fp register. */
1608 if (rclass == FLOAT_REGS)
1610 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1611 return GENERAL_REGS;
1612 if (in_p && INTEGRAL_MODE_P (mode)
1613 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1614 return GENERAL_REGS;
1617 return NO_REGS;
1620 /* Subfunction of the following function. Update the flags of any MEM
1621 found in part of X. */
1623 static int
1624 alpha_set_memflags_1 (rtx *xp, void *data)
1626 rtx x = *xp, orig = (rtx) data;
1628 if (!MEM_P (x))
1629 return 0;
1631 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1632 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1633 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1634 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1635 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1637 /* Sadly, we cannot use alias sets because the extra aliasing
1638 produced by the AND interferes. Given that two-byte quantities
1639 are the only thing we would be able to differentiate anyway,
1640 there does not seem to be any point in convoluting the early
1641 out of the alias check. */
1643 return -1;
1646 /* Given SEQ, which is an INSN list, look for any MEMs in either
1647 a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar,
1648 no-trap, and read-only flags from REF into each of the MEMs found.
1649 If REF is not a MEM, don't do anything. */
1651 void
1652 alpha_set_memflags (rtx seq, rtx ref)
1654 rtx insn;
1656 if (!MEM_P (ref))
1657 return;
1659 /* This is only called from alpha.md, after having had something
1660 generated from one of the insn patterns. So if everything is
1661 zero, the pattern is already up-to-date. */
1662 if (!MEM_VOLATILE_P (ref)
1663 && !MEM_IN_STRUCT_P (ref)
1664 && !MEM_SCALAR_P (ref)
1665 && !MEM_NOTRAP_P (ref)
1666 && !MEM_READONLY_P (ref))
1667 return;
1669 for (insn = seq; insn; insn = NEXT_INSN (insn))
1670 if (INSN_P (insn))
1671 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1672 else
1673 gcc_unreachable ();
1676 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1677 int, bool);
1679 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1680 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1681 and return pc_rtx if successful. */
1683 static rtx
1684 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1685 HOST_WIDE_INT c, int n, bool no_output)
1687 HOST_WIDE_INT new_const;
1688 int i, bits;
1689 /* Use a pseudo if highly optimizing and still generating RTL. */
1690 rtx subtarget
1691 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1692 rtx temp, insn;
1694 /* If this is a sign-extended 32-bit constant, we can do this in at most
1695 three insns, so do it if we have enough insns left. We always have
1696 a sign-extended 32-bit constant when compiling on a narrow machine. */
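/* For example, 0x7fff8000 is built with two ldah insns each adding
   0x40000000 (a single HIGH part would be interpreted as negative)
   followed by an lda adding -0x8000.  */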
1698 if (HOST_BITS_PER_WIDE_INT != 64
1699 || c >> 31 == -1 || c >> 31 == 0)
1701 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1702 HOST_WIDE_INT tmp1 = c - low;
1703 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1704 HOST_WIDE_INT extra = 0;
1706 /* If HIGH will be interpreted as negative but the constant is
1707 positive, we must adjust it to do two ldha insns. */
1709 if ((high & 0x8000) != 0 && c >= 0)
1711 extra = 0x4000;
1712 tmp1 -= 0x40000000;
1713 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1716 if (c == low || (low == 0 && extra == 0))
1718 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1719 but that meant that we can't handle INT_MIN on 32-bit machines
1720 (like NT/Alpha), because we recurse indefinitely through
1721 emit_move_insn to gen_movdi. So instead, since we know exactly
1722 what we want, create it explicitly. */
1724 if (no_output)
1725 return pc_rtx;
1726 if (target == NULL)
1727 target = gen_reg_rtx (mode);
1728 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1729 return target;
1731 else if (n >= 2 + (extra != 0))
1733 if (no_output)
1734 return pc_rtx;
1735 if (!can_create_pseudo_p ())
1737 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1738 temp = target;
1740 else
1741 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1742 subtarget, mode);
1744 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1745 This means that if we go through expand_binop, we'll try to
1746 generate extensions, etc, which will require new pseudos, which
1747 will fail during some split phases. The SImode add patterns
1748 still exist, but are not named. So build the insns by hand. */
1750 if (extra != 0)
1752 if (! subtarget)
1753 subtarget = gen_reg_rtx (mode);
1754 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1755 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1756 emit_insn (insn);
1757 temp = subtarget;
1760 if (target == NULL)
1761 target = gen_reg_rtx (mode);
1762 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1763 insn = gen_rtx_SET (VOIDmode, target, insn);
1764 emit_insn (insn);
1765 return target;
1769 /* If we couldn't do it that way, try some other methods. But if we have
1770 no instructions left, don't bother. Likewise, if this is SImode and
1771 we can't make pseudos, we can't do anything since the expand_binop
1772 and expand_unop calls will widen and try to make pseudos. */
1774 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1775 return 0;
1777 /* Next, see if we can load a related constant and then shift and possibly
1778 negate it to get the constant we want. Try this once for each
1779 increasing number of insns. */
1781 for (i = 1; i < n; i++)
1783 /* First, see if subtracting some low bits leaves an easy load of
1784 the high bits. */
1786 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1787 if (new_const != 0)
1789 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1790 if (temp)
1792 if (no_output)
1793 return temp;
1794 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1795 target, 0, OPTAB_WIDEN);
1799 /* Next try complementing. */
1800 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1801 if (temp)
1803 if (no_output)
1804 return temp;
1805 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1808 /* Next try to form a constant and do a left shift. We can do this
1809 if some low-order bits are zero; the exact_log2 call below tells
1810 us that information. The bits we are shifting out could be any
1811 value, but here we'll just try the 0- and sign-extended forms of
1812 the constant. To try to increase the chance of having the same
1813 constant in more than one insn, start at the highest number of
1814 bits to shift, but try all possibilities in case a ZAPNOT will
1815 be useful. */
1817 bits = exact_log2 (c & -c);
1818 if (bits > 0)
1819 for (; bits > 0; bits--)
1821 new_const = c >> bits;
1822 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1823 if (!temp && c < 0)
1825 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1826 temp = alpha_emit_set_const (subtarget, mode, new_const,
1827 i, no_output);
1829 if (temp)
1831 if (no_output)
1832 return temp;
1833 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1834 target, 0, OPTAB_WIDEN);
1838 /* Now try high-order zero bits. Here we try the shifted-in bits as
1839 all zero and all ones. Be careful to avoid shifting outside the
1840 mode and to avoid shifting outside the host wide int size. */
1841 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1842 confuse the recursive call and set all of the high 32 bits. */
1844 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1845 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1846 if (bits > 0)
1847 for (; bits > 0; bits--)
1849 new_const = c << bits;
1850 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1851 if (!temp)
1853 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1854 temp = alpha_emit_set_const (subtarget, mode, new_const,
1855 i, no_output);
1857 if (temp)
1859 if (no_output)
1860 return temp;
1861 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1862 target, 1, OPTAB_WIDEN);
1866 /* Now try high-order 1 bits. We get that with a sign-extension.
1867 But one bit isn't enough here. Be careful to avoid shifting outside
1868 the mode and to avoid shifting outside the host wide int size. */
1870 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1871 - floor_log2 (~ c) - 2);
1872 if (bits > 0)
1873 for (; bits > 0; bits--)
1875 new_const = c << bits;
1876 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1877 if (!temp)
1879 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1880 temp = alpha_emit_set_const (subtarget, mode, new_const,
1881 i, no_output);
1883 if (temp)
1885 if (no_output)
1886 return temp;
1887 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1888 target, 0, OPTAB_WIDEN);
1893 #if HOST_BITS_PER_WIDE_INT == 64
1894 /* Finally, see if we can load a value into the target that is the same as the
1895 constant except that all bytes that are 0 are changed to be 0xff. If we
1896 can, then we can do a ZAPNOT to obtain the desired constant. */
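/* For example, 0x000000ff00000000 maps to new_const == -1, which loads in
   a single insn; a ZAPNOT that keeps only byte 4 then recovers the
   original constant.  */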
1898 new_const = c;
1899 for (i = 0; i < 64; i += 8)
1900 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1901 new_const |= (HOST_WIDE_INT) 0xff << i;
1903 /* We are only called for SImode and DImode. If this is SImode, ensure that
1904 we are sign extended to a full word. */
1906 if (mode == SImode)
1907 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1909 if (new_const != c)
1911 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1912 if (temp)
1914 if (no_output)
1915 return temp;
1916 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1917 target, 0, OPTAB_WIDEN);
1920 #endif
1922 return 0;
1925 /* Try to output insns to set TARGET equal to the constant C if it can be
1926 done in at most N insns. Do all computations in MODE. Returns the place
1927 where the output has been placed if it can be done and the insns have been
1928 emitted. If it would take more than N insns, zero is returned and no
1929 insns are emitted. */
1931 static rtx
1932 alpha_emit_set_const (rtx target, enum machine_mode mode,
1933 HOST_WIDE_INT c, int n, bool no_output)
1935 enum machine_mode orig_mode = mode;
1936 rtx orig_target = target;
1937 rtx result = 0;
1938 int i;
1940 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1941 can't load this constant in one insn, do this in DImode. */
1942 if (!can_create_pseudo_p () && mode == SImode
1943 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1945 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1946 if (result)
1947 return result;
1949 target = no_output ? NULL : gen_lowpart (DImode, target);
1950 mode = DImode;
1952 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1954 target = no_output ? NULL : gen_lowpart (DImode, target);
1955 mode = DImode;
1958 /* Try 1 insn, then 2, then up to N. */
1959 for (i = 1; i <= n; i++)
1961 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1962 if (result)
1964 rtx insn, set;
1966 if (no_output)
1967 return result;
1969 insn = get_last_insn ();
1970 set = single_set (insn);
1971 if (! CONSTANT_P (SET_SRC (set)))
1972 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1973 break;
1977 /* Allow for the case where we changed the mode of TARGET. */
1978 if (result)
1980 if (result == target)
1981 result = orig_target;
1982 else if (mode != orig_mode)
1983 result = gen_lowpart (orig_mode, result);
1986 return result;
1989 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1990 fall back to a straightforward decomposition. We do this to avoid
1991 exponential run times encountered when looking for longer sequences
1992 with alpha_emit_set_const. */
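/* The 64-bit constant is rebuilt as ((D4 + D3) << 32) + D2 + D1, where D1
   and D3 are sign-extended 16-bit pieces (lda) and D2 and D4 are multiples
   of 0x10000 (ldah), taking at most five insns.  */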
1994 static rtx
1995 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1997 HOST_WIDE_INT d1, d2, d3, d4;
1999 /* Decompose the entire word */
2000 #if HOST_BITS_PER_WIDE_INT >= 64
2001 gcc_assert (c2 == -(c1 < 0));
2002 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2003 c1 -= d1;
2004 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2005 c1 = (c1 - d2) >> 32;
2006 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2007 c1 -= d3;
2008 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2009 gcc_assert (c1 == d4);
2010 #else
2011 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2012 c1 -= d1;
2013 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2014 gcc_assert (c1 == d2);
2015 c2 += (d2 < 0);
2016 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2017 c2 -= d3;
2018 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2019 gcc_assert (c2 == d4);
2020 #endif
2022 /* Construct the high word */
2023 if (d4)
2025 emit_move_insn (target, GEN_INT (d4));
2026 if (d3)
2027 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2029 else
2030 emit_move_insn (target, GEN_INT (d3));
2032 /* Shift it into place */
2033 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2035 /* Add in the low bits. */
2036 if (d2)
2037 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2038 if (d1)
2039 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2041 return target;
2044 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2045 the low 64 bits. */
2047 static void
2048 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2050 HOST_WIDE_INT i0, i1;
2052 if (GET_CODE (x) == CONST_VECTOR)
2053 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2056 if (CONST_INT_P (x))
2058 i0 = INTVAL (x);
2059 i1 = -(i0 < 0);
2061 else if (HOST_BITS_PER_WIDE_INT >= 64)
2063 i0 = CONST_DOUBLE_LOW (x);
2064 i1 = -(i0 < 0);
2066 else
2068 i0 = CONST_DOUBLE_LOW (x);
2069 i1 = CONST_DOUBLE_HIGH (x);
2072 *p0 = i0;
2073 *p1 = i1;
2076 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2077 are willing to load the value into a register via a move pattern.
2078 Normally this is all symbolic constants, integral constants that
2079 take three or fewer instructions, and floating-point zero. */
2081 bool
2082 alpha_legitimate_constant_p (rtx x)
2084 enum machine_mode mode = GET_MODE (x);
2085 HOST_WIDE_INT i0, i1;
2087 switch (GET_CODE (x))
2089 case LABEL_REF:
2090 case HIGH:
2091 return true;
2093 case CONST:
2094 if (GET_CODE (XEXP (x, 0)) == PLUS
2095 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2096 x = XEXP (XEXP (x, 0), 0);
2097 else
2098 return true;
2100 if (GET_CODE (x) != SYMBOL_REF)
2101 return true;
2103 /* FALLTHRU */
2105 case SYMBOL_REF:
2106 /* TLS symbols are never valid. */
2107 return SYMBOL_REF_TLS_MODEL (x) == 0;
2109 case CONST_DOUBLE:
2110 if (x == CONST0_RTX (mode))
2111 return true;
2112 if (FLOAT_MODE_P (mode))
2113 return false;
2114 goto do_integer;
2116 case CONST_VECTOR:
2117 if (x == CONST0_RTX (mode))
2118 return true;
2119 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2120 return false;
2121 if (GET_MODE_SIZE (mode) != 8)
2122 return false;
2123 goto do_integer;
2125 case CONST_INT:
2126 do_integer:
2127 if (TARGET_BUILD_CONSTANTS)
2128 return true;
2129 alpha_extract_integer (x, &i0, &i1);
2130 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2131 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2132 return false;
2134 default:
2135 return false;
2139 /* Operand 1 is known to be a constant, and should require more than one
2140 instruction to load. Emit that multi-part load. */
2142 bool
2143 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2145 HOST_WIDE_INT i0, i1;
2146 rtx temp = NULL_RTX;
2148 alpha_extract_integer (operands[1], &i0, &i1);
2150 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2151 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2153 if (!temp && TARGET_BUILD_CONSTANTS)
2154 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2156 if (temp)
2158 if (!rtx_equal_p (operands[0], temp))
2159 emit_move_insn (operands[0], temp);
2160 return true;
2163 return false;
2166 /* Expand a move instruction; return true if all work is done.
2167 We don't handle non-bwx subword loads here. */
2169 bool
2170 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2172 rtx tmp;
2174 /* If the output is not a register, the input must be. */
2175 if (MEM_P (operands[0])
2176 && ! reg_or_0_operand (operands[1], mode))
2177 operands[1] = force_reg (mode, operands[1]);
2179 /* Allow legitimize_address to perform some simplifications. */
2180 if (mode == Pmode && symbolic_operand (operands[1], mode))
2182 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2183 if (tmp)
2185 if (tmp == operands[0])
2186 return true;
2187 operands[1] = tmp;
2188 return false;
2192 /* Early out for non-constants and valid constants. */
2193 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2194 return false;
2196 /* Split large integers. */
2197 if (CONST_INT_P (operands[1])
2198 || GET_CODE (operands[1]) == CONST_DOUBLE
2199 || GET_CODE (operands[1]) == CONST_VECTOR)
2201 if (alpha_split_const_mov (mode, operands))
2202 return true;
2205 /* Otherwise we've nothing left but to drop the thing to memory. */
2206 tmp = force_const_mem (mode, operands[1]);
2208 if (tmp == NULL_RTX)
2209 return false;
2211 if (reload_in_progress)
2213 emit_move_insn (operands[0], XEXP (tmp, 0));
2214 operands[1] = replace_equiv_address (tmp, operands[0]);
2216 else
2217 operands[1] = validize_mem (tmp);
2218 return false;
2221 /* Expand a non-bwx QImode or HImode move instruction;
2222 return true if all work is done. */
2224 bool
2225 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2227 rtx seq;
2229 /* If the output is not a register, the input must be. */
2230 if (MEM_P (operands[0]))
2231 operands[1] = force_reg (mode, operands[1]);
2233 /* Handle four memory cases, unaligned and aligned for either the input
2234 or the output. The only case where we can be called during reload is
2235 for aligned loads; all other cases require temporaries. */
2237 if (any_memory_operand (operands[1], mode))
2239 if (aligned_memory_operand (operands[1], mode))
2241 if (reload_in_progress)
2243 if (mode == QImode)
2244 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2245 else
2246 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2247 emit_insn (seq);
2249 else
2251 rtx aligned_mem, bitnum;
2252 rtx scratch = gen_reg_rtx (SImode);
2253 rtx subtarget;
2254 bool copyout;
2256 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2258 subtarget = operands[0];
2259 if (REG_P (subtarget))
2260 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2261 else
2262 subtarget = gen_reg_rtx (DImode), copyout = true;
2264 if (mode == QImode)
2265 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2266 bitnum, scratch);
2267 else
2268 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2269 bitnum, scratch);
2270 emit_insn (seq);
2272 if (copyout)
2273 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2276 else
2278 /* Don't pass these as parameters since that makes the generated
2279 code depend on parameter evaluation order which will cause
2280 bootstrap failures. */
2282 rtx temp1, temp2, subtarget, ua;
2283 bool copyout;
2285 temp1 = gen_reg_rtx (DImode);
2286 temp2 = gen_reg_rtx (DImode);
2288 subtarget = operands[0];
2289 if (REG_P (subtarget))
2290 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2291 else
2292 subtarget = gen_reg_rtx (DImode), copyout = true;
2294 ua = get_unaligned_address (operands[1]);
2295 if (mode == QImode)
2296 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2297 else
2298 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2300 alpha_set_memflags (seq, operands[1]);
2301 emit_insn (seq);
2303 if (copyout)
2304 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2306 return true;
2309 if (any_memory_operand (operands[0], mode))
2311 if (aligned_memory_operand (operands[0], mode))
2313 rtx aligned_mem, bitnum;
2314 rtx temp1 = gen_reg_rtx (SImode);
2315 rtx temp2 = gen_reg_rtx (SImode);
2317 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2319 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2320 temp1, temp2));
2322 else
2324 rtx temp1 = gen_reg_rtx (DImode);
2325 rtx temp2 = gen_reg_rtx (DImode);
2326 rtx temp3 = gen_reg_rtx (DImode);
2327 rtx ua = get_unaligned_address (operands[0]);
2329 if (mode == QImode)
2330 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2331 else
2332 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2334 alpha_set_memflags (seq, operands[0]);
2335 emit_insn (seq);
2337 return true;
2340 return false;
2343 /* Implement the movmisalign patterns. One of the operands is a memory
2344 that is not naturally aligned. Emit instructions to load it. */
2346 void
2347 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2349 /* Honor misaligned loads, for those we promised to do so. */
2350 if (MEM_P (operands[1]))
2352 rtx tmp;
2354 if (register_operand (operands[0], mode))
2355 tmp = operands[0];
2356 else
2357 tmp = gen_reg_rtx (mode);
2359 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2360 if (tmp != operands[0])
2361 emit_move_insn (operands[0], tmp);
2363 else if (MEM_P (operands[0]))
2365 if (!reg_or_0_operand (operands[1], mode))
2366 operands[1] = force_reg (mode, operands[1]);
2367 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2369 else
2370 gcc_unreachable ();
2373 /* Generate an unsigned DImode to FP conversion. This is the same code
2374 optabs would emit if we didn't have TFmode patterns.
2376 For SFmode, this is the only construction I've found that can pass
2377 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2378 intermediates will work, because you'll get intermediate rounding
2379 that ruins the end result. Some of this could be fixed by turning
2380 on round-to-positive-infinity, but that requires diddling the fpsr,
2381 which kills performance. I tried turning this around and converting
2382 to a negative number, so that I could turn on /m, but either I did
2383 it wrong or there's something else, because I wound up with the exact
2384 same single-bit error. There is a branch-less form of this same code:
2386 srl $16,1,$1
2387 and $16,1,$2
2388 cmplt $16,0,$3
2389 or $1,$2,$2
2390 cmovge $16,$16,$2
2391 itoft $3,$f10
2392 itoft $2,$f11
2393 cvtqs $f11,$f11
2394 adds $f11,$f11,$f0
2395 fcmoveq $f10,$f11,$f0
2397 I'm not using it because it's the same number of instructions as
2398 this branch-full form, and it has more serialized long latency
2399 instructions on the critical path.
2401 For DFmode, we can avoid rounding errors by breaking up the word
2402 into two pieces, converting them separately, and adding them back:
2404 LC0: .long 0,0x5f800000
2406 itoft $16,$f11
2407 lda $2,LC0
2408 cmplt $16,0,$1
2409 cpyse $f11,$f31,$f10
2410 cpyse $f31,$f11,$f11
2411 s4addq $1,$2,$1
2412 lds $f12,0($1)
2413 cvtqt $f10,$f10
2414 cvtqt $f11,$f11
2415 addt $f12,$f10,$f0
2416 addt $f0,$f11,$f0
2418 This doesn't seem to be a clear-cut win over the optabs form.
2419 It probably all depends on the distribution of numbers being
2420 converted -- in the optabs form, all but high-bit-set has a
2421 much lower minimum execution time. */
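/* The negative-input path below halves the value before the signed
   conversion and doubles the result afterwards; ORing the discarded low bit
   back into bit 0 keeps it as a sticky bit, so the final rounding agrees
   with what a direct unsigned conversion would have produced.  */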
2423 void
2424 alpha_emit_floatuns (rtx operands[2])
2426 rtx neglab, donelab, i0, i1, f0, in, out;
2427 enum machine_mode mode;
2429 out = operands[0];
2430 in = force_reg (DImode, operands[1]);
2431 mode = GET_MODE (out);
2432 neglab = gen_label_rtx ();
2433 donelab = gen_label_rtx ();
2434 i0 = gen_reg_rtx (DImode);
2435 i1 = gen_reg_rtx (DImode);
2436 f0 = gen_reg_rtx (mode);
2438 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2440 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2441 emit_jump_insn (gen_jump (donelab));
2442 emit_barrier ();
2444 emit_label (neglab);
2446 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2447 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2448 emit_insn (gen_iordi3 (i0, i0, i1));
2449 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2450 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2452 emit_label (donelab);
2455 /* Generate the comparison for a conditional branch. */
2457 void
2458 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2460 enum rtx_code cmp_code, branch_code;
2461 enum machine_mode branch_mode = VOIDmode;
2462 enum rtx_code code = GET_CODE (operands[0]);
2463 rtx op0 = operands[1], op1 = operands[2];
2464 rtx tem;
2466 if (cmp_mode == TFmode)
2468 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2469 op1 = const0_rtx;
2470 cmp_mode = DImode;
2473 /* The general case: fold the comparison code to the types of compares
2474 that we have, choosing the branch as necessary. */
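/* For example, an integer "a > b" has no direct compare instruction, so it
   is reversed to "a <= b" with the branch testing for a zero result,
   roughly "cmple t,a,b; beq t,label"; a DFmode "a > b" is instead swapped
   to "b < a" with the branch taken on a nonzero result, roughly
   "cmptlt f,b,a; fbne f,label".  */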
2475 switch (code)
2477 case EQ: case LE: case LT: case LEU: case LTU:
2478 case UNORDERED:
2479 /* We have these compares: */
2480 cmp_code = code, branch_code = NE;
2481 break;
2483 case NE:
2484 case ORDERED:
2485 /* These must be reversed. */
2486 cmp_code = reverse_condition (code), branch_code = EQ;
2487 break;
2489 case GE: case GT: case GEU: case GTU:
2490 /* For FP, we swap them, for INT, we reverse them. */
2491 if (cmp_mode == DFmode)
2493 cmp_code = swap_condition (code);
2494 branch_code = NE;
2495 tem = op0, op0 = op1, op1 = tem;
2497 else
2499 cmp_code = reverse_condition (code);
2500 branch_code = EQ;
2502 break;
2504 default:
2505 gcc_unreachable ();
2508 if (cmp_mode == DFmode)
2510 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2512 /* When we are not as concerned about non-finite values, and we
2513 are comparing against zero, we can branch directly. */
2514 if (op1 == CONST0_RTX (DFmode))
2515 cmp_code = UNKNOWN, branch_code = code;
2516 else if (op0 == CONST0_RTX (DFmode))
2518 /* Undo the swap we probably did just above. */
2519 tem = op0, op0 = op1, op1 = tem;
2520 branch_code = swap_condition (cmp_code);
2521 cmp_code = UNKNOWN;
2524 else
2526 /* ??? We mark the branch mode to be CCmode to prevent the
2527 compare and branch from being combined, since the compare
2528 insn follows IEEE rules that the branch does not. */
2529 branch_mode = CCmode;
2532 else
2534 /* The following optimizations are only for signed compares. */
2535 if (code != LEU && code != LTU && code != GEU && code != GTU)
2537 /* Whee. Compare and branch against 0 directly. */
2538 if (op1 == const0_rtx)
2539 cmp_code = UNKNOWN, branch_code = code;
2541 /* If the constant doesn't fit into an immediate, but can
2542 be generated by lda/ldah, we adjust the argument and
2543 compare against zero, so we can use beq/bne directly. */
2544 /* ??? Don't do this when comparing against symbols, otherwise
2545 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2546 be declared false out of hand (at least for non-weak). */
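/* For example, "x == 300" cannot use cmpeq with an 8-bit literal, but -300
   satisfies constraint K, so this emits roughly "lda t,-300(x); beq t,label"
   rather than first loading 300 into a register.  */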
2547 else if (CONST_INT_P (op1)
2548 && (code == EQ || code == NE)
2549 && !(symbolic_operand (op0, VOIDmode)
2550 || (REG_P (op0) && REG_POINTER (op0))))
2552 rtx n_op1 = GEN_INT (-INTVAL (op1));
2554 if (! satisfies_constraint_I (op1)
2555 && (satisfies_constraint_K (n_op1)
2556 || satisfies_constraint_L (n_op1)))
2557 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2561 if (!reg_or_0_operand (op0, DImode))
2562 op0 = force_reg (DImode, op0);
2563 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2564 op1 = force_reg (DImode, op1);
2567 /* Emit an initial compare instruction, if necessary. */
2568 tem = op0;
2569 if (cmp_code != UNKNOWN)
2571 tem = gen_reg_rtx (cmp_mode);
2572 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2575 /* Emit the branch instruction. */
2576 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2577 gen_rtx_IF_THEN_ELSE (VOIDmode,
2578 gen_rtx_fmt_ee (branch_code,
2579 branch_mode, tem,
2580 CONST0_RTX (cmp_mode)),
2581 gen_rtx_LABEL_REF (VOIDmode,
2582 operands[3]),
2583 pc_rtx));
2584 emit_jump_insn (tem);
2587 /* Certain simplifications can be done to make invalid setcc operations
2588 valid. Return true if the setcc was emitted, false if we can't work. */
2590 bool
2591 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2593 enum rtx_code cmp_code;
2594 enum rtx_code code = GET_CODE (operands[1]);
2595 rtx op0 = operands[2], op1 = operands[3];
2596 rtx tmp;
2598 if (cmp_mode == TFmode)
2600 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2601 op1 = const0_rtx;
2602 cmp_mode = DImode;
2605 if (cmp_mode == DFmode && !TARGET_FIX)
2606 return 0;
2608 /* The general case: fold the comparison code to the types of compares
2609 that we have, choosing the branch as necessary. */
2611 cmp_code = UNKNOWN;
2612 switch (code)
2614 case EQ: case LE: case LT: case LEU: case LTU:
2615 case UNORDERED:
2616 /* We have these compares. */
2617 if (cmp_mode == DFmode)
2618 cmp_code = code, code = NE;
2619 break;
2621 case NE:
2622 if (cmp_mode == DImode && op1 == const0_rtx)
2623 break;
2624 /* FALLTHRU */
2626 case ORDERED:
2627 cmp_code = reverse_condition (code);
2628 code = EQ;
2629 break;
2631 case GE: case GT: case GEU: case GTU:
2632 /* These normally need swapping, but for integer zero we have
2633 special patterns that recognize swapped operands. */
2634 if (cmp_mode == DImode && op1 == const0_rtx)
2635 break;
2636 code = swap_condition (code);
2637 if (cmp_mode == DFmode)
2638 cmp_code = code, code = NE;
2639 tmp = op0, op0 = op1, op1 = tmp;
2640 break;
2642 default:
2643 gcc_unreachable ();
2646 if (cmp_mode == DImode)
2648 if (!register_operand (op0, DImode))
2649 op0 = force_reg (DImode, op0);
2650 if (!reg_or_8bit_operand (op1, DImode))
2651 op1 = force_reg (DImode, op1);
2654 /* Emit an initial compare instruction, if necessary. */
2655 if (cmp_code != UNKNOWN)
2657 tmp = gen_reg_rtx (cmp_mode);
2658 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2659 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2661 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2662 op1 = const0_rtx;
2665 /* Emit the setcc instruction. */
2666 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2667 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2668 return true;
2672 /* Rewrite a comparison against zero CMP of the form
2673 (CODE (cc0) (const_int 0)) so it can be written validly in
2674 a conditional move (if_then_else CMP ...).
2675 If both of the operands that set cc0 are nonzero we must emit
2676 an insn to perform the compare (it can't be done within
2677 the conditional move). */
2680 rtx alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2682 enum rtx_code code = GET_CODE (cmp);
2683 enum rtx_code cmov_code = NE;
2684 rtx op0 = XEXP (cmp, 0);
2685 rtx op1 = XEXP (cmp, 1);
2686 enum machine_mode cmp_mode
2687 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2688 enum machine_mode cmov_mode = VOIDmode;
2689 int local_fast_math = flag_unsafe_math_optimizations;
2690 rtx tem;
2692 if (cmp_mode == TFmode)
2694 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2695 op1 = const0_rtx;
2696 cmp_mode = DImode;
2699 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2701 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2703 enum rtx_code cmp_code;
2705 if (! TARGET_FIX)
2706 return 0;
2708 /* If we have fp<->int register move instructions, do a cmov by
2709 performing the comparison in fp registers, and move the
2710 zero/nonzero value to integer registers, where we can then
2711 use a normal cmov, or vice-versa. */
2713 switch (code)
2715 case EQ: case LE: case LT: case LEU: case LTU:
2716 /* We have these compares. */
2717 cmp_code = code, code = NE;
2718 break;
2720 case NE:
2721 /* This must be reversed. */
2722 cmp_code = EQ, code = EQ;
2723 break;
2725 case GE: case GT: case GEU: case GTU:
2726 /* These normally need swapping, but for integer zero we have
2727 special patterns that recognize swapped operands. */
2728 if (cmp_mode == DImode && op1 == const0_rtx)
2729 cmp_code = code, code = NE;
2730 else
2732 cmp_code = swap_condition (code);
2733 code = NE;
2734 tem = op0, op0 = op1, op1 = tem;
2736 break;
2738 default:
2739 gcc_unreachable ();
2742 tem = gen_reg_rtx (cmp_mode);
2743 emit_insn (gen_rtx_SET (VOIDmode, tem,
2744 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2745 op0, op1)));
2747 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2748 op0 = gen_lowpart (cmp_mode, tem);
2749 op1 = CONST0_RTX (cmp_mode);
2750 local_fast_math = 1;
2753 /* We may be able to use a conditional move directly.
2754 This avoids emitting spurious compares. */
2755 if (signed_comparison_operator (cmp, VOIDmode)
2756 && (cmp_mode == DImode || local_fast_math)
2757 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2758 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2760 /* We can't put the comparison inside the conditional move;
2761 emit a compare instruction and put that inside the
2762 conditional move. Make sure we emit only comparisons we have;
2763 swap or reverse as necessary. */
2765 if (!can_create_pseudo_p ())
2766 return NULL_RTX;
2768 switch (code)
2770 case EQ: case LE: case LT: case LEU: case LTU:
2771 /* We have these compares: */
2772 break;
2774 case NE:
2775 /* This must be reversed. */
2776 code = reverse_condition (code);
2777 cmov_code = EQ;
2778 break;
2780 case GE: case GT: case GEU: case GTU:
2781 /* These must be swapped. */
2782 if (op1 != CONST0_RTX (cmp_mode))
2784 code = swap_condition (code);
2785 tem = op0, op0 = op1, op1 = tem;
2787 break;
2789 default:
2790 gcc_unreachable ();
2793 if (cmp_mode == DImode)
2795 if (!reg_or_0_operand (op0, DImode))
2796 op0 = force_reg (DImode, op0);
2797 if (!reg_or_8bit_operand (op1, DImode))
2798 op1 = force_reg (DImode, op1);
2801 /* ??? We mark the branch mode to be CCmode to prevent the compare
2802 and cmov from being combined, since the compare insn follows IEEE
2803 rules that the cmov does not. */
2804 if (cmp_mode == DFmode && !local_fast_math)
2805 cmov_mode = CCmode;
2807 tem = gen_reg_rtx (cmp_mode);
2808 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2809 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2812 /* Simplify a conditional move of two constants into a setcc with
2813 arithmetic. This is done with a splitter since combine would
2814 just undo the work if done during code generation. It also catches
2815 cases we wouldn't have before cse. */
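/* Some worked examples: "x ? 8 : 0" becomes a setcc into a temporary
   followed by a shift left of 3; "x ? -1 : 0" becomes a setcc followed by a
   negation; and "x ? 5 : 1" (difference 4) becomes a setcc scaled and added
   via s4addq with the false value 1 as the addend.  */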
2818 int alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2819 rtx t_rtx, rtx f_rtx)
2821 HOST_WIDE_INT t, f, diff;
2822 enum machine_mode mode;
2823 rtx target, subtarget, tmp;
2825 mode = GET_MODE (dest);
2826 t = INTVAL (t_rtx);
2827 f = INTVAL (f_rtx);
2828 diff = t - f;
2830 if (((code == NE || code == EQ) && diff < 0)
2831 || (code == GE || code == GT))
2833 code = reverse_condition (code);
2834 diff = t, t = f, f = diff;
2835 diff = t - f;
2838 subtarget = target = dest;
2839 if (mode != DImode)
2841 target = gen_lowpart (DImode, dest);
2842 if (can_create_pseudo_p ())
2843 subtarget = gen_reg_rtx (DImode);
2844 else
2845 subtarget = target;
2847 /* Below, we must be careful to use copy_rtx on target and subtarget
2848 in intermediate insns, as they may be a subreg rtx, which may not
2849 be shared. */
2851 if (f == 0 && exact_log2 (diff) > 0
2852 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2853 viable over a longer latency cmove. On EV5, the E0 slot is a
2854 scarce resource, and on EV4 shift has the same latency as a cmove. */
2855 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2857 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2858 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2860 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2861 GEN_INT (exact_log2 (t)));
2862 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2864 else if (f == 0 && t == -1)
2866 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2867 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2869 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2871 else if (diff == 1 || diff == 4 || diff == 8)
2873 rtx add_op;
2875 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2876 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2878 if (diff == 1)
2879 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2880 else
2882 add_op = GEN_INT (f);
2883 if (sext_add_operand (add_op, mode))
2885 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2886 GEN_INT (diff));
2887 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2888 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2890 else
2891 return 0;
2894 else
2895 return 0;
2897 return 1;
2900 /* Look up the X_floating library function name for the
2901 given operation. */
2903 struct GTY(()) xfloating_op
2905 const enum rtx_code code;
2906 const char *const GTY((skip)) osf_func;
2907 const char *const GTY((skip)) vms_func;
2908 rtx libcall;
2911 static GTY(()) struct xfloating_op xfloating_ops[] =
2913 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2914 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2915 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2916 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2917 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2918 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2919 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2920 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2921 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2922 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2923 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2924 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2925 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2926 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2927 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2930 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2932 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2933 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2936 static rtx
2937 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2939 struct xfloating_op *ops = xfloating_ops;
2940 long n = ARRAY_SIZE (xfloating_ops);
2941 long i;
2943 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2945 /* How irritating. Nothing to key off for the main table. */
2946 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2948 ops = vax_cvt_ops;
2949 n = ARRAY_SIZE (vax_cvt_ops);
2952 for (i = 0; i < n; ++i, ++ops)
2953 if (ops->code == code)
2955 rtx func = ops->libcall;
2956 if (!func)
2958 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2959 ? ops->vms_func : ops->osf_func);
2960 ops->libcall = func;
2962 return func;
2965 gcc_unreachable ();
2968 /* Most X_floating operations take the rounding mode as an argument.
2969 Compute that here. */
2971 static int
2972 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2973 enum alpha_fp_rounding_mode round)
2975 int mode;
2977 switch (round)
2979 case ALPHA_FPRM_NORM:
2980 mode = 2;
2981 break;
2982 case ALPHA_FPRM_MINF:
2983 mode = 1;
2984 break;
2985 case ALPHA_FPRM_CHOP:
2986 mode = 0;
2987 break;
2988 case ALPHA_FPRM_DYN:
2989 mode = 4;
2990 break;
2991 default:
2992 gcc_unreachable ();
2994 /* XXX For reference, round to +inf is mode = 3. */
2997 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2998 mode |= 0x10000;
3000 return mode;
3003 /* Emit an X_floating library function call.
3005 Note that these functions do not follow normal calling conventions:
3006 TFmode arguments are passed in two integer registers (as opposed to
3007 indirect); TFmode return values appear in R16+R17.
3009 FUNC is the function to call.
3010 TARGET is where the output belongs.
3011 OPERANDS are the inputs.
3012 NOPERANDS is the count of inputs.
3013 EQUIV is the expression equivalent for the function.
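   Concretely, for a TFmode add the two TFmode operands are passed in
   $16-$17 and $18-$19, the DImode rounding-mode argument in $20, and the
   TFmode result comes back in $16-$17, matching the register assignment
   loop below.  */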
3016 static void
3017 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3018 int noperands, rtx equiv)
3020 rtx usage = NULL_RTX, tmp, reg;
3021 int regno = 16, i;
3023 start_sequence ();
3025 for (i = 0; i < noperands; ++i)
3027 switch (GET_MODE (operands[i]))
3029 case TFmode:
3030 reg = gen_rtx_REG (TFmode, regno);
3031 regno += 2;
3032 break;
3034 case DFmode:
3035 reg = gen_rtx_REG (DFmode, regno + 32);
3036 regno += 1;
3037 break;
3039 case VOIDmode:
3040 gcc_assert (CONST_INT_P (operands[i]));
3041 /* FALLTHRU */
3042 case DImode:
3043 reg = gen_rtx_REG (DImode, regno);
3044 regno += 1;
3045 break;
3047 default:
3048 gcc_unreachable ();
3051 emit_move_insn (reg, operands[i]);
3052 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3055 switch (GET_MODE (target))
3057 case TFmode:
3058 reg = gen_rtx_REG (TFmode, 16);
3059 break;
3060 case DFmode:
3061 reg = gen_rtx_REG (DFmode, 32);
3062 break;
3063 case DImode:
3064 reg = gen_rtx_REG (DImode, 0);
3065 break;
3066 default:
3067 gcc_unreachable ();
3070 tmp = gen_rtx_MEM (QImode, func);
3071 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3072 const0_rtx, const0_rtx));
3073 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3074 RTL_CONST_CALL_P (tmp) = 1;
3076 tmp = get_insns ();
3077 end_sequence ();
3079 emit_libcall_block (tmp, target, reg, equiv);
3082 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3084 void
3085 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3087 rtx func;
3088 int mode;
3089 rtx out_operands[3];
3091 func = alpha_lookup_xfloating_lib_func (code);
3092 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3094 out_operands[0] = operands[1];
3095 out_operands[1] = operands[2];
3096 out_operands[2] = GEN_INT (mode);
3097 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3098 gen_rtx_fmt_ee (code, TFmode, operands[1],
3099 operands[2]));
3102 /* Emit an X_floating library function call for a comparison. */
3104 static rtx
3105 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3107 enum rtx_code cmp_code, res_code;
3108 rtx func, out, operands[2], note;
3110 /* X_floating library comparison functions return
3111 -1 unordered
3112 0 false
3113 1 true
3114 Convert the compare against the raw return value. */
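/* Concretely, UNORDERED is implemented by calling the EQ routine and
   testing the raw result for < 0 (only an unordered compare returns -1),
   ORDERED tests the same result for >= 0, NE tests its own routine's result
   for != 0, and EQ/LT/LE/GT/GE test theirs for > 0.  */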
3116 cmp_code = *pcode;
3117 switch (cmp_code)
3119 case UNORDERED:
3120 cmp_code = EQ;
3121 res_code = LT;
3122 break;
3123 case ORDERED:
3124 cmp_code = EQ;
3125 res_code = GE;
3126 break;
3127 case NE:
3128 res_code = NE;
3129 break;
3130 case EQ:
3131 case LT:
3132 case GT:
3133 case LE:
3134 case GE:
3135 res_code = GT;
3136 break;
3137 default:
3138 gcc_unreachable ();
3140 *pcode = res_code;
3142 func = alpha_lookup_xfloating_lib_func (cmp_code);
3144 operands[0] = op0;
3145 operands[1] = op1;
3146 out = gen_reg_rtx (DImode);
3148 /* What's actually returned is -1,0,1, not a proper boolean value,
3149 so use an EXPR_LIST as with a generic libcall instead of a
3150 comparison type expression. */
3151 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3152 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3153 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3154 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3156 return out;
3159 /* Emit an X_floating library function call for a conversion. */
3161 void
3162 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3164 int noperands = 1, mode;
3165 rtx out_operands[2];
3166 rtx func;
3167 enum rtx_code code = orig_code;
3169 if (code == UNSIGNED_FIX)
3170 code = FIX;
3172 func = alpha_lookup_xfloating_lib_func (code);
3174 out_operands[0] = operands[1];
3176 switch (code)
3178 case FIX:
3179 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3180 out_operands[1] = GEN_INT (mode);
3181 noperands = 2;
3182 break;
3183 case FLOAT_TRUNCATE:
3184 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3185 out_operands[1] = GEN_INT (mode);
3186 noperands = 2;
3187 break;
3188 default:
3189 break;
3192 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3193 gen_rtx_fmt_e (orig_code,
3194 GET_MODE (operands[0]),
3195 operands[1]));
3198 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3199 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3200 guarantee that the sequence
3201 set (OP[0] OP[2])
3202 set (OP[1] OP[3])
3203 is valid. Naturally, output operand ordering is little-endian.
3204 This is used by *movtf_internal and *movti_internal. */
3206 void
3207 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3208 bool fixup_overlap)
3210 switch (GET_CODE (operands[1]))
3212 case REG:
3213 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3214 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3215 break;
3217 case MEM:
3218 operands[3] = adjust_address (operands[1], DImode, 8);
3219 operands[2] = adjust_address (operands[1], DImode, 0);
3220 break;
3222 case CONST_INT:
3223 case CONST_DOUBLE:
3224 gcc_assert (operands[1] == CONST0_RTX (mode));
3225 operands[2] = operands[3] = const0_rtx;
3226 break;
3228 default:
3229 gcc_unreachable ();
3232 switch (GET_CODE (operands[0]))
3234 case REG:
3235 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3236 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3237 break;
3239 case MEM:
3240 operands[1] = adjust_address (operands[0], DImode, 8);
3241 operands[0] = adjust_address (operands[0], DImode, 0);
3242 break;
3244 default:
3245 gcc_unreachable ();
3248 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3250 rtx tmp;
3251 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3252 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3256 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3257 op2 is a register containing the sign bit, operation is the
3258 logical operation to be performed. */
3260 void
3261 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3263 rtx high_bit = operands[2];
3264 rtx scratch;
3265 int move;
3267 alpha_split_tmode_pair (operands, TFmode, false);
3269 /* Detect three flavors of operand overlap. */
3270 move = 1;
3271 if (rtx_equal_p (operands[0], operands[2]))
3272 move = 0;
3273 else if (rtx_equal_p (operands[1], operands[2]))
3275 if (rtx_equal_p (operands[0], high_bit))
3276 move = 2;
3277 else
3278 move = -1;
3281 if (move < 0)
3282 emit_move_insn (operands[0], operands[2]);
3284 /* ??? If the destination overlaps both source tf and high_bit, then
3285 assume source tf is dead in its entirety and use the other half
3286 for a scratch register. Otherwise "scratch" is just the proper
3287 destination register. */
3288 scratch = operands[move < 2 ? 1 : 3];
3290 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3292 if (move > 0)
3294 emit_move_insn (operands[0], operands[2]);
3295 if (move > 1)
3296 emit_move_insn (operands[1], scratch);
3300 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3301 unaligned data:
3303 unsigned: signed:
3304 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3305 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3306 lda r3,X(r11) lda r3,X+2(r11)
3307 extwl r1,r3,r1 extql r1,r3,r1
3308 extwh r2,r3,r2 extqh r2,r3,r2
3309 or r1,r2,r1 or r1,r2,r1
3310 sra r1,48,r1
3312 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3313 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3314 lda r3,X(r11) lda r3,X(r11)
3315 extll r1,r3,r1 extll r1,r3,r1
3316 extlh r2,r3,r2 extlh r2,r3,r2
3317 or r1,r2,r1 addl r1,r2,r1
3319 quad: ldq_u r1,X(r11)
3320 ldq_u r2,X+7(r11)
3321 lda r3,X(r11)
3322 extql r1,r3,r1
3323 extqh r2,r3,r2
3324 or r1,r2,r1
3327 void
3328 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3329 HOST_WIDE_INT ofs, int sign)
3331 rtx meml, memh, addr, extl, exth, tmp, mema;
3332 enum machine_mode mode;
3334 if (TARGET_BWX && size == 2)
3336 meml = adjust_address (mem, QImode, ofs);
3337 memh = adjust_address (mem, QImode, ofs+1);
3338 if (BYTES_BIG_ENDIAN)
3339 tmp = meml, meml = memh, memh = tmp;
3340 extl = gen_reg_rtx (DImode);
3341 exth = gen_reg_rtx (DImode);
3342 emit_insn (gen_zero_extendqidi2 (extl, meml));
3343 emit_insn (gen_zero_extendqidi2 (exth, memh));
3344 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3345 NULL, 1, OPTAB_LIB_WIDEN);
3346 addr = expand_simple_binop (DImode, IOR, extl, exth,
3347 NULL, 1, OPTAB_LIB_WIDEN);
3349 if (sign && GET_MODE (tgt) != HImode)
3351 addr = gen_lowpart (HImode, addr);
3352 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3354 else
3356 if (GET_MODE (tgt) != DImode)
3357 addr = gen_lowpart (GET_MODE (tgt), addr);
3358 emit_move_insn (tgt, addr);
3360 return;
3363 meml = gen_reg_rtx (DImode);
3364 memh = gen_reg_rtx (DImode);
3365 addr = gen_reg_rtx (DImode);
3366 extl = gen_reg_rtx (DImode);
3367 exth = gen_reg_rtx (DImode);
3369 mema = XEXP (mem, 0);
3370 if (GET_CODE (mema) == LO_SUM)
3371 mema = force_reg (Pmode, mema);
3373 /* AND addresses cannot be in any alias set, since they may implicitly
3374 alias surrounding code. Ideally we'd have some alias set that
3375 covered all types except those with alignment 8 or higher. */
3377 tmp = change_address (mem, DImode,
3378 gen_rtx_AND (DImode,
3379 plus_constant (mema, ofs),
3380 GEN_INT (-8)));
3381 set_mem_alias_set (tmp, 0);
3382 emit_move_insn (meml, tmp);
3384 tmp = change_address (mem, DImode,
3385 gen_rtx_AND (DImode,
3386 plus_constant (mema, ofs + size - 1),
3387 GEN_INT (-8)));
3388 set_mem_alias_set (tmp, 0);
3389 emit_move_insn (memh, tmp);
3391 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3393 emit_move_insn (addr, plus_constant (mema, -1));
3395 emit_insn (gen_extqh_be (extl, meml, addr));
3396 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3398 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3399 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3400 addr, 1, OPTAB_WIDEN);
3402 else if (sign && size == 2)
3404 emit_move_insn (addr, plus_constant (mema, ofs+2));
3406 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3407 emit_insn (gen_extqh_le (exth, memh, addr));
3409 /* We must use tgt here for the target. Alpha-vms port fails if we use
3410 addr for the target, because addr is marked as a pointer and combine
3411 knows that pointers are always sign-extended 32-bit values. */
3412 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3413 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3414 addr, 1, OPTAB_WIDEN);
3416 else
3418 if (WORDS_BIG_ENDIAN)
3420 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3421 switch ((int) size)
3423 case 2:
3424 emit_insn (gen_extwh_be (extl, meml, addr));
3425 mode = HImode;
3426 break;
3428 case 4:
3429 emit_insn (gen_extlh_be (extl, meml, addr));
3430 mode = SImode;
3431 break;
3433 case 8:
3434 emit_insn (gen_extqh_be (extl, meml, addr));
3435 mode = DImode;
3436 break;
3438 default:
3439 gcc_unreachable ();
3441 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3443 else
3445 emit_move_insn (addr, plus_constant (mema, ofs));
3446 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3447 switch ((int) size)
3449 case 2:
3450 emit_insn (gen_extwh_le (exth, memh, addr));
3451 mode = HImode;
3452 break;
3454 case 4:
3455 emit_insn (gen_extlh_le (exth, memh, addr));
3456 mode = SImode;
3457 break;
3459 case 8:
3460 emit_insn (gen_extqh_le (exth, memh, addr));
3461 mode = DImode;
3462 break;
3464 default:
3465 gcc_unreachable ();
3469 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3470 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3471 sign, OPTAB_WIDEN);
3474 if (addr != tgt)
3475 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3478 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3480 void
3481 alpha_expand_unaligned_store (rtx dst, rtx src,
3482 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3484 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3486 if (TARGET_BWX && size == 2)
3488 if (src != const0_rtx)
3490 dstl = gen_lowpart (QImode, src);
3491 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3492 NULL, 1, OPTAB_LIB_WIDEN);
3493 dsth = gen_lowpart (QImode, dsth);
3495 else
3496 dstl = dsth = const0_rtx;
3498 meml = adjust_address (dst, QImode, ofs);
3499 memh = adjust_address (dst, QImode, ofs+1);
3500 if (BYTES_BIG_ENDIAN)
3501 addr = meml, meml = memh, memh = addr;
3503 emit_move_insn (meml, dstl);
3504 emit_move_insn (memh, dsth);
3505 return;
3508 dstl = gen_reg_rtx (DImode);
3509 dsth = gen_reg_rtx (DImode);
3510 insl = gen_reg_rtx (DImode);
3511 insh = gen_reg_rtx (DImode);
3513 dsta = XEXP (dst, 0);
3514 if (GET_CODE (dsta) == LO_SUM)
3515 dsta = force_reg (Pmode, dsta);
3517 /* AND addresses cannot be in any alias set, since they may implicitly
3518 alias surrounding code. Ideally we'd have some alias set that
3519 covered all types except those with alignment 8 or higher. */
3521 meml = change_address (dst, DImode,
3522 gen_rtx_AND (DImode,
3523 plus_constant (dsta, ofs),
3524 GEN_INT (-8)));
3525 set_mem_alias_set (meml, 0);
3527 memh = change_address (dst, DImode,
3528 gen_rtx_AND (DImode,
3529 plus_constant (dsta, ofs + size - 1),
3530 GEN_INT (-8)));
3531 set_mem_alias_set (memh, 0);
3533 emit_move_insn (dsth, memh);
3534 emit_move_insn (dstl, meml);
3535 if (WORDS_BIG_ENDIAN)
3537 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3539 if (src != const0_rtx)
3541 switch ((int) size)
3543 case 2:
3544 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3545 break;
3546 case 4:
3547 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3548 break;
3549 case 8:
3550 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3551 break;
3553 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3554 GEN_INT (size*8), addr));
3557 switch ((int) size)
3559 case 2:
3560 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3561 break;
3562 case 4:
3564 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3565 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3566 break;
3568 case 8:
3569 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3570 break;
3573 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3575 else
3577 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3579 if (src != CONST0_RTX (GET_MODE (src)))
3581 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3582 GEN_INT (size*8), addr));
3584 switch ((int) size)
3586 case 2:
3587 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3588 break;
3589 case 4:
3590 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3591 break;
3592 case 8:
3593 emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
3594 break;
3598 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3600 switch ((int) size)
3602 case 2:
3603 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3604 break;
3605 case 4:
3607 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3608 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3609 break;
3611 case 8:
3612 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3613 break;
3617 if (src != CONST0_RTX (GET_MODE (src)))
3619 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3620 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3623 if (WORDS_BIG_ENDIAN)
3625 emit_move_insn (meml, dstl);
3626 emit_move_insn (memh, dsth);
3628 else
3630 /* Must store high before low for degenerate case of aligned. */
3631 emit_move_insn (memh, dsth);
3632 emit_move_insn (meml, dstl);
3636 /* The block move code tries to maximize speed by separating loads and
3637 stores at the expense of register pressure: we load all of the data
3638 before we store it back out. There are two secondary effects worth
3639 mentioning, that this speeds copying to/from aligned and unaligned
3640 buffers, and that it makes the code significantly easier to write. */
3642 #define MAX_MOVE_WORDS 8
3644 /* Load an integral number of consecutive unaligned quadwords. */
3646 static void
3647 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3648 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3650 rtx const im8 = GEN_INT (-8);
3651 rtx const i64 = GEN_INT (64);
3652 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3653 rtx sreg, areg, tmp, smema;
3654 HOST_WIDE_INT i;
3656 smema = XEXP (smem, 0);
3657 if (GET_CODE (smema) == LO_SUM)
3658 smema = force_reg (Pmode, smema);
3660 /* Generate all the tmp registers we need. */
3661 for (i = 0; i < words; ++i)
3663 data_regs[i] = out_regs[i];
3664 ext_tmps[i] = gen_reg_rtx (DImode);
3666 data_regs[words] = gen_reg_rtx (DImode);
3668 if (ofs != 0)
3669 smem = adjust_address (smem, GET_MODE (smem), ofs);
3671 /* Load up all of the source data. */
3672 for (i = 0; i < words; ++i)
3674 tmp = change_address (smem, DImode,
3675 gen_rtx_AND (DImode,
3676 plus_constant (smema, 8*i),
3677 im8));
3678 set_mem_alias_set (tmp, 0);
3679 emit_move_insn (data_regs[i], tmp);
3682 tmp = change_address (smem, DImode,
3683 gen_rtx_AND (DImode,
3684 plus_constant (smema, 8*words - 1),
3685 im8));
3686 set_mem_alias_set (tmp, 0);
3687 emit_move_insn (data_regs[words], tmp);
3689 /* Extract the half-word fragments. Unfortunately DEC decided to make
3690 extxh with offset zero a noop instead of zeroing the register, so
3691 we must take care of that edge condition ourselves with cmov. */
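/* In other words, the ext*h shift count wraps modulo 64, so for an address
   that is already 8-byte aligned the instruction copies the second quadword
   instead of producing zero; the cmoveq generated below forces the high
   fragment to zero in that case.  */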
3693 sreg = copy_addr_to_reg (smema);
3694 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3695 1, OPTAB_WIDEN);
3696 if (WORDS_BIG_ENDIAN)
3697 emit_move_insn (sreg, plus_constant (sreg, 7));
3698 for (i = 0; i < words; ++i)
3700 if (WORDS_BIG_ENDIAN)
3702 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3703 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3705 else
3707 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3708 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3710 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3711 gen_rtx_IF_THEN_ELSE (DImode,
3712 gen_rtx_EQ (DImode, areg,
3713 const0_rtx),
3714 const0_rtx, ext_tmps[i])));
3717 /* Merge the half-words into whole words. */
3718 for (i = 0; i < words; ++i)
3720 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3721 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3725 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3726 may be NULL to store zeros. */
3728 static void
3729 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3730 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3732 rtx const im8 = GEN_INT (-8);
3733 rtx const i64 = GEN_INT (64);
3734 rtx ins_tmps[MAX_MOVE_WORDS];
3735 rtx st_tmp_1, st_tmp_2, dreg;
3736 rtx st_addr_1, st_addr_2, dmema;
3737 HOST_WIDE_INT i;
3739 dmema = XEXP (dmem, 0);
3740 if (GET_CODE (dmema) == LO_SUM)
3741 dmema = force_reg (Pmode, dmema);
3743 /* Generate all the tmp registers we need. */
3744 if (data_regs != NULL)
3745 for (i = 0; i < words; ++i)
3746 ins_tmps[i] = gen_reg_rtx(DImode);
3747 st_tmp_1 = gen_reg_rtx(DImode);
3748 st_tmp_2 = gen_reg_rtx(DImode);
3750 if (ofs != 0)
3751 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3753 st_addr_2 = change_address (dmem, DImode,
3754 gen_rtx_AND (DImode,
3755 plus_constant (dmema, words*8 - 1),
3756 im8));
3757 set_mem_alias_set (st_addr_2, 0);
3759 st_addr_1 = change_address (dmem, DImode,
3760 gen_rtx_AND (DImode, dmema, im8));
3761 set_mem_alias_set (st_addr_1, 0);
3763 /* Load up the destination end bits. */
3764 emit_move_insn (st_tmp_2, st_addr_2);
3765 emit_move_insn (st_tmp_1, st_addr_1);
3767 /* Shift the input data into place. */
3768 dreg = copy_addr_to_reg (dmema);
3769 if (WORDS_BIG_ENDIAN)
3770 emit_move_insn (dreg, plus_constant (dreg, 7));
3771 if (data_regs != NULL)
3773 for (i = words-1; i >= 0; --i)
3775 if (WORDS_BIG_ENDIAN)
3777 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3778 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3780 else
3782 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3783 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3786 for (i = words-1; i > 0; --i)
3788 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3789 ins_tmps[i-1], ins_tmps[i-1], 1,
3790 OPTAB_WIDEN);
3794 /* Split and merge the ends with the destination data. */
3795 if (WORDS_BIG_ENDIAN)
3797 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3798 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3800 else
3802 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3803 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3806 if (data_regs != NULL)
3808 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3809 st_tmp_2, 1, OPTAB_WIDEN);
3810 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3811 st_tmp_1, 1, OPTAB_WIDEN);
3814 /* Store it all. */
3815 if (WORDS_BIG_ENDIAN)
3816 emit_move_insn (st_addr_1, st_tmp_1);
3817 else
3818 emit_move_insn (st_addr_2, st_tmp_2);
3819 for (i = words-1; i > 0; --i)
3821 rtx tmp = change_address (dmem, DImode,
3822 gen_rtx_AND (DImode,
3823 plus_constant(dmema,
3824 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3825 im8));
3826 set_mem_alias_set (tmp, 0);
3827 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3829 if (WORDS_BIG_ENDIAN)
3830 emit_move_insn (st_addr_2, st_tmp_2);
3831 else
3832 emit_move_insn (st_addr_1, st_tmp_1);
3836 /* Expand string/block move operations.
3838 operands[0] is the pointer to the destination.
3839 operands[1] is the pointer to the source.
3840 operands[2] is the number of bytes to move.
3841 operands[3] is the alignment. */
3844 int alpha_expand_block_move (rtx operands[])
3846 rtx bytes_rtx = operands[2];
3847 rtx align_rtx = operands[3];
3848 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3849 HOST_WIDE_INT bytes = orig_bytes;
3850 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3851 HOST_WIDE_INT dst_align = src_align;
3852 rtx orig_src = operands[1];
3853 rtx orig_dst = operands[0];
3854 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3855 rtx tmp;
3856 unsigned int i, words, ofs, nregs = 0;
3858 if (orig_bytes <= 0)
3859 return 1;
3860 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3861 return 0;
3863 /* Look for additional alignment information from recorded register info. */
3865 tmp = XEXP (orig_src, 0);
3866 if (REG_P (tmp))
3867 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3868 else if (GET_CODE (tmp) == PLUS
3869 && REG_P (XEXP (tmp, 0))
3870 && CONST_INT_P (XEXP (tmp, 1)))
3872 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3873 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3875 if (a > src_align)
3877 if (a >= 64 && c % 8 == 0)
3878 src_align = 64;
3879 else if (a >= 32 && c % 4 == 0)
3880 src_align = 32;
3881 else if (a >= 16 && c % 2 == 0)
3882 src_align = 16;
3886 tmp = XEXP (orig_dst, 0);
3887 if (REG_P (tmp))
3888 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3889 else if (GET_CODE (tmp) == PLUS
3890 && REG_P (XEXP (tmp, 0))
3891 && CONST_INT_P (XEXP (tmp, 1)))
3893 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3894 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3896 if (a > dst_align)
3898 if (a >= 64 && c % 8 == 0)
3899 dst_align = 64;
3900 else if (a >= 32 && c % 4 == 0)
3901 dst_align = 32;
3902 else if (a >= 16 && c % 2 == 0)
3903 dst_align = 16;
3907 ofs = 0;
3908 if (src_align >= 64 && bytes >= 8)
3910 words = bytes / 8;
3912 for (i = 0; i < words; ++i)
3913 data_regs[nregs + i] = gen_reg_rtx (DImode);
3915 for (i = 0; i < words; ++i)
3916 emit_move_insn (data_regs[nregs + i],
3917 adjust_address (orig_src, DImode, ofs + i * 8));
3919 nregs += words;
3920 bytes -= words * 8;
3921 ofs += words * 8;
3924 if (src_align >= 32 && bytes >= 4)
3926 words = bytes / 4;
3928 for (i = 0; i < words; ++i)
3929 data_regs[nregs + i] = gen_reg_rtx (SImode);
3931 for (i = 0; i < words; ++i)
3932 emit_move_insn (data_regs[nregs + i],
3933 adjust_address (orig_src, SImode, ofs + i * 4));
3935 nregs += words;
3936 bytes -= words * 4;
3937 ofs += words * 4;
3940 if (bytes >= 8)
3942 words = bytes / 8;
3944 for (i = 0; i < words+1; ++i)
3945 data_regs[nregs + i] = gen_reg_rtx (DImode);
3947 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3948 words, ofs);
3950 nregs += words;
3951 bytes -= words * 8;
3952 ofs += words * 8;
3955 if (! TARGET_BWX && bytes >= 4)
3957 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3958 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3959 bytes -= 4;
3960 ofs += 4;
3963 if (bytes >= 2)
3965 if (src_align >= 16)
3967 do {
3968 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3969 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3970 bytes -= 2;
3971 ofs += 2;
3972 } while (bytes >= 2);
3974 else if (! TARGET_BWX)
3976 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3977 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3978 bytes -= 2;
3979 ofs += 2;
3983 while (bytes > 0)
3985 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3986 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3987 bytes -= 1;
3988 ofs += 1;
3991 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3993 /* Now save it back out again. */
3995 i = 0, ofs = 0;
3997 /* Write out the data in whatever chunks reading the source allowed. */
3998 if (dst_align >= 64)
4000 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4002 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4003 data_regs[i]);
4004 ofs += 8;
4005 i++;
4009 if (dst_align >= 32)
4011 /* If the source has remaining DImode regs, write them out in
4012 two pieces. */
4013 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4015 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4016 NULL_RTX, 1, OPTAB_WIDEN);
4018 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4019 gen_lowpart (SImode, data_regs[i]));
4020 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4021 gen_lowpart (SImode, tmp));
4022 ofs += 8;
4023 i++;
4026 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4028 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4029 data_regs[i]);
4030 ofs += 4;
4031 i++;
4035 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4037 /* Write out a remaining block of words using unaligned methods. */
4039 for (words = 1; i + words < nregs; words++)
4040 if (GET_MODE (data_regs[i + words]) != DImode)
4041 break;
4043 if (words == 1)
4044 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4045 else
4046 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4047 words, ofs);
4049 i += words;
4050 ofs += words * 8;
4053 /* Due to the above, this won't be aligned. */
4054 /* ??? If we have more than one of these, consider constructing full
4055 words in registers and using alpha_expand_unaligned_store_words. */
4056 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4058 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4059 ofs += 4;
4060 i++;
4063 if (dst_align >= 16)
4064 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4066 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4067 i++;
4068 ofs += 2;
4070 else
4071 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4073 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4074 i++;
4075 ofs += 2;
4078 /* The remainder must be byte copies. */
4079 while (i < nregs)
4081 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4082 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4083 i++;
4084 ofs += 1;
4087 return 1;
4091 int alpha_expand_block_clear (rtx operands[])
4093 rtx bytes_rtx = operands[1];
4094 rtx align_rtx = operands[3];
4095 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4096 HOST_WIDE_INT bytes = orig_bytes;
4097 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4098 HOST_WIDE_INT alignofs = 0;
4099 rtx orig_dst = operands[0];
4100 rtx tmp;
4101 int i, words, ofs = 0;
4103 if (orig_bytes <= 0)
4104 return 1;
4105 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4106 return 0;
4108 /* Look for stricter alignment. */
4109 tmp = XEXP (orig_dst, 0);
4110 if (REG_P (tmp))
4111 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4112 else if (GET_CODE (tmp) == PLUS
4113 && REG_P (XEXP (tmp, 0))
4114 && CONST_INT_P (XEXP (tmp, 1)))
4116 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4117 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4119 if (a > align)
4121 if (a >= 64)
4122 align = a, alignofs = 8 - c % 8;
4123 else if (a >= 32)
4124 align = a, alignofs = 4 - c % 4;
4125 else if (a >= 16)
4126 align = a, alignofs = 2 - c % 2;
4130 /* Handle an unaligned prefix first. */
4132 if (alignofs > 0)
4134 #if HOST_BITS_PER_WIDE_INT >= 64
4135 /* Given that alignofs is bounded by align, the only time BWX could
4136 generate three stores is for a 7 byte fill. Prefer two individual
4137 stores over a load/mask/store sequence. */
4138 if ((!TARGET_BWX || alignofs == 7)
4139 && align >= 32
4140 && !(alignofs == 4 && bytes >= 4))
4142 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4143 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4144 rtx mem, tmp;
4145 HOST_WIDE_INT mask;
4147 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4148 set_mem_alias_set (mem, 0);
4150 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4151 if (bytes < alignofs)
4153 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4154 ofs += bytes;
4155 bytes = 0;
4157 else
4159 bytes -= alignofs;
4160 ofs += alignofs;
4162 alignofs = 0;
4164 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4165 NULL_RTX, 1, OPTAB_WIDEN);
4167 emit_move_insn (mem, tmp);
4169 #endif
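 /* Worked example (illustrative only, not from the original sources):
    with align >= 64 and alignofs == 3, inv_alignofs is 5 and the mask
    built above is ~(~0 << 40) == 0x000000ffffffffff.  The containing
    quadword starts 5 bytes below OFS, so ANDing with that mask keeps
    the 5 low-order bytes already in place and zeroes the 3 prefix
    bytes at OFS..OFS+2 in a single load/mask/store sequence.  */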
4171 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4173 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4174 bytes -= 1;
4175 ofs += 1;
4176 alignofs -= 1;
4178 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4180 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4181 bytes -= 2;
4182 ofs += 2;
4183 alignofs -= 2;
4185 if (alignofs == 4 && bytes >= 4)
4187 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4188 bytes -= 4;
4189 ofs += 4;
4190 alignofs = 0;
4193 /* If we've not used the extra lead alignment information by now,
4194 we won't be able to. Downgrade align to match what's left over. */
4195 if (alignofs > 0)
4197 alignofs = alignofs & -alignofs;
4198 align = MIN (align, alignofs * BITS_PER_UNIT);
4202 /* Handle a block of contiguous long-words. */
4204 if (align >= 64 && bytes >= 8)
4206 words = bytes / 8;
4208 for (i = 0; i < words; ++i)
4209 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4210 const0_rtx);
4212 bytes -= words * 8;
4213 ofs += words * 8;
4216 /* If the block is large and appropriately aligned, emit a single
4217 store followed by a sequence of stq_u insns. */
4219 if (align >= 32 && bytes > 16)
4221 rtx orig_dsta;
4223 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4224 bytes -= 4;
4225 ofs += 4;
4227 orig_dsta = XEXP (orig_dst, 0);
4228 if (GET_CODE (orig_dsta) == LO_SUM)
4229 orig_dsta = force_reg (Pmode, orig_dsta);
4231 words = bytes / 8;
4232 for (i = 0; i < words; ++i)
4234 rtx mem
4235 = change_address (orig_dst, DImode,
4236 gen_rtx_AND (DImode,
4237 plus_constant (orig_dsta, ofs + i*8),
4238 GEN_INT (-8)));
4239 set_mem_alias_set (mem, 0);
4240 emit_move_insn (mem, const0_rtx);
4243 /* Depending on the alignment, the first stq_u may have overlapped
4244 with the initial stl, which means that the last stq_u didn't
4245 write as much as it would appear. Leave those questionable bytes
4246 unaccounted for. */
4247 bytes -= words * 8 - 4;
4248 ofs += words * 8 - 4;
4251 /* Handle a smaller block of aligned words. */
4253 if ((align >= 64 && bytes == 4)
4254 || (align == 32 && bytes >= 4))
4256 words = bytes / 4;
4258 for (i = 0; i < words; ++i)
4259 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4260 const0_rtx);
4262 bytes -= words * 4;
4263 ofs += words * 4;
4266 /* An unaligned block uses stq_u stores for as many as possible. */
4268 if (bytes >= 8)
4270 words = bytes / 8;
4272 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4274 bytes -= words * 8;
4275 ofs += words * 8;
4278 /* Next clean up any trailing pieces. */
4280 #if HOST_BITS_PER_WIDE_INT >= 64
4281 /* Count the number of bits in BYTES for which aligned stores could
4282 be emitted. */
4283 words = 0;
4284 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4285 if (bytes & i)
4286 words += 1;
4288 /* If we have appropriate alignment (and it wouldn't take too many
4289 instructions otherwise), mask out the bytes we need. */
4290 if (TARGET_BWX ? words > 2 : bytes > 0)
4292 if (align >= 64)
4294 rtx mem, tmp;
4295 HOST_WIDE_INT mask;
4297 mem = adjust_address (orig_dst, DImode, ofs);
4298 set_mem_alias_set (mem, 0);
4300 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4302 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4303 NULL_RTX, 1, OPTAB_WIDEN);
4305 emit_move_insn (mem, tmp);
4306 return 1;
4308 else if (align >= 32 && bytes < 4)
4310 rtx mem, tmp;
4311 HOST_WIDE_INT mask;
4313 mem = adjust_address (orig_dst, SImode, ofs);
4314 set_mem_alias_set (mem, 0);
4316 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4318 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4319 NULL_RTX, 1, OPTAB_WIDEN);
4321 emit_move_insn (mem, tmp);
4322 return 1;
4325 #endif
4327 if (!TARGET_BWX && bytes >= 4)
4329 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4330 bytes -= 4;
4331 ofs += 4;
4334 if (bytes >= 2)
4336 if (align >= 16)
4338 do {
4339 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4340 const0_rtx);
4341 bytes -= 2;
4342 ofs += 2;
4343 } while (bytes >= 2);
4345 else if (! TARGET_BWX)
4347 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4348 bytes -= 2;
4349 ofs += 2;
4353 while (bytes > 0)
4355 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4356 bytes -= 1;
4357 ofs += 1;
4360 return 1;
4363 /* Returns a mask so that zap(x, value) == x & mask. */
4366 alpha_expand_zap_mask (HOST_WIDE_INT value)
4368 rtx result;
4369 int i;
4371 if (HOST_BITS_PER_WIDE_INT >= 64)
4373 HOST_WIDE_INT mask = 0;
4375 for (i = 7; i >= 0; --i)
4377 mask <<= 8;
4378 if (!((value >> i) & 1))
4379 mask |= 0xff;
4382 result = gen_int_mode (mask, DImode);
4384 else
4386 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4388 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4390 for (i = 7; i >= 4; --i)
4392 mask_hi <<= 8;
4393 if (!((value >> i) & 1))
4394 mask_hi |= 0xff;
4397 for (i = 3; i >= 0; --i)
4399 mask_lo <<= 8;
4400 if (!((value >> i) & 1))
4401 mask_lo |= 0xff;
4404 result = immed_double_const (mask_lo, mask_hi, DImode);
4407 return result;
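 /* Illustrative sketch only (hypothetical helper, not used by the
    backend): the same mask computed with plain host arithmetic on a
    64-bit type.  zap(x, value) zeroes byte I of X when bit I of VALUE
    is set, so the equivalent AND mask keeps exactly the bytes whose
    selector bit is clear; e.g. value == 0x0f yields
    0xffffffff00000000.  */
 static unsigned long long
 zap_mask_sketch (unsigned long long value)
 {
   unsigned long long mask = 0;
   int i;

   for (i = 0; i < 8; i++)
     if (!((value >> i) & 1))
       mask |= (unsigned long long) 0xff << (i * 8);
   return mask;
 }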
4410 void
4411 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4412 enum machine_mode mode,
4413 rtx op0, rtx op1, rtx op2)
4415 op0 = gen_lowpart (mode, op0);
4417 if (op1 == const0_rtx)
4418 op1 = CONST0_RTX (mode);
4419 else
4420 op1 = gen_lowpart (mode, op1);
4422 if (op2 == const0_rtx)
4423 op2 = CONST0_RTX (mode);
4424 else
4425 op2 = gen_lowpart (mode, op2);
4427 emit_insn ((*gen) (op0, op1, op2));
4430 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4431 COND is true. Mark the jump as unlikely to be taken. */
4433 static void
4434 emit_unlikely_jump (rtx cond, rtx label)
4436 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4437 rtx x;
4439 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4440 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4441 add_reg_note (x, REG_BR_PROB, very_unlikely);
4444 /* A subroutine of the atomic operation splitters. Emit a load-locked
4445 instruction in MODE. */
4447 static void
4448 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4450 rtx (*fn) (rtx, rtx) = NULL;
4451 if (mode == SImode)
4452 fn = gen_load_locked_si;
4453 else if (mode == DImode)
4454 fn = gen_load_locked_di;
4455 emit_insn (fn (reg, mem));
4458 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4459 instruction in MODE. */
4461 static void
4462 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4464 rtx (*fn) (rtx, rtx, rtx) = NULL;
4465 if (mode == SImode)
4466 fn = gen_store_conditional_si;
4467 else if (mode == DImode)
4468 fn = gen_store_conditional_di;
4469 emit_insn (fn (res, mem, val));
4472 /* A subroutine of the atomic operation splitters. Emit an insxl
4473 instruction in MODE. */
4475 static rtx
4476 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4478 rtx ret = gen_reg_rtx (DImode);
4479 rtx (*fn) (rtx, rtx, rtx);
4481 if (WORDS_BIG_ENDIAN)
4483 if (mode == QImode)
4484 fn = gen_insbl_be;
4485 else
4486 fn = gen_inswl_be;
4488 else
4490 if (mode == QImode)
4491 fn = gen_insbl_le;
4492 else
4493 fn = gen_inswl_le;
4495 /* The insbl and inswl patterns require a register operand. */
4496 op1 = force_reg (mode, op1);
4497 emit_insn (fn (ret, op1, op2));
4499 return ret;
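 /* Illustrative sketch only (hypothetical, little-endian case): the
    byte-insertion arithmetic that insbl/mskbl perform for the subword
    atomics below.  The low three bits of the unaligned address select
    the byte position within the containing aligned quadword.  */
 static unsigned long long
 insert_byte_sketch (unsigned long long quad, unsigned long long byte,
                     unsigned long long addr)
 {
   int shift = (addr & 7) * 8;                  /* byte position in bits */

   quad &= ~(0xffULL << shift);                 /* mskbl: clear the old byte */
   return quad | ((byte & 0xff) << shift);      /* insbl + or: insert the new one */
 }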
4502 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4503 to perform. MEM is the memory on which to operate. VAL is the second
4504 operand of the binary operator. BEFORE and AFTER are optional locations to
4505 return the value of MEM either before or after the operation. SCRATCH is
4506 a scratch register. */
4508 void
4509 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4510 rtx before, rtx after, rtx scratch)
4512 enum machine_mode mode = GET_MODE (mem);
4513 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4515 emit_insn (gen_memory_barrier ());
4517 label = gen_label_rtx ();
4518 emit_label (label);
4519 label = gen_rtx_LABEL_REF (DImode, label);
4521 if (before == NULL)
4522 before = scratch;
4523 emit_load_locked (mode, before, mem);
4525 if (code == NOT)
4527 x = gen_rtx_AND (mode, before, val);
4528 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4530 x = gen_rtx_NOT (mode, val);
4532 else
4533 x = gen_rtx_fmt_ee (code, mode, before, val);
4534 if (after)
4535 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4536 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4538 emit_store_conditional (mode, cond, mem, scratch);
4540 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4541 emit_unlikely_jump (x, label);
4543 emit_insn (gen_memory_barrier ());
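 /* Illustrative sketch only (not generated verbatim): for a DImode
    fetch-and-add with BEFORE == SCRATCH, the splitter above expands to
    approximately

 	mb
      1:	ldq_l	scratch, 0(mem)		# load-locked, old value
 	addq	scratch, val, scratch
 	stq_c	scratch, 0(mem)		# store-conditional, flag in scratch
 	beq	scratch, 1b		# retry if the reservation was lost
 	mb

    with the retry branch marked as unlikely taken.  */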
4546 /* Expand a compare and swap operation. */
4548 void
4549 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4550 rtx scratch)
4552 enum machine_mode mode = GET_MODE (mem);
4553 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4555 emit_insn (gen_memory_barrier ());
4557 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4558 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4559 emit_label (XEXP (label1, 0));
4561 emit_load_locked (mode, retval, mem);
4563 x = gen_lowpart (DImode, retval);
4564 if (oldval == const0_rtx)
4565 x = gen_rtx_NE (DImode, x, const0_rtx);
4566 else
4568 x = gen_rtx_EQ (DImode, x, oldval);
4569 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4570 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4572 emit_unlikely_jump (x, label2);
4574 emit_move_insn (scratch, newval);
4575 emit_store_conditional (mode, cond, mem, scratch);
4577 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4578 emit_unlikely_jump (x, label1);
4580 emit_insn (gen_memory_barrier ());
4581 emit_label (XEXP (label2, 0));
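 /* Illustrative sketch only (hypothetical helper): the observable
    semantics of the compare-and-swap sequence above, ignoring the
    load-locked/store-conditional retry loop and the memory barriers.  */
 static unsigned long long
 compare_and_swap_sketch (unsigned long long *mem, unsigned long long oldval,
                          unsigned long long newval)
 {
   unsigned long long prev = *mem;      /* RETVAL always gets the old contents */

   if (prev == oldval)
     *mem = newval;                     /* the store happens only on a match */
   return prev;
 }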
4584 void
4585 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4587 enum machine_mode mode = GET_MODE (mem);
4588 rtx addr, align, wdst;
4589 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4591 addr = force_reg (DImode, XEXP (mem, 0));
4592 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4593 NULL_RTX, 1, OPTAB_DIRECT);
4595 oldval = convert_modes (DImode, mode, oldval, 1);
4596 newval = emit_insxl (mode, newval, addr);
4598 wdst = gen_reg_rtx (DImode);
4599 if (mode == QImode)
4600 fn5 = gen_sync_compare_and_swapqi_1;
4601 else
4602 fn5 = gen_sync_compare_and_swaphi_1;
4603 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4605 emit_move_insn (dst, gen_lowpart (mode, wdst));
4608 void
4609 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4610 rtx oldval, rtx newval, rtx align,
4611 rtx scratch, rtx cond)
4613 rtx label1, label2, mem, width, mask, x;
4615 mem = gen_rtx_MEM (DImode, align);
4616 MEM_VOLATILE_P (mem) = 1;
4618 emit_insn (gen_memory_barrier ());
4619 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4620 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4621 emit_label (XEXP (label1, 0));
4623 emit_load_locked (DImode, scratch, mem);
4625 width = GEN_INT (GET_MODE_BITSIZE (mode));
4626 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4627 if (WORDS_BIG_ENDIAN)
4628 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4629 else
4630 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4632 if (oldval == const0_rtx)
4633 x = gen_rtx_NE (DImode, dest, const0_rtx);
4634 else
4636 x = gen_rtx_EQ (DImode, dest, oldval);
4637 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4638 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4640 emit_unlikely_jump (x, label2);
4642 if (WORDS_BIG_ENDIAN)
4643 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4644 else
4645 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4646 emit_insn (gen_iordi3 (scratch, scratch, newval));
4648 emit_store_conditional (DImode, scratch, mem, scratch);
4650 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4651 emit_unlikely_jump (x, label1);
4653 emit_insn (gen_memory_barrier ());
4654 emit_label (XEXP (label2, 0));
4657 /* Expand an atomic exchange operation. */
4659 void
4660 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4662 enum machine_mode mode = GET_MODE (mem);
4663 rtx label, x, cond = gen_lowpart (DImode, scratch);
4665 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4666 emit_label (XEXP (label, 0));
4668 emit_load_locked (mode, retval, mem);
4669 emit_move_insn (scratch, val);
4670 emit_store_conditional (mode, cond, mem, scratch);
4672 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4673 emit_unlikely_jump (x, label);
4675 emit_insn (gen_memory_barrier ());
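 /* Illustrative sketch only (hypothetical helper): the observable
    semantics of the exchange sequence above, again ignoring the retry
    loop; the trailing barrier provides acquire-style ordering.  */
 static unsigned long long
 lock_test_and_set_sketch (unsigned long long *mem, unsigned long long val)
 {
   unsigned long long prev = *mem;      /* RETVAL gets the old contents */

   *mem = val;                          /* unconditionally replace it */
   return prev;
 }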
4678 void
4679 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4681 enum machine_mode mode = GET_MODE (mem);
4682 rtx addr, align, wdst;
4683 rtx (*fn4) (rtx, rtx, rtx, rtx);
4685 /* Force the address into a register. */
4686 addr = force_reg (DImode, XEXP (mem, 0));
4688 /* Align it to a multiple of 8. */
4689 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4690 NULL_RTX, 1, OPTAB_DIRECT);
4692 /* Insert val into the correct byte location within the word. */
4693 val = emit_insxl (mode, val, addr);
4695 wdst = gen_reg_rtx (DImode);
4696 if (mode == QImode)
4697 fn4 = gen_sync_lock_test_and_setqi_1;
4698 else
4699 fn4 = gen_sync_lock_test_and_sethi_1;
4700 emit_insn (fn4 (wdst, addr, val, align));
4702 emit_move_insn (dst, gen_lowpart (mode, wdst));
4705 void
4706 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4707 rtx val, rtx align, rtx scratch)
4709 rtx label, mem, width, mask, x;
4711 mem = gen_rtx_MEM (DImode, align);
4712 MEM_VOLATILE_P (mem) = 1;
4714 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4715 emit_label (XEXP (label, 0));
4717 emit_load_locked (DImode, scratch, mem);
4719 width = GEN_INT (GET_MODE_BITSIZE (mode));
4720 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4721 if (WORDS_BIG_ENDIAN)
4723 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4724 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4726 else
4728 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4729 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4731 emit_insn (gen_iordi3 (scratch, scratch, val));
4733 emit_store_conditional (DImode, scratch, mem, scratch);
4735 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4736 emit_unlikely_jump (x, label);
4738 emit_insn (gen_memory_barrier ());
4741 /* Adjust the cost of a scheduling dependency. Return the new cost of
4742 the dependency LINK of INSN on DEP_INSN. COST is the current cost. */
4744 static int
4745 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4747 enum attr_type dep_insn_type;
4749 /* If the dependence is an anti-dependence, there is no cost. For an
4750 output dependence, there is sometimes a cost, but it doesn't seem
4751 worth handling those few cases. */
4752 if (REG_NOTE_KIND (link) != 0)
4753 return cost;
4755 /* If we can't recognize the insns, we can't really do anything. */
4756 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4757 return cost;
4759 dep_insn_type = get_attr_type (dep_insn);
4761 /* Bring in the user-defined memory latency. */
4762 if (dep_insn_type == TYPE_ILD
4763 || dep_insn_type == TYPE_FLD
4764 || dep_insn_type == TYPE_LDSYM)
4765 cost += alpha_memory_latency - 1;
4767 /* Everything else handled in DFA bypasses now. */
4769 return cost;
4772 /* The number of instructions that can be issued per cycle. */
4774 static int
4775 alpha_issue_rate (void)
4777 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4780 /* How many alternative schedules to try. This should be as wide as the
4781 scheduling freedom in the DFA, but no wider. Making this value too
4782 large results in extra work for the scheduler.
4784 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4785 alternative schedules. For EV5, we can choose between E0/E1 and
4786 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4788 static int
4789 alpha_multipass_dfa_lookahead (void)
4791 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4794 /* Machine-specific function data. */
4796 struct GTY(()) machine_function
4798 /* For unicosmk. */
4799 /* List of call information words for calls from this function. */
4800 struct rtx_def *first_ciw;
4801 struct rtx_def *last_ciw;
4802 int ciw_count;
4804 /* List of deferred case vectors. */
4805 struct rtx_def *addr_list;
4807 /* For OSF. */
4808 const char *some_ld_name;
4810 /* For TARGET_LD_BUGGY_LDGP. */
4811 struct rtx_def *gp_save_rtx;
4813 /* For VMS condition handlers. */
4814 bool uses_condition_handler;
4817 /* How to allocate a 'struct machine_function'. */
4819 static struct machine_function *
4820 alpha_init_machine_status (void)
4822 return ggc_alloc_cleared_machine_function ();
4825 /* Support for frame based VMS condition handlers. */
4827 /* A VMS condition handler may be established for a function with a call to
4828 __builtin_establish_vms_condition_handler, and cancelled with a call to
4829 __builtin_revert_vms_condition_handler.
4831 The VMS Condition Handling Facility knows about the existence of a handler
4832 from the procedure descriptor .handler field. Like the VMS native compilers,
4833 we store the user-specified handler's address at a fixed location in the
4834 stack frame and point the procedure descriptor at a common wrapper which
4835 fetches the real handler's address and issues an indirect call.
4837 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4839 We force the procedure kind to PT_STACK, and the fixed frame location is
4840 fp+8, just before the register save area. We use the handler_data field in
4841 the procedure descriptor to state the fp offset at which the installed
4842 handler address can be found. */
4844 #define VMS_COND_HANDLER_FP_OFFSET 8
4846 /* Expand code to store the currently installed user VMS condition handler
4847 into TARGET and install HANDLER as the new condition handler. */
4849 void
4850 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4852 rtx handler_slot_address
4853 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4855 rtx handler_slot
4856 = gen_rtx_MEM (DImode, handler_slot_address);
4858 emit_move_insn (target, handler_slot);
4859 emit_move_insn (handler_slot, handler);
4861 /* Notify the start/prologue/epilogue emitters that the condition handler
4862 slot is needed. In addition to reserving the slot space, this will force
4863 the procedure kind to PT_STACK to ensure that the hard_frame_pointer_rtx
4864 use above is correct. */
4865 cfun->machine->uses_condition_handler = true;
4868 /* Expand code to store the current VMS condition handler into TARGET and
4869 nullify it. */
4871 void
4872 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4874 /* We implement this by establishing a null condition handler, with the tiny
4875 side effect of setting uses_condition_handler. This is a little bit
4876 pessimistic if no actual builtin_establish call is ever issued, which is
4877 not a real problem and expected never to happen anyway. */
4879 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4882 /* Functions to save and restore alpha_return_addr_rtx. */
4884 /* Start the ball rolling with RETURN_ADDR_RTX. */
4887 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4889 if (count != 0)
4890 return const0_rtx;
4892 return get_hard_reg_initial_val (Pmode, REG_RA);
4895 /* Return or create a memory slot containing the gp value for the current
4896 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4899 alpha_gp_save_rtx (void)
4901 rtx seq, m = cfun->machine->gp_save_rtx;
4903 if (m == NULL)
4905 start_sequence ();
4907 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4908 m = validize_mem (m);
4909 emit_move_insn (m, pic_offset_table_rtx);
4911 seq = get_insns ();
4912 end_sequence ();
4914 /* We used to simply emit the sequence after entry_of_function.
4915 However this breaks the CFG if the first instruction in the
4916 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4917 label. Emit the sequence properly on the edge. We are only
4918 invoked from dw2_build_landing_pads and finish_eh_generation
4919 will call commit_edge_insertions thanks to a kludge. */
4920 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4922 cfun->machine->gp_save_rtx = m;
4925 return m;
4928 static int
4929 alpha_ra_ever_killed (void)
4931 rtx top;
4933 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4934 return (int)df_regs_ever_live_p (REG_RA);
4936 push_topmost_sequence ();
4937 top = get_insns ();
4938 pop_topmost_sequence ();
4940 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4944 /* Return the trap mode suffix applicable to the current
4945 instruction, or NULL. */
4947 static const char *
4948 get_trap_mode_suffix (void)
4950 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4952 switch (s)
4954 case TRAP_SUFFIX_NONE:
4955 return NULL;
4957 case TRAP_SUFFIX_SU:
4958 if (alpha_fptm >= ALPHA_FPTM_SU)
4959 return "su";
4960 return NULL;
4962 case TRAP_SUFFIX_SUI:
4963 if (alpha_fptm >= ALPHA_FPTM_SUI)
4964 return "sui";
4965 return NULL;
4967 case TRAP_SUFFIX_V_SV:
4968 switch (alpha_fptm)
4970 case ALPHA_FPTM_N:
4971 return NULL;
4972 case ALPHA_FPTM_U:
4973 return "v";
4974 case ALPHA_FPTM_SU:
4975 case ALPHA_FPTM_SUI:
4976 return "sv";
4977 default:
4978 gcc_unreachable ();
4981 case TRAP_SUFFIX_V_SV_SVI:
4982 switch (alpha_fptm)
4984 case ALPHA_FPTM_N:
4985 return NULL;
4986 case ALPHA_FPTM_U:
4987 return "v";
4988 case ALPHA_FPTM_SU:
4989 return "sv";
4990 case ALPHA_FPTM_SUI:
4991 return "svi";
4992 default:
4993 gcc_unreachable ();
4995 break;
4997 case TRAP_SUFFIX_U_SU_SUI:
4998 switch (alpha_fptm)
5000 case ALPHA_FPTM_N:
5001 return NULL;
5002 case ALPHA_FPTM_U:
5003 return "u";
5004 case ALPHA_FPTM_SU:
5005 return "su";
5006 case ALPHA_FPTM_SUI:
5007 return "sui";
5008 default:
5009 gcc_unreachable ();
5011 break;
5013 default:
5014 gcc_unreachable ();
5016 gcc_unreachable ();
5019 /* Return the rounding mode suffix applicable to the current
5020 instruction, or NULL. */
5022 static const char *
5023 get_round_mode_suffix (void)
5025 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5027 switch (s)
5029 case ROUND_SUFFIX_NONE:
5030 return NULL;
5031 case ROUND_SUFFIX_NORMAL:
5032 switch (alpha_fprm)
5034 case ALPHA_FPRM_NORM:
5035 return NULL;
5036 case ALPHA_FPRM_MINF:
5037 return "m";
5038 case ALPHA_FPRM_CHOP:
5039 return "c";
5040 case ALPHA_FPRM_DYN:
5041 return "d";
5042 default:
5043 gcc_unreachable ();
5045 break;
5047 case ROUND_SUFFIX_C:
5048 return "c";
5050 default:
5051 gcc_unreachable ();
5053 gcc_unreachable ();
5056 /* Locate some local-dynamic symbol still in use by this function
5057 so that we can print its name in some movdi_er_tlsldm pattern. */
5059 static int
5060 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5062 rtx x = *px;
5064 if (GET_CODE (x) == SYMBOL_REF
5065 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5067 cfun->machine->some_ld_name = XSTR (x, 0);
5068 return 1;
5071 return 0;
5074 static const char *
5075 get_some_local_dynamic_name (void)
5077 rtx insn;
5079 if (cfun->machine->some_ld_name)
5080 return cfun->machine->some_ld_name;
5082 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5083 if (INSN_P (insn)
5084 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5085 return cfun->machine->some_ld_name;
5087 gcc_unreachable ();
5090 /* Print an operand. Recognize special options, documented below. */
5092 void
5093 print_operand (FILE *file, rtx x, int code)
5095 int i;
5097 switch (code)
5099 case '~':
5100 /* Print the assembler name of the current function. */
5101 assemble_name (file, alpha_fnname);
5102 break;
5104 case '&':
5105 assemble_name (file, get_some_local_dynamic_name ());
5106 break;
5108 case '/':
5110 const char *trap = get_trap_mode_suffix ();
5111 const char *round = get_round_mode_suffix ();
5113 if (trap || round)
5114 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5115 (trap ? trap : ""), (round ? round : ""));
5116 break;
5119 case ',':
5120 /* Generates single precision instruction suffix. */
5121 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5122 break;
5124 case '-':
5125 /* Generates double precision instruction suffix. */
5126 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5127 break;
5129 case '#':
5130 if (alpha_this_literal_sequence_number == 0)
5131 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5132 fprintf (file, "%d", alpha_this_literal_sequence_number);
5133 break;
5135 case '*':
5136 if (alpha_this_gpdisp_sequence_number == 0)
5137 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5138 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5139 break;
5141 case 'H':
5142 if (GET_CODE (x) == HIGH)
5143 output_addr_const (file, XEXP (x, 0));
5144 else
5145 output_operand_lossage ("invalid %%H value");
5146 break;
5148 case 'J':
5150 const char *lituse;
5152 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5154 x = XVECEXP (x, 0, 0);
5155 lituse = "lituse_tlsgd";
5157 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5159 x = XVECEXP (x, 0, 0);
5160 lituse = "lituse_tlsldm";
5162 else if (CONST_INT_P (x))
5163 lituse = "lituse_jsr";
5164 else
5166 output_operand_lossage ("invalid %%J value");
5167 break;
5170 if (x != const0_rtx)
5171 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5173 break;
5175 case 'j':
5177 const char *lituse;
5179 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5180 lituse = "lituse_jsrdirect";
5181 #else
5182 lituse = "lituse_jsr";
5183 #endif
5185 gcc_assert (INTVAL (x) != 0);
5186 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5188 break;
5189 case 'r':
5190 /* If this operand is the constant zero, write it as "$31". */
5191 if (REG_P (x))
5192 fprintf (file, "%s", reg_names[REGNO (x)]);
5193 else if (x == CONST0_RTX (GET_MODE (x)))
5194 fprintf (file, "$31");
5195 else
5196 output_operand_lossage ("invalid %%r value");
5197 break;
5199 case 'R':
5200 /* Similar, but for floating-point. */
5201 if (REG_P (x))
5202 fprintf (file, "%s", reg_names[REGNO (x)]);
5203 else if (x == CONST0_RTX (GET_MODE (x)))
5204 fprintf (file, "$f31");
5205 else
5206 output_operand_lossage ("invalid %%R value");
5207 break;
5209 case 'N':
5210 /* Write the 1's complement of a constant. */
5211 if (!CONST_INT_P (x))
5212 output_operand_lossage ("invalid %%N value");
5214 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5215 break;
5217 case 'P':
5218 /* Write 1 << C, for a constant C. */
5219 if (!CONST_INT_P (x))
5220 output_operand_lossage ("invalid %%P value");
5222 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5223 break;
5225 case 'h':
5226 /* Write the high-order 16 bits of a constant, sign-extended. */
5227 if (!CONST_INT_P (x))
5228 output_operand_lossage ("invalid %%h value");
5230 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5231 break;
5233 case 'L':
5234 /* Write the low-order 16 bits of a constant, sign-extended. */
5235 if (!CONST_INT_P (x))
5236 output_operand_lossage ("invalid %%L value");
5238 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5239 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5240 break;
5242 case 'm':
5243 /* Write mask for ZAP insn. */
5244 if (GET_CODE (x) == CONST_DOUBLE)
5246 HOST_WIDE_INT mask = 0;
5247 HOST_WIDE_INT value;
5249 value = CONST_DOUBLE_LOW (x);
5250 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5251 i++, value >>= 8)
5252 if (value & 0xff)
5253 mask |= (1 << i);
5255 value = CONST_DOUBLE_HIGH (x);
5256 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5257 i++, value >>= 8)
5258 if (value & 0xff)
5259 mask |= (1 << (i + sizeof (int)));
5261 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5264 else if (CONST_INT_P (x))
5266 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5268 for (i = 0; i < 8; i++, value >>= 8)
5269 if (value & 0xff)
5270 mask |= (1 << i);
5272 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5274 else
5275 output_operand_lossage ("invalid %%m value");
5276 break;
5278 case 'M':
5279 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5280 if (!CONST_INT_P (x)
5281 || (INTVAL (x) != 8 && INTVAL (x) != 16
5282 && INTVAL (x) != 32 && INTVAL (x) != 64))
5283 output_operand_lossage ("invalid %%M value");
5285 fprintf (file, "%s",
5286 (INTVAL (x) == 8 ? "b"
5287 : INTVAL (x) == 16 ? "w"
5288 : INTVAL (x) == 32 ? "l"
5289 : "q"));
5290 break;
5292 case 'U':
5293 /* Similar, except do it from the mask. */
5294 if (CONST_INT_P (x))
5296 HOST_WIDE_INT value = INTVAL (x);
5298 if (value == 0xff)
5300 fputc ('b', file);
5301 break;
5303 if (value == 0xffff)
5305 fputc ('w', file);
5306 break;
5308 if (value == 0xffffffff)
5310 fputc ('l', file);
5311 break;
5313 if (value == -1)
5315 fputc ('q', file);
5316 break;
5319 else if (HOST_BITS_PER_WIDE_INT == 32
5320 && GET_CODE (x) == CONST_DOUBLE
5321 && CONST_DOUBLE_LOW (x) == 0xffffffff
5322 && CONST_DOUBLE_HIGH (x) == 0)
5324 fputc ('l', file);
5325 break;
5327 output_operand_lossage ("invalid %%U value");
5328 break;
5330 case 's':
5331 /* Write the constant value divided by 8 for little-endian mode or
5332 (56 - value) / 8 for big-endian mode. */
5334 if (!CONST_INT_P (x)
5335 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5336 ? 56
5337 : 64)
5338 || (INTVAL (x) & 7) != 0)
5339 output_operand_lossage ("invalid %%s value");
5341 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5342 WORDS_BIG_ENDIAN
5343 ? (56 - INTVAL (x)) / 8
5344 : INTVAL (x) / 8);
5345 break;
5347 case 'S':
5348 /* Same, except compute (64 - c) / 8.  */
5350 if (!CONST_INT_P (x)
5351 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5352 || (INTVAL (x) & 7) != 0)
5353 output_operand_lossage ("invalid %%S value");
5355 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5356 break;
5358 case 't':
5360 /* On Unicos/Mk systems: use a DEX expression if the symbol
5361 clashes with a register name. */
5362 int dex = unicosmk_need_dex (x);
5363 if (dex)
5364 fprintf (file, "DEX(%d)", dex);
5365 else
5366 output_addr_const (file, x);
5368 break;
5370 case 'C': case 'D': case 'c': case 'd':
5371 /* Write out comparison name. */
5373 enum rtx_code c = GET_CODE (x);
5375 if (!COMPARISON_P (x))
5376 output_operand_lossage ("invalid %%C value");
5378 else if (code == 'D')
5379 c = reverse_condition (c);
5380 else if (code == 'c')
5381 c = swap_condition (c);
5382 else if (code == 'd')
5383 c = swap_condition (reverse_condition (c));
5385 if (c == LEU)
5386 fprintf (file, "ule");
5387 else if (c == LTU)
5388 fprintf (file, "ult");
5389 else if (c == UNORDERED)
5390 fprintf (file, "un");
5391 else
5392 fprintf (file, "%s", GET_RTX_NAME (c));
5394 break;
5396 case 'E':
5397 /* Write the divide or modulus operator. */
5398 switch (GET_CODE (x))
5400 case DIV:
5401 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5402 break;
5403 case UDIV:
5404 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5405 break;
5406 case MOD:
5407 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5408 break;
5409 case UMOD:
5410 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5411 break;
5412 default:
5413 output_operand_lossage ("invalid %%E value");
5414 break;
5416 break;
5418 case 'A':
5419 /* Write "_u" for unaligned access. */
5420 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5421 fprintf (file, "_u");
5422 break;
5424 case 0:
5425 if (REG_P (x))
5426 fprintf (file, "%s", reg_names[REGNO (x)]);
5427 else if (MEM_P (x))
5428 output_address (XEXP (x, 0));
5429 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5431 switch (XINT (XEXP (x, 0), 1))
5433 case UNSPEC_DTPREL:
5434 case UNSPEC_TPREL:
5435 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5436 break;
5437 default:
5438 output_operand_lossage ("unknown relocation unspec");
5439 break;
5442 else
5443 output_addr_const (file, x);
5444 break;
5446 default:
5447 output_operand_lossage ("invalid %%xn code");
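 /* Illustrative sketch only (hypothetical helper): the byte mask that
    %m prints for a CONST_INT, computed with plain host arithmetic.
    Bit I of the result is set exactly when byte I of VALUE is nonzero,
    which is the selector a ZAP insn expects; e.g. 0x00ff000012345678
    prints as 79 (0x4f).  */
 static int
 zap_selector_sketch (unsigned long long value)
 {
   int i, mask = 0;

   for (i = 0; i < 8; i++)
     if ((value >> (i * 8)) & 0xff)
       mask |= 1 << i;
   return mask;
 }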
5451 void
5452 print_operand_address (FILE *file, rtx addr)
5454 int basereg = 31;
5455 HOST_WIDE_INT offset = 0;
5457 if (GET_CODE (addr) == AND)
5458 addr = XEXP (addr, 0);
5460 if (GET_CODE (addr) == PLUS
5461 && CONST_INT_P (XEXP (addr, 1)))
5463 offset = INTVAL (XEXP (addr, 1));
5464 addr = XEXP (addr, 0);
5467 if (GET_CODE (addr) == LO_SUM)
5469 const char *reloc16, *reloclo;
5470 rtx op1 = XEXP (addr, 1);
5472 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5474 op1 = XEXP (op1, 0);
5475 switch (XINT (op1, 1))
5477 case UNSPEC_DTPREL:
5478 reloc16 = NULL;
5479 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5480 break;
5481 case UNSPEC_TPREL:
5482 reloc16 = NULL;
5483 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5484 break;
5485 default:
5486 output_operand_lossage ("unknown relocation unspec");
5487 return;
5490 output_addr_const (file, XVECEXP (op1, 0, 0));
5492 else
5494 reloc16 = "gprel";
5495 reloclo = "gprellow";
5496 output_addr_const (file, op1);
5499 if (offset)
5500 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5502 addr = XEXP (addr, 0);
5503 switch (GET_CODE (addr))
5505 case REG:
5506 basereg = REGNO (addr);
5507 break;
5509 case SUBREG:
5510 basereg = subreg_regno (addr);
5511 break;
5513 default:
5514 gcc_unreachable ();
5517 fprintf (file, "($%d)\t\t!%s", basereg,
5518 (basereg == 29 ? reloc16 : reloclo));
5519 return;
5522 switch (GET_CODE (addr))
5524 case REG:
5525 basereg = REGNO (addr);
5526 break;
5528 case SUBREG:
5529 basereg = subreg_regno (addr);
5530 break;
5532 case CONST_INT:
5533 offset = INTVAL (addr);
5534 break;
5536 #if TARGET_ABI_OPEN_VMS
5537 case SYMBOL_REF:
5538 fprintf (file, "%s", XSTR (addr, 0));
5539 return;
5541 case CONST:
5542 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5543 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5544 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5545 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5546 INTVAL (XEXP (XEXP (addr, 0), 1)));
5547 return;
5549 #endif
5550 default:
5551 gcc_unreachable ();
5554 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5557 /* Emit RTL insns to initialize the variable parts of a trampoline at
5558 M_TRAMP. FNDECL is the target function's decl. CHAIN_VALUE is an rtx
5559 for the static chain value for the function. */
5561 static void
5562 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5564 rtx fnaddr, mem, word1, word2;
5566 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5568 #ifdef POINTERS_EXTEND_UNSIGNED
5569 fnaddr = convert_memory_address (Pmode, fnaddr);
5570 chain_value = convert_memory_address (Pmode, chain_value);
5571 #endif
5573 if (TARGET_ABI_OPEN_VMS)
5575 const char *fnname;
5576 char *trname;
5578 /* Construct the name of the trampoline entry point. */
5579 fnname = XSTR (fnaddr, 0);
5580 trname = (char *) alloca (strlen (fnname) + 5);
5581 strcpy (trname, fnname);
5582 strcat (trname, "..tr");
5583 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5584 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5586 /* The trampoline (or "bounded") procedure descriptor is constructed from
5587 the function's procedure descriptor with certain fields zeroed in accordance with
5588 the VMS calling standard. This is stored in the first quadword. */
5589 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5590 word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
5592 else
5594 /* These 4 instructions are:
5595 ldq $1,24($27)
5596 ldq $27,16($27)
5597 jmp $31,($27),0
5598 nop
5599 We don't bother setting the HINT field of the jump; the nop
5600 is merely there for padding. */
5601 word1 = GEN_INT (0xa77b0010a43b0018);
5602 word2 = GEN_INT (0x47ff041f6bfb0000);
5605 /* Store the first two words, as computed above. */
5606 mem = adjust_address (m_tramp, DImode, 0);
5607 emit_move_insn (mem, word1);
5608 mem = adjust_address (m_tramp, DImode, 8);
5609 emit_move_insn (mem, word2);
5611 /* Store function address and static chain value. */
5612 mem = adjust_address (m_tramp, Pmode, 16);
5613 emit_move_insn (mem, fnaddr);
5614 mem = adjust_address (m_tramp, Pmode, 24);
5615 emit_move_insn (mem, chain_value);
5617 if (!TARGET_ABI_OPEN_VMS)
5619 emit_insn (gen_imb ());
5620 #ifdef ENABLE_EXECUTE_STACK
5621 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5622 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5623 #endif
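 /* Illustrative sketch only (hypothetical, OSF/1 case): the 32-byte
    trampoline written above, viewed as a host-side layout.  The two
    fixed instruction words encode "ldq $1,24($27); ldq $27,16($27);
    jmp $31,($27),0; nop", so the stub reloads the real target into
    $27 and the static chain into $1 from the two trailing slots.  */
 struct osf_trampoline_sketch
 {
   unsigned long long insn_words[2];    /* 0xa77b0010a43b0018, 0x47ff041f6bfb0000 */
   unsigned long long func_addr;        /* offset 16, loaded into $27 */
   unsigned long long static_chain;     /* offset 24, loaded into $1 */
 };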
5627 /* Determine where to put an argument to a function.
5628 Value is zero to push the argument on the stack,
5629 or a hard register in which to store the argument.
5631 MODE is the argument's machine mode.
5632 TYPE is the data type of the argument (as a tree).
5633 This is null for libcalls where that information may
5634 not be available.
5635 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5636 the preceding args and about the function being called.
5637 NAMED is nonzero if this argument is a named parameter
5638 (otherwise it is an extra parameter matching an ellipsis).
5640 On Alpha the first 6 words of args are normally in registers
5641 and the rest are pushed. */
5643 static rtx
5644 alpha_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5645 const_tree type, bool named ATTRIBUTE_UNUSED)
5647 int basereg;
5648 int num_args;
5650 /* Don't get confused and pass small structures in FP registers. */
5651 if (type && AGGREGATE_TYPE_P (type))
5652 basereg = 16;
5653 else
5655 #ifdef ENABLE_CHECKING
5656 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5657 values here. */
5658 gcc_assert (!COMPLEX_MODE_P (mode));
5659 #endif
5661 /* Set up defaults for FP operands passed in FP registers, and
5662 integral operands passed in integer registers. */
5663 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5664 basereg = 32 + 16;
5665 else
5666 basereg = 16;
5669 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5670 the two platforms, so we can't avoid conditional compilation. */
5671 #if TARGET_ABI_OPEN_VMS
5673 if (mode == VOIDmode)
5674 return alpha_arg_info_reg_val (*cum);
5676 num_args = cum->num_args;
5677 if (num_args >= 6
5678 || targetm.calls.must_pass_in_stack (mode, type))
5679 return NULL_RTX;
5681 #elif TARGET_ABI_OSF
5683 if (*cum >= 6)
5684 return NULL_RTX;
5685 num_args = *cum;
5687 /* VOID is passed as a special flag for "last argument". */
5688 if (type == void_type_node)
5689 basereg = 16;
5690 else if (targetm.calls.must_pass_in_stack (mode, type))
5691 return NULL_RTX;
5693 #else
5694 #error Unhandled ABI
5695 #endif
5697 return gen_rtx_REG (mode, num_args + basereg);
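 /* Illustrative sketch only (hypothetical helper, OSF/1 case): the
    register choice made above.  Argument word N (N < 6) is passed in
    $16+N for integer-class values and in $f16+N (hard register 48 + N)
    for FP scalars when FP registers are enabled; everything past the
    sixth word goes on the stack.  */
 static int
 osf_arg_regno_sketch (int num_args, int is_fp_scalar)
 {
   if (num_args >= 6)
     return -1;                         /* passed on the stack */
   return (is_fp_scalar ? 32 + 16 : 16) + num_args;
 }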
5700 /* Update the data in CUM to advance over an argument
5701 of mode MODE and data type TYPE.
5702 (TYPE is null for libcalls where that information may not be available.) */
5704 static void
5705 alpha_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5706 const_tree type, bool named ATTRIBUTE_UNUSED)
5708 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5709 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5711 #if TARGET_ABI_OSF
5712 *cum += increment;
5713 #else
5714 if (!onstack && cum->num_args < 6)
5715 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5716 cum->num_args += increment;
5717 #endif
5720 static int
5721 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5722 enum machine_mode mode ATTRIBUTE_UNUSED,
5723 tree type ATTRIBUTE_UNUSED,
5724 bool named ATTRIBUTE_UNUSED)
5726 int words = 0;
5728 #if TARGET_ABI_OPEN_VMS
5729 if (cum->num_args < 6
5730 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5731 words = 6 - cum->num_args;
5732 #elif TARGET_ABI_UNICOSMK
5733 /* Never any split arguments. */
5734 #elif TARGET_ABI_OSF
5735 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5736 words = 6 - *cum;
5737 #else
5738 #error Unhandled ABI
5739 #endif
5741 return words * UNITS_PER_WORD;
5745 /* Return true if TYPE must be returned in memory, instead of in registers. */
5747 static bool
5748 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5750 enum machine_mode mode = VOIDmode;
5751 int size;
5753 if (type)
5755 mode = TYPE_MODE (type);
5757 /* All aggregates are returned in memory, except on OpenVMS where
5758 records that fit 64 bits should be returned by immediate value
5759 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5760 if (TARGET_ABI_OPEN_VMS
5761 && TREE_CODE (type) != ARRAY_TYPE
5762 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5763 return false;
5765 if (AGGREGATE_TYPE_P (type))
5766 return true;
5769 size = GET_MODE_SIZE (mode);
5770 switch (GET_MODE_CLASS (mode))
5772 case MODE_VECTOR_FLOAT:
5773 /* Pass all float vectors in memory, like an aggregate. */
5774 return true;
5776 case MODE_COMPLEX_FLOAT:
5777 /* We judge complex floats on the size of their element,
5778 not the size of the whole type. */
5779 size = GET_MODE_UNIT_SIZE (mode);
5780 break;
5782 case MODE_INT:
5783 case MODE_FLOAT:
5784 case MODE_COMPLEX_INT:
5785 case MODE_VECTOR_INT:
5786 break;
5788 default:
5789 /* ??? We get called on all sorts of random stuff from
5790 aggregate_value_p. We must return something, but it's not
5791 clear what's safe to return. Pretend it's a struct I
5792 guess. */
5793 return true;
5796 /* Otherwise types must fit in one register. */
5797 return size > UNITS_PER_WORD;
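 /* A few concrete outcomes of the rules above (illustrative only,
    OSF/1 case): "struct { int i; }" is an aggregate and is returned in
    memory; "_Complex double" is judged by its 8-byte element and comes
    back in registers; a TFmode long double is 16 bytes, exceeds
    UNITS_PER_WORD, and is returned in memory.  */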
5800 /* Return true if TYPE should be passed by invisible reference. */
5802 static bool
5803 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5804 enum machine_mode mode,
5805 const_tree type ATTRIBUTE_UNUSED,
5806 bool named ATTRIBUTE_UNUSED)
5808 return mode == TFmode || mode == TCmode;
5811 /* Define how to find the value returned by a function. VALTYPE is the
5812 data type of the value (as a tree). If the precise function being
5813 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5814 MODE is set instead of VALTYPE for libcalls.
5816 On Alpha the value is found in $0 for integer functions and
5817 $f0 for floating-point functions. */
5820 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5821 enum machine_mode mode)
5823 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5824 enum mode_class mclass;
5826 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5828 if (valtype)
5829 mode = TYPE_MODE (valtype);
5831 mclass = GET_MODE_CLASS (mode);
5832 switch (mclass)
5834 case MODE_INT:
5835 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5836 where we have them returning both SImode and DImode. */
5837 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5838 PROMOTE_MODE (mode, dummy, valtype);
5839 /* FALLTHRU */
5841 case MODE_COMPLEX_INT:
5842 case MODE_VECTOR_INT:
5843 regnum = 0;
5844 break;
5846 case MODE_FLOAT:
5847 regnum = 32;
5848 break;
5850 case MODE_COMPLEX_FLOAT:
5852 enum machine_mode cmode = GET_MODE_INNER (mode);
5854 return gen_rtx_PARALLEL
5855 (VOIDmode,
5856 gen_rtvec (2,
5857 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5858 const0_rtx),
5859 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5860 GEN_INT (GET_MODE_SIZE (cmode)))));
5863 case MODE_RANDOM:
5864 /* We should only reach here for BLKmode on VMS. */
5865 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5866 regnum = 0;
5867 break;
5869 default:
5870 gcc_unreachable ();
5873 return gen_rtx_REG (mode, regnum);
5876 /* TCmode complex values are passed by invisible reference. We
5877 should not split these values. */
5879 static bool
5880 alpha_split_complex_arg (const_tree type)
5882 return TYPE_MODE (type) != TCmode;
5885 static tree
5886 alpha_build_builtin_va_list (void)
5888 tree base, ofs, space, record, type_decl;
5890 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5891 return ptr_type_node;
5893 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5894 type_decl = build_decl (BUILTINS_LOCATION,
5895 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5896 TYPE_STUB_DECL (record) = type_decl;
5897 TYPE_NAME (record) = type_decl;
5899 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5901 /* Dummy field to prevent alignment warnings. */
5902 space = build_decl (BUILTINS_LOCATION,
5903 FIELD_DECL, NULL_TREE, integer_type_node);
5904 DECL_FIELD_CONTEXT (space) = record;
5905 DECL_ARTIFICIAL (space) = 1;
5906 DECL_IGNORED_P (space) = 1;
5908 ofs = build_decl (BUILTINS_LOCATION,
5909 FIELD_DECL, get_identifier ("__offset"),
5910 integer_type_node);
5911 DECL_FIELD_CONTEXT (ofs) = record;
5912 DECL_CHAIN (ofs) = space;
5913 /* ??? This is a hack, __offset is marked volatile to prevent
5914 DCE that confuses stdarg optimization and results in
5915 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5916 TREE_THIS_VOLATILE (ofs) = 1;
5918 base = build_decl (BUILTINS_LOCATION,
5919 FIELD_DECL, get_identifier ("__base"),
5920 ptr_type_node);
5921 DECL_FIELD_CONTEXT (base) = record;
5922 DECL_CHAIN (base) = ofs;
5924 TYPE_FIELDS (record) = base;
5925 layout_type (record);
5927 va_list_gpr_counter_field = ofs;
5928 return record;
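 /* Illustrative sketch only (hypothetical, OSF/1 case): the va_list
    record built above corresponds roughly to this C declaration.
    __base points at the register save area laid out by
    TARGET_SETUP_INCOMING_VARARGS and __offset is the byte offset of
    the next argument from that base.  */
 struct alpha_va_list_sketch
 {
   char *__base;        /* start of the register save / overflow area */
   int __offset;        /* byte offset of the next argument */
 };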
5931 #if TARGET_ABI_OSF
5932 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5933 and constant additions. */
5935 static gimple
5936 va_list_skip_additions (tree lhs)
5938 gimple stmt;
5940 for (;;)
5942 enum tree_code code;
5944 stmt = SSA_NAME_DEF_STMT (lhs);
5946 if (gimple_code (stmt) == GIMPLE_PHI)
5947 return stmt;
5949 if (!is_gimple_assign (stmt)
5950 || gimple_assign_lhs (stmt) != lhs)
5951 return NULL;
5953 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5954 return stmt;
5955 code = gimple_assign_rhs_code (stmt);
5956 if (!CONVERT_EXPR_CODE_P (code)
5957 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5958 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5959 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5960 return stmt;
5962 lhs = gimple_assign_rhs1 (stmt);
5966 /* Check if LHS = RHS statement is
5967 LHS = *(ap.__base + ap.__offset + cst)
5969 LHS = *(ap.__base
5970 + ((ap.__offset + cst <= 47)
5971 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5972 If the former, indicate that GPR registers are needed,
5973 if the latter, indicate that FPR registers are needed.
5975 Also look for LHS = (*ptr).field, where ptr is one of the forms
5976 listed above.
5978 On alpha, cfun->va_list_gpr_size is used as size of the needed
5979 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5980 registers are needed and bit 1 set if FPR registers are needed.
5981 Return true if va_list references should not be scanned for the
5982 current statement. */
5984 static bool
5985 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5987 tree base, offset, rhs;
5988 int offset_arg = 1;
5989 gimple base_stmt;
5991 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5992 != GIMPLE_SINGLE_RHS)
5993 return false;
5995 rhs = gimple_assign_rhs1 (stmt);
5996 while (handled_component_p (rhs))
5997 rhs = TREE_OPERAND (rhs, 0);
5998 if (TREE_CODE (rhs) != MEM_REF
5999 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
6000 return false;
6002 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
6003 if (stmt == NULL
6004 || !is_gimple_assign (stmt)
6005 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6006 return false;
6008 base = gimple_assign_rhs1 (stmt);
6009 if (TREE_CODE (base) == SSA_NAME)
6011 base_stmt = va_list_skip_additions (base);
6012 if (base_stmt
6013 && is_gimple_assign (base_stmt)
6014 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6015 base = gimple_assign_rhs1 (base_stmt);
6018 if (TREE_CODE (base) != COMPONENT_REF
6019 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6021 base = gimple_assign_rhs2 (stmt);
6022 if (TREE_CODE (base) == SSA_NAME)
6024 base_stmt = va_list_skip_additions (base);
6025 if (base_stmt
6026 && is_gimple_assign (base_stmt)
6027 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6028 base = gimple_assign_rhs1 (base_stmt);
6031 if (TREE_CODE (base) != COMPONENT_REF
6032 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6033 return false;
6035 offset_arg = 0;
6038 base = get_base_address (base);
6039 if (TREE_CODE (base) != VAR_DECL
6040 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
6041 return false;
6043 offset = gimple_op (stmt, 1 + offset_arg);
6044 if (TREE_CODE (offset) == SSA_NAME)
6046 gimple offset_stmt = va_list_skip_additions (offset);
6048 if (offset_stmt
6049 && gimple_code (offset_stmt) == GIMPLE_PHI)
6051 HOST_WIDE_INT sub;
6052 gimple arg1_stmt, arg2_stmt;
6053 tree arg1, arg2;
6054 enum tree_code code1, code2;
6056 if (gimple_phi_num_args (offset_stmt) != 2)
6057 goto escapes;
6059 arg1_stmt
6060 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6061 arg2_stmt
6062 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6063 if (arg1_stmt == NULL
6064 || !is_gimple_assign (arg1_stmt)
6065 || arg2_stmt == NULL
6066 || !is_gimple_assign (arg2_stmt))
6067 goto escapes;
6069 code1 = gimple_assign_rhs_code (arg1_stmt);
6070 code2 = gimple_assign_rhs_code (arg2_stmt);
6071 if (code1 == COMPONENT_REF
6072 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6073 /* Do nothing. */;
6074 else if (code2 == COMPONENT_REF
6075 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6077 gimple tem = arg1_stmt;
6078 code2 = code1;
6079 arg1_stmt = arg2_stmt;
6080 arg2_stmt = tem;
6082 else
6083 goto escapes;
6085 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
6086 goto escapes;
6088 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
6089 if (code2 == MINUS_EXPR)
6090 sub = -sub;
6091 if (sub < -48 || sub > -32)
6092 goto escapes;
6094 arg1 = gimple_assign_rhs1 (arg1_stmt);
6095 arg2 = gimple_assign_rhs1 (arg2_stmt);
6096 if (TREE_CODE (arg2) == SSA_NAME)
6098 arg2_stmt = va_list_skip_additions (arg2);
6099 if (arg2_stmt == NULL
6100 || !is_gimple_assign (arg2_stmt)
6101 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6102 goto escapes;
6103 arg2 = gimple_assign_rhs1 (arg2_stmt);
6105 if (arg1 != arg2)
6106 goto escapes;
6108 if (TREE_CODE (arg1) != COMPONENT_REF
6109 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6110 || get_base_address (arg1) != base)
6111 goto escapes;
6113 /* Need floating point regs. */
6114 cfun->va_list_fpr_size |= 2;
6115 return false;
6117 if (offset_stmt
6118 && is_gimple_assign (offset_stmt)
6119 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6120 offset = gimple_assign_rhs1 (offset_stmt);
6122 if (TREE_CODE (offset) != COMPONENT_REF
6123 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6124 || get_base_address (offset) != base)
6125 goto escapes;
6126 else
6127 /* Need general regs. */
6128 cfun->va_list_fpr_size |= 1;
6129 return false;
6131 escapes:
6132 si->va_list_escapes = true;
6133 return false;
6135 #endif
6137 /* Perform any actions needed for a function that is receiving a
6138 variable number of arguments. */
6140 static void
6141 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6142 tree type, int *pretend_size, int no_rtl)
6144 CUMULATIVE_ARGS cum = *pcum;
6146 /* Skip the current argument. */
6147 targetm.calls.function_arg_advance (&cum, mode, type, true);
6149 #if TARGET_ABI_UNICOSMK
6150 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6151 arguments on the stack. Unfortunately, it doesn't always store the first
6152 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6153 with stdargs as we always have at least one named argument there. */
6154 if (cum.num_reg_words < 6)
6156 if (!no_rtl)
6158 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6159 emit_insn (gen_arg_home_umk ());
6161 *pretend_size = 0;
6163 #elif TARGET_ABI_OPEN_VMS
6164 /* For VMS, we allocate space for all 6 arg registers plus a count.
6166 However, if NO registers need to be saved, don't allocate any space.
6167 This is not only because we won't need the space, but because AP
6168 includes the current_pretend_args_size and we don't want to mess up
6169 any ap-relative addresses already made. */
6170 if (cum.num_args < 6)
6172 if (!no_rtl)
6174 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6175 emit_insn (gen_arg_home ());
6177 *pretend_size = 7 * UNITS_PER_WORD;
6179 #else
6180 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6181 only push those that are remaining. However, if NO registers need to
6182 be saved, don't allocate any space. This is not only because we won't
6183 need the space, but because AP includes the current_pretend_args_size
6184 and we don't want to mess up any ap-relative addresses already made.
6186 If we are not to use the floating-point registers, save the integer
6187 registers where we would put the floating-point registers. This is
6188 not the most efficient way to implement varargs with just one register
6189 class, but it isn't worth doing anything more efficient in this rare
6190 case. */
6191 if (cum >= 6)
6192 return;
6194 if (!no_rtl)
6196 int count;
6197 alias_set_type set = get_varargs_alias_set ();
6198 rtx tmp;
6200 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6201 if (count > 6 - cum)
6202 count = 6 - cum;
6204 /* Detect whether integer registers or floating-point registers
6205 are needed by the detected va_arg statements. See above for
6206 how these values are computed. Note that the "escape" value
6207 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6208 these bits set. */
6209 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6211 if (cfun->va_list_fpr_size & 1)
6213 tmp = gen_rtx_MEM (BLKmode,
6214 plus_constant (virtual_incoming_args_rtx,
6215 (cum + 6) * UNITS_PER_WORD));
6216 MEM_NOTRAP_P (tmp) = 1;
6217 set_mem_alias_set (tmp, set);
6218 move_block_from_reg (16 + cum, tmp, count);
6221 if (cfun->va_list_fpr_size & 2)
6223 tmp = gen_rtx_MEM (BLKmode,
6224 plus_constant (virtual_incoming_args_rtx,
6225 cum * UNITS_PER_WORD));
6226 MEM_NOTRAP_P (tmp) = 1;
6227 set_mem_alias_set (tmp, set);
6228 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6231 *pretend_size = 12 * UNITS_PER_WORD;
6232 #endif
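 /* Illustrative sketch only (hypothetical, OSF/1 case): the register
    save area established above when fewer than six argument words are
    named.  The six FP argument registers occupy the first 48 bytes and
    the six integer argument registers the next 48, which is the layout
    alpha_va_start and alpha_gimplify_va_arg_1 rely on below.  */
 struct osf_varargs_save_area_sketch
 {
   unsigned long long fp_regs[6];       /* $f16-$f21 */
   unsigned long long int_regs[6];      /* $16-$21 */
 };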
6235 static void
6236 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6238 HOST_WIDE_INT offset;
6239 tree t, offset_field, base_field;
6241 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6242 return;
6244 if (TARGET_ABI_UNICOSMK)
6245 std_expand_builtin_va_start (valist, nextarg);
6247 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6248 up by 48, storing fp arg registers in the first 48 bytes, and the
6249 integer arg registers in the next 48 bytes. This is only done,
6250 however, if any integer registers need to be stored.
6252 If no integer registers need be stored, then we must subtract 48
6253 in order to account for the integer arg registers which are counted
6254 in argsize above, but which are not actually stored on the stack.
6255 Must further be careful here about structures straddling the last
6256 integer argument register; that futzes with pretend_args_size,
6257 which changes the meaning of AP. */
6259 if (NUM_ARGS < 6)
6260 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6261 else
6262 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6264 if (TARGET_ABI_OPEN_VMS)
6266 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6267 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6268 size_int (offset + NUM_ARGS * UNITS_PER_WORD));
6269 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6270 TREE_SIDE_EFFECTS (t) = 1;
6271 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6273 else
6275 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6276 offset_field = DECL_CHAIN (base_field);
6278 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6279 valist, base_field, NULL_TREE);
6280 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6281 valist, offset_field, NULL_TREE);
6283 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6284 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6285 size_int (offset));
6286 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6287 TREE_SIDE_EFFECTS (t) = 1;
6288 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6290 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6291 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6292 TREE_SIDE_EFFECTS (t) = 1;
6293 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6297 static tree
6298 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6299 gimple_seq *pre_p)
6301 tree type_size, ptr_type, addend, t, addr;
6302 gimple_seq internal_post;
6304 /* If the type could not be passed in registers, skip the block
6305 reserved for the registers. */
6306 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6308 t = build_int_cst (TREE_TYPE (offset), 6*8);
6309 gimplify_assign (offset,
6310 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6311 pre_p);
6314 addend = offset;
6315 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6317 if (TREE_CODE (type) == COMPLEX_TYPE)
6319 tree real_part, imag_part, real_temp;
6321 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6322 offset, pre_p);
6324 /* Copy the value into a new temporary, lest the formal temporary
6325 be reused out from under us. */
6326 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6328 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6329 offset, pre_p);
6331 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6333 else if (TREE_CODE (type) == REAL_TYPE)
6335 tree fpaddend, cond, fourtyeight;
6337 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6338 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6339 addend, fourtyeight);
6340 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6341 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6342 fpaddend, addend);
6345 /* Build the final address and force that value into a temporary. */
6346 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6347 fold_convert (sizetype, addend));
6348 internal_post = NULL;
6349 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6350 gimple_seq_add_seq (pre_p, internal_post);
6352 /* Update the offset field. */
6353 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6354 if (type_size == NULL || TREE_OVERFLOW (type_size))
6355 t = size_zero_node;
6356 else
6358 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6359 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6360 t = size_binop (MULT_EXPR, t, size_int (8));
6362 t = fold_convert (TREE_TYPE (offset), t);
6363 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6364 pre_p);
6366 return build_va_arg_indirect_ref (addr);
6369 static tree
6370 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6371 gimple_seq *post_p)
6373 tree offset_field, base_field, offset, base, t, r;
6374 bool indirect;
6376 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6377 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6379 base_field = TYPE_FIELDS (va_list_type_node);
6380 offset_field = DECL_CHAIN (base_field);
6381 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6382 valist, base_field, NULL_TREE);
6383 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6384 valist, offset_field, NULL_TREE);
6386 /* Pull the fields of the structure out into temporaries. Since we never
6387 modify the base field, we can use a formal temporary. Sign-extend the
6388 offset field so that it's the proper width for pointer arithmetic. */
6389 base = get_formal_tmp_var (base_field, pre_p);
6391 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6392 offset = get_initialized_tmp_var (t, pre_p, NULL);
6394 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6395 if (indirect)
6396 type = build_pointer_type_for_mode (type, ptr_mode, true);
6398 /* Find the value. Note that this will be a stable indirection, or
6399 a composite of stable indirections in the case of complex. */
6400 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6402 /* Stuff the offset temporary back into its field. */
6403 gimplify_assign (unshare_expr (offset_field),
6404 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6406 if (indirect)
6407 r = build_va_arg_indirect_ref (r);
6409 return r;
6412 /* Builtins. */
6414 enum alpha_builtin
6416 ALPHA_BUILTIN_CMPBGE,
6417 ALPHA_BUILTIN_EXTBL,
6418 ALPHA_BUILTIN_EXTWL,
6419 ALPHA_BUILTIN_EXTLL,
6420 ALPHA_BUILTIN_EXTQL,
6421 ALPHA_BUILTIN_EXTWH,
6422 ALPHA_BUILTIN_EXTLH,
6423 ALPHA_BUILTIN_EXTQH,
6424 ALPHA_BUILTIN_INSBL,
6425 ALPHA_BUILTIN_INSWL,
6426 ALPHA_BUILTIN_INSLL,
6427 ALPHA_BUILTIN_INSQL,
6428 ALPHA_BUILTIN_INSWH,
6429 ALPHA_BUILTIN_INSLH,
6430 ALPHA_BUILTIN_INSQH,
6431 ALPHA_BUILTIN_MSKBL,
6432 ALPHA_BUILTIN_MSKWL,
6433 ALPHA_BUILTIN_MSKLL,
6434 ALPHA_BUILTIN_MSKQL,
6435 ALPHA_BUILTIN_MSKWH,
6436 ALPHA_BUILTIN_MSKLH,
6437 ALPHA_BUILTIN_MSKQH,
6438 ALPHA_BUILTIN_UMULH,
6439 ALPHA_BUILTIN_ZAP,
6440 ALPHA_BUILTIN_ZAPNOT,
6441 ALPHA_BUILTIN_AMASK,
6442 ALPHA_BUILTIN_IMPLVER,
6443 ALPHA_BUILTIN_RPCC,
6444 ALPHA_BUILTIN_THREAD_POINTER,
6445 ALPHA_BUILTIN_SET_THREAD_POINTER,
6446 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6447 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6449 /* TARGET_MAX */
6450 ALPHA_BUILTIN_MINUB8,
6451 ALPHA_BUILTIN_MINSB8,
6452 ALPHA_BUILTIN_MINUW4,
6453 ALPHA_BUILTIN_MINSW4,
6454 ALPHA_BUILTIN_MAXUB8,
6455 ALPHA_BUILTIN_MAXSB8,
6456 ALPHA_BUILTIN_MAXUW4,
6457 ALPHA_BUILTIN_MAXSW4,
6458 ALPHA_BUILTIN_PERR,
6459 ALPHA_BUILTIN_PKLB,
6460 ALPHA_BUILTIN_PKWB,
6461 ALPHA_BUILTIN_UNPKBL,
6462 ALPHA_BUILTIN_UNPKBW,
6464 /* TARGET_CIX */
6465 ALPHA_BUILTIN_CTTZ,
6466 ALPHA_BUILTIN_CTLZ,
6467 ALPHA_BUILTIN_CTPOP,
6469 ALPHA_BUILTIN_max
6472 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6473 CODE_FOR_builtin_cmpbge,
6474 CODE_FOR_builtin_extbl,
6475 CODE_FOR_builtin_extwl,
6476 CODE_FOR_builtin_extll,
6477 CODE_FOR_builtin_extql,
6478 CODE_FOR_builtin_extwh,
6479 CODE_FOR_builtin_extlh,
6480 CODE_FOR_builtin_extqh,
6481 CODE_FOR_builtin_insbl,
6482 CODE_FOR_builtin_inswl,
6483 CODE_FOR_builtin_insll,
6484 CODE_FOR_builtin_insql,
6485 CODE_FOR_builtin_inswh,
6486 CODE_FOR_builtin_inslh,
6487 CODE_FOR_builtin_insqh,
6488 CODE_FOR_builtin_mskbl,
6489 CODE_FOR_builtin_mskwl,
6490 CODE_FOR_builtin_mskll,
6491 CODE_FOR_builtin_mskql,
6492 CODE_FOR_builtin_mskwh,
6493 CODE_FOR_builtin_msklh,
6494 CODE_FOR_builtin_mskqh,
6495 CODE_FOR_umuldi3_highpart,
6496 CODE_FOR_builtin_zap,
6497 CODE_FOR_builtin_zapnot,
6498 CODE_FOR_builtin_amask,
6499 CODE_FOR_builtin_implver,
6500 CODE_FOR_builtin_rpcc,
6501 CODE_FOR_load_tp,
6502 CODE_FOR_set_tp,
6503 CODE_FOR_builtin_establish_vms_condition_handler,
6504 CODE_FOR_builtin_revert_vms_condition_handler,
6506 /* TARGET_MAX */
6507 CODE_FOR_builtin_minub8,
6508 CODE_FOR_builtin_minsb8,
6509 CODE_FOR_builtin_minuw4,
6510 CODE_FOR_builtin_minsw4,
6511 CODE_FOR_builtin_maxub8,
6512 CODE_FOR_builtin_maxsb8,
6513 CODE_FOR_builtin_maxuw4,
6514 CODE_FOR_builtin_maxsw4,
6515 CODE_FOR_builtin_perr,
6516 CODE_FOR_builtin_pklb,
6517 CODE_FOR_builtin_pkwb,
6518 CODE_FOR_builtin_unpkbl,
6519 CODE_FOR_builtin_unpkbw,
6521 /* TARGET_CIX */
6522 CODE_FOR_ctzdi2,
6523 CODE_FOR_clzdi2,
6524 CODE_FOR_popcountdi2
6527 struct alpha_builtin_def
6529 const char *name;
6530 enum alpha_builtin code;
6531 unsigned int target_mask;
6532 bool is_const;
6535 static struct alpha_builtin_def const zero_arg_builtins[] = {
6536 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6537 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6540 static struct alpha_builtin_def const one_arg_builtins[] = {
6541 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6542 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6543 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6544 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6545 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6546 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6547 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6548 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6551 static struct alpha_builtin_def const two_arg_builtins[] = {
6552 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6553 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6554 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6555 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6556 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6557 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6558 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6559 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6560 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6561 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6562 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6563 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6564 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6565 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6566 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6567 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6568 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6569 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6570 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6571 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6572 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6573 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6574 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6575 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6576 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6577 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6578 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6579 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6580 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6581 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6582 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6583 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6584 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6585 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6588 static GTY(()) tree alpha_v8qi_u;
6589 static GTY(()) tree alpha_v8qi_s;
6590 static GTY(()) tree alpha_v4hi_u;
6591 static GTY(()) tree alpha_v4hi_s;
6593 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6595 /* Return the alpha builtin for CODE. */
6597 static tree
6598 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6600 if (code >= ALPHA_BUILTIN_max)
6601 return error_mark_node;
6602 return alpha_builtins[code];
6605 /* Helper function of alpha_init_builtins. Add the built-in specified
6606 by NAME, TYPE, CODE, and ECF. */
6608 static void
6609 alpha_builtin_function (const char *name, tree ftype,
6610 enum alpha_builtin code, unsigned ecf)
6612 tree decl = add_builtin_function (name, ftype, (int) code,
6613 BUILT_IN_MD, NULL, NULL_TREE);
6615 if (ecf & ECF_CONST)
6616 TREE_READONLY (decl) = 1;
6617 if (ecf & ECF_NOTHROW)
6618 TREE_NOTHROW (decl) = 1;
6620 alpha_builtins [(int) code] = decl;
6623 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6624 functions pointed to by P, with function type FTYPE. */
6626 static void
6627 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6628 tree ftype)
6630 size_t i;
6632 for (i = 0; i < count; ++i, ++p)
6633 if ((target_flags & p->target_mask) == p->target_mask)
6634 alpha_builtin_function (p->name, ftype, p->code,
6635 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6638 static void
6639 alpha_init_builtins (void)
6641 tree dimode_integer_type_node;
6642 tree ftype;
6644 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6646 /* Fwrite on VMS is non-standard. */
6647 #if TARGET_ABI_OPEN_VMS
6648 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
6649 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
6650 #endif
6652 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6653 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6654 ftype);
6656 ftype = build_function_type_list (dimode_integer_type_node,
6657 dimode_integer_type_node, NULL_TREE);
6658 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6659 ftype);
6661 ftype = build_function_type_list (dimode_integer_type_node,
6662 dimode_integer_type_node,
6663 dimode_integer_type_node, NULL_TREE);
6664 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6665 ftype);
6667 ftype = build_function_type (ptr_type_node, void_list_node);
6668 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6669 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6671 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6672 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6673 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6675 if (TARGET_ABI_OPEN_VMS)
6677 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6678 NULL_TREE);
6679 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6680 ftype,
6681 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6684 ftype = build_function_type_list (ptr_type_node, void_type_node,
6685 NULL_TREE);
6686 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6687 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6690 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6691 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6692 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6693 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6696 /* Expand an expression EXP that calls a built-in function,
6697 with result going to TARGET if that's convenient
6698 (and in mode MODE if that's convenient).
6699 SUBTARGET may be used as the target for computing one of EXP's operands.
6700 IGNORE is nonzero if the value is to be ignored. */
6702 static rtx
6703 alpha_expand_builtin (tree exp, rtx target,
6704 rtx subtarget ATTRIBUTE_UNUSED,
6705 enum machine_mode mode ATTRIBUTE_UNUSED,
6706 int ignore ATTRIBUTE_UNUSED)
6708 #define MAX_ARGS 2
6710 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6711 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6712 tree arg;
6713 call_expr_arg_iterator iter;
6714 enum insn_code icode;
6715 rtx op[MAX_ARGS], pat;
6716 int arity;
6717 bool nonvoid;
6719 if (fcode >= ALPHA_BUILTIN_max)
6720 internal_error ("bad builtin fcode");
6721 icode = code_for_builtin[fcode];
6722 if (icode == 0)
6723 internal_error ("bad builtin fcode");
6725 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6727 arity = 0;
6728 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6730 const struct insn_operand_data *insn_op;
6732 if (arg == error_mark_node)
6733 return NULL_RTX;
6734 if (arity >= MAX_ARGS)
6735 return NULL_RTX;
6737 insn_op = &insn_data[icode].operand[arity + nonvoid];
6739 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6741 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6742 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6743 arity++;
6746 if (nonvoid)
6748 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6749 if (!target
6750 || GET_MODE (target) != tmode
6751 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6752 target = gen_reg_rtx (tmode);
6755 switch (arity)
6757 case 0:
6758 pat = GEN_FCN (icode) (target);
6759 break;
6760 case 1:
6761 if (nonvoid)
6762 pat = GEN_FCN (icode) (target, op[0]);
6763 else
6764 pat = GEN_FCN (icode) (op[0]);
6765 break;
6766 case 2:
6767 pat = GEN_FCN (icode) (target, op[0], op[1]);
6768 break;
6769 default:
6770 gcc_unreachable ();
6772 if (!pat)
6773 return NULL_RTX;
6774 emit_insn (pat);
6776 if (nonvoid)
6777 return target;
6778 else
6779 return const0_rtx;
6783 /* Several bits below assume HWI >= 64 bits. This should be enforced
6784 by config.gcc. */
6785 #if HOST_BITS_PER_WIDE_INT < 64
6786 # error "HOST_WIDE_INT too small"
6787 #endif
6789 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6790 with an 8-bit output vector. OPINT contains the integer operands; bit N
6791 of OP_CONST is set if OPINT[N] is valid. */
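/* E.g. bit I of the result is set when byte I of OPINT[0] is (unsigned)
   >= byte I of OPINT[1]; in particular cmpbge (x, 0) is always 0xff,
   which is what the op_const == 2 case below exploits.  */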
6793 static tree
6794 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6796 if (op_const == 3)
6798 int i, val;
6799 for (i = 0, val = 0; i < 8; ++i)
6801 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6802 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6803 if (c0 >= c1)
6804 val |= 1 << i;
6806 return build_int_cst (long_integer_type_node, val);
6808 else if (op_const == 2 && opint[1] == 0)
6809 return build_int_cst (long_integer_type_node, 0xff);
6810 return NULL;
6813 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6814 specialized form of an AND operation. Other byte manipulation instructions
6815 are defined in terms of this instruction, so this is also used as a
6816 subroutine for other builtins.
6818 OP contains the tree operands; OPINT contains the extracted integer values.
6819 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6820 OPINT may be considered. */
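/* E.g. a mask of 0x0f selects bytes 0-3, so zapnot (x, 0x0f) is
   x & 0x00000000ffffffff; with X constant we fold to a constant,
   otherwise to the equivalent BIT_AND_EXPR built below.  */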
6822 static tree
6823 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6824 long op_const)
6826 if (op_const & 2)
6828 unsigned HOST_WIDE_INT mask = 0;
6829 int i;
6831 for (i = 0; i < 8; ++i)
6832 if ((opint[1] >> i) & 1)
6833 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6835 if (op_const & 1)
6836 return build_int_cst (long_integer_type_node, opint[0] & mask);
6838 if (op)
6839 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6840 build_int_cst (long_integer_type_node, mask));
6842 else if ((op_const & 1) && opint[0] == 0)
6843 return build_int_cst (long_integer_type_node, 0);
6844 return NULL;
6847 /* Fold the builtins for the EXT family of instructions. */
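/* E.g. on a little-endian target extbl (x, 2) is (x >> 16) & 0xff:
   the code below shifts OPINT[0] right by the byte offset and then
   reuses the ZAPNOT fold with the width's byte mask.  */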
6849 static tree
6850 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6851 long op_const, unsigned HOST_WIDE_INT bytemask,
6852 bool is_high)
6854 long zap_const = 2;
6855 tree *zap_op = NULL;
6857 if (op_const & 2)
6859 unsigned HOST_WIDE_INT loc;
6861 loc = opint[1] & 7;
6862 if (BYTES_BIG_ENDIAN)
6863 loc ^= 7;
6864 loc *= 8;
6866 if (loc != 0)
6868 if (op_const & 1)
6870 unsigned HOST_WIDE_INT temp = opint[0];
6871 if (is_high)
6872 temp <<= loc;
6873 else
6874 temp >>= loc;
6875 opint[0] = temp;
6876 zap_const = 3;
6879 else
6880 zap_op = op;
6883 opint[1] = bytemask;
6884 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6887 /* Fold the builtins for the INS family of instructions. */
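/* E.g. on a little-endian target insbl (x, 3) places the low byte of
   X at byte position 3 with all other bytes zero, i.e.
   (x & 0xff) << 24: the code below shifts OPINT[0] left and then
   masks via the ZAPNOT fold.  */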
6889 static tree
6890 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6891 long op_const, unsigned HOST_WIDE_INT bytemask,
6892 bool is_high)
6894 if ((op_const & 1) && opint[0] == 0)
6895 return build_int_cst (long_integer_type_node, 0);
6897 if (op_const & 2)
6899 unsigned HOST_WIDE_INT temp, loc, byteloc;
6900 tree *zap_op = NULL;
6902 loc = opint[1] & 7;
6903 if (BYTES_BIG_ENDIAN)
6904 loc ^= 7;
6905 bytemask <<= loc;
6907 temp = opint[0];
6908 if (is_high)
6910 byteloc = (64 - (loc * 8)) & 0x3f;
6911 if (byteloc == 0)
6912 zap_op = op;
6913 else
6914 temp >>= byteloc;
6915 bytemask >>= 8;
6917 else
6919 byteloc = loc * 8;
6920 if (byteloc == 0)
6921 zap_op = op;
6922 else
6923 temp <<= byteloc;
6926 opint[0] = temp;
6927 opint[1] = bytemask;
6928 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6931 return NULL;
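/* Fold the builtins for the MSK family of instructions.  These clear
   the bytes the matching store would overwrite; e.g. mskbl (x, 2)
   is x with byte 2 zeroed, i.e. x & ~(0xffUL << 16).  */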
6934 static tree
6935 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6936 long op_const, unsigned HOST_WIDE_INT bytemask,
6937 bool is_high)
6939 if (op_const & 2)
6941 unsigned HOST_WIDE_INT loc;
6943 loc = opint[1] & 7;
6944 if (BYTES_BIG_ENDIAN)
6945 loc ^= 7;
6946 bytemask <<= loc;
6948 if (is_high)
6949 bytemask >>= 8;
6951 opint[1] = bytemask ^ 0xff;
6954 return alpha_fold_builtin_zapnot (op, opint, op_const);
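/* Fold the builtin for UMULH, the high 64 bits of the full 128-bit
   unsigned product of its operands; e.g. umulh (1UL << 63, 4) is 2.  */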
6957 static tree
6958 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6960 switch (op_const)
6962 case 3:
6964 unsigned HOST_WIDE_INT l;
6965 HOST_WIDE_INT h;
6967 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6969 #if HOST_BITS_PER_WIDE_INT > 64
6970 # error fixme
6971 #endif
6973 return build_int_cst (long_integer_type_node, h);
6976 case 1:
6977 opint[1] = opint[0];
6978 /* FALLTHRU */
6979 case 2:
6980 /* Note that (X*1) >> 64 == 0. */
6981 if (opint[1] == 0 || opint[1] == 1)
6982 return build_int_cst (long_integer_type_node, 0);
6983 break;
6985 return NULL;
6988 static tree
6989 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6991 tree op0 = fold_convert (vtype, op[0]);
6992 tree op1 = fold_convert (vtype, op[1]);
6993 tree val = fold_build2 (code, vtype, op0, op1);
6994 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
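/* Fold the builtin for PERR, the sum of the absolute differences of
   the eight unsigned byte pairs of its operands; this is only
   foldable when both operands are constant.  */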
6997 static tree
6998 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
7000 unsigned HOST_WIDE_INT temp = 0;
7001 int i;
7003 if (op_const != 3)
7004 return NULL;
7006 for (i = 0; i < 8; ++i)
7008 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
7009 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
7010 if (a >= b)
7011 temp += a - b;
7012 else
7013 temp += b - a;
7016 return build_int_cst (long_integer_type_node, temp);
7019 static tree
7020 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
7022 unsigned HOST_WIDE_INT temp;
7024 if (op_const == 0)
7025 return NULL;
7027 temp = opint[0] & 0xff;
7028 temp |= (opint[0] >> 24) & 0xff00;
7030 return build_int_cst (long_integer_type_node, temp);
7033 static tree
7034 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
7036 unsigned HOST_WIDE_INT temp;
7038 if (op_const == 0)
7039 return NULL;
7041 temp = opint[0] & 0xff;
7042 temp |= (opint[0] >> 8) & 0xff00;
7043 temp |= (opint[0] >> 16) & 0xff0000;
7044 temp |= (opint[0] >> 24) & 0xff000000;
7046 return build_int_cst (long_integer_type_node, temp);
7049 static tree
7050 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
7052 unsigned HOST_WIDE_INT temp;
7054 if (op_const == 0)
7055 return NULL;
7057 temp = opint[0] & 0xff;
7058 temp |= (opint[0] & 0xff00) << 24;
7060 return build_int_cst (long_integer_type_node, temp);
7063 static tree
7064 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
7066 unsigned HOST_WIDE_INT temp;
7068 if (op_const == 0)
7069 return NULL;
7071 temp = opint[0] & 0xff;
7072 temp |= (opint[0] & 0x0000ff00) << 8;
7073 temp |= (opint[0] & 0x00ff0000) << 16;
7074 temp |= (opint[0] & 0xff000000) << 24;
7076 return build_int_cst (long_integer_type_node, temp);
7079 static tree
7080 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7082 unsigned HOST_WIDE_INT temp;
7084 if (op_const == 0)
7085 return NULL;
7087 if (opint[0] == 0)
7088 temp = 64;
7089 else
7090 temp = exact_log2 (opint[0] & -opint[0]);
7092 return build_int_cst (long_integer_type_node, temp);
7095 static tree
7096 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7098 unsigned HOST_WIDE_INT temp;
7100 if (op_const == 0)
7101 return NULL;
7103 if (opint[0] == 0)
7104 temp = 64;
7105 else
7106 temp = 64 - floor_log2 (opint[0]) - 1;
7108 return build_int_cst (long_integer_type_node, temp);
7111 static tree
7112 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7114 unsigned HOST_WIDE_INT temp, op;
7116 if (op_const == 0)
7117 return NULL;
7119 op = opint[0];
7120 temp = 0;
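/* Kernighan's trick: each iteration clears the lowest set bit, so
   the loop runs once per set bit of OP.  */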
7121 while (op)
7122 temp++, op &= op - 1;
7124 return build_int_cst (long_integer_type_node, temp);
7127 /* Fold one of our builtin functions. */
7129 static tree
7130 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7131 bool ignore ATTRIBUTE_UNUSED)
7133 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7134 long op_const = 0;
7135 int i;
7137 if (n_args >= MAX_ARGS)
7138 return NULL;
7140 for (i = 0; i < n_args; i++)
7142 tree arg = op[i];
7143 if (arg == error_mark_node)
7144 return NULL;
7146 opint[i] = 0;
7147 if (TREE_CODE (arg) == INTEGER_CST)
7149 op_const |= 1L << i;
7150 opint[i] = int_cst_value (arg);
7154 switch (DECL_FUNCTION_CODE (fndecl))
7156 case ALPHA_BUILTIN_CMPBGE:
7157 return alpha_fold_builtin_cmpbge (opint, op_const);
7159 case ALPHA_BUILTIN_EXTBL:
7160 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7161 case ALPHA_BUILTIN_EXTWL:
7162 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7163 case ALPHA_BUILTIN_EXTLL:
7164 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7165 case ALPHA_BUILTIN_EXTQL:
7166 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7167 case ALPHA_BUILTIN_EXTWH:
7168 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7169 case ALPHA_BUILTIN_EXTLH:
7170 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7171 case ALPHA_BUILTIN_EXTQH:
7172 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7174 case ALPHA_BUILTIN_INSBL:
7175 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7176 case ALPHA_BUILTIN_INSWL:
7177 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7178 case ALPHA_BUILTIN_INSLL:
7179 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7180 case ALPHA_BUILTIN_INSQL:
7181 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7182 case ALPHA_BUILTIN_INSWH:
7183 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7184 case ALPHA_BUILTIN_INSLH:
7185 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7186 case ALPHA_BUILTIN_INSQH:
7187 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7189 case ALPHA_BUILTIN_MSKBL:
7190 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7191 case ALPHA_BUILTIN_MSKWL:
7192 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7193 case ALPHA_BUILTIN_MSKLL:
7194 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7195 case ALPHA_BUILTIN_MSKQL:
7196 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7197 case ALPHA_BUILTIN_MSKWH:
7198 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7199 case ALPHA_BUILTIN_MSKLH:
7200 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7201 case ALPHA_BUILTIN_MSKQH:
7202 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7204 case ALPHA_BUILTIN_UMULH:
7205 return alpha_fold_builtin_umulh (opint, op_const);
7207 case ALPHA_BUILTIN_ZAP:
7208 opint[1] ^= 0xff;
7209 /* FALLTHRU */
7210 case ALPHA_BUILTIN_ZAPNOT:
7211 return alpha_fold_builtin_zapnot (op, opint, op_const);
7213 case ALPHA_BUILTIN_MINUB8:
7214 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7215 case ALPHA_BUILTIN_MINSB8:
7216 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7217 case ALPHA_BUILTIN_MINUW4:
7218 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7219 case ALPHA_BUILTIN_MINSW4:
7220 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7221 case ALPHA_BUILTIN_MAXUB8:
7222 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7223 case ALPHA_BUILTIN_MAXSB8:
7224 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7225 case ALPHA_BUILTIN_MAXUW4:
7226 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7227 case ALPHA_BUILTIN_MAXSW4:
7228 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7230 case ALPHA_BUILTIN_PERR:
7231 return alpha_fold_builtin_perr (opint, op_const);
7232 case ALPHA_BUILTIN_PKLB:
7233 return alpha_fold_builtin_pklb (opint, op_const);
7234 case ALPHA_BUILTIN_PKWB:
7235 return alpha_fold_builtin_pkwb (opint, op_const);
7236 case ALPHA_BUILTIN_UNPKBL:
7237 return alpha_fold_builtin_unpkbl (opint, op_const);
7238 case ALPHA_BUILTIN_UNPKBW:
7239 return alpha_fold_builtin_unpkbw (opint, op_const);
7241 case ALPHA_BUILTIN_CTTZ:
7242 return alpha_fold_builtin_cttz (opint, op_const);
7243 case ALPHA_BUILTIN_CTLZ:
7244 return alpha_fold_builtin_ctlz (opint, op_const);
7245 case ALPHA_BUILTIN_CTPOP:
7246 return alpha_fold_builtin_ctpop (opint, op_const);
7248 case ALPHA_BUILTIN_AMASK:
7249 case ALPHA_BUILTIN_IMPLVER:
7250 case ALPHA_BUILTIN_RPCC:
7251 case ALPHA_BUILTIN_THREAD_POINTER:
7252 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7253 /* None of these are foldable at compile-time. */
7254 default:
7255 return NULL;
7259 /* This page contains routines that are used to determine what the function
7260 prologue and epilogue code will do and write them out. */
7262 /* Compute the size of the save area in the stack. */
7264 /* These variables are used for communication between the following functions.
7265 They indicate various things about the current function being compiled
7266 that are used to tell what kind of prologue, epilogue and procedure
7267 descriptor to generate. */
7269 /* The kind of procedure (null, register frame, or stack frame) we need. */
7270 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7271 static enum alpha_procedure_types alpha_procedure_type;
7273 /* Register number (either FP or SP) that is used to unwind the frame. */
7274 static int vms_unwind_regno;
7276 /* Register number used to save FP. We need not have one for RA since
7277 we don't modify it for register procedures. This is only defined
7278 for register frame procedures. */
7279 static int vms_save_fp_regno;
7281 /* Register number used to reference objects off our PV. */
7282 static int vms_base_regno;
7284 /* Compute register masks for saved registers. */
7286 static void
7287 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7289 unsigned long imask = 0;
7290 unsigned long fmask = 0;
7291 unsigned int i;
7293 /* When outputting a thunk, we don't have valid register life info,
7294 but assemble_start_function wants to output .frame and .mask
7295 directives. */
7296 if (cfun->is_thunk)
7298 *imaskP = 0;
7299 *fmaskP = 0;
7300 return;
7303 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7304 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7306 /* One for every register we have to save. */
7307 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7308 if (! fixed_regs[i] && ! call_used_regs[i]
7309 && df_regs_ever_live_p (i) && i != REG_RA
7310 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7312 if (i < 32)
7313 imask |= (1UL << i);
7314 else
7315 fmask |= (1UL << (i - 32));
7318 /* We need to restore these for the handler. */
7319 if (crtl->calls_eh_return)
7321 for (i = 0; ; ++i)
7323 unsigned regno = EH_RETURN_DATA_REGNO (i);
7324 if (regno == INVALID_REGNUM)
7325 break;
7326 imask |= 1UL << regno;
7330 /* If any register spilled, then spill the return address also. */
7331 /* ??? This is required by the Digital stack unwind specification
7332 and isn't needed if we're doing Dwarf2 unwinding. */
7333 if (imask || fmask || alpha_ra_ever_killed ())
7334 imask |= (1UL << REG_RA);
7336 *imaskP = imask;
7337 *fmaskP = fmask;
7341 alpha_sa_size (void)
7343 unsigned long mask[2];
7344 int sa_size = 0;
7345 int i, j;
7347 alpha_sa_mask (&mask[0], &mask[1]);
7349 if (TARGET_ABI_UNICOSMK)
7351 if (mask[0] || mask[1])
7352 sa_size = 14;
7354 else
7356 for (j = 0; j < 2; ++j)
7357 for (i = 0; i < 32; ++i)
7358 if ((mask[j] >> i) & 1)
7359 sa_size++;
7362 if (TARGET_ABI_UNICOSMK)
7364 /* We might not need to generate a frame if we don't make any calls
7365 (including calls to __T3E_MISMATCH if this is a vararg function),
7366 don't have any local variables which require stack slots, don't
7367 use alloca and have not determined that we need a frame for other
7368 reasons. */
7370 alpha_procedure_type
7371 = (sa_size || get_frame_size() != 0
7372 || crtl->outgoing_args_size
7373 || cfun->stdarg || cfun->calls_alloca
7374 || frame_pointer_needed)
7375 ? PT_STACK : PT_REGISTER;
7377 /* Always reserve space for saving callee-saved registers if we
7378 need a frame as required by the calling convention. */
7379 if (alpha_procedure_type == PT_STACK)
7380 sa_size = 14;
7382 else if (TARGET_ABI_OPEN_VMS)
7384 /* Start with a stack procedure if we make any calls (REG_RA used), or
7385 need a frame pointer, with a register procedure if we otherwise need
7386 at least a slot, and with a null procedure in other cases. */
7387 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7388 alpha_procedure_type = PT_STACK;
7389 else if (get_frame_size() != 0)
7390 alpha_procedure_type = PT_REGISTER;
7391 else
7392 alpha_procedure_type = PT_NULL;
7394 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7395 made the final decision on stack procedure vs register procedure. */
7396 if (alpha_procedure_type == PT_STACK)
7397 sa_size -= 2;
7399 /* Decide whether to refer to objects off our PV via FP or PV.
7400 If we need FP for something else or if we receive a nonlocal
7401 goto (which expects PV to contain the value), we must use PV.
7402 Otherwise, start by assuming we can use FP. */
7404 vms_base_regno
7405 = (frame_pointer_needed
7406 || cfun->has_nonlocal_label
7407 || alpha_procedure_type == PT_STACK
7408 || crtl->outgoing_args_size)
7409 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7411 /* If we want to copy PV into FP, we need to find some register
7412 in which to save FP. */
7414 vms_save_fp_regno = -1;
7415 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7416 for (i = 0; i < 32; i++)
7417 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7418 vms_save_fp_regno = i;
7420 /* A VMS condition handler requires a stack procedure in our
7421 implementation (this is not required by the calling standard). */
7422 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7423 || cfun->machine->uses_condition_handler)
7424 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7425 else if (alpha_procedure_type == PT_NULL)
7426 vms_base_regno = REG_PV;
7428 /* Stack unwinding should be done via FP unless we use it for PV. */
7429 vms_unwind_regno = (vms_base_regno == REG_PV
7430 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7432 /* If this is a stack procedure, allow space for saving FP, RA and
7433 a condition handler slot if needed. */
7434 if (alpha_procedure_type == PT_STACK)
7435 sa_size += 2 + cfun->machine->uses_condition_handler;
7437 else
7439 /* Our size must be even (multiple of 16 bytes). */
7440 if (sa_size & 1)
7441 sa_size++;
7444 return sa_size * 8;
7447 /* Define the offset between two registers, one to be eliminated,
7448 and the other its replacement, at the start of a routine. */
7450 HOST_WIDE_INT
7451 alpha_initial_elimination_offset (unsigned int from,
7452 unsigned int to ATTRIBUTE_UNUSED)
7454 HOST_WIDE_INT ret;
7456 ret = alpha_sa_size ();
7457 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7459 switch (from)
7461 case FRAME_POINTER_REGNUM:
7462 break;
7464 case ARG_POINTER_REGNUM:
7465 ret += (ALPHA_ROUND (get_frame_size ()
7466 + crtl->args.pretend_args_size)
7467 - crtl->args.pretend_args_size);
7468 break;
7470 default:
7471 gcc_unreachable ();
7474 return ret;
7477 #if TARGET_ABI_OPEN_VMS
7479 /* Worker function for TARGET_CAN_ELIMINATE. */
7481 static bool
7482 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7484 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7485 alpha_sa_size ();
7487 switch (alpha_procedure_type)
7489 case PT_NULL:
7490 /* NULL procedures have no frame of their own and we only
7491 know how to resolve from the current stack pointer. */
7492 return to == STACK_POINTER_REGNUM;
7494 case PT_REGISTER:
7495 case PT_STACK:
7496 /* We always eliminate except to the stack pointer if there is no
7497 usable frame pointer at hand. */
7498 return (to != STACK_POINTER_REGNUM
7499 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7502 gcc_unreachable ();
7505 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7506 designates the same location as FROM. */
7508 HOST_WIDE_INT
7509 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7511 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7512 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7513 on the proper computations and will need the register save area size
7514 in most cases. */
7516 HOST_WIDE_INT sa_size = alpha_sa_size ();
7518 /* PT_NULL procedures have no frame of their own and we only allow
7519 elimination to the stack pointer. This is the argument pointer and we
7520 resolve the soft frame pointer to that as well. */
7522 if (alpha_procedure_type == PT_NULL)
7523 return 0;
7525 /* For a PT_STACK procedure the frame layout looks as follows
7527 -----> decreasing addresses
7529 < size rounded up to 16 | likewise >
7530 --------------#------------------------------+++--------------+++-------#
7531 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7532 --------------#---------------------------------------------------------#
7533 ^ ^ ^ ^
7534 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7537 PT_REGISTER procedures are similar in that they may have a frame of their
7538 own. They have no regs-sa/pv/outgoing-args area.
7540 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7541 to STACK_PTR if need be. */
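/* A rough example, assuming ALPHA_ROUND rounds up to a multiple of 16:
   a PT_STACK procedure with a 16-byte register save area, the 8-byte
   PV slot, a 40-byte frame and no pretend or outgoing args eliminates
   FRAME_PTR to HARD_FRAME_PTR at offset ALPHA_ROUND (24) == 32 and
   ARG_PTR at offset ALPHA_ROUND (64) == 64.  */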
7544 HOST_WIDE_INT offset;
7545 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7547 switch (from)
7549 case FRAME_POINTER_REGNUM:
7550 offset = ALPHA_ROUND (sa_size + pv_save_size);
7551 break;
7552 case ARG_POINTER_REGNUM:
7553 offset = (ALPHA_ROUND (sa_size + pv_save_size
7554 + get_frame_size ()
7555 + crtl->args.pretend_args_size)
7556 - crtl->args.pretend_args_size);
7557 break;
7558 default:
7559 gcc_unreachable ();
7562 if (to == STACK_POINTER_REGNUM)
7563 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7565 return offset;
7569 #define COMMON_OBJECT "common_object"
7571 static tree
7572 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7573 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7574 bool *no_add_attrs ATTRIBUTE_UNUSED)
7576 tree decl = *node;
7577 gcc_assert (DECL_P (decl));
7579 DECL_COMMON (decl) = 1;
7580 return NULL_TREE;
7583 static const struct attribute_spec vms_attribute_table[] =
7585 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7586 affects_type_identity } */
7587 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7588 { NULL, 0, 0, false, false, false, NULL, false }
7591 void
7592 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7593 unsigned HOST_WIDE_INT size,
7594 unsigned int align)
7596 tree attr = DECL_ATTRIBUTES (decl);
7597 fprintf (file, "%s", COMMON_ASM_OP);
7598 assemble_name (file, name);
7599 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7600 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7601 fprintf (file, ",%u", align / BITS_PER_UNIT);
7602 if (attr)
7604 attr = lookup_attribute (COMMON_OBJECT, attr);
7605 if (attr)
7606 fprintf (file, ",%s",
7607 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7609 fputc ('\n', file);
7612 #undef COMMON_OBJECT
7614 #endif
7616 static int
7617 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7619 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7623 alpha_find_lo_sum_using_gp (rtx insn)
7625 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7628 static int
7629 alpha_does_function_need_gp (void)
7631 rtx insn;
7633 /* The GP being variable is an OSF ABI thing. */
7634 if (! TARGET_ABI_OSF)
7635 return 0;
7637 /* We need the gp to load the address of __mcount. */
7638 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7639 return 1;
7641 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7642 if (cfun->is_thunk)
7643 return 1;
7645 /* The nonlocal receiver pattern assumes that the gp is valid for
7646 the nested function. Reasonable because it's almost always set
7647 correctly already. For the cases where that's wrong, make sure
7648 the nested function loads its gp on entry. */
7649 if (crtl->has_nonlocal_goto)
7650 return 1;
7652 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7653 Even if we are a static function, we still need to do this in case
7654 our address is taken and passed to something like qsort. */
7656 push_topmost_sequence ();
7657 insn = get_insns ();
7658 pop_topmost_sequence ();
7660 for (; insn; insn = NEXT_INSN (insn))
7661 if (NONDEBUG_INSN_P (insn)
7662 && ! JUMP_TABLE_DATA_P (insn)
7663 && GET_CODE (PATTERN (insn)) != USE
7664 && GET_CODE (PATTERN (insn)) != CLOBBER
7665 && get_attr_usegp (insn))
7666 return 1;
7668 return 0;
7672 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7673 sequences. */
7675 static rtx
7676 set_frame_related_p (void)
7678 rtx seq = get_insns ();
7679 rtx insn;
7681 end_sequence ();
7683 if (!seq)
7684 return NULL_RTX;
7686 if (INSN_P (seq))
7688 insn = seq;
7689 while (insn != NULL_RTX)
7691 RTX_FRAME_RELATED_P (insn) = 1;
7692 insn = NEXT_INSN (insn);
7694 seq = emit_insn (seq);
7696 else
7698 seq = emit_insn (seq);
7699 RTX_FRAME_RELATED_P (seq) = 1;
7701 return seq;
7704 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
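/* E.g. FRP (emit_move_insn (dst, src)) collects the emitted insns in a
   sequence, marks them RTX_FRAME_RELATED_P via the helper above, and
   re-emits them in place.  */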
7706 /* Generates a store with the proper unwind info attached. VALUE is
7707 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7708 contains SP+FRAME_BIAS, and that is the unwind info that should be
7709 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7710 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7712 static void
7713 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7714 HOST_WIDE_INT base_ofs, rtx frame_reg)
7716 rtx addr, mem, insn;
7718 addr = plus_constant (base_reg, base_ofs);
7719 mem = gen_rtx_MEM (DImode, addr);
7720 set_mem_alias_set (mem, alpha_sr_alias_set);
7722 insn = emit_move_insn (mem, value);
7723 RTX_FRAME_RELATED_P (insn) = 1;
7725 if (frame_bias || value != frame_reg)
7727 if (frame_bias)
7729 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7730 mem = gen_rtx_MEM (DImode, addr);
7733 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7734 gen_rtx_SET (VOIDmode, mem, frame_reg));
7738 static void
7739 emit_frame_store (unsigned int regno, rtx base_reg,
7740 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7742 rtx reg = gen_rtx_REG (DImode, regno);
7743 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7746 /* Compute the frame size. SIZE is the size of the "naked" frame
7747 and SA_SIZE is the size of the register save area. */
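/* E.g. on OSF/1 a 40-byte frame with a 32-byte save area, 24 bytes of
   outgoing args and no pretend args comes out as ALPHA_ROUND (24) + 32
   + ALPHA_ROUND (40), i.e. 32 + 32 + 48 = 112 bytes.  */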
7749 static HOST_WIDE_INT
7750 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7752 if (TARGET_ABI_OPEN_VMS)
7753 return ALPHA_ROUND (sa_size
7754 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7755 + size
7756 + crtl->args.pretend_args_size);
7757 else if (TARGET_ABI_UNICOSMK)
7758 /* We have to allocate space for the DSIB if we generate a frame. */
7759 return ALPHA_ROUND (sa_size
7760 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7761 + ALPHA_ROUND (size
7762 + crtl->outgoing_args_size);
7763 else
7764 return ALPHA_ROUND (crtl->outgoing_args_size)
7765 + sa_size
7766 + ALPHA_ROUND (size
7767 + crtl->args.pretend_args_size);
7770 /* Write function prologue. */
7772 /* On VMS we have two kinds of functions:
7774 - stack frame (PROC_STACK)
7775 these are 'normal' functions with local variables which also
7776 call other functions
7777 - register frame (PROC_REGISTER)
7778 keeps all data in registers, needs no stack
7780 We must pass this to the assembler so it can generate the
7781 proper pdsc (procedure descriptor).
7782 This is done with the '.pdesc' directive.
7784 On non-VMS targets, we don't really differentiate between the two, as we
7785 can simply allocate stack without saving registers. */
7787 void
7788 alpha_expand_prologue (void)
7790 /* Registers to save. */
7791 unsigned long imask = 0;
7792 unsigned long fmask = 0;
7793 /* Stack space needed for pushing registers clobbered by us. */
7794 HOST_WIDE_INT sa_size;
7795 /* Complete stack size needed. */
7796 HOST_WIDE_INT frame_size;
7797 /* Probed stack size; it additionally includes the size of
7798 the "reserve region" if any. */
7799 HOST_WIDE_INT probed_size;
7800 /* Offset from base reg to register save area. */
7801 HOST_WIDE_INT reg_offset;
7802 rtx sa_reg;
7803 int i;
7805 sa_size = alpha_sa_size ();
7806 frame_size = compute_frame_size (get_frame_size (), sa_size);
7808 if (flag_stack_usage)
7809 current_function_static_stack_size = frame_size;
7811 if (TARGET_ABI_OPEN_VMS)
7812 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7813 else
7814 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7816 alpha_sa_mask (&imask, &fmask);
7818 /* Emit an insn to reload GP, if needed. */
7819 if (TARGET_ABI_OSF)
7821 alpha_function_needs_gp = alpha_does_function_need_gp ();
7822 if (alpha_function_needs_gp)
7823 emit_insn (gen_prologue_ldgp ());
7826 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7827 the call to mcount ourselves, rather than having the linker do it
7828 magically in response to -pg. Since _mcount has special linkage,
7829 don't represent the call as a call. */
7830 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7831 emit_insn (gen_prologue_mcount ());
7833 if (TARGET_ABI_UNICOSMK)
7834 unicosmk_gen_dsib (&imask);
7836 /* Adjust the stack by the frame size. If the frame size is > 4096
7837 bytes, we need to be sure we probe somewhere in the first and last
7838 4096 bytes (we can probably get away without the latter test) and
7839 every 8192 bytes in between. If the frame size is > 32768, we
7840 do this in a loop. Otherwise, we generate the explicit probe
7841 instructions.
7843 Note that we are only allowed to adjust sp once in the prologue. */
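/* E.g. for a 10000-byte frame on OSF/1 with no registers to save and
   no -fstack-check, this emits a probe at sp-4096, a second probe at
   sp-10000, and then the single sp adjustment of -10000.  */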
7845 probed_size = frame_size;
7846 if (flag_stack_check)
7847 probed_size += STACK_CHECK_PROTECT;
7849 if (probed_size <= 32768)
7851 if (probed_size > 4096)
7853 int probed;
7855 for (probed = 4096; probed < probed_size; probed += 8192)
7856 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7857 ? -probed + 64
7858 : -probed)));
7860 /* We only have to do this probe if we aren't saving registers or
7861 if we are probing beyond the frame because of -fstack-check. */
7862 if ((sa_size == 0 && probed_size > probed - 4096)
7863 || flag_stack_check)
7864 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7867 if (frame_size != 0)
7868 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7869 GEN_INT (TARGET_ABI_UNICOSMK
7870 ? -frame_size + 64
7871 : -frame_size))));
7873 else
7875 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7876 number of 8192 byte blocks to probe. We then probe each block
7877 in the loop and then set SP to the proper location. If the
7878 amount remaining is > 4096, we have to do one more probe if we
7879 are not saving any registers or if we are probing beyond the
7880 frame because of -fstack-check. */
7882 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7883 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7884 rtx ptr = gen_rtx_REG (DImode, 22);
7885 rtx count = gen_rtx_REG (DImode, 23);
7886 rtx seq;
7888 emit_move_insn (count, GEN_INT (blocks));
7889 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7890 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7892 /* Because of the difficulty in emitting a new basic block this
7893 late in the compilation, generate the loop as a single insn. */
7894 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7896 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7898 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7899 MEM_VOLATILE_P (last) = 1;
7900 emit_move_insn (last, const0_rtx);
7903 if (TARGET_ABI_WINDOWS_NT || flag_stack_check)
7905 /* For NT stack unwind (done by 'reverse execution'), it's
7906 not OK to take the result of a loop, even though the value
7907 is already in ptr, so we reload it via a single operation
7908 and subtract it from sp.
7910 Same if -fstack-check is specified, because the probed stack
7911 size is not equal to the frame size.
7913 Yes, that's correct -- we have to reload the whole constant
7914 into a temporary via ldah+lda then subtract from sp. */
7916 HOST_WIDE_INT lo, hi;
7917 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7918 hi = frame_size - lo;
7920 emit_move_insn (ptr, GEN_INT (hi));
7921 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7922 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7923 ptr));
7925 else
7927 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7928 GEN_INT (-leftover)));
7931 /* This alternative is special, because the DWARF code cannot
7932 possibly intuit through the loop above. So we invent this
7933 note for it to look at instead. */
7934 RTX_FRAME_RELATED_P (seq) = 1;
7935 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7936 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7937 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7938 GEN_INT (TARGET_ABI_UNICOSMK
7939 ? -frame_size + 64
7940 : -frame_size))));
7943 if (!TARGET_ABI_UNICOSMK)
7945 HOST_WIDE_INT sa_bias = 0;
7947 /* Cope with very large offsets to the register save area. */
7948 sa_reg = stack_pointer_rtx;
7949 if (reg_offset + sa_size > 0x8000)
7951 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7952 rtx sa_bias_rtx;
7954 if (low + sa_size <= 0x8000)
7955 sa_bias = reg_offset - low, reg_offset = low;
7956 else
7957 sa_bias = reg_offset, reg_offset = 0;
7959 sa_reg = gen_rtx_REG (DImode, 24);
7960 sa_bias_rtx = GEN_INT (sa_bias);
7962 if (add_operand (sa_bias_rtx, DImode))
7963 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7964 else
7966 emit_move_insn (sa_reg, sa_bias_rtx);
7967 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7971 /* Save regs in stack order. Beginning with VMS PV. */
7972 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7973 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7975 /* Save register RA next. */
7976 if (imask & (1UL << REG_RA))
7978 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7979 imask &= ~(1UL << REG_RA);
7980 reg_offset += 8;
7983 /* Now save any other registers required to be saved. */
7984 for (i = 0; i < 31; i++)
7985 if (imask & (1UL << i))
7987 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7988 reg_offset += 8;
7991 for (i = 0; i < 31; i++)
7992 if (fmask & (1UL << i))
7994 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7995 reg_offset += 8;
7998 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8000 /* The standard frame on the T3E includes space for saving registers.
8001 We just have to use it. We don't have to save the return address and
8002 the old frame pointer here - they are saved in the DSIB. */
8004 reg_offset = -56;
8005 for (i = 9; i < 15; i++)
8006 if (imask & (1UL << i))
8008 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
8009 reg_offset -= 8;
8011 for (i = 2; i < 10; i++)
8012 if (fmask & (1UL << i))
8014 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
8015 reg_offset -= 8;
8019 if (TARGET_ABI_OPEN_VMS)
8021 /* Register frame procedures save the fp. */
8022 if (alpha_procedure_type == PT_REGISTER)
8024 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
8025 hard_frame_pointer_rtx);
8026 add_reg_note (insn, REG_CFA_REGISTER, NULL);
8027 RTX_FRAME_RELATED_P (insn) = 1;
8030 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
8031 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
8032 gen_rtx_REG (DImode, REG_PV)));
8034 if (alpha_procedure_type != PT_NULL
8035 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8036 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8038 /* If we have to allocate space for outgoing args, do it now. */
8039 if (crtl->outgoing_args_size != 0)
8041 rtx seq
8042 = emit_move_insn (stack_pointer_rtx,
8043 plus_constant
8044 (hard_frame_pointer_rtx,
8045 - (ALPHA_ROUND
8046 (crtl->outgoing_args_size))));
8048 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
8049 if ! frame_pointer_needed. Setting the bit will change the CFA
8050 computation rule to use sp again, which would be wrong if we had
8051 frame_pointer_needed, as this means sp might move unpredictably
8052 later on.
8054 Also, note that
8055 frame_pointer_needed
8056 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8058 crtl->outgoing_args_size != 0
8059 => alpha_procedure_type != PT_NULL,
8061 so when we are not setting the bit here, we are guaranteed to
8062 have emitted an FRP frame pointer update just before. */
8063 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
8066 else if (!TARGET_ABI_UNICOSMK)
8068 /* If we need a frame pointer, set it from the stack pointer. */
8069 if (frame_pointer_needed)
8071 if (TARGET_CAN_FAULT_IN_PROLOGUE)
8072 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8073 else
8074 /* This must always be the last instruction in the
8075 prologue, thus we emit a special move + clobber. */
8076 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
8077 stack_pointer_rtx, sa_reg)));
8081 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
8082 the prologue, for exception handling reasons, we cannot do this for
8083 any insn that might fault. We could prevent this for mems with a
8084 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
8085 have to prevent all such scheduling with a blockage.
8087 Linux, on the other hand, never bothered to implement OSF/1's
8088 exception handling, and so doesn't care about such things. Anyone
8089 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8091 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
8092 emit_insn (gen_blockage ());
8095 /* Count the number of .file directives, so that .loc is up to date. */
8096 int num_source_filenames = 0;
8098 /* Output the textual info surrounding the prologue. */
8100 void
8101 alpha_start_function (FILE *file, const char *fnname,
8102 tree decl ATTRIBUTE_UNUSED)
8104 unsigned long imask = 0;
8105 unsigned long fmask = 0;
8106 /* Stack space needed for pushing registers clobbered by us. */
8107 HOST_WIDE_INT sa_size;
8108 /* Complete stack size needed. */
8109 unsigned HOST_WIDE_INT frame_size;
8110 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
8111 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
8112 ? 524288
8113 : 1UL << 31;
8114 /* Offset from base reg to register save area. */
8115 HOST_WIDE_INT reg_offset;
8116 char *entry_label = (char *) alloca (strlen (fnname) + 6);
8117 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8118 int i;
8120 /* Don't emit an extern directive for functions defined in the same file. */
8121 if (TARGET_ABI_UNICOSMK)
8123 tree name_tree;
8124 name_tree = get_identifier (fnname);
8125 TREE_ASM_WRITTEN (name_tree) = 1;
8128 #if TARGET_ABI_OPEN_VMS
8129 if (vms_debug_main
8130 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
8132 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
8133 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
8134 switch_to_section (text_section);
8135 vms_debug_main = NULL;
8137 #endif
8139 alpha_fnname = fnname;
8140 sa_size = alpha_sa_size ();
8141 frame_size = compute_frame_size (get_frame_size (), sa_size);
8143 if (TARGET_ABI_OPEN_VMS)
8144 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8145 else
8146 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8148 alpha_sa_mask (&imask, &fmask);
8150 /* Ecoff can handle multiple .file directives, so put out file and lineno.
8151 We have to do that before the .ent directive as we cannot switch
8152 files within procedures with native ecoff because line numbers are
8153 linked to procedure descriptors.
8154 Outputting the lineno helps debugging of one line functions as they
8155 would otherwise get no line number at all. Please note that we would
8156 like to put out last_linenum from final.c, but it is not accessible. */
8158 if (write_symbols == SDB_DEBUG)
8160 #ifdef ASM_OUTPUT_SOURCE_FILENAME
8161 ASM_OUTPUT_SOURCE_FILENAME (file,
8162 DECL_SOURCE_FILE (current_function_decl));
8163 #endif
8164 #ifdef SDB_OUTPUT_SOURCE_LINE
8165 if (debug_info_level != DINFO_LEVEL_TERSE)
8166 SDB_OUTPUT_SOURCE_LINE (file,
8167 DECL_SOURCE_LINE (current_function_decl));
8168 #endif
8171 /* Issue function start and label. */
8172 if (TARGET_ABI_OPEN_VMS
8173 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
8175 fputs ("\t.ent ", file);
8176 assemble_name (file, fnname);
8177 putc ('\n', file);
8179 /* If the function needs GP, we'll write the "..ng" label there.
8180 Otherwise, do it here. */
8181 if (TARGET_ABI_OSF
8182 && ! alpha_function_needs_gp
8183 && ! cfun->is_thunk)
8185 putc ('$', file);
8186 assemble_name (file, fnname);
8187 fputs ("..ng:\n", file);
8190 /* Nested functions on VMS that are potentially called via trampoline
8191 get a special transfer entry point that loads the called function's
8192 procedure descriptor and static chain. */
8193 if (TARGET_ABI_OPEN_VMS
8194 && !TREE_PUBLIC (decl)
8195 && DECL_CONTEXT (decl)
8196 && !TYPE_P (DECL_CONTEXT (decl)))
8198 strcpy (tramp_label, fnname);
8199 strcat (tramp_label, "..tr");
8200 ASM_OUTPUT_LABEL (file, tramp_label);
8201 fprintf (file, "\tldq $1,24($27)\n");
8202 fprintf (file, "\tldq $27,16($27)\n");
8205 strcpy (entry_label, fnname);
8206 if (TARGET_ABI_OPEN_VMS)
8207 strcat (entry_label, "..en");
8209 /* For public functions, the label must be globalized by appending an
8210 additional colon. */
8211 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
8212 strcat (entry_label, ":");
8214 ASM_OUTPUT_LABEL (file, entry_label);
8215 inside_function = TRUE;
8217 if (TARGET_ABI_OPEN_VMS)
8218 fprintf (file, "\t.base $%d\n", vms_base_regno);
8220 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
8221 && !flag_inhibit_size_directive)
8223 /* Set flags in procedure descriptor to request IEEE-conformant
8224 math-library routines. The value we set it to is PDSC_EXC_IEEE
8225 (/usr/include/pdsc.h). */
8226 fputs ("\t.eflag 48\n", file);
8229 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8230 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8231 alpha_arg_offset = -frame_size + 48;
8233 /* Describe our frame. If the frame size does not fit in an integer,
8234 print it as zero to avoid an assembler error. We won't be
8235 properly describing such a frame, but that's the best we can do. */
8236 if (TARGET_ABI_UNICOSMK)
8238 else if (TARGET_ABI_OPEN_VMS)
8239 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8240 HOST_WIDE_INT_PRINT_DEC "\n",
8241 vms_unwind_regno,
8242 frame_size >= (1UL << 31) ? 0 : frame_size,
8243 reg_offset);
8244 else if (!flag_inhibit_size_directive)
8245 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8246 (frame_pointer_needed
8247 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8248 frame_size >= max_frame_size ? 0 : frame_size,
8249 crtl->args.pretend_args_size);
8251 /* Describe which registers were spilled. */
8252 if (TARGET_ABI_UNICOSMK)
8254 else if (TARGET_ABI_OPEN_VMS)
8256 if (imask)
8257 /* ??? Does VMS care if mask contains ra? The old code didn't
8258 set it, so I don't here. */
8259 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8260 if (fmask)
8261 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8262 if (alpha_procedure_type == PT_REGISTER)
8263 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8265 else if (!flag_inhibit_size_directive)
8267 if (imask)
8269 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8270 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8272 for (i = 0; i < 32; ++i)
8273 if (imask & (1UL << i))
8274 reg_offset += 8;
8277 if (fmask)
8278 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8279 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8282 #if TARGET_ABI_OPEN_VMS
8283 /* If a user condition handler has been installed at some point, emit
8284 the procedure descriptor bits to point the Condition Handling Facility
8285 at the indirection wrapper, and state the fp offset at which the user
8286 handler may be found. */
8287 if (cfun->machine->uses_condition_handler)
8289 fprintf (file, "\t.handler __gcc_shell_handler\n");
8290 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8293 /* Ifdef'ed because link_section is only available then. */
8294 switch_to_section (readonly_data_section);
8295 fprintf (file, "\t.align 3\n");
8296 assemble_name (file, fnname); fputs ("..na:\n", file);
8297 fputs ("\t.ascii \"", file);
8298 assemble_name (file, fnname);
8299 fputs ("\\0\"\n", file);
8300 alpha_need_linkage (fnname, 1);
8301 switch_to_section (text_section);
8302 #endif
8305 /* Emit the .prologue note at the scheduled end of the prologue. */
8307 static void
8308 alpha_output_function_end_prologue (FILE *file)
8310 if (TARGET_ABI_UNICOSMK)
8312 else if (TARGET_ABI_OPEN_VMS)
8313 fputs ("\t.prologue\n", file);
8314 else if (TARGET_ABI_WINDOWS_NT)
8315 fputs ("\t.prologue 0\n", file);
8316 else if (!flag_inhibit_size_directive)
8317 fprintf (file, "\t.prologue %d\n",
8318 alpha_function_needs_gp || cfun->is_thunk);
8321 /* Write function epilogue. */
8323 void
8324 alpha_expand_epilogue (void)
8326 /* Registers to save. */
8327 unsigned long imask = 0;
8328 unsigned long fmask = 0;
8329 /* Stack space needed for pushing registers clobbered by us. */
8330 HOST_WIDE_INT sa_size;
8331 /* Complete stack size needed. */
8332 HOST_WIDE_INT frame_size;
8333 /* Offset from base reg to register save area. */
8334 HOST_WIDE_INT reg_offset;
8335 int fp_is_frame_pointer, fp_offset;
8336 rtx sa_reg, sa_reg_exp = NULL;
8337 rtx sp_adj1, sp_adj2, mem, reg, insn;
8338 rtx eh_ofs;
8339 rtx cfa_restores = NULL_RTX;
8340 int i;
8342 sa_size = alpha_sa_size ();
8343 frame_size = compute_frame_size (get_frame_size (), sa_size);
8345 if (TARGET_ABI_OPEN_VMS)
8347 if (alpha_procedure_type == PT_STACK)
8348 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8349 else
8350 reg_offset = 0;
8352 else
8353 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8355 alpha_sa_mask (&imask, &fmask);
8357 fp_is_frame_pointer
8358 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8359 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8360 fp_offset = 0;
8361 sa_reg = stack_pointer_rtx;
8363 if (crtl->calls_eh_return)
8364 eh_ofs = EH_RETURN_STACKADJ_RTX;
8365 else
8366 eh_ofs = NULL_RTX;
8368 if (!TARGET_ABI_UNICOSMK && sa_size)
8370 /* If we have a frame pointer, restore SP from it. */
8371 if ((TARGET_ABI_OPEN_VMS
8372 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8373 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8374 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8376 /* Cope with very large offsets to the register save area. */
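/* Editor's worked example (illustrative only, not part of the original
   source): with reg_offset = 0x9000 and sa_size = 0x100 the test below
   fires; low = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000 = -0x7000, and since
   low + sa_size <= 0x8000 we take bias = 0x10000 and reg_offset = -0x7000.
   $22 then holds sp + 0x10000, and every save-area access fits in a
   signed 16-bit displacement from it.  */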
8377 if (reg_offset + sa_size > 0x8000)
8379 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8380 HOST_WIDE_INT bias;
8382 if (low + sa_size <= 0x8000)
8383 bias = reg_offset - low, reg_offset = low;
8384 else
8385 bias = reg_offset, reg_offset = 0;
8387 sa_reg = gen_rtx_REG (DImode, 22);
8388 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8390 emit_move_insn (sa_reg, sa_reg_exp);
8393 /* Restore registers in order, excepting a true frame pointer. */
8395 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8396 if (! eh_ofs)
8397 set_mem_alias_set (mem, alpha_sr_alias_set);
8398 reg = gen_rtx_REG (DImode, REG_RA);
8399 emit_move_insn (reg, mem);
8400 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8402 reg_offset += 8;
8403 imask &= ~(1UL << REG_RA);
8405 for (i = 0; i < 31; ++i)
8406 if (imask & (1UL << i))
8408 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8409 fp_offset = reg_offset;
8410 else
8412 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8413 set_mem_alias_set (mem, alpha_sr_alias_set);
8414 reg = gen_rtx_REG (DImode, i);
8415 emit_move_insn (reg, mem);
8416 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8417 cfa_restores);
8419 reg_offset += 8;
8422 for (i = 0; i < 31; ++i)
8423 if (fmask & (1UL << i))
8425 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8426 set_mem_alias_set (mem, alpha_sr_alias_set);
8427 reg = gen_rtx_REG (DFmode, i+32);
8428 emit_move_insn (reg, mem);
8429 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8430 reg_offset += 8;
8433 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8435 /* Restore callee-saved general-purpose registers. */
8437 reg_offset = -56;
8439 for (i = 9; i < 15; i++)
8440 if (imask & (1UL << i))
8442 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8443 reg_offset));
8444 set_mem_alias_set (mem, alpha_sr_alias_set);
8445 reg = gen_rtx_REG (DImode, i);
8446 emit_move_insn (reg, mem);
8447 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8448 reg_offset -= 8;
8451 for (i = 2; i < 10; i++)
8452 if (fmask & (1UL << i))
8454 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8455 reg_offset));
8456 set_mem_alias_set (mem, alpha_sr_alias_set);
8457 reg = gen_rtx_REG (DFmode, i+32);
8458 emit_move_insn (reg, mem);
8459 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8460 reg_offset -= 8;
8463 /* Restore the return address from the DSIB. */
8464 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
8465 set_mem_alias_set (mem, alpha_sr_alias_set);
8466 reg = gen_rtx_REG (DImode, REG_RA);
8467 emit_move_insn (reg, mem);
8468 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8471 if (frame_size || eh_ofs)
8473 sp_adj1 = stack_pointer_rtx;
8475 if (eh_ofs)
8477 sp_adj1 = gen_rtx_REG (DImode, 23);
8478 emit_move_insn (sp_adj1,
8479 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8482 /* If the stack size is large, begin computation into a temporary
8483 register so as not to interfere with a potential fp restore,
8484 which must be consecutive with an SP restore. */
8485 if (frame_size < 32768
8486 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8487 sp_adj2 = GEN_INT (frame_size);
8488 else if (TARGET_ABI_UNICOSMK)
8490 sp_adj1 = gen_rtx_REG (DImode, 23);
8491 emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
8492 sp_adj2 = const0_rtx;
8494 else if (frame_size < 0x40007fffL)
8496 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8498 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8499 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8500 sp_adj1 = sa_reg;
8501 else
8503 sp_adj1 = gen_rtx_REG (DImode, 23);
8504 emit_move_insn (sp_adj1, sp_adj2);
8506 sp_adj2 = GEN_INT (low);
8508 else
8510 rtx tmp = gen_rtx_REG (DImode, 23);
8511 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8512 if (!sp_adj2)
8514 /* We can't force new constants into memory this late, as far as we know,
8515 so build the value up by pieces. */
8516 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8517 -(frame_size < 0));
8518 gcc_assert (sp_adj2);
8522 /* From now on, things must be in order. So emit blockages. */
8524 /* Restore the frame pointer. */
8525 if (TARGET_ABI_UNICOSMK)
8527 emit_insn (gen_blockage ());
8528 mem = gen_rtx_MEM (DImode,
8529 plus_constant (hard_frame_pointer_rtx, -16));
8530 set_mem_alias_set (mem, alpha_sr_alias_set);
8531 emit_move_insn (hard_frame_pointer_rtx, mem);
8532 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8533 hard_frame_pointer_rtx, cfa_restores);
8535 else if (fp_is_frame_pointer)
8537 emit_insn (gen_blockage ());
8538 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8539 set_mem_alias_set (mem, alpha_sr_alias_set);
8540 emit_move_insn (hard_frame_pointer_rtx, mem);
8541 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8542 hard_frame_pointer_rtx, cfa_restores);
8544 else if (TARGET_ABI_OPEN_VMS)
8546 emit_insn (gen_blockage ());
8547 emit_move_insn (hard_frame_pointer_rtx,
8548 gen_rtx_REG (DImode, vms_save_fp_regno));
8549 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8550 hard_frame_pointer_rtx, cfa_restores);
8553 /* Restore the stack pointer. */
8554 emit_insn (gen_blockage ());
8555 if (sp_adj2 == const0_rtx)
8556 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8557 else
8558 insn = emit_move_insn (stack_pointer_rtx,
8559 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8560 REG_NOTES (insn) = cfa_restores;
8561 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8562 RTX_FRAME_RELATED_P (insn) = 1;
8564 else
8566 gcc_assert (cfa_restores == NULL);
8568 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8570 emit_insn (gen_blockage ());
8571 insn = emit_move_insn (hard_frame_pointer_rtx,
8572 gen_rtx_REG (DImode, vms_save_fp_regno));
8573 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8574 RTX_FRAME_RELATED_P (insn) = 1;
8576 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8578 /* Decrement the frame pointer if the function does not have a
8579 frame. */
8580 emit_insn (gen_blockage ());
8581 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8582 hard_frame_pointer_rtx, constm1_rtx));
8587 /* Output the rest of the textual info surrounding the epilogue. */
8589 void
8590 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8592 rtx insn;
8594 /* We output a nop after noreturn calls at the very end of the function to
8595 ensure that the return address always remains in the caller's code range,
8596 as not doing so might confuse unwinding engines. */
8597 insn = get_last_insn ();
8598 if (!INSN_P (insn))
8599 insn = prev_active_insn (insn);
8600 if (insn && CALL_P (insn))
8601 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8603 #if TARGET_ABI_OPEN_VMS
8604 alpha_write_linkage (file, fnname, decl);
8605 #endif
8607 /* End the function. */
8608 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8610 fputs ("\t.end ", file);
8611 assemble_name (file, fnname);
8612 putc ('\n', file);
8614 inside_function = FALSE;
8616 /* Output jump tables and the static subroutine information block. */
8617 if (TARGET_ABI_UNICOSMK)
8619 unicosmk_output_ssib (file, fnname);
8620 unicosmk_output_deferred_case_vectors (file);
8624 #if TARGET_ABI_OPEN_VMS
8625 void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
8627 #ifdef DO_CRTL_NAMES
8628 DO_CRTL_NAMES;
8629 #endif
8631 #endif
8633 #if TARGET_ABI_OSF
8634 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8636 In order to avoid the hordes of differences between generated code
8637 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8638 lots of code loading up large constants, generate rtl and emit it
8639 instead of going straight to text.
8641 Not sure why this idea hasn't been explored before... */
8643 static void
8644 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8645 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8646 tree function)
8648 HOST_WIDE_INT hi, lo;
8649 rtx this_rtx, insn, funexp;
8651 /* We always require a valid GP. */
8652 emit_insn (gen_prologue_ldgp ());
8653 emit_note (NOTE_INSN_PROLOGUE_END);
8655 /* Find the "this" pointer. If the function returns a structure,
8656 the structure return pointer is in $16. */
8657 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8658 this_rtx = gen_rtx_REG (Pmode, 17);
8659 else
8660 this_rtx = gen_rtx_REG (Pmode, 16);
8662 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8663 entire constant for the add. */
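   /* Editor's worked example (illustrative only): for DELTA = 0x12348765,
      lo = ((0x8765 ^ 0x8000) - 0x8000) = -0x789b and hi = 0x12350000, so
      hi + lo == DELTA and the adjustment is emitted as one ldah (adding hi)
      followed by one lda (adding lo).  For a DELTA that cannot be split
      this way, the full 64-bit constant is built in a scratch register.  */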
8664 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8665 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8666 if (hi + lo == delta)
8668 if (hi)
8669 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8670 if (lo)
8671 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8673 else
8675 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8676 delta, -(delta < 0));
8677 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8680 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8681 if (vcall_offset)
8683 rtx tmp, tmp2;
8685 tmp = gen_rtx_REG (Pmode, 0);
8686 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8688 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8689 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8690 if (hi + lo == vcall_offset)
8692 if (hi)
8693 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8695 else
8697 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8698 vcall_offset, -(vcall_offset < 0));
8699 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8700 lo = 0;
8702 if (lo)
8703 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8704 else
8705 tmp2 = tmp;
8706 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8708 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8711 /* Generate a tail call to the target function. */
8712 if (! TREE_USED (function))
8714 assemble_external (function);
8715 TREE_USED (function) = 1;
8717 funexp = XEXP (DECL_RTL (function), 0);
8718 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8719 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8720 SIBLING_CALL_P (insn) = 1;
8722 /* Run just enough of rest_of_compilation to get the insns emitted.
8723 There's not really enough bulk here to make other passes such as
8724 instruction scheduling worthwhile. Note that use_thunk calls
8725 assemble_start_function and assemble_end_function. */
8726 insn = get_insns ();
8727 insn_locators_alloc ();
8728 shorten_branches (insn);
8729 final_start_function (insn, file, 1);
8730 final (insn, file, 1);
8731 final_end_function ();
8733 #endif /* TARGET_ABI_OSF */
8735 /* Debugging support. */
8737 #include "gstab.h"
8739 /* Count the number of sdb-related labels that are generated (to find block
8740 start and end boundaries). */
8742 int sdb_label_count = 0;
8744 /* Name of the file containing the current function. */
8746 static const char *current_function_file = "";
8748 /* Offsets to alpha virtual arg/local debugging pointers. */
8750 long alpha_arg_offset;
8751 long alpha_auto_offset;
8753 /* Emit a new filename to a stream. */
8755 void
8756 alpha_output_filename (FILE *stream, const char *name)
8758 static int first_time = TRUE;
8760 if (first_time)
8762 first_time = FALSE;
8763 ++num_source_filenames;
8764 current_function_file = name;
8765 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8766 output_quoted_string (stream, name);
8767 fprintf (stream, "\n");
8768 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8769 fprintf (stream, "\t#@stabs\n");
8772 else if (write_symbols == DBX_DEBUG)
8773 /* dbxout.c will emit an appropriate .stabs directive. */
8774 return;
8776 else if (name != current_function_file
8777 && strcmp (name, current_function_file) != 0)
8779 if (inside_function && ! TARGET_GAS)
8780 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8781 else
8783 ++num_source_filenames;
8784 current_function_file = name;
8785 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8788 output_quoted_string (stream, name);
8789 fprintf (stream, "\n");
8793 /* Structure to show the current status of registers and memory. */
8795 struct shadow_summary
8797 struct {
8798 unsigned int i : 31; /* Mask of int regs */
8799 unsigned int fp : 31; /* Mask of fp regs */
8800 unsigned int mem : 1; /* mem == imem | fpmem */
8801 } used, defd;
8804 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8805 to the summary structure. SET is nonzero if the insn is setting the
8806 object, otherwise zero. */
8808 static void
8809 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8811 const char *format_ptr;
8812 int i, j;
8814 if (x == 0)
8815 return;
8817 switch (GET_CODE (x))
8819 /* ??? Note that this case would be incorrect if the Alpha had a
8820 ZERO_EXTRACT in SET_DEST. */
8821 case SET:
8822 summarize_insn (SET_SRC (x), sum, 0);
8823 summarize_insn (SET_DEST (x), sum, 1);
8824 break;
8826 case CLOBBER:
8827 summarize_insn (XEXP (x, 0), sum, 1);
8828 break;
8830 case USE:
8831 summarize_insn (XEXP (x, 0), sum, 0);
8832 break;
8834 case ASM_OPERANDS:
8835 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8836 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8837 break;
8839 case PARALLEL:
8840 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8841 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8842 break;
8844 case SUBREG:
8845 summarize_insn (SUBREG_REG (x), sum, 0);
8846 break;
8848 case REG:
8850 int regno = REGNO (x);
8851 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8853 if (regno == 31 || regno == 63)
8854 break;
8856 if (set)
8858 if (regno < 32)
8859 sum->defd.i |= mask;
8860 else
8861 sum->defd.fp |= mask;
8863 else
8865 if (regno < 32)
8866 sum->used.i |= mask;
8867 else
8868 sum->used.fp |= mask;
8871 break;
8873 case MEM:
8874 if (set)
8875 sum->defd.mem = 1;
8876 else
8877 sum->used.mem = 1;
8879 /* Find the regs used in memory address computation: */
8880 summarize_insn (XEXP (x, 0), sum, 0);
8881 break;
8883 case CONST_INT: case CONST_DOUBLE:
8884 case SYMBOL_REF: case LABEL_REF: case CONST:
8885 case SCRATCH: case ASM_INPUT:
8886 break;
8888 /* Handle common unary and binary ops for efficiency. */
8889 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8890 case MOD: case UDIV: case UMOD: case AND: case IOR:
8891 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8892 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8893 case NE: case EQ: case GE: case GT: case LE:
8894 case LT: case GEU: case GTU: case LEU: case LTU:
8895 summarize_insn (XEXP (x, 0), sum, 0);
8896 summarize_insn (XEXP (x, 1), sum, 0);
8897 break;
8899 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8900 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8901 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8902 case SQRT: case FFS:
8903 summarize_insn (XEXP (x, 0), sum, 0);
8904 break;
8906 default:
8907 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8908 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8909 switch (format_ptr[i])
8911 case 'e':
8912 summarize_insn (XEXP (x, i), sum, 0);
8913 break;
8915 case 'E':
8916 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8917 summarize_insn (XVECEXP (x, i, j), sum, 0);
8918 break;
8920 case 'i':
8921 break;
8923 default:
8924 gcc_unreachable ();
8929 /* Ensure a sufficient number of `trapb' insns are in the code when
8930 the user requests code with a trap precision of functions or
8931 instructions.
8933 In naive mode, when the user requests a trap-precision of
8934 "instruction", a trapb is needed after every instruction that may
8935 generate a trap. This ensures that the code is resumption safe but
8936 it is also slow.
8938 When optimizations are turned on, we delay issuing a trapb as long
8939 as possible. In this context, a trap shadow is the sequence of
8940 instructions that starts with a (potentially) trap generating
8941 instruction and extends to the next trapb or call_pal instruction
8942 (but GCC never generates call_pal by itself). We can delay (and
8943 therefore sometimes omit) a trapb subject to the following
8944 conditions:
8946 (a) On entry to the trap shadow, if any Alpha register or memory
8947 location contains a value that is used as an operand value by some
8948 instruction in the trap shadow (live on entry), then no instruction
8949 in the trap shadow may modify the register or memory location.
8951 (b) Within the trap shadow, the computation of the base register
8952 for a memory load or store instruction may not involve using the
8953 result of an instruction that might generate an UNPREDICTABLE
8954 result.
8956 (c) Within the trap shadow, no register may be used more than once
8957 as a destination register. (This is to make life easier for the
8958 trap-handler.)
8960 (d) The trap shadow may not include any branch instructions. */
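/* Editor's illustration (assumed example, not from the original text):
   under -mtrap-precision=i, in a sequence such as

	addt $f0,$f1,$f2	# may trap; opens a shadow
	addt $f2,$f3,$f2	# $f2 reused as a destination

   the second instruction violates condition (c), so the pass below closes
   the shadow by inserting a trapb in front of it.  */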
8962 static void
8963 alpha_handle_trap_shadows (void)
8965 struct shadow_summary shadow;
8966 int trap_pending, exception_nesting;
8967 rtx i, n;
8969 trap_pending = 0;
8970 exception_nesting = 0;
8971 shadow.used.i = 0;
8972 shadow.used.fp = 0;
8973 shadow.used.mem = 0;
8974 shadow.defd = shadow.used;
8976 for (i = get_insns (); i ; i = NEXT_INSN (i))
8978 if (NOTE_P (i))
8980 switch (NOTE_KIND (i))
8982 case NOTE_INSN_EH_REGION_BEG:
8983 exception_nesting++;
8984 if (trap_pending)
8985 goto close_shadow;
8986 break;
8988 case NOTE_INSN_EH_REGION_END:
8989 exception_nesting--;
8990 if (trap_pending)
8991 goto close_shadow;
8992 break;
8994 case NOTE_INSN_EPILOGUE_BEG:
8995 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8996 goto close_shadow;
8997 break;
9000 else if (trap_pending)
9002 if (alpha_tp == ALPHA_TP_FUNC)
9004 if (JUMP_P (i)
9005 && GET_CODE (PATTERN (i)) == RETURN)
9006 goto close_shadow;
9008 else if (alpha_tp == ALPHA_TP_INSN)
9010 if (optimize > 0)
9012 struct shadow_summary sum;
9014 sum.used.i = 0;
9015 sum.used.fp = 0;
9016 sum.used.mem = 0;
9017 sum.defd = sum.used;
9019 switch (GET_CODE (i))
9021 case INSN:
9022 /* Annoyingly, get_attr_trap will die on these. */
9023 if (GET_CODE (PATTERN (i)) == USE
9024 || GET_CODE (PATTERN (i)) == CLOBBER)
9025 break;
9027 summarize_insn (PATTERN (i), &sum, 0);
9029 if ((sum.defd.i & shadow.defd.i)
9030 || (sum.defd.fp & shadow.defd.fp))
9032 /* (c) would be violated */
9033 goto close_shadow;
9036 /* Combine shadow with summary of current insn: */
9037 shadow.used.i |= sum.used.i;
9038 shadow.used.fp |= sum.used.fp;
9039 shadow.used.mem |= sum.used.mem;
9040 shadow.defd.i |= sum.defd.i;
9041 shadow.defd.fp |= sum.defd.fp;
9042 shadow.defd.mem |= sum.defd.mem;
9044 if ((sum.defd.i & shadow.used.i)
9045 || (sum.defd.fp & shadow.used.fp)
9046 || (sum.defd.mem & shadow.used.mem))
9048 /* (a) would be violated (also takes care of (b)) */
9049 gcc_assert (get_attr_trap (i) != TRAP_YES
9050 || (!(sum.defd.i & sum.used.i)
9051 && !(sum.defd.fp & sum.used.fp)));
9053 goto close_shadow;
9055 break;
9057 case JUMP_INSN:
9058 case CALL_INSN:
9059 case CODE_LABEL:
9060 goto close_shadow;
9062 default:
9063 gcc_unreachable ();
9066 else
9068 close_shadow:
9069 n = emit_insn_before (gen_trapb (), i);
9070 PUT_MODE (n, TImode);
9071 PUT_MODE (i, TImode);
9072 trap_pending = 0;
9073 shadow.used.i = 0;
9074 shadow.used.fp = 0;
9075 shadow.used.mem = 0;
9076 shadow.defd = shadow.used;
9081 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
9082 && NONJUMP_INSN_P (i)
9083 && GET_CODE (PATTERN (i)) != USE
9084 && GET_CODE (PATTERN (i)) != CLOBBER
9085 && get_attr_trap (i) == TRAP_YES)
9087 if (optimize && !trap_pending)
9088 summarize_insn (PATTERN (i), &shadow, 0);
9089 trap_pending = 1;
9094 /* The Alpha can issue the instructions in a group simultaneously only if
9095 the group is suitably aligned. This is very processor-specific. */
9096 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
9097 that are marked "fake". These instructions do not exist on that target,
9098 but it is possible to see these insns with deranged combinations of
9099 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
9100 choose a result at random. */
9102 enum alphaev4_pipe {
9103 EV4_STOP = 0,
9104 EV4_IB0 = 1,
9105 EV4_IB1 = 2,
9106 EV4_IBX = 4
9109 enum alphaev5_pipe {
9110 EV5_STOP = 0,
9111 EV5_NONE = 1,
9112 EV5_E01 = 2,
9113 EV5_E0 = 4,
9114 EV5_E1 = 8,
9115 EV5_FAM = 16,
9116 EV5_FA = 32,
9117 EV5_FM = 64
9120 static enum alphaev4_pipe
9121 alphaev4_insn_pipe (rtx insn)
9123 if (recog_memoized (insn) < 0)
9124 return EV4_STOP;
9125 if (get_attr_length (insn) != 4)
9126 return EV4_STOP;
9128 switch (get_attr_type (insn))
9130 case TYPE_ILD:
9131 case TYPE_LDSYM:
9132 case TYPE_FLD:
9133 case TYPE_LD_L:
9134 return EV4_IBX;
9136 case TYPE_IADD:
9137 case TYPE_ILOG:
9138 case TYPE_ICMOV:
9139 case TYPE_ICMP:
9140 case TYPE_FST:
9141 case TYPE_SHIFT:
9142 case TYPE_IMUL:
9143 case TYPE_FBR:
9144 case TYPE_MVI: /* fake */
9145 return EV4_IB0;
9147 case TYPE_IST:
9148 case TYPE_MISC:
9149 case TYPE_IBR:
9150 case TYPE_JSR:
9151 case TYPE_CALLPAL:
9152 case TYPE_FCPYS:
9153 case TYPE_FCMOV:
9154 case TYPE_FADD:
9155 case TYPE_FDIV:
9156 case TYPE_FMUL:
9157 case TYPE_ST_C:
9158 case TYPE_MB:
9159 case TYPE_FSQRT: /* fake */
9160 case TYPE_FTOI: /* fake */
9161 case TYPE_ITOF: /* fake */
9162 return EV4_IB1;
9164 default:
9165 gcc_unreachable ();
9169 static enum alphaev5_pipe
9170 alphaev5_insn_pipe (rtx insn)
9172 if (recog_memoized (insn) < 0)
9173 return EV5_STOP;
9174 if (get_attr_length (insn) != 4)
9175 return EV5_STOP;
9177 switch (get_attr_type (insn))
9179 case TYPE_ILD:
9180 case TYPE_FLD:
9181 case TYPE_LDSYM:
9182 case TYPE_IADD:
9183 case TYPE_ILOG:
9184 case TYPE_ICMOV:
9185 case TYPE_ICMP:
9186 return EV5_E01;
9188 case TYPE_IST:
9189 case TYPE_FST:
9190 case TYPE_SHIFT:
9191 case TYPE_IMUL:
9192 case TYPE_MISC:
9193 case TYPE_MVI:
9194 case TYPE_LD_L:
9195 case TYPE_ST_C:
9196 case TYPE_MB:
9197 case TYPE_FTOI: /* fake */
9198 case TYPE_ITOF: /* fake */
9199 return EV5_E0;
9201 case TYPE_IBR:
9202 case TYPE_JSR:
9203 case TYPE_CALLPAL:
9204 return EV5_E1;
9206 case TYPE_FCPYS:
9207 return EV5_FAM;
9209 case TYPE_FBR:
9210 case TYPE_FCMOV:
9211 case TYPE_FADD:
9212 case TYPE_FDIV:
9213 case TYPE_FSQRT: /* fake */
9214 return EV5_FA;
9216 case TYPE_FMUL:
9217 return EV5_FM;
9219 default:
9220 gcc_unreachable ();
9224 /* IN_USE is a mask of the slots currently filled within the insn group.
9225 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9226 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9228 LEN is, of course, the length of the group in bytes. */
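/* Editor's note (illustrative reading of alphaev4_insn_pipe below): two
   consecutive loads both classify as EV4_IBX; the first claims IB0 and
   sets IBX, the second is then steered into IB1, so both fit in one
   dual-issue group.  A third load in the same group forces a break.  */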
9230 static rtx
9231 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
9233 int len, in_use;
9235 len = in_use = 0;
9237 if (! INSN_P (insn)
9238 || GET_CODE (PATTERN (insn)) == CLOBBER
9239 || GET_CODE (PATTERN (insn)) == USE)
9240 goto next_and_done;
9242 while (1)
9244 enum alphaev4_pipe pipe;
9246 pipe = alphaev4_insn_pipe (insn);
9247 switch (pipe)
9249 case EV4_STOP:
9250 /* Force complex instructions to start new groups. */
9251 if (in_use)
9252 goto done;
9254 /* If this is a completely unrecognized insn, it's an asm.
9255 We don't know how long it is, so record length as -1 to
9256 signal a needed realignment. */
9257 if (recog_memoized (insn) < 0)
9258 len = -1;
9259 else
9260 len = get_attr_length (insn);
9261 goto next_and_done;
9263 case EV4_IBX:
9264 if (in_use & EV4_IB0)
9266 if (in_use & EV4_IB1)
9267 goto done;
9268 in_use |= EV4_IB1;
9270 else
9271 in_use |= EV4_IB0 | EV4_IBX;
9272 break;
9274 case EV4_IB0:
9275 if (in_use & EV4_IB0)
9277 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9278 goto done;
9279 in_use |= EV4_IB1;
9281 in_use |= EV4_IB0;
9282 break;
9284 case EV4_IB1:
9285 if (in_use & EV4_IB1)
9286 goto done;
9287 in_use |= EV4_IB1;
9288 break;
9290 default:
9291 gcc_unreachable ();
9293 len += 4;
9295 /* Haifa doesn't do well scheduling branches. */
9296 if (JUMP_P (insn))
9297 goto next_and_done;
9299 next:
9300 insn = next_nonnote_insn (insn);
9302 if (!insn || ! INSN_P (insn))
9303 goto done;
9305 /* Let Haifa tell us where it thinks insn group boundaries are. */
9306 if (GET_MODE (insn) == TImode)
9307 goto done;
9309 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9310 goto next;
9313 next_and_done:
9314 insn = next_nonnote_insn (insn);
9316 done:
9317 *plen = len;
9318 *pin_use = in_use;
9319 return insn;
9322 /* IN_USE is a mask of the slots currently filled within the insn group.
9323 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9324 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9326 LEN is, of course, the length of the group in bytes. */
9328 static rtx
9329 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9331 int len, in_use;
9333 len = in_use = 0;
9335 if (! INSN_P (insn)
9336 || GET_CODE (PATTERN (insn)) == CLOBBER
9337 || GET_CODE (PATTERN (insn)) == USE)
9338 goto next_and_done;
9340 while (1)
9342 enum alphaev5_pipe pipe;
9344 pipe = alphaev5_insn_pipe (insn);
9345 switch (pipe)
9347 case EV5_STOP:
9348 /* Force complex instructions to start new groups. */
9349 if (in_use)
9350 goto done;
9352 /* If this is a completely unrecognized insn, it's an asm.
9353 We don't know how long it is, so record length as -1 to
9354 signal a needed realignment. */
9355 if (recog_memoized (insn) < 0)
9356 len = -1;
9357 else
9358 len = get_attr_length (insn);
9359 goto next_and_done;
9361 /* ??? For most of the cases below, we would like to assert that they
9362 never happen, as that would indicate an error either in Haifa or
9363 in the scheduling description. Unfortunately, Haifa never
9364 schedules the last instruction of the BB, so we don't have an
9365 accurate TI bit to go by. */
9366 case EV5_E01:
9367 if (in_use & EV5_E0)
9369 if (in_use & EV5_E1)
9370 goto done;
9371 in_use |= EV5_E1;
9373 else
9374 in_use |= EV5_E0 | EV5_E01;
9375 break;
9377 case EV5_E0:
9378 if (in_use & EV5_E0)
9380 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9381 goto done;
9382 in_use |= EV5_E1;
9384 in_use |= EV5_E0;
9385 break;
9387 case EV5_E1:
9388 if (in_use & EV5_E1)
9389 goto done;
9390 in_use |= EV5_E1;
9391 break;
9393 case EV5_FAM:
9394 if (in_use & EV5_FA)
9396 if (in_use & EV5_FM)
9397 goto done;
9398 in_use |= EV5_FM;
9400 else
9401 in_use |= EV5_FA | EV5_FAM;
9402 break;
9404 case EV5_FA:
9405 if (in_use & EV5_FA)
9406 goto done;
9407 in_use |= EV5_FA;
9408 break;
9410 case EV5_FM:
9411 if (in_use & EV5_FM)
9412 goto done;
9413 in_use |= EV5_FM;
9414 break;
9416 case EV5_NONE:
9417 break;
9419 default:
9420 gcc_unreachable ();
9422 len += 4;
9424 /* Haifa doesn't do well scheduling branches. */
9425 /* ??? If this is predicted not-taken, slotting continues, except
9426 that no more IBR, FBR, or JSR insns may be slotted. */
9427 if (JUMP_P (insn))
9428 goto next_and_done;
9430 next:
9431 insn = next_nonnote_insn (insn);
9433 if (!insn || ! INSN_P (insn))
9434 goto done;
9436 /* Let Haifa tell us where it thinks insn group boundaries are. */
9437 if (GET_MODE (insn) == TImode)
9438 goto done;
9440 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9441 goto next;
9444 next_and_done:
9445 insn = next_nonnote_insn (insn);
9447 done:
9448 *plen = len;
9449 *pin_use = in_use;
9450 return insn;
9453 static rtx
9454 alphaev4_next_nop (int *pin_use)
9456 int in_use = *pin_use;
9457 rtx nop;
9459 if (!(in_use & EV4_IB0))
9461 in_use |= EV4_IB0;
9462 nop = gen_nop ();
9464 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9466 in_use |= EV4_IB1;
9467 nop = gen_nop ();
9469 else if (TARGET_FP && !(in_use & EV4_IB1))
9471 in_use |= EV4_IB1;
9472 nop = gen_fnop ();
9474 else
9475 nop = gen_unop ();
9477 *pin_use = in_use;
9478 return nop;
9481 static rtx
9482 alphaev5_next_nop (int *pin_use)
9484 int in_use = *pin_use;
9485 rtx nop;
9487 if (!(in_use & EV5_E1))
9489 in_use |= EV5_E1;
9490 nop = gen_nop ();
9492 else if (TARGET_FP && !(in_use & EV5_FA))
9494 in_use |= EV5_FA;
9495 nop = gen_fnop ();
9497 else if (TARGET_FP && !(in_use & EV5_FM))
9499 in_use |= EV5_FM;
9500 nop = gen_fnop ();
9502 else
9503 nop = gen_unop ();
9505 *pin_use = in_use;
9506 return nop;
9509 /* The instruction group alignment main loop. */
9511 static void
9512 alpha_align_insns (unsigned int max_align,
9513 rtx (*next_group) (rtx, int *, int *),
9514 rtx (*next_nop) (int *))
9516 /* ALIGN is the known alignment for the insn group. */
9517 unsigned int align;
9518 /* OFS is the offset of the current insn in the insn group. */
9519 int ofs;
9520 int prev_in_use, in_use, len, ldgp;
9521 rtx i, next;
9523 /* Let shorten_branches take care of assigning alignments to code labels. */
9524 shorten_branches (get_insns ());
9526 if (align_functions < 4)
9527 align = 4;
9528 else if ((unsigned int) align_functions < max_align)
9529 align = align_functions;
9530 else
9531 align = max_align;
9533 ofs = prev_in_use = 0;
9534 i = get_insns ();
9535 if (NOTE_P (i))
9536 i = next_nonnote_insn (i);
9538 ldgp = alpha_function_needs_gp ? 8 : 0;
9540 while (i)
9542 next = (*next_group) (i, &in_use, &len);
9544 /* When we see a label, resync alignment etc. */
9545 if (LABEL_P (i))
9547 unsigned int new_align = 1 << label_to_alignment (i);
9549 if (new_align >= align)
9551 align = new_align < max_align ? new_align : max_align;
9552 ofs = 0;
9555 else if (ofs & (new_align-1))
9556 ofs = (ofs | (new_align-1)) + 1;
9557 gcc_assert (!len);
9560 /* Handle complex instructions specially. */
9561 else if (in_use == 0)
9563 /* Asms will have length < 0. This is a signal that we have
9564 lost alignment knowledge. Assume, however, that the asm
9565 will not mis-align instructions. */
9566 if (len < 0)
9568 ofs = 0;
9569 align = 4;
9570 len = 0;
9574 /* If the known alignment is smaller than the recognized insn group,
9575 realign the output. */
9576 else if ((int) align < len)
9578 unsigned int new_log_align = len > 8 ? 4 : 3;
9579 rtx prev, where;
9581 where = prev = prev_nonnote_insn (i);
9582 if (!where || !LABEL_P (where))
9583 where = i;
9585 /* Can't realign between a call and its gp reload. */
9586 if (! (TARGET_EXPLICIT_RELOCS
9587 && prev && CALL_P (prev)))
9589 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9590 align = 1 << new_log_align;
9591 ofs = 0;
9595 /* We may not insert padding inside the initial ldgp sequence. */
9596 else if (ldgp > 0)
9597 ldgp -= len;
9599 /* If the group won't fit in the same INT16 as the previous,
9600 we need to add padding to keep the group together. Rather
9601 than simply leaving the insn filling to the assembler, we
9602 can make use of the knowledge of what sorts of instructions
9603 were issued in the previous group to make sure that all of
9604 the added nops are really free. */
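	 /* Editor's worked example (illustrative only): on EV5 with
	    align = 16, ofs = 8 and a recognized group of len = 12 bytes,
	    ofs + len = 20 > 16, so nop_count = (16 - 8) / 4 = 2 nops are
	    emitted to push the whole group into the next aligned block.  */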
9605 else if (ofs + len > (int) align)
9607 int nop_count = (align - ofs) / 4;
9608 rtx where;
9610 /* Insert nops before labels, branches, and calls to truly merge
9611 the execution of the nops with the previous instruction group. */
9612 where = prev_nonnote_insn (i);
9613 if (where)
9615 if (LABEL_P (where))
9617 rtx where2 = prev_nonnote_insn (where);
9618 if (where2 && JUMP_P (where2))
9619 where = where2;
9621 else if (NONJUMP_INSN_P (where))
9622 where = i;
9624 else
9625 where = i;
9628 emit_insn_before ((*next_nop)(&prev_in_use), where);
9629 while (--nop_count);
9630 ofs = 0;
9633 ofs = (ofs + len) & (align - 1);
9634 prev_in_use = in_use;
9635 i = next;
9639 /* Insert an unop between a noreturn function call and GP load. */
9641 static void
9642 alpha_pad_noreturn (void)
9644 rtx insn, next;
9646 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9648 if (!CALL_P (insn)
9649 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9650 continue;
9652 next = next_active_insn (insn);
9654 if (next)
9656 rtx pat = PATTERN (next);
9658 if (GET_CODE (pat) == SET
9659 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9660 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9661 emit_insn_after (gen_unop (), insn);
9666 /* Machine dependent reorg pass. */
9668 static void
9669 alpha_reorg (void)
9671 /* Workaround for a linker error that triggers when an
9672 exception handler immediately follows a noreturn function.
9674 The instruction stream from an object file:
9676 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9677 58: 00 00 ba 27 ldah gp,0(ra)
9678 5c: 00 00 bd 23 lda gp,0(gp)
9679 60: 00 00 7d a7 ldq t12,0(gp)
9680 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9682 was converted in the final link pass to:
9684 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9685 fdb28: 00 00 fe 2f unop
9686 fdb2c: 00 00 fe 2f unop
9687 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9688 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9690 GP load instructions were wrongly cleared by the linker relaxation
9691 pass. This workaround prevents removal of GP loads by inserting
9692 an unop instruction between a noreturn function call and
9693 exception handler prologue. */
9695 if (current_function_has_exception_handlers ())
9696 alpha_pad_noreturn ();
9698 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9699 alpha_handle_trap_shadows ();
9701 /* Due to the number of extra trapb insns, don't bother fixing up
9702 alignment when trap precision is instruction. Moreover, we can
9703 only do our job when sched2 is run. */
9704 if (optimize && !optimize_size
9705 && alpha_tp != ALPHA_TP_INSN
9706 && flag_schedule_insns_after_reload)
9708 if (alpha_tune == PROCESSOR_EV4)
9709 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9710 else if (alpha_tune == PROCESSOR_EV5)
9711 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9715 #if !TARGET_ABI_UNICOSMK
9717 #ifdef HAVE_STAMP_H
9718 #include <stamp.h>
9719 #endif
9721 static void
9722 alpha_file_start (void)
9724 #ifdef OBJECT_FORMAT_ELF
9725 /* If emitting dwarf2 debug information, we cannot generate a .file
9726 directive to start the file, as it will conflict with dwarf2out
9727 file numbers. So it's only useful when emitting mdebug output. */
9728 targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9729 #endif
9731 default_file_start ();
9732 #ifdef MS_STAMP
9733 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9734 #endif
9736 fputs ("\t.set noreorder\n", asm_out_file);
9737 fputs ("\t.set volatile\n", asm_out_file);
9738 if (!TARGET_ABI_OPEN_VMS)
9739 fputs ("\t.set noat\n", asm_out_file);
9740 if (TARGET_EXPLICIT_RELOCS)
9741 fputs ("\t.set nomacro\n", asm_out_file);
9742 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9744 const char *arch;
9746 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9747 arch = "ev6";
9748 else if (TARGET_MAX)
9749 arch = "pca56";
9750 else if (TARGET_BWX)
9751 arch = "ev56";
9752 else if (alpha_cpu == PROCESSOR_EV5)
9753 arch = "ev5";
9754 else
9755 arch = "ev4";
9757 fprintf (asm_out_file, "\t.arch %s\n", arch);
9760 #endif
9762 #ifdef OBJECT_FORMAT_ELF
9763 /* Since we don't have a .dynbss section, we should not allow global
9764 relocations in the .rodata section. */
9766 static int
9767 alpha_elf_reloc_rw_mask (void)
9769 return flag_pic ? 3 : 2;
9772 /* Return a section for X. The only special thing we do here is to
9773 honor small data. */
9775 static section *
9776 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9777 unsigned HOST_WIDE_INT align)
9779 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9780 /* ??? Consider using mergeable sdata sections. */
9781 return sdata_section;
9782 else
9783 return default_elf_select_rtx_section (mode, x, align);
9786 static unsigned int
9787 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9789 unsigned int flags = 0;
9791 if (strcmp (name, ".sdata") == 0
9792 || strncmp (name, ".sdata.", 7) == 0
9793 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9794 || strcmp (name, ".sbss") == 0
9795 || strncmp (name, ".sbss.", 6) == 0
9796 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9797 flags = SECTION_SMALL;
9799 flags |= default_section_type_flags (decl, name, reloc);
9800 return flags;
9802 #endif /* OBJECT_FORMAT_ELF */
9804 /* Structure to collect function names for final output in link section. */
9805 /* Note that items marked with GTY can't be ifdef'ed out. */
9807 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9808 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9810 struct GTY(()) alpha_links
9812 int num;
9813 const char *target;
9814 rtx linkage;
9815 enum links_kind lkind;
9816 enum reloc_kind rkind;
9819 struct GTY(()) alpha_funcs
9821 int num;
9822 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9823 links;
9826 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9827 splay_tree alpha_links_tree;
9828 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9829 splay_tree alpha_funcs_tree;
9831 static GTY(()) int alpha_funcs_num;
9833 #if TARGET_ABI_OPEN_VMS
9835 /* Return the VMS argument type corresponding to MODE. */
9837 enum avms_arg_type
9838 alpha_arg_type (enum machine_mode mode)
9840 switch (mode)
9842 case SFmode:
9843 return TARGET_FLOAT_VAX ? FF : FS;
9844 case DFmode:
9845 return TARGET_FLOAT_VAX ? FD : FT;
9846 default:
9847 return I64;
9851 /* Return an rtx for an integer representing the VMS Argument Information
9852 register value. */
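/* Editor's sketch of the packing performed below (derived from the loop;
   the layout is read directly off the code): the argument count occupies
   the low bits and each of the six 3-bit type codes starts at bit 8, i.e.

	regval = cum.num_args
		 | atypes[0] << 8  | atypes[1] << 11 | atypes[2] << 14
		 | atypes[3] << 17 | atypes[4] << 20 | atypes[5] << 23;  */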
9855 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9857 unsigned HOST_WIDE_INT regval = cum.num_args;
9858 int i;
9860 for (i = 0; i < 6; i++)
9861 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9863 return GEN_INT (regval);
9866 /* Register the need for a (fake) .linkage entry for calls to function NAME.
9867 IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
9868 Return a SYMBOL_REF suited to the call instruction. */
9871 alpha_need_linkage (const char *name, int is_local)
9873 splay_tree_node node;
9874 struct alpha_links *al;
9875 const char *target;
9876 tree id;
9878 if (name[0] == '*')
9879 name++;
9881 if (is_local)
9883 struct alpha_funcs *cfaf;
9885 if (!alpha_funcs_tree)
9886 alpha_funcs_tree = splay_tree_new_ggc
9887 (splay_tree_compare_pointers,
9888 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9889 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9892 cfaf = ggc_alloc_alpha_funcs ();
9894 cfaf->links = 0;
9895 cfaf->num = ++alpha_funcs_num;
9897 splay_tree_insert (alpha_funcs_tree,
9898 (splay_tree_key) current_function_decl,
9899 (splay_tree_value) cfaf);
9902 if (alpha_links_tree)
9904 /* Is this name already defined? */
9906 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9907 if (node)
9909 al = (struct alpha_links *) node->value;
9910 if (is_local)
9912 /* Defined here but external assumed. */
9913 if (al->lkind == KIND_EXTERN)
9914 al->lkind = KIND_LOCAL;
9916 else
9918 /* Used here but unused assumed. */
9919 if (al->lkind == KIND_UNUSED)
9920 al->lkind = KIND_LOCAL;
9922 return al->linkage;
9925 else
9926 alpha_links_tree = splay_tree_new_ggc
9927 ((splay_tree_compare_fn) strcmp,
9928 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9929 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9931 al = ggc_alloc_alpha_links ();
9932 name = ggc_strdup (name);
9934 /* Assume external if no definition. */
9935 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9937 /* Ensure we have an IDENTIFIER so assemble_name can mark it used
9938 and so we can find the ultimate alias target, as assemble_name does. */
9939 id = get_identifier (name);
9940 target = NULL;
9941 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9943 id = TREE_CHAIN (id);
9944 target = IDENTIFIER_POINTER (id);
9947 al->target = target ? target : name;
9948 al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
9950 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9951 (splay_tree_value) al);
9953 return al->linkage;
9956 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9957 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9958 this is the reference to the linkage pointer value, 0 if this is the
9959 reference to the function entry value. RFLAG is 1 if this is a reduced
9960 reference (code address only), 0 if this is a full reference. */
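/* Editor's note (illustrative, derived from the code below): a call to
   "foo" from the function numbered 3 in this unit uses the linkage symbol
   "$3..foo..lk"; with LFLAG set, the reference is a MEM at offset 8 into
   that entry (the second quadword of the linkage pair) rather than the
   entry's address itself.  */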
9963 alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
9965 splay_tree_node cfunnode;
9966 struct alpha_funcs *cfaf;
9967 struct alpha_links *al;
9968 const char *name = XSTR (func, 0);
9970 cfaf = (struct alpha_funcs *) 0;
9971 al = (struct alpha_links *) 0;
9973 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9974 cfaf = (struct alpha_funcs *) cfunnode->value;
9976 if (cfaf->links)
9978 splay_tree_node lnode;
9980 /* Is this name already defined? */
9982 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9983 if (lnode)
9984 al = (struct alpha_links *) lnode->value;
9986 else
9987 cfaf->links = splay_tree_new_ggc
9988 ((splay_tree_compare_fn) strcmp,
9989 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9990 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9992 if (!al)
9994 size_t name_len;
9995 size_t buflen;
9996 char *linksym;
9997 splay_tree_node node = 0;
9998 struct alpha_links *anl;
10000 if (name[0] == '*')
10001 name++;
10003 name_len = strlen (name);
10004 linksym = (char *) alloca (name_len + 50);
10006 al = ggc_alloc_alpha_links ();
10007 al->num = cfaf->num;
10008 al->target = NULL;
10010 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
10011 if (node)
10013 anl = (struct alpha_links *) node->value;
10014 al->lkind = anl->lkind;
10015 name = anl->target;
10018 sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
10019 buflen = strlen (linksym);
10021 al->linkage = gen_rtx_SYMBOL_REF
10022 (Pmode, ggc_alloc_string (linksym, buflen + 1));
10024 splay_tree_insert (cfaf->links, (splay_tree_key) name,
10025 (splay_tree_value) al);
10028 if (rflag)
10029 al->rkind = KIND_CODEADDR;
10030 else
10031 al->rkind = KIND_LINKAGE;
10033 if (lflag)
10034 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
10035 else
10036 return al->linkage;
10039 static int
10040 alpha_write_one_linkage (splay_tree_node node, void *data)
10042 const char *const name = (const char *) node->key;
10043 struct alpha_links *link = (struct alpha_links *) node->value;
10044 FILE *stream = (FILE *) data;
10046 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
10047 if (link->rkind == KIND_CODEADDR)
10049 if (link->lkind == KIND_LOCAL)
10051 /* Local and used */
10052 fprintf (stream, "\t.quad %s..en\n", name);
10054 else
10056 /* External and used, request code address. */
10057 fprintf (stream, "\t.code_address %s\n", name);
10060 else
10062 if (link->lkind == KIND_LOCAL)
10064 /* Local and used, build linkage pair. */
10065 fprintf (stream, "\t.quad %s..en\n", name);
10066 fprintf (stream, "\t.quad %s\n", name);
10068 else
10070 /* External and used, request linkage pair. */
10071 fprintf (stream, "\t.linkage %s\n", name);
10075 return 0;
10078 static void
10079 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
10081 splay_tree_node node;
10082 struct alpha_funcs *func;
10084 fprintf (stream, "\t.link\n");
10085 fprintf (stream, "\t.align 3\n");
10086 in_section = NULL;
10088 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
10089 func = (struct alpha_funcs *) node->value;
10091 fputs ("\t.name ", stream);
10092 assemble_name (stream, funname);
10093 fputs ("..na\n", stream);
10094 ASM_OUTPUT_LABEL (stream, funname);
10095 fprintf (stream, "\t.pdesc ");
10096 assemble_name (stream, funname);
10097 fprintf (stream, "..en,%s\n",
10098 alpha_procedure_type == PT_STACK ? "stack"
10099 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
10101 if (func->links)
10103 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
10104 /* splay_tree_delete (func->links); */
10108 /* Switch to an arbitrary section NAME with attributes as specified
10109 by FLAGS. ALIGN specifies any known alignment requirements for
10110 the section; 0 if the default should be used. */
10112 static void
10113 vms_asm_named_section (const char *name, unsigned int flags,
10114 tree decl ATTRIBUTE_UNUSED)
10116 fputc ('\n', asm_out_file);
10117 fprintf (asm_out_file, ".section\t%s", name);
10119 if (flags & SECTION_DEBUG)
10120 fprintf (asm_out_file, ",NOWRT");
10122 fputc ('\n', asm_out_file);
10125 /* Record an element in the table of global constructors. SYMBOL is
10126 a SYMBOL_REF of the function to be called; PRIORITY is a number
10127 between 0 and MAX_INIT_PRIORITY.
10129 Differs from default_ctors_section_asm_out_constructor in that the
10130 width of the .ctors entry is always 64 bits, rather than the 32 bits
10131 used by a normal pointer. */
10133 static void
10134 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10136 switch_to_section (ctors_section);
10137 assemble_align (BITS_PER_WORD);
10138 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10141 static void
10142 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10144 switch_to_section (dtors_section);
10145 assemble_align (BITS_PER_WORD);
10146 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10148 #else
10151 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
10152 int is_local ATTRIBUTE_UNUSED)
10154 return NULL_RTX;
10158 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
10159 tree cfundecl ATTRIBUTE_UNUSED,
10160 int lflag ATTRIBUTE_UNUSED,
10161 int rflag ATTRIBUTE_UNUSED)
10163 return NULL_RTX;
10166 #endif /* TARGET_ABI_OPEN_VMS */
10168 #if TARGET_ABI_UNICOSMK
10170 /* This evaluates to true if we do not know how to pass TYPE solely in
10171 registers. This is the case for all arguments that do not fit in two
10172 registers. */
10174 static bool
10175 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
10177 if (type == NULL)
10178 return false;
10180 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10181 return true;
10182 if (TREE_ADDRESSABLE (type))
10183 return true;
10185 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
10188 /* Define the offset between two registers, one to be eliminated, and the
10189 other its replacement, at the start of a routine. */
10192 unicosmk_initial_elimination_offset (int from, int to)
10194 int fixed_size;
10196 fixed_size = alpha_sa_size();
10197 if (fixed_size != 0)
10198 fixed_size += 48;
10200 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10201 return -fixed_size;
10202 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10203 return 0;
10204 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10205 return (ALPHA_ROUND (crtl->outgoing_args_size)
10206 + ALPHA_ROUND (get_frame_size()));
10207 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10208 return (ALPHA_ROUND (fixed_size)
10209 + ALPHA_ROUND (get_frame_size()
10210 + crtl->outgoing_args_size));
10211 else
10212 gcc_unreachable ();
10215 /* Output the module name for .ident and .end directives. We have to strip
10216 directories and make sure that the module name starts with a letter
10217 or '$'. */
10219 static void
10220 unicosmk_output_module_name (FILE *file)
10222 const char *name = lbasename (main_input_filename);
10223 unsigned len = strlen (name);
10224 char *clean_name = alloca (len + 2);
10225 char *ptr = clean_name;
10227 /* CAM only accepts module names that start with a letter or '$'. We
10228 prefix the module name with a '$' if necessary. */
10230 if (!ISALPHA (*name))
10231 *ptr++ = '$';
10232 memcpy (ptr, name, len + 1);
10233 clean_symbol_name (clean_name);
10234 fputs (clean_name, file);
10237 /* Output the definition of a common variable. */
10239 void
10240 unicosmk_output_common (FILE *file, const char *name, int size, int align)
10242 tree name_tree;
10243 printf ("T3E__: common %s\n", name);
10245 in_section = NULL;
10246 fputs("\t.endp\n\n\t.psect ", file);
10247 assemble_name(file, name);
10248 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
10249 fprintf(file, "\t.byte\t0:%d\n", size);
10251 /* Mark the symbol as defined in this module. */
10252 name_tree = get_identifier (name);
10253 TREE_ASM_WRITTEN (name_tree) = 1;
10256 #define SECTION_PUBLIC SECTION_MACH_DEP
10257 #define SECTION_MAIN (SECTION_PUBLIC << 1)
10258 static int current_section_align;
10260 /* A get_unnamed_section callback for switching to the text section. */
10262 static void
10263 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10265 static int count = 0;
10266 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
10269 /* A get_unnamed_section callback for switching to the data section. */
10271 static void
10272 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10274 static int count = 1;
10275 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
10278 /* Implement TARGET_ASM_INIT_SECTIONS.
10280 The Cray assembler is really weird with respect to sections. It has only
10281 named sections and you can't reopen a section once it has been closed.
10282 This means that we have to generate unique names whenever we want to
10283 reenter the text or the data section. */
10285 static void
10286 unicosmk_init_sections (void)
10288 text_section = get_unnamed_section (SECTION_CODE,
10289 unicosmk_output_text_section_asm_op,
10290 NULL);
10291 data_section = get_unnamed_section (SECTION_WRITE,
10292 unicosmk_output_data_section_asm_op,
10293 NULL);
10294 readonly_data_section = data_section;
10297 static unsigned int
10298 unicosmk_section_type_flags (tree decl, const char *name,
10299 int reloc ATTRIBUTE_UNUSED)
10301 unsigned int flags = default_section_type_flags (decl, name, reloc);
10303 if (!decl)
10304 return flags;
10306 if (TREE_CODE (decl) == FUNCTION_DECL)
10308 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10309 if (align_functions_log > current_section_align)
10310 current_section_align = align_functions_log;
10312 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10313 flags |= SECTION_MAIN;
10315 else
10316 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10318 if (TREE_PUBLIC (decl))
10319 flags |= SECTION_PUBLIC;
10321 return flags;
10324 /* Generate a section name for decl and associate it with the
10325 declaration. */
10327 static void
10328 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
10330 const char *name;
10331 int len;
10333 gcc_assert (decl);
10335 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10336 name = default_strip_name_encoding (name);
10337 len = strlen (name);
10339 if (TREE_CODE (decl) == FUNCTION_DECL)
10341 char *string;
10343 /* It is essential that we prefix the section name here because
10344 otherwise the section names generated for constructors and
10345 destructors confuse collect2. */
10347 string = alloca (len + 6);
10348 sprintf (string, "code@%s", name);
10349 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10351 else if (TREE_PUBLIC (decl))
10352 DECL_SECTION_NAME (decl) = build_string (len, name);
10353 else
10355 char *string;
10357 string = alloca (len + 6);
10358 sprintf (string, "data@%s", name);
10359 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
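/* For example, a function "foo" is placed in section "code@foo", a public
   variable "bar" keeps the plain name "bar", and a file-local variable "baz"
   is placed in "data@baz".  */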
10363 /* Switch to an arbitrary section NAME with attributes as specified
10364 by FLAGS. ALIGN specifies any known alignment requirements for
10365 the section; 0 if the default should be used. */
10367 static void
10368 unicosmk_asm_named_section (const char *name, unsigned int flags,
10369 tree decl ATTRIBUTE_UNUSED)
10371 const char *kind;
10373 /* Close the previous section. */
10375 fputs ("\t.endp\n\n", asm_out_file);
10377 /* Find out what kind of section we are opening. */
10379 if (flags & SECTION_MAIN)
10380 fputs ("\t.start\tmain\n", asm_out_file);
10382 if (flags & SECTION_CODE)
10383 kind = "code";
10384 else if (flags & SECTION_PUBLIC)
10385 kind = "common";
10386 else
10387 kind = "data";
10389 if (current_section_align != 0)
10390 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10391 current_section_align, kind);
10392 else
10393 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
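/* For instance, switching to the section of a function "foo" with a section
   alignment of 2**3 bytes would emit roughly:

	.endp

	.psect	code@foo,3,code

   and the section holding "main" is additionally preceded by a
   "\t.start\tmain" directive.  */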
10396 static void
10397 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10399 if (DECL_P (decl)
10400 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10401 unicosmk_unique_section (decl, 0);
10404 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10405 in code sections because .align fills unused space with zeroes. */
10407 void
10408 unicosmk_output_align (FILE *file, int align)
10410 if (inside_function)
10411 fprintf (file, "\tgcc@code@align\t%d\n", align);
10412 else
10413 fprintf (file, "\t.align\t%d\n", align);
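/* E.g. unicosmk_output_align (file, 3) prints "\tgcc@code@align\t3" inside a
   function and "\t.align\t3" elsewhere; the macro itself is defined in
   unicosmk_file_start below.  */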
10416 /* Add a case vector to the current function's list of deferred case
10417 vectors. Case vectors have to be put into a separate section because CAM
10418 does not allow data definitions in code sections. */
10420 void
10421 unicosmk_defer_case_vector (rtx lab, rtx vec)
10423 struct machine_function *machine = cfun->machine;
10425 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10426 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10427 machine->addr_list);
10430 /* Output a case vector. */
10432 static void
10433 unicosmk_output_addr_vec (FILE *file, rtx vec)
10435 rtx lab = XEXP (vec, 0);
10436 rtx body = XEXP (vec, 1);
10437 int vlen = XVECLEN (body, 0);
10438 int idx;
10440 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10442 for (idx = 0; idx < vlen; idx++)
10444 ASM_OUTPUT_ADDR_VEC_ELT
10445 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10449 /* Output current function's deferred case vectors. */
10451 static void
10452 unicosmk_output_deferred_case_vectors (FILE *file)
10454 struct machine_function *machine = cfun->machine;
10455 rtx t;
10457 if (machine->addr_list == NULL_RTX)
10458 return;
10460 switch_to_section (data_section);
10461 for (t = machine->addr_list; t; t = XEXP (t, 1))
10462 unicosmk_output_addr_vec (file, XEXP (t, 0));
10465 /* Generate the name of the SSIB section for the current function. */
10467 #define SSIB_PREFIX "__SSIB_"
10468 #define SSIB_PREFIX_LEN 7
10470 static const char *
10471 unicosmk_ssib_name (void)
10473 /* This is ok since CAM won't be able to deal with names longer than that
10474 anyway. */
10476 static char name[256];
10478 rtx x;
10479 const char *fnname;
10480 int len;
10482 x = DECL_RTL (cfun->decl);
10483 gcc_assert (MEM_P (x));
10484 x = XEXP (x, 0);
10485 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10486 fnname = XSTR (x, 0);
10488 len = strlen (fnname);
10489 if (len + SSIB_PREFIX_LEN > 255)
10490 len = 255 - SSIB_PREFIX_LEN;
10492 strcpy (name, SSIB_PREFIX);
10493 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10494 name[len + SSIB_PREFIX_LEN] = 0;
10496 return name;
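/* E.g. for a function "foo" this returns "__SSIB_foo"; very long names are
   simply truncated so that the result still fits in 255 characters.  */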
10499 /* Set up the dynamic subprogram information block (DSIB) and update the
10500 frame pointer register ($15) for subroutines which have a frame. If the
10501 subroutine doesn't have a frame, simply increment $15. */
10503 static void
10504 unicosmk_gen_dsib (unsigned long *imaskP)
10506 if (alpha_procedure_type == PT_STACK)
10508 const char *ssib_name;
10509 rtx mem;
10511 /* Allocate 64 bytes for the DSIB. */
10513 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10514 GEN_INT (-64))));
10515 emit_insn (gen_blockage ());
10517 /* Save the return address. */
10519 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10520 set_mem_alias_set (mem, alpha_sr_alias_set);
10521 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10522 (*imaskP) &= ~(1UL << REG_RA);
10524 /* Save the old frame pointer. */
10526 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10527 set_mem_alias_set (mem, alpha_sr_alias_set);
10528 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10529 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10531 emit_insn (gen_blockage ());
10533 /* Store the SSIB pointer. */
10535 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10536 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10537 set_mem_alias_set (mem, alpha_sr_alias_set);
10539 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10540 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10541 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10543 /* Save the CIW index. */
10545 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10546 set_mem_alias_set (mem, alpha_sr_alias_set);
10547 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10549 emit_insn (gen_blockage ());
10551 /* Set the new frame pointer. */
10552 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10553 stack_pointer_rtx, GEN_INT (64))));
10555 else
10557 /* Increment the frame pointer register to indicate that we do not
10558 have a frame. */
10559 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10560 hard_frame_pointer_rtx, const1_rtx));
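/* A sketch of the DSIB laid out above (offsets are from the stack pointer
   after the 64-byte adjustment):

	sp+56	return address ($26)
	sp+48	previous frame pointer
	sp+32	pointer to the static SIB (SSIB)
	sp+24	CIW index (from $25)

   and the new frame pointer $15 is set to sp+64, i.e. the stack pointer
   value before the DSIB was allocated.  */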
10564 /* Output the static subroutine information block for the current
10565 function. */
10567 static void
10568 unicosmk_output_ssib (FILE *file, const char *fnname)
10570 int len;
10571 int i;
10572 rtx x;
10573 rtx ciw;
10574 struct machine_function *machine = cfun->machine;
10576 in_section = NULL;
10577 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10578 unicosmk_ssib_name ());
10580 /* Some required stuff and the function name length. */
10582 len = strlen (fnname);
10583 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10585 /* Saved registers
10586 ??? We don't do that yet. */
10588 fputs ("\t.quad\t0\n", file);
10590 /* Function address. */
10592 fputs ("\t.quad\t", file);
10593 assemble_name (file, fnname);
10594 putc ('\n', file);
10596 fputs ("\t.quad\t0\n", file);
10597 fputs ("\t.quad\t0\n", file);
10599 /* Function name.
10600 ??? We do it the same way Cray CC does it but this could be
10601 simplified. */
10603 for (i = 0; i < len; i++)
10604 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10605 if ((len % 8) == 0)
10606 fputs ("\t.quad\t0\n", file);
10607 else
10608 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8)) * 8);
10610 /* All call information words used in the function. */
10612 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10614 ciw = XEXP (x, 0);
10615 #if HOST_BITS_PER_WIDE_INT == 32
10616 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10617 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10618 #else
10619 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10620 #endif
10624 /* Add a call information word (CIW) to the list of the current function's
10625 CIWs and return its index.
10627 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10630 unicosmk_add_call_info_word (rtx x)
10632 rtx node;
10633 struct machine_function *machine = cfun->machine;
10635 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10636 if (machine->first_ciw == NULL_RTX)
10637 machine->first_ciw = node;
10638 else
10639 XEXP (machine->last_ciw, 1) = node;
10641 machine->last_ciw = node;
10642 ++machine->ciw_count;
10644 return GEN_INT (machine->ciw_count
10645 + strlen (current_function_name ())/8 + 5);
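/* The value returned above is (roughly) the quadword index of this CIW
   within the SSIB emitted by unicosmk_output_ssib: five header quadwords,
   then the function name padded out to a multiple of eight bytes
   (strlen (name) / 8 + 1 quadwords), then the CIWs themselves.  */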
10648 /* The Cray assembler doesn't accept extern declarations for symbols which
10649 are defined in the same file. We have to keep track of all global
10650 symbols which are referenced and/or defined in a source file and output
10651 extern declarations for those which are referenced but not defined at
10652 the end of file. */
10654 /* List of identifiers for which an extern declaration might have to be
10655 emitted. */
10656 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10658 struct unicosmk_extern_list
10660 struct unicosmk_extern_list *next;
10661 const char *name;
10664 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10666 /* Output extern declarations which are required for every asm file. */
10668 static void
10669 unicosmk_output_default_externs (FILE *file)
10671 static const char *const externs[] =
10672 { "__T3E_MISMATCH" };
10674 int i;
10675 int n;
10677 n = ARRAY_SIZE (externs);
10679 for (i = 0; i < n; i++)
10680 fprintf (file, "\t.extern\t%s\n", externs[i]);
10683 /* Output extern declarations for global symbols which have been
10684 referenced but not defined. */
10686 static void
10687 unicosmk_output_externs (FILE *file)
10689 struct unicosmk_extern_list *p;
10690 const char *real_name;
10691 int len;
10692 tree name_tree;
10694 len = strlen (user_label_prefix);
10695 for (p = unicosmk_extern_head; p != 0; p = p->next)
10697 /* We have to strip the encoding and possibly remove user_label_prefix
10698 from the identifier in order to handle -fleading-underscore and
10699 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10700 real_name = default_strip_name_encoding (p->name);
10701 if (len && p->name[0] == '*'
10702 && !memcmp (real_name, user_label_prefix, len))
10703 real_name += len;
10705 name_tree = get_identifier (real_name);
10706 if (! TREE_ASM_WRITTEN (name_tree))
10708 TREE_ASM_WRITTEN (name_tree) = 1;
10709 fputs ("\t.extern\t", file);
10710 assemble_name (file, p->name);
10711 putc ('\n', file);
10716 /* Record an extern. */
10718 void
10719 unicosmk_add_extern (const char *name)
10721 struct unicosmk_extern_list *p;
10723 p = (struct unicosmk_extern_list *)
10724 xmalloc (sizeof (struct unicosmk_extern_list));
10725 p->next = unicosmk_extern_head;
10726 p->name = name;
10727 unicosmk_extern_head = p;
10730 /* The Cray assembler generates incorrect code if identifiers which
10731 conflict with register names are used as instruction operands. We have
10732 to replace such identifiers with DEX expressions. */
10734 /* Structure to collect identifiers which have been replaced by DEX
10735 expressions. */
10736 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10738 struct unicosmk_dex {
10739 struct unicosmk_dex *next;
10740 const char *name;
10743 /* List of identifiers which have been replaced by DEX expressions. The DEX
10744 number is determined by the position in the list. */
10746 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10748 /* The number of elements in the DEX list. */
10750 static int unicosmk_dex_count = 0;
10752 /* Check if NAME must be replaced by a DEX expression. */
10754 static int
10755 unicosmk_special_name (const char *name)
10757 if (name[0] == '*')
10758 ++name;
10760 if (name[0] == '$')
10761 ++name;
10763 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10764 return 0;
10766 switch (name[1])
10768 case '1': case '2':
10769 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10771 case '3':
10772 return (name[2] == '\0'
10773 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10775 default:
10776 return (ISDIGIT (name[1]) && name[2] == '\0');
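/* Examples: "r5", "$f10" and "*R31" are special (they look like the register
   names r0-r31 / f0-f31), whereas "r32", "f123" or "reg" are not.  */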
10780 /* Return the DEX number if X must be replaced by a DEX expression and 0
10781 otherwise. */
10783 static int
10784 unicosmk_need_dex (rtx x)
10786 struct unicosmk_dex *dex;
10787 const char *name;
10788 int i;
10790 if (GET_CODE (x) != SYMBOL_REF)
10791 return 0;
10793 name = XSTR (x,0);
10794 if (! unicosmk_special_name (name))
10795 return 0;
10797 i = unicosmk_dex_count;
10798 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10800 if (! strcmp (name, dex->name))
10801 return i;
10802 --i;
10805 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10806 dex->name = name;
10807 dex->next = unicosmk_dex_list;
10808 unicosmk_dex_list = dex;
10810 ++unicosmk_dex_count;
10811 return unicosmk_dex_count;
10814 /* Output the DEX definitions for this file. */
10816 static void
10817 unicosmk_output_dex (FILE *file)
10819 struct unicosmk_dex *dex;
10820 int i;
10822 if (unicosmk_dex_list == NULL)
10823 return;
10825 fprintf (file, "\t.dexstart\n");
10827 i = unicosmk_dex_count;
10828 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10830 fprintf (file, "\tDEX (%d) = ", i);
10831 assemble_name (file, dex->name);
10832 putc ('\n', file);
10833 --i;
10836 fprintf (file, "\t.dexend\n");
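/* If, say, "r5" and then "f10" were recorded by unicosmk_need_dex (getting
   DEX numbers 1 and 2 respectively), the list is emitted roughly as:

	.dexstart
	DEX (2) = f10
	DEX (1) = r5
	.dexend  */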
10839 /* Output text to appear at the beginning of an assembler file. */
10841 static void
10842 unicosmk_file_start (void)
10844 int i;
10846 fputs ("\t.ident\t", asm_out_file);
10847 unicosmk_output_module_name (asm_out_file);
10848 fputs ("\n\n", asm_out_file);
10850 /* The Unicos/Mk assembler uses different register names. Instead of trying
10851 to support them, we simply use micro definitions. */
10853 /* CAM has different register names: rN for the integer register N and fN
10854 for the floating-point register N. Instead of trying to use these in
10855 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10856 register. */
10858 for (i = 0; i < 32; ++i)
10859 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10861 for (i = 0; i < 32; ++i)
10862 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10864 putc ('\n', asm_out_file);
10866 /* The .align directive fills unused space with zeroes, which does not work
10867 in code sections. We define the macro 'gcc@code@align' which uses nops
10868 instead. Note that it assumes that code sections always have the
10869 biggest possible alignment since . refers to the current offset from
10870 the beginning of the section. */
10872 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10873 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10874 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10875 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10876 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10877 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10878 fputs ("\t.endr\n", asm_out_file);
10879 fputs ("\t.endif\n", asm_out_file);
10880 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10882 /* Output extern declarations which should always be visible. */
10883 unicosmk_output_default_externs (asm_out_file);
10885 /* Open a dummy section. We always need to be inside a section for the
10886 section-switching code to work correctly.
10887 ??? This should be a module id or something like that. I still have to
10888 figure out what the rules for those are. */
10889 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
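/* The beginning of the emitted file therefore looks roughly like:

	.ident	<module name>

   $0 <- r0
   $1 <- r1
   ...
   $f31 <- f31

	.macro gcc@code@align n
	...
	.endm gcc@code@align

	.extern	__T3E_MISMATCH

	.psect	$SG00000,data  */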
10892 /* Output text to appear at the end of an assembler file. This includes all
10893 pending extern declarations and DEX expressions. */
10895 static void
10896 unicosmk_file_end (void)
10898 fputs ("\t.endp\n\n", asm_out_file);
10900 /* Output all pending externs. */
10902 unicosmk_output_externs (asm_out_file);
10904 /* Output dex definitions used for functions whose names conflict with
10905 register names. */
10907 unicosmk_output_dex (asm_out_file);
10909 fputs ("\t.end\t", asm_out_file);
10910 unicosmk_output_module_name (asm_out_file);
10911 putc ('\n', asm_out_file);
10914 #else
10916 static void
10917 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10920 static void
10921 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10924 static void
10925 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10926 const char * fnname ATTRIBUTE_UNUSED)
10930 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10932 return NULL_RTX;
10935 static int
10936 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10938 return 0;
10941 #endif /* TARGET_ABI_UNICOSMK */
10943 static void
10944 alpha_init_libfuncs (void)
10946 if (TARGET_ABI_UNICOSMK)
10948 /* Prevent gcc from generating calls to __divsi3. */
10949 set_optab_libfunc (sdiv_optab, SImode, 0);
10950 set_optab_libfunc (udiv_optab, SImode, 0);
10952 /* Use the functions provided by the system library
10953 for DImode integer division. */
10954 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10955 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10957 else if (TARGET_ABI_OPEN_VMS)
10959 /* Use the VMS runtime library functions for division and
10960 remainder. */
10961 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10962 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10963 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10964 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10965 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10966 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10967 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10968 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10969 abort_libfunc = init_one_libfunc ("decc$abort");
10970 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10971 #ifdef MEM_LIBFUNCS_INIT
10972 MEM_LIBFUNCS_INIT;
10973 #endif
10977 /* On the Alpha, we use this to disable the floating-point registers
10978 when they don't exist. */
10980 static void
10981 alpha_conditional_register_usage (void)
10983 int i;
10984 if (! TARGET_FPREGS)
10985 for (i = 32; i < 63; i++)
10986 fixed_regs[i] = call_used_regs[i] = 1;
10989 /* Initialize the GCC target structure. */
10990 #if TARGET_ABI_OPEN_VMS
10991 # undef TARGET_ATTRIBUTE_TABLE
10992 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10993 # undef TARGET_CAN_ELIMINATE
10994 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
10995 #endif
10997 #undef TARGET_IN_SMALL_DATA_P
10998 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
11000 #if TARGET_ABI_UNICOSMK
11001 # undef TARGET_INSERT_ATTRIBUTES
11002 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
11003 # undef TARGET_SECTION_TYPE_FLAGS
11004 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
11005 # undef TARGET_ASM_UNIQUE_SECTION
11006 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
11007 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
11008 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
11009 # undef TARGET_ASM_GLOBALIZE_LABEL
11010 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
11011 # undef TARGET_MUST_PASS_IN_STACK
11012 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
11013 #endif
11015 #undef TARGET_ASM_ALIGNED_HI_OP
11016 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
11017 #undef TARGET_ASM_ALIGNED_DI_OP
11018 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
11020 /* Default unaligned ops are provided for ELF systems. To get unaligned
11021 data for non-ELF systems, we have to turn off auto alignment. */
11022 #if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
11023 #undef TARGET_ASM_UNALIGNED_HI_OP
11024 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
11025 #undef TARGET_ASM_UNALIGNED_SI_OP
11026 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
11027 #undef TARGET_ASM_UNALIGNED_DI_OP
11028 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
11029 #endif
11031 #ifdef OBJECT_FORMAT_ELF
11032 #undef TARGET_ASM_RELOC_RW_MASK
11033 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
11034 #undef TARGET_ASM_SELECT_RTX_SECTION
11035 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
11036 #undef TARGET_SECTION_TYPE_FLAGS
11037 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
11038 #endif
11040 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
11041 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
11043 #undef TARGET_INIT_LIBFUNCS
11044 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
11046 #undef TARGET_LEGITIMIZE_ADDRESS
11047 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
11049 #if TARGET_ABI_UNICOSMK
11050 #undef TARGET_ASM_FILE_START
11051 #define TARGET_ASM_FILE_START unicosmk_file_start
11052 #undef TARGET_ASM_FILE_END
11053 #define TARGET_ASM_FILE_END unicosmk_file_end
11054 #else
11055 #undef TARGET_ASM_FILE_START
11056 #define TARGET_ASM_FILE_START alpha_file_start
11057 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
11058 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
11059 #endif
11061 #undef TARGET_SCHED_ADJUST_COST
11062 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
11063 #undef TARGET_SCHED_ISSUE_RATE
11064 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
11065 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11066 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
11067 alpha_multipass_dfa_lookahead
11069 #undef TARGET_HAVE_TLS
11070 #define TARGET_HAVE_TLS HAVE_AS_TLS
11072 #undef TARGET_BUILTIN_DECL
11073 #define TARGET_BUILTIN_DECL alpha_builtin_decl
11074 #undef TARGET_INIT_BUILTINS
11075 #define TARGET_INIT_BUILTINS alpha_init_builtins
11076 #undef TARGET_EXPAND_BUILTIN
11077 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
11078 #undef TARGET_FOLD_BUILTIN
11079 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
11081 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11082 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
11083 #undef TARGET_CANNOT_COPY_INSN_P
11084 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
11085 #undef TARGET_CANNOT_FORCE_CONST_MEM
11086 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
11088 #if TARGET_ABI_OSF
11089 #undef TARGET_ASM_OUTPUT_MI_THUNK
11090 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
11091 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11092 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11093 #undef TARGET_STDARG_OPTIMIZE_HOOK
11094 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
11095 #endif
11097 #undef TARGET_RTX_COSTS
11098 #define TARGET_RTX_COSTS alpha_rtx_costs
11099 #undef TARGET_ADDRESS_COST
11100 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
11102 #undef TARGET_MACHINE_DEPENDENT_REORG
11103 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
11105 #undef TARGET_PROMOTE_FUNCTION_MODE
11106 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
11107 #undef TARGET_PROMOTE_PROTOTYPES
11108 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
11109 #undef TARGET_RETURN_IN_MEMORY
11110 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
11111 #undef TARGET_PASS_BY_REFERENCE
11112 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
11113 #undef TARGET_SETUP_INCOMING_VARARGS
11114 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
11115 #undef TARGET_STRICT_ARGUMENT_NAMING
11116 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
11117 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
11118 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
11119 #undef TARGET_SPLIT_COMPLEX_ARG
11120 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
11121 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11122 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
11123 #undef TARGET_ARG_PARTIAL_BYTES
11124 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
11125 #undef TARGET_FUNCTION_ARG
11126 #define TARGET_FUNCTION_ARG alpha_function_arg
11127 #undef TARGET_FUNCTION_ARG_ADVANCE
11128 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
11129 #undef TARGET_TRAMPOLINE_INIT
11130 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
11132 #undef TARGET_SECONDARY_RELOAD
11133 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
11135 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11136 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
11137 #undef TARGET_VECTOR_MODE_SUPPORTED_P
11138 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
11140 #undef TARGET_BUILD_BUILTIN_VA_LIST
11141 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
11143 #undef TARGET_EXPAND_BUILTIN_VA_START
11144 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
11146 /* The Alpha architecture does not require sequential consistency. See
11147 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
11148 for an example of how it can be violated in practice. */
11149 #undef TARGET_RELAXED_ORDERING
11150 #define TARGET_RELAXED_ORDERING true
11152 #undef TARGET_DEFAULT_TARGET_FLAGS
11153 #define TARGET_DEFAULT_TARGET_FLAGS \
11154 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
11155 #undef TARGET_HANDLE_OPTION
11156 #define TARGET_HANDLE_OPTION alpha_handle_option
11158 #undef TARGET_OPTION_OVERRIDE
11159 #define TARGET_OPTION_OVERRIDE alpha_option_override
11161 #undef TARGET_OPTION_OPTIMIZATION_TABLE
11162 #define TARGET_OPTION_OPTIMIZATION_TABLE alpha_option_optimization_table
11164 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11165 #undef TARGET_MANGLE_TYPE
11166 #define TARGET_MANGLE_TYPE alpha_mangle_type
11167 #endif
11169 #undef TARGET_LEGITIMATE_ADDRESS_P
11170 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
11172 #undef TARGET_CONDITIONAL_REGISTER_USAGE
11173 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
11175 struct gcc_target targetm = TARGET_INITIALIZER;
11178 #include "gt-alpha.h"