/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "obstack.h"
#include "except.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "tree-gimple.h"
/* Specify which cpu to schedule for.  */

enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;

/* Specify bit size of immediate TLS offsets.  */

int alpha_tls_size = 32;

/* Strings decoded into the above options.  */

const char *alpha_cpu_string;      /* -mcpu= */
const char *alpha_tune_string;     /* -mtune= */
const char *alpha_tp_string;       /* -mtrap-precision=[p|f|i] */
const char *alpha_fprm_string;     /* -mfp-rounding-mode=[n|m|c|d] */
const char *alpha_fptm_string;     /* -mfp-trap-mode=[n|u|su|sui] */
const char *alpha_mlat_string;     /* -mmemory-latency= */
const char *alpha_tls_size_string; /* -mtls-size=[16|32|64] */

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */

struct alpha_compare alpha_compare;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */

static GTY(()) int alpha_sr_alias_set;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
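
/* Illustrative reading of the size table above (not from the original
   source): with the usual COSTS_N_INSNS (N) == (N) * 4, a DImode multiply
   is priced at 4 + 2 == 6 while a shift costs 4, so a synthesized multiply
   wins only when it needs a single shift or add; a two-insn sequence
   already costs 8 and loses to the mulq at 6.  */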
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif

#define REG_PV 27
#define REG_RA 26

/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Parse target option strings.  */

void
override_options (void)
{
  int i;
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
#define EV5_MASK (MASK_CPU_EV5)
#define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, EV5_MASK },
    { "21164",	PROCESSOR_EV5, EV5_MASK },
    { "ev56",	PROCESSOR_EV5, EV5_MASK|MASK_BWX },
    { "21164a",	PROCESSOR_EV5, EV5_MASK|MASK_BWX },
    { "pca56",	PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, EV6_MASK },
    { "21264",	PROCESSOR_EV6, EV6_MASK },
    { "ev67",	PROCESSOR_EV6, EV6_MASK|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, EV6_MASK|MASK_CIX },
    { 0, 0, 0 }
  };

  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning ("-f%s ignored for Unicos/Mk (not supported)",
	       (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
	warning ("-mieee not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SU;
	}
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
	warning ("-mieee-with-inexact not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SUI;
	}
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }

  if (alpha_tls_size_string)
    {
      if (strcmp (alpha_tls_size_string, "16") == 0)
	alpha_tls_size = 16;
      else if (strcmp (alpha_tls_size_string, "32") == 0)
	alpha_tls_size = 32;
      else if (strcmp (alpha_tls_size_string, "64") == 0)
	alpha_tls_size = 64;
      else
	error ("bad value %qs for -mtls-size switch", alpha_tls_size_string);
    }

  alpha_cpu
    = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
      : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);

  if (alpha_cpu_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_cpu = cpu_table [i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX
			       | MASK_CPU_EV5 | MASK_CPU_EV6);
	    target_flags |= cpu_table [i].flags;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; cpu_table [i].name; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_cpu = cpu_table [i].processor;
	    break;
	  }
      if (! cpu_table [i].name)
	error ("bad value %qs for -mcpu switch", alpha_tune_string);
    }

  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning ("trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && ! TARGET_CPU_EV6)
    {
      warning ("fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (TARGET_CPU_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning ("rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning ("trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning ("128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }

  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
	  {
	    warning ("L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_cpu]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_cpu][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning ("bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
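
  /* Illustrative (not from the original source): -mmemory-latency=L2 on an
     ev5 yields lat = cache_latency[PROCESSOR_EV5][1] == 12 cycles, while a
     bare number such as -mmemory-latency=40 is taken directly by strtol.  */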
  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;

  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
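
/* For example (illustrative, not from the original source):
   zap_mask (0xffffffff00000000) and zap_mask (0x00ff00ff) return 1,
   since every byte is 0x00 or 0xff; zap_mask (0x0123) returns 0
   because the bytes 0x01 and 0x23 are mixed.  */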
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  if (SYMBOL_REF_LOCAL_P (op))
    {
      if (alpha_tls_size > size)
	return 0;
    }
  else
    {
      if (size != 64)
	return 0;
    }

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL;
    default:
      abort ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc[REGNO (tmp)];
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* Implements CONST_OK_FOR_LETTER_P.  Return true if the value matches
   the range defined for C in [I-P].  */

bool
alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
{
  switch (c)
    {
    case 'I':
      /* An unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) value < 0x100;
    case 'J':
      /* The constant zero.  */
      return value == 0;
    case 'K':
      /* A signed 16 bit constant.  */
      return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
    case 'L':
      /* A shifted signed 16 bit constant appropriate for LDAH.  */
      return ((value & 0xffff) == 0
	      && ((value) >> 31 == -1 || value >> 31 == 0));
    case 'M':
      /* A constant that can be ANDed using a ZAP insn.  */
      return zap_mask (value);
    case 'N':
      /* A complemented unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
    case 'O':
      /* A negated unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (- value) < 0x100;
    case 'P':
      /* The constant 1, 2 or 3.  */
      return value == 1 || value == 2 || value == 3;

    default:
      return false;
    }
}
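
/* A few illustrative matches (not from the original source): 255
   satisfies 'I'; -32768 satisfies 'K'; 0x12340000 satisfies 'L' (LDAH
   loads it in one insn); 0x00ff00ff satisfies 'M'; -200 satisfies 'O'
   since its negation fits in 8 bits.  */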
/* Implements CONST_DOUBLE_OK_FOR_LETTER_P.  Return true if VALUE
   matches for C in [GH].  */

bool
alpha_const_double_ok_for_letter_p (rtx value, int c)
{
  switch (c)
    {
    case 'G':
      /* The floating point zero constant.  */
      return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
	      && value == CONST0_RTX (GET_MODE (value)));

    case 'H':
      /* A valid operand of a ZAP insn.  */
      return (GET_MODE (value) == VOIDmode
	      && zap_mask (CONST_DOUBLE_LOW (value))
	      && zap_mask (CONST_DOUBLE_HIGH (value)));

    default:
      return false;
    }
}
/* Implements EXTRA_CONSTRAINT.  Return true if VALUE matches for C.  */

bool
alpha_extra_constraint (rtx value, int c)
{
  switch (c)
    {
    case 'Q':
      return normal_memory_operand (value, VOIDmode);
    case 'R':
      return direct_call_operand (value, Pmode);
    case 'S':
      return (GET_CODE (value) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
    case 'T':
      return GET_CODE (value) == HIGH;
    case 'U':
      return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
    case 'W':
      return (GET_CODE (value) == CONST_VECTOR
	      && value == CONST0_RTX (GET_MODE (value)));
    default:
      return false;
    }
}
/* The set of scalar modes we support differs from the default
   check-what-c-supports version in that sometimes TFmode is available
   even when long double indicates only DFmode.  On unicosmk, we have
   the situation that HImode doesn't map to any C type, but of course
   we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && current_function_outgoing_args_size == 0
	  && current_function_pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
	{
	  int count = 1;

	  for (j = i + 1; j < n_labels; j++)
	    if (XEXP (XVECEXP (jump_table, 1, i), 0)
		== XEXP (XVECEXP (jump_table, 1, j), 0))
	      count++;

	  if (count > best_count)
	    best_count = count, best_label = XVECEXP (jump_table, 1, i);
	}
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return 0;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF   \
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST                 \
       && GET_CODE (XEXP (X, 0)) == PLUS     \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x)
      && (strict
	  ? STRICT_REG_OK_FOR_BASE_P (x)
	  : NONSTRICT_REG_OK_FOR_BASE_P (x)))
    return true;

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && GET_CODE (ofs) == CONST_INT)
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
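
/* Illustrative examples of addresses the function above accepts (not
   from the original source): (reg $16); (plus (reg $16) (const_int 32));
   and, for DImode only, (and (plus (reg $16) (const_int 5))
   (const_int -8)) as generated for ldq_u.  */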
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (rtx x, rtx scratch,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (!no_new_pseudos
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (!no_new_pseudos
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;

	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);
	}

      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (!no_new_pseudos)
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
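
/* A worked example of the split_addend arithmetic above (illustrative,
   not from the original source): for (plus (reg) (const_int 0x38000)),
   low = -0x8000 and high = 0x40000, so we emit "ldah tmp,4(reg)" and
   return (plus tmp -0x8000); 0x40000 - 0x8000 == 0x38000.  */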
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts the number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  return NULL_RTX;
}
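
/* For instance (illustrative, not from the original source): reloading
   the address (plus (reg $9) (const_int 0x12348000)) splits into
   high = 0x12350000 and low = -0x8000; the ldah output reload adds the
   high part and the mem keeps the 16-bit displacement -0x8000.  */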
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (optimize_size)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_cpu];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
		    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
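
/* Worked example (illustrative, not from the original source): the PLUS
   case above prices (plus (mult X 8) Y) as one insn plus the costs of X
   and Y, which models the single s8addq instruction; const48_operand
   accepts exactly the scale factors 4 and 8.  */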
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
	abort ();
    }
  else
    {
      base = XEXP (ref, 0);
    }

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  *paligned_mem
    = widen_memory_access (ref, SImode, (offset & ~3) - offset);

  if (WORDS_BIG_ENDIAN)
    *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
			      + (offset & 3) * 8));
  else
    *pbitnum = GEN_INT ((offset & 3) * 8);
}
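
/* Illustrative (not from the original source): for an HImode ref at
   (plus base 6), offset & ~3 is 4, so *paligned_mem is the SImode word
   at base+4 and, little-endian, *pbitnum is (6 & 3) * 8 == 16 -- the
   halfword sits 16 bits into the aligned longword.  */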
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

rtx
get_unaligned_address (rtx ref, int extra_offset)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
	abort ();
    }
  else
    {
      base = XEXP (ref, 0);
    }

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class class)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (class == FLOAT_REGS)
	return NO_REGS;
      if (class == ALL_REGS)
	return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (class == ALL_REGS ? GENERAL_REGS : class);

  return class;
}
/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */

enum reg_class
secondary_reload_class (enum reg_class class, enum machine_mode mode,
			rtx x, int in)
{
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
    {
      if (GET_CODE (x) == MEM
	  || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	  || (GET_CODE (x) == SUBREG
	      && (GET_CODE (SUBREG_REG (x)) == MEM
		  || (GET_CODE (SUBREG_REG (x)) == REG
		      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
	{
	  if (!in || !aligned_memory_operand (x, mode))
	    return GENERAL_REGS;
	}
    }

  if (class == FLOAT_REGS)
    {
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
	  && ! (memory_operand (x, mode) || x == const0_rtx))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx insn, rtx ref)
{
  rtx *base_ptr;

  if (GET_CODE (ref) != MEM)
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  if (INSN_P (insn))
    base_ptr = &PATTERN (insn);
  else
    base_ptr = &insn;
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (no_new_pseudos)
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && no_new_pseudos))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp && c < 0)
	      {
		new = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }

#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  new = c;
  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
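
/* Worked example of the ldah/lda path above (illustrative, not from the
   original source): for c = 0x12348765, low = 0x8765 sign-extended is
   -0x789b, so high becomes 0x1235 and we emit "ldah r,0x1235(zero)"
   followed by "lda r,-0x789b(r)"; 0x12350000 - 0x789b == 0x12348765.  */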
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do this in DImode.  */
  if (no_new_pseudos && mode == SImode
      && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
2002 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2003 fall back to a straight forward decomposition. We do this to avoid
2004 exponential run times encountered when looking for longer sequences
2005 with alpha_emit_set_const. */
2007 static rtx
2008 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2010 HOST_WIDE_INT d1, d2, d3, d4;
2012 /* Decompose the entire word */
2013 #if HOST_BITS_PER_WIDE_INT >= 64
2014 if (c2 != -(c1 < 0))
2015 abort ();
2016 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2017 c1 -= d1;
2018 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2019 c1 = (c1 - d2) >> 32;
2020 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2021 c1 -= d3;
2022 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2023 if (c1 != d4)
2024 abort ();
2025 #else
2026 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2027 c1 -= d1;
2028 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2029 if (c1 != d2)
2030 abort ();
2031 c2 += (d2 < 0);
2032 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2033 c2 -= d3;
2034 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2035 if (c2 != d4)
2036 abort ();
2037 #endif
2039 /* Construct the high word */
2040 if (d4)
2042 emit_move_insn (target, GEN_INT (d4));
2043 if (d3)
2044 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2046 else
2047 emit_move_insn (target, GEN_INT (d3));
2049 /* Shift it into place */
2050 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2052 /* Add in the low bits. */
2053 if (d2)
2054 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2055 if (d1)
2056 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2058 return target;
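/* Illustrative worked example, not part of the original source.
   For c = 0x123456789abcdef0 the decomposition above yields

     d1 = sext16 (0xdef0)     = -0x2110
     d2 = sext32 (0x9abd0000) = -0x65430000
     d3 = sext16 (0x5679)     =  0x5679
     d4 = sext32 (0x12340000) =  0x12340000

   and the moves emitted correspond to the five-insn sequence

     ldah $t,0x1234          t = 0x0000000012340000
     lda  $t,0x5679($t)      t = 0x0000000012345679
     sll  $t,32,$t           t = 0x1234567900000000
     ldah $t,-0x6543($t)     t = 0x123456789abd0000
     lda  $t,-0x2110($t)     t = 0x123456789abcdef0  */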
2061 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2062 the low 64 bits. */
2064 static void
2065 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2067 HOST_WIDE_INT i0, i1;
2069 if (GET_CODE (x) == CONST_VECTOR)
2070 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2073 if (GET_CODE (x) == CONST_INT)
2075 i0 = INTVAL (x);
2076 i1 = -(i0 < 0);
2078 else if (HOST_BITS_PER_WIDE_INT >= 64)
2080 i0 = CONST_DOUBLE_LOW (x);
2081 i1 = -(i0 < 0);
2083 else
2085 i0 = CONST_DOUBLE_LOW (x);
2086 i1 = CONST_DOUBLE_HIGH (x);
2089 *p0 = i0;
2090 *p1 = i1;
2093 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2094 are willing to load the value into a register via a move pattern.
2095 Normally this is all symbolic constants, integral constants that
2096 take three or fewer instructions, and floating-point zero. */
2098 bool
2099 alpha_legitimate_constant_p (rtx x)
2101 enum machine_mode mode = GET_MODE (x);
2102 HOST_WIDE_INT i0, i1;
2104 switch (GET_CODE (x))
2106 case CONST:
2107 case LABEL_REF:
2108 case SYMBOL_REF:
2109 case HIGH:
2110 return true;
2112 case CONST_DOUBLE:
2113 if (x == CONST0_RTX (mode))
2114 return true;
2115 if (FLOAT_MODE_P (mode))
2116 return false;
2117 goto do_integer;
2119 case CONST_VECTOR:
2120 if (x == CONST0_RTX (mode))
2121 return true;
2122 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2123 return false;
2124 if (GET_MODE_SIZE (mode) != 8)
2125 return false;
2126 goto do_integer;
2128 case CONST_INT:
2129 do_integer:
2130 if (TARGET_BUILD_CONSTANTS)
2131 return true;
2132 alpha_extract_integer (x, &i0, &i1);
2133 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2134 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2135 return false;
2137 default:
2138 return false;
2142 /* Operand 1 is known to be a constant, and should require more than one
2143 instruction to load. Emit that multi-part load. */
2145 bool
2146 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2148 HOST_WIDE_INT i0, i1;
2149 rtx temp = NULL_RTX;
2151 alpha_extract_integer (operands[1], &i0, &i1);
2153 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2154 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2156 if (!temp && TARGET_BUILD_CONSTANTS)
2157 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2159 if (temp)
2161 if (!rtx_equal_p (operands[0], temp))
2162 emit_move_insn (operands[0], temp);
2163 return true;
2166 return false;
2169 /* Expand a move instruction; return true if all work is done.
2170 We don't handle non-bwx subword loads here. */
2172 bool
2173 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2175 /* If the output is not a register, the input must be. */
2176 if (GET_CODE (operands[0]) == MEM
2177 && ! reg_or_0_operand (operands[1], mode))
2178 operands[1] = force_reg (mode, operands[1]);
2180 /* Allow legitimize_address to perform some simplifications. */
2181 if (mode == Pmode && symbolic_operand (operands[1], mode))
2183 rtx tmp;
2185 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2186 if (tmp)
2188 if (tmp == operands[0])
2189 return true;
2190 operands[1] = tmp;
2191 return false;
2195 /* Early out for non-constants and valid constants. */
2196 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2197 return false;
2199 /* Split large integers. */
2200 if (GET_CODE (operands[1]) == CONST_INT
2201 || GET_CODE (operands[1]) == CONST_DOUBLE
2202 || GET_CODE (operands[1]) == CONST_VECTOR)
2204 if (alpha_split_const_mov (mode, operands))
2205 return true;
2208 /* Otherwise we've nothing left but to drop the thing to memory. */
2209 operands[1] = force_const_mem (mode, operands[1]);
2210 if (reload_in_progress)
2212 emit_move_insn (operands[0], XEXP (operands[1], 0));
2213 operands[1] = copy_rtx (operands[1]);
2214 XEXP (operands[1], 0) = operands[0];
2216 else
2217 operands[1] = validize_mem (operands[1]);
2218 return false;
2221 /* Expand a non-bwx QImode or HImode move instruction;
2222 return true if all work is done. */
2224 bool
2225 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2227 /* If the output is not a register, the input must be. */
2228 if (GET_CODE (operands[0]) == MEM)
2229 operands[1] = force_reg (mode, operands[1]);
2231 /* Handle four memory cases, unaligned and aligned for either the input
2232 or the output. The only case where we can be called during reload is
2233 for aligned loads; all other cases require temporaries. */
2235 if (GET_CODE (operands[1]) == MEM
2236 || (GET_CODE (operands[1]) == SUBREG
2237 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2238 || (reload_in_progress && GET_CODE (operands[1]) == REG
2239 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2240 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2241 && GET_CODE (SUBREG_REG (operands[1])) == REG
2242 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2244 if (aligned_memory_operand (operands[1], mode))
2246 if (reload_in_progress)
2248 emit_insn ((mode == QImode
2249 ? gen_reload_inqi_help
2250 : gen_reload_inhi_help)
2251 (operands[0], operands[1],
2252 gen_rtx_REG (SImode, REGNO (operands[0]))));
2254 else
2256 rtx aligned_mem, bitnum;
2257 rtx scratch = gen_reg_rtx (SImode);
2258 rtx subtarget;
2259 bool copyout;
2261 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2263 subtarget = operands[0];
2264 if (GET_CODE (subtarget) == REG)
2265 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2266 else
2267 subtarget = gen_reg_rtx (DImode), copyout = true;
2269 emit_insn ((mode == QImode
2270 ? gen_aligned_loadqi
2271 : gen_aligned_loadhi)
2272 (subtarget, aligned_mem, bitnum, scratch));
2274 if (copyout)
2275 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2278 else
2280 /* Don't pass these as parameters since that makes the generated
2281 code depend on parameter evaluation order which will cause
2282 bootstrap failures. */
2284 rtx temp1, temp2, seq, subtarget;
2285 bool copyout;
2287 temp1 = gen_reg_rtx (DImode);
2288 temp2 = gen_reg_rtx (DImode);
2290 subtarget = operands[0];
2291 if (GET_CODE (subtarget) == REG)
2292 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2293 else
2294 subtarget = gen_reg_rtx (DImode), copyout = true;
2296 seq = ((mode == QImode
2297 ? gen_unaligned_loadqi
2298 : gen_unaligned_loadhi)
2299 (subtarget, get_unaligned_address (operands[1], 0),
2300 temp1, temp2));
2301 alpha_set_memflags (seq, operands[1]);
2302 emit_insn (seq);
2304 if (copyout)
2305 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2307 return true;
2310 if (GET_CODE (operands[0]) == MEM
2311 || (GET_CODE (operands[0]) == SUBREG
2312 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2313 || (reload_in_progress && GET_CODE (operands[0]) == REG
2314 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2315 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2316 && GET_CODE (SUBREG_REG (operands[0])) == REG
2317 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2319 if (aligned_memory_operand (operands[0], mode))
2321 rtx aligned_mem, bitnum;
2322 rtx temp1 = gen_reg_rtx (SImode);
2323 rtx temp2 = gen_reg_rtx (SImode);
2325 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2327 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2328 temp1, temp2));
2330 else
2332 rtx temp1 = gen_reg_rtx (DImode);
2333 rtx temp2 = gen_reg_rtx (DImode);
2334 rtx temp3 = gen_reg_rtx (DImode);
2335 rtx seq = ((mode == QImode
2336 ? gen_unaligned_storeqi
2337 : gen_unaligned_storehi)
2338 (get_unaligned_address (operands[0], 0),
2339 operands[1], temp1, temp2, temp3));
2341 alpha_set_memflags (seq, operands[0]);
2342 emit_insn (seq);
2344 return true;
2347 return false;
2350 /* Implement the movmisalign patterns. One of the operands is a memory
2351 that is not naturally aligned. Emit instructions to load it. */
2353 void
2354 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2356 /* Honor misaligned loads, for those we promised to do so. */
2357 if (MEM_P (operands[1]))
2359 rtx tmp;
2361 if (register_operand (operands[0], mode))
2362 tmp = operands[0];
2363 else
2364 tmp = gen_reg_rtx (mode);
2366 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2367 if (tmp != operands[0])
2368 emit_move_insn (operands[0], tmp);
2370 else if (MEM_P (operands[0]))
2372 if (!reg_or_0_operand (operands[1], mode))
2373 operands[1] = force_reg (mode, operands[1]);
2374 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2376 else
2377 gcc_unreachable ();
2380 /* Generate an unsigned DImode to FP conversion. This is the same code
2381 optabs would emit if we didn't have TFmode patterns.
2383 For SFmode, this is the only construction I've found that can pass
2384 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2385 intermediates will work, because you'll get intermediate rounding
2386 that ruins the end result. Some of this could be fixed by turning
2387 on round-to-positive-infinity, but that requires diddling the fpsr,
2388 which kills performance. I tried turning this around and converting
2389 to a negative number, so that I could turn on /m, but either I did
2390 it wrong or there's something else, because I wound up with the exact
2391 same single-bit error. There is a branch-less form of this same code:
2393 srl $16,1,$1
2394 and $16,1,$2
2395 cmplt $16,0,$3
2396 or $1,$2,$2
2397 cmovge $16,$16,$2
2398 itoft $3,$f10
2399 itoft $2,$f11
2400 cvtqs $f11,$f11
2401 adds $f11,$f11,$f0
2402 fcmoveq $f10,$f11,$f0
2404 I'm not using it because it's the same number of instructions as
2405 this branch-full form, and it has more serialized long latency
2406 instructions on the critical path.
2408 For DFmode, we can avoid rounding errors by breaking up the word
2409 into two pieces, converting them separately, and adding them back:
2411 LC0: .long 0,0x5f800000
2413 itoft $16,$f11
2414 lda $2,LC0
2415 cmplt $16,0,$1
2416 cpyse $f11,$f31,$f10
2417 cpyse $f31,$f11,$f11
2418 s4addq $1,$2,$1
2419 lds $f12,0($1)
2420 cvtqt $f10,$f10
2421 cvtqt $f11,$f11
2422 addt $f12,$f10,$f0
2423 addt $f0,$f11,$f0
2425 This doesn't seem to be a clear-cut win over the optabs form.
2426 It probably all depends on the distribution of numbers being
2427 converted -- in the optabs form, all but high-bit-set has a
2428 much lower minimum execution time. */
2430 void
2431 alpha_emit_floatuns (rtx operands[2])
2433 rtx neglab, donelab, i0, i1, f0, in, out;
2434 enum machine_mode mode;
2436 out = operands[0];
2437 in = force_reg (DImode, operands[1]);
2438 mode = GET_MODE (out);
2439 neglab = gen_label_rtx ();
2440 donelab = gen_label_rtx ();
2441 i0 = gen_reg_rtx (DImode);
2442 i1 = gen_reg_rtx (DImode);
2443 f0 = gen_reg_rtx (mode);
2445 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2447 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2448 emit_jump_insn (gen_jump (donelab));
2449 emit_barrier ();
2451 emit_label (neglab);
2453 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2454 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2455 emit_insn (gen_iordi3 (i0, i0, i1));
2456 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2457 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2459 emit_label (donelab);
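/* Host-side C model of the branch-full form emitted above
   (illustrative sketch, not part of the original source; the helper
   name is hypothetical):

     double
     floatuns_model (unsigned long long x)
     {
       long long h;
       double f;

       if ((long long) x >= 0)
         return (double) (long long) x;

       h = (long long) ((x >> 1) | (x & 1));
       f = (double) h;
       return f + f;
     }

   Folding the discarded low bit back into bit 0 keeps it visible as a
   sticky bit, so the single rounding in the final addition matches
   what a direct unsigned conversion would have produced.  */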
2462 /* Generate the comparison for a conditional branch. */
2464 rtx
2465 alpha_emit_conditional_branch (enum rtx_code code)
2467 enum rtx_code cmp_code, branch_code;
2468 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2469 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2470 rtx tem;
2472 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2474 if (! TARGET_HAS_XFLOATING_LIBS)
2475 abort ();
2477 /* X_floating library comparison functions return
2478 -1 unordered
2479 0 false
2480 1 true
2481 Convert the compare against the raw return value. */
2483 switch (code)
2485 case UNORDERED:
2486 cmp_code = EQ;
2487 code = LT;
2488 break;
2489 case ORDERED:
2490 cmp_code = EQ;
2491 code = GE;
2492 break;
2493 case NE:
2494 cmp_code = NE;
2495 code = NE;
2496 break;
2497 default:
2498 cmp_code = code;
2499 code = GT;
2500 break;
2503 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
2504 op1 = const0_rtx;
2505 alpha_compare.fp_p = 0;
2508 /* The general case: fold the comparison code to the types of compares
2509 that we have, choosing the branch as necessary. */
2510 switch (code)
2512 case EQ: case LE: case LT: case LEU: case LTU:
2513 case UNORDERED:
2514 /* We have these compares: */
2515 cmp_code = code, branch_code = NE;
2516 break;
2518 case NE:
2519 case ORDERED:
2520 /* These must be reversed. */
2521 cmp_code = reverse_condition (code), branch_code = EQ;
2522 break;
2524 case GE: case GT: case GEU: case GTU:
2525 /* For FP, we swap them, for INT, we reverse them. */
2526 if (alpha_compare.fp_p)
2528 cmp_code = swap_condition (code);
2529 branch_code = NE;
2530 tem = op0, op0 = op1, op1 = tem;
2532 else
2534 cmp_code = reverse_condition (code);
2535 branch_code = EQ;
2537 break;
2539 default:
2540 abort ();
2543 if (alpha_compare.fp_p)
2545 cmp_mode = DFmode;
2546 if (flag_unsafe_math_optimizations)
2548 /* When we are not as concerned about non-finite values, and we
2549 are comparing against zero, we can branch directly. */
2550 if (op1 == CONST0_RTX (DFmode))
2551 cmp_code = UNKNOWN, branch_code = code;
2552 else if (op0 == CONST0_RTX (DFmode))
2554 /* Undo the swap we probably did just above. */
2555 tem = op0, op0 = op1, op1 = tem;
2556 branch_code = swap_condition (cmp_code);
2557 cmp_code = UNKNOWN;
2560 else
2562 /* ??? We mark the branch mode to be CCmode to prevent the
2563 compare and branch from being combined, since the compare
2564 insn follows IEEE rules that the branch does not. */
2565 branch_mode = CCmode;
2568 else
2570 cmp_mode = DImode;
2572 /* The following optimizations are only for signed compares. */
2573 if (code != LEU && code != LTU && code != GEU && code != GTU)
2575 /* Whee. Compare and branch against 0 directly. */
2576 if (op1 == const0_rtx)
2577 cmp_code = UNKNOWN, branch_code = code;
2579 /* If the constant doesn't fit into an immediate, but can
2580 be generated by lda/ldah, we adjust the argument and
2581 compare against zero, so we can use beq/bne directly. */
2582 /* ??? Don't do this when comparing against symbols, otherwise
2583 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2584 be declared false out of hand (at least for non-weak). */
2585 else if (GET_CODE (op1) == CONST_INT
2586 && (code == EQ || code == NE)
2587 && !(symbolic_operand (op0, VOIDmode)
2588 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2590 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2592 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2593 && (CONST_OK_FOR_LETTER_P (n, 'K')
2594 || CONST_OK_FOR_LETTER_P (n, 'L')))
2596 cmp_code = PLUS, branch_code = code;
2597 op1 = GEN_INT (n);
2602 if (!reg_or_0_operand (op0, DImode))
2603 op0 = force_reg (DImode, op0);
2604 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2605 op1 = force_reg (DImode, op1);
2608 /* Emit an initial compare instruction, if necessary. */
2609 tem = op0;
2610 if (cmp_code != UNKNOWN)
2612 tem = gen_reg_rtx (cmp_mode);
2613 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2616 /* Zero the operands. */
2617 memset (&alpha_compare, 0, sizeof (alpha_compare));
2619 /* Return the branch comparison. */
2620 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
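/* Example of the lda/ldah adjustment above (illustrative only;
   register numbers arbitrary).  For x == 0x12340000 the constant does
   not fit the 8-bit 'I' literal constraint, but its negation is a
   single ldah, so the PLUS rewrite produces

     ldah $t,-0x1234($16)    t = x - 0x12340000
     beq  $t,Ldest           taken iff x == 0x12340000  */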
2623 /* Certain simplifications can be done to make invalid setcc operations
2624 valid. Return the final comparison, or NULL if we can't work. */
2626 rtx
2627 alpha_emit_setcc (enum rtx_code code)
2629 enum rtx_code cmp_code;
2630 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2631 int fp_p = alpha_compare.fp_p;
2632 rtx tmp;
2634 /* Zero the operands. */
2635 memset (&alpha_compare, 0, sizeof (alpha_compare));
2637 if (fp_p && GET_MODE (op0) == TFmode)
2639 if (! TARGET_HAS_XFLOATING_LIBS)
2640 abort ();
2642 /* X_floating library comparison functions return
2643 -1 unordered
2644 0 false
2645 1 true
2646 Convert the compare against the raw return value. */
2648 if (code == UNORDERED || code == ORDERED)
2649 cmp_code = EQ;
2650 else
2651 cmp_code = code;
2653 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
2654 op1 = const0_rtx;
2655 fp_p = 0;
2657 if (code == UNORDERED)
2658 code = LT;
2659 else if (code == ORDERED)
2660 code = GE;
2661 else
2662 code = GT;
2665 if (fp_p && !TARGET_FIX)
2666 return NULL_RTX;
2668 /* The general case: fold the comparison code to the types of compares
2669 that we have, choosing the branch as necessary. */
2671 cmp_code = UNKNOWN;
2672 switch (code)
2674 case EQ: case LE: case LT: case LEU: case LTU:
2675 case UNORDERED:
2676 /* We have these compares. */
2677 if (fp_p)
2678 cmp_code = code, code = NE;
2679 break;
2681 case NE:
2682 if (!fp_p && op1 == const0_rtx)
2683 break;
2684 /* FALLTHRU */
2686 case ORDERED:
2687 cmp_code = reverse_condition (code);
2688 code = EQ;
2689 break;
2691 case GE: case GT: case GEU: case GTU:
2692 /* These normally need swapping, but for integer zero we have
2693 special patterns that recognize swapped operands. */
2694 if (!fp_p && op1 == const0_rtx)
2695 break;
2696 code = swap_condition (code);
2697 if (fp_p)
2698 cmp_code = code, code = NE;
2699 tmp = op0, op0 = op1, op1 = tmp;
2700 break;
2702 default:
2703 abort ();
2706 if (!fp_p)
2708 if (!register_operand (op0, DImode))
2709 op0 = force_reg (DImode, op0);
2710 if (!reg_or_8bit_operand (op1, DImode))
2711 op1 = force_reg (DImode, op1);
2714 /* Emit an initial compare instruction, if necessary. */
2715 if (cmp_code != UNKNOWN)
2717 enum machine_mode mode = fp_p ? DFmode : DImode;
2719 tmp = gen_reg_rtx (mode);
2720 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2721 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2723 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2724 op1 = const0_rtx;
2727 /* Return the setcc comparison. */
2728 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2732 /* Rewrite a comparison against zero CMP of the form
2733 (CODE (cc0) (const_int 0)) so it can be written validly in
2734 a conditional move (if_then_else CMP ...).
2735 If both of the operands that set cc0 are nonzero we must emit
2736 an insn to perform the compare (it can't be done within
2737 the conditional move). */
2739 rtx
2740 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2742 enum rtx_code code = GET_CODE (cmp);
2743 enum rtx_code cmov_code = NE;
2744 rtx op0 = alpha_compare.op0;
2745 rtx op1 = alpha_compare.op1;
2746 int fp_p = alpha_compare.fp_p;
2747 enum machine_mode cmp_mode
2748 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2749 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2750 enum machine_mode cmov_mode = VOIDmode;
2751 int local_fast_math = flag_unsafe_math_optimizations;
2752 rtx tem;
2754 /* Zero the operands. */
2755 memset (&alpha_compare, 0, sizeof (alpha_compare));
2757 if (fp_p != FLOAT_MODE_P (mode))
2759 enum rtx_code cmp_code;
2761 if (! TARGET_FIX)
2762 return 0;
2764 /* If we have fp<->int register move instructions, do a cmov by
2765 performing the comparison in fp registers, and move the
2766 zero/nonzero value to integer registers, where we can then
2767 use a normal cmov, or vice-versa. */
2769 switch (code)
2771 case EQ: case LE: case LT: case LEU: case LTU:
2772 /* We have these compares. */
2773 cmp_code = code, code = NE;
2774 break;
2776 case NE:
2777 /* This must be reversed. */
2778 cmp_code = EQ, code = EQ;
2779 break;
2781 case GE: case GT: case GEU: case GTU:
2782 /* These normally need swapping, but for integer zero we have
2783 special patterns that recognize swapped operands. */
2784 if (!fp_p && op1 == const0_rtx)
2785 cmp_code = code, code = NE;
2786 else
2788 cmp_code = swap_condition (code);
2789 code = NE;
2790 tem = op0, op0 = op1, op1 = tem;
2792 break;
2794 default:
2795 abort ();
2798 tem = gen_reg_rtx (cmp_op_mode);
2799 emit_insn (gen_rtx_SET (VOIDmode, tem,
2800 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2801 op0, op1)));
2803 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2804 op0 = gen_lowpart (cmp_op_mode, tem);
2805 op1 = CONST0_RTX (cmp_op_mode);
2806 fp_p = !fp_p;
2807 local_fast_math = 1;
2810 /* We may be able to use a conditional move directly.
2811 This avoids emitting spurious compares. */
2812 if (signed_comparison_operator (cmp, VOIDmode)
2813 && (!fp_p || local_fast_math)
2814 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2815 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2817 /* We can't put the comparison inside the conditional move;
2818 emit a compare instruction and put that inside the
2819 conditional move. Make sure we emit only comparisons we have;
2820 swap or reverse as necessary. */
2822 if (no_new_pseudos)
2823 return NULL_RTX;
2825 switch (code)
2827 case EQ: case LE: case LT: case LEU: case LTU:
2828 /* We have these compares: */
2829 break;
2831 case NE:
2832 /* This must be reversed. */
2833 code = reverse_condition (code);
2834 cmov_code = EQ;
2835 break;
2837 case GE: case GT: case GEU: case GTU:
2838 /* These must be swapped. */
2839 if (op1 != CONST0_RTX (cmp_mode))
2841 code = swap_condition (code);
2842 tem = op0, op0 = op1, op1 = tem;
2844 break;
2846 default:
2847 abort ();
2850 if (!fp_p)
2852 if (!reg_or_0_operand (op0, DImode))
2853 op0 = force_reg (DImode, op0);
2854 if (!reg_or_8bit_operand (op1, DImode))
2855 op1 = force_reg (DImode, op1);
2858 /* ??? We mark the branch mode to be CCmode to prevent the compare
2859 and cmov from being combined, since the compare insn follows IEEE
2860 rules that the cmov does not. */
2861 if (fp_p && !local_fast_math)
2862 cmov_mode = CCmode;
2864 tem = gen_reg_rtx (cmp_op_mode);
2865 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2866 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
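/* Sketch of the TARGET_FIX cross-domain case above (illustrative
   only; register numbers arbitrary).  For r = (f < g) ? a : b with
   f, g in FP registers and r, a, b in integer registers, the
   comparison stays in the FP domain and only the zero/nonzero result
   crosses over:

     mov    $17,$0           r = b
     cmptlt $f1,$f2,$f10     $f10 = 2.0 if f < g, else 0.0
     ftoit  $f10,$1          raw bits to an integer register
     cmovne $1,$16,$0        r = a iff the compare was true

   The ftoit is not emitted here directly; gen_lowpart just exposes
   the bits and reload picks a fix-pattern move.  */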
2869 /* Simplify a conditional move of two constants into a setcc with
2870 arithmetic. This is done with a splitter since combine would
2871 just undo the work if done during code generation. It also catches
2872 cases we wouldn't have before cse. */
2874 int
2875 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2876 rtx t_rtx, rtx f_rtx)
2878 HOST_WIDE_INT t, f, diff;
2879 enum machine_mode mode;
2880 rtx target, subtarget, tmp;
2882 mode = GET_MODE (dest);
2883 t = INTVAL (t_rtx);
2884 f = INTVAL (f_rtx);
2885 diff = t - f;
2887 if (((code == NE || code == EQ) && diff < 0)
2888 || (code == GE || code == GT))
2890 code = reverse_condition (code);
2891 diff = t, t = f, f = diff;
2892 diff = t - f;
2895 subtarget = target = dest;
2896 if (mode != DImode)
2898 target = gen_lowpart (DImode, dest);
2899 if (! no_new_pseudos)
2900 subtarget = gen_reg_rtx (DImode);
2901 else
2902 subtarget = target;
2904 /* Below, we must be careful to use copy_rtx on target and subtarget
2905 in intermediate insns, as they may be a subreg rtx, which may not
2906 be shared. */
2908 if (f == 0 && exact_log2 (diff) > 0
2909 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2910 viable over a longer latency cmove. On EV5, the E0 slot is a
2911 scarce resource, and on EV4 shift has the same latency as a cmove. */
2912 && (diff <= 8 || alpha_cpu == PROCESSOR_EV6))
2914 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2915 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2917 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2918 GEN_INT (exact_log2 (t)));
2919 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2921 else if (f == 0 && t == -1)
2923 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2924 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2926 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2928 else if (diff == 1 || diff == 4 || diff == 8)
2930 rtx add_op;
2932 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2933 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2935 if (diff == 1)
2936 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2937 else
2939 add_op = GEN_INT (f);
2940 if (sext_add_operand (add_op, mode))
2942 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2943 GEN_INT (diff));
2944 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2945 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2947 else
2948 return 0;
2951 else
2952 return 0;
2954 return 1;
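/* Worked examples of the splits above (illustrative only):

     dest = cond ?  8 : 0    ->  t = (cond != 0);  dest = t << 3
     dest = cond ? -1 : 0    ->  t = (cond != 0);  dest = -t
     dest = cond ?  7 : 3    ->  t = (cond != 0);  dest = t * 4 + 3

   the last becoming an s4addq because 3 satisfies sext_add_operand.  */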
2957 /* Look up the X_floating library function name for the
2958 given operation. */
2960 struct xfloating_op GTY(())
2962 const enum rtx_code code;
2963 const char *const GTY((skip)) osf_func;
2964 const char *const GTY((skip)) vms_func;
2965 rtx libcall;
2968 static GTY(()) struct xfloating_op xfloating_ops[] =
2970 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2971 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2972 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2973 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2974 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2975 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2976 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2977 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2978 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2979 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2980 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2981 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2982 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2983 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2984 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2987 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2989 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2990 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2993 static rtx
2994 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2996 struct xfloating_op *ops = xfloating_ops;
2997 long n = ARRAY_SIZE (xfloating_ops);
2998 long i;
3000 /* How irritating. Nothing to key off for the main table. */
3001 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
3003 ops = vax_cvt_ops;
3004 n = ARRAY_SIZE (vax_cvt_ops);
3007 for (i = 0; i < n; ++i, ++ops)
3008 if (ops->code == code)
3010 rtx func = ops->libcall;
3011 if (!func)
3013 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
3014 ? ops->vms_func : ops->osf_func);
3015 ops->libcall = func;
3017 return func;
3020 abort ();
3023 /* Most X_floating operations take the rounding mode as an argument.
3024 Compute that here. */
3026 static int
3027 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3028 enum alpha_fp_rounding_mode round)
3030 int mode;
3032 switch (round)
3034 case ALPHA_FPRM_NORM:
3035 mode = 2;
3036 break;
3037 case ALPHA_FPRM_MINF:
3038 mode = 1;
3039 break;
3040 case ALPHA_FPRM_CHOP:
3041 mode = 0;
3042 break;
3043 case ALPHA_FPRM_DYN:
3044 mode = 4;
3045 break;
3046 default:
3047 abort ();
3049 /* XXX For reference, round to +inf is mode = 3. */
3052 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3053 mode |= 0x10000;
3055 return mode;
3058 /* Emit an X_floating library function call.
3060 Note that these functions do not follow normal calling conventions:
3061 TFmode arguments are passed in two integer registers (as opposed to
3062 indirect); TFmode return values appear in R16+R17.
3064 FUNC is the function to call.
3065 TARGET is where the output belongs.
3066 OPERANDS are the inputs.
3067 NOPERANDS is the count of inputs.
3068 EQUIV is the expression equivalent for the function.
3071 static void
3072 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3073 int noperands, rtx equiv)
3075 rtx usage = NULL_RTX, tmp, reg;
3076 int regno = 16, i;
3078 start_sequence ();
3080 for (i = 0; i < noperands; ++i)
3082 switch (GET_MODE (operands[i]))
3084 case TFmode:
3085 reg = gen_rtx_REG (TFmode, regno);
3086 regno += 2;
3087 break;
3089 case DFmode:
3090 reg = gen_rtx_REG (DFmode, regno + 32);
3091 regno += 1;
3092 break;
3094 case VOIDmode:
3095 if (GET_CODE (operands[i]) != CONST_INT)
3096 abort ();
3097 /* FALLTHRU */
3098 case DImode:
3099 reg = gen_rtx_REG (DImode, regno);
3100 regno += 1;
3101 break;
3103 default:
3104 abort ();
3107 emit_move_insn (reg, operands[i]);
3108 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3111 switch (GET_MODE (target))
3113 case TFmode:
3114 reg = gen_rtx_REG (TFmode, 16);
3115 break;
3116 case DFmode:
3117 reg = gen_rtx_REG (DFmode, 32);
3118 break;
3119 case DImode:
3120 reg = gen_rtx_REG (DImode, 0);
3121 break;
3122 default:
3123 abort ();
3126 tmp = gen_rtx_MEM (QImode, func);
3127 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3128 const0_rtx, const0_rtx));
3129 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3130 CONST_OR_PURE_CALL_P (tmp) = 1;
3132 tmp = get_insns ();
3133 end_sequence ();
3135 emit_libcall_block (tmp, target, reg, equiv);
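/* Example of the nonstandard convention above (derived from the code,
   shown for a three-operand arithmetic call such as _OtsAddX):

     $16:$17   TFmode operand A
     $18:$19   TFmode operand B
     $20       rounding-mode argument (a DImode constant)

   with the TFmode result returned in $16:$17, which the
   emit_libcall_block above then copies into TARGET.  */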
3138 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3140 void
3141 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3143 rtx func;
3144 int mode;
3145 rtx out_operands[3];
3147 func = alpha_lookup_xfloating_lib_func (code);
3148 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3150 out_operands[0] = operands[1];
3151 out_operands[1] = operands[2];
3152 out_operands[2] = GEN_INT (mode);
3153 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3154 gen_rtx_fmt_ee (code, TFmode, operands[1],
3155 operands[2]));
3158 /* Emit an X_floating library function call for a comparison. */
3160 static rtx
3161 alpha_emit_xfloating_compare (enum rtx_code code, rtx op0, rtx op1)
3163 rtx func;
3164 rtx out, operands[2];
3166 func = alpha_lookup_xfloating_lib_func (code);
3168 operands[0] = op0;
3169 operands[1] = op1;
3170 out = gen_reg_rtx (DImode);
3172 /* ??? Strange mode for equiv because what's actually returned
3173 is -1,0,1, not a proper boolean value. */
3174 alpha_emit_xfloating_libcall (func, out, operands, 2,
3175 gen_rtx_fmt_ee (code, CCmode, op0, op1));
3177 return out;
3180 /* Emit an X_floating library function call for a conversion. */
3182 void
3183 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3185 int noperands = 1, mode;
3186 rtx out_operands[2];
3187 rtx func;
3188 enum rtx_code code = orig_code;
3190 if (code == UNSIGNED_FIX)
3191 code = FIX;
3193 func = alpha_lookup_xfloating_lib_func (code);
3195 out_operands[0] = operands[1];
3197 switch (code)
3199 case FIX:
3200 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3201 out_operands[1] = GEN_INT (mode);
3202 noperands = 2;
3203 break;
3204 case FLOAT_TRUNCATE:
3205 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3206 out_operands[1] = GEN_INT (mode);
3207 noperands = 2;
3208 break;
3209 default:
3210 break;
3213 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3214 gen_rtx_fmt_e (orig_code,
3215 GET_MODE (operands[0]),
3216 operands[1]));
3219 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3220 OP[0] into OP[0,1]. Naturally, output operand ordering is
3221 little-endian. */
3223 void
3224 alpha_split_tfmode_pair (rtx operands[4])
3226 if (GET_CODE (operands[1]) == REG)
3228 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3229 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3231 else if (GET_CODE (operands[1]) == MEM)
3233 operands[3] = adjust_address (operands[1], DImode, 8);
3234 operands[2] = adjust_address (operands[1], DImode, 0);
3236 else if (operands[1] == CONST0_RTX (TFmode))
3237 operands[2] = operands[3] = const0_rtx;
3238 else
3239 abort ();
3241 if (GET_CODE (operands[0]) == REG)
3243 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3244 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3246 else if (GET_CODE (operands[0]) == MEM)
3248 operands[1] = adjust_address (operands[0], DImode, 8);
3249 operands[0] = adjust_address (operands[0], DImode, 0);
3251 else
3252 abort ();
3255 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3256 op2 is a register containing the sign bit, operation is the
3257 logical operation to be performed. */
3259 void
3260 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3262 rtx high_bit = operands[2];
3263 rtx scratch;
3264 int move;
3266 alpha_split_tfmode_pair (operands);
3268 /* Detect three flavors of operand overlap. */
3269 move = 1;
3270 if (rtx_equal_p (operands[0], operands[2]))
3271 move = 0;
3272 else if (rtx_equal_p (operands[1], operands[2]))
3274 if (rtx_equal_p (operands[0], high_bit))
3275 move = 2;
3276 else
3277 move = -1;
3280 if (move < 0)
3281 emit_move_insn (operands[0], operands[2]);
3283 /* ??? If the destination overlaps both source tf and high_bit, then
3284 assume source tf is dead in its entirety and use the other half
3285 for a scratch register. Otherwise "scratch" is just the proper
3286 destination register. */
3287 scratch = operands[move < 2 ? 1 : 3];
3289 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3291 if (move > 0)
3293 emit_move_insn (operands[0], operands[2]);
3294 if (move > 1)
3295 emit_move_insn (operands[1], scratch);
3299 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3300 unaligned data:
3302 unsigned: signed:
3303 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3304 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3305 lda r3,X(r11) lda r3,X+2(r11)
3306 extwl r1,r3,r1 extql r1,r3,r1
3307 extwh r2,r3,r2 extqh r2,r3,r2
3308 or r1,r2,r1 or r1,r2,r1
3309 sra r1,48,r1
3311 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3312 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3313 lda r3,X(r11) lda r3,X(r11)
3314 extll r1,r3,r1 extll r1,r3,r1
3315 extlh r2,r3,r2 extlh r2,r3,r2
3316 or r1,r2,r1 addl r1,r2,r1
3318 quad: ldq_u r1,X(r11)
3319 ldq_u r2,X+7(r11)
3320 lda r3,X(r11)
3321 extql r1,r3,r1
3322 extqh r2,r3,r2
3323 or r1,r2,r1
3326 void
3327 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3328 HOST_WIDE_INT ofs, int sign)
3330 rtx meml, memh, addr, extl, exth, tmp, mema;
3331 enum machine_mode mode;
3333 if (TARGET_BWX && size == 2)
3335 meml = adjust_address (mem, QImode, ofs);
3336 memh = adjust_address (mem, QImode, ofs+1);
3337 if (BYTES_BIG_ENDIAN)
3338 tmp = meml, meml = memh, memh = tmp;
3339 extl = gen_reg_rtx (DImode);
3340 exth = gen_reg_rtx (DImode);
3341 emit_insn (gen_zero_extendqidi2 (extl, meml));
3342 emit_insn (gen_zero_extendqidi2 (exth, memh));
3343 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3344 NULL, 1, OPTAB_LIB_WIDEN);
3345 addr = expand_simple_binop (DImode, IOR, extl, exth,
3346 NULL, 1, OPTAB_LIB_WIDEN);
3348 if (sign && GET_MODE (tgt) != HImode)
3350 addr = gen_lowpart (HImode, addr);
3351 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3353 else
3355 if (GET_MODE (tgt) != DImode)
3356 addr = gen_lowpart (GET_MODE (tgt), addr);
3357 emit_move_insn (tgt, addr);
3359 return;
3362 meml = gen_reg_rtx (DImode);
3363 memh = gen_reg_rtx (DImode);
3364 addr = gen_reg_rtx (DImode);
3365 extl = gen_reg_rtx (DImode);
3366 exth = gen_reg_rtx (DImode);
3368 mema = XEXP (mem, 0);
3369 if (GET_CODE (mema) == LO_SUM)
3370 mema = force_reg (Pmode, mema);
3372 /* AND addresses cannot be in any alias set, since they may implicitly
3373 alias surrounding code. Ideally we'd have some alias set that
3374 covered all types except those with alignment 8 or higher. */
3376 tmp = change_address (mem, DImode,
3377 gen_rtx_AND (DImode,
3378 plus_constant (mema, ofs),
3379 GEN_INT (-8)));
3380 set_mem_alias_set (tmp, 0);
3381 emit_move_insn (meml, tmp);
3383 tmp = change_address (mem, DImode,
3384 gen_rtx_AND (DImode,
3385 plus_constant (mema, ofs + size - 1),
3386 GEN_INT (-8)));
3387 set_mem_alias_set (tmp, 0);
3388 emit_move_insn (memh, tmp);
3390 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3392 emit_move_insn (addr, plus_constant (mema, -1));
3394 emit_insn (gen_extqh_be (extl, meml, addr));
3395 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3397 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3398 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3399 addr, 1, OPTAB_WIDEN);
3401 else if (sign && size == 2)
3403 emit_move_insn (addr, plus_constant (mema, ofs+2));
3405 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3406 emit_insn (gen_extqh_le (exth, memh, addr));
3408 /* We must use tgt here for the target. The Alpha-VMS port fails if we use
3409 addr for the target, because addr is marked as a pointer and combine
3410 knows that pointers are always sign-extended 32-bit values. */
3411 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3412 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3413 addr, 1, OPTAB_WIDEN);
3415 else
3417 if (WORDS_BIG_ENDIAN)
3419 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3420 switch ((int) size)
3422 case 2:
3423 emit_insn (gen_extwh_be (extl, meml, addr));
3424 mode = HImode;
3425 break;
3427 case 4:
3428 emit_insn (gen_extlh_be (extl, meml, addr));
3429 mode = SImode;
3430 break;
3432 case 8:
3433 emit_insn (gen_extqh_be (extl, meml, addr));
3434 mode = DImode;
3435 break;
3437 default:
3438 abort ();
3440 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3442 else
3444 emit_move_insn (addr, plus_constant (mema, ofs));
3445 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3446 switch ((int) size)
3448 case 2:
3449 emit_insn (gen_extwh_le (exth, memh, addr));
3450 mode = HImode;
3451 break;
3453 case 4:
3454 emit_insn (gen_extlh_le (exth, memh, addr));
3455 mode = SImode;
3456 break;
3458 case 8:
3459 emit_insn (gen_extqh_le (exth, memh, addr));
3460 mode = DImode;
3461 break;
3463 default:
3464 abort ();
3468 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3469 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3470 sign, OPTAB_WIDEN);
3473 if (addr != tgt)
3474 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
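/* Host-side model of the little-endian quadword path above
   (illustrative sketch, not part of the original source; the function
   name is hypothetical):

     unsigned long long
     unaligned_ldq_model (const unsigned char *p)
     {
       unsigned long long a = (unsigned long long) p;
       const unsigned long long *lo
         = (const unsigned long long *) (a & ~7ULL);
       const unsigned long long *hi
         = (const unsigned long long *) ((a + 7) & ~7ULL);
       int shift = (int) (a & 7);
       unsigned long long extl = *lo >> (8 * shift);
       unsigned long long exth = shift ? *hi << (64 - 8 * shift) : 0;
       return extl | exth;
     }

   extl models extql and exth models extqh.  The ?: guard stands in
   for the aligned edge case: a shift count of 64 is undefined in C,
   while in hardware an aligned extqh simply reproduces *lo, so either
   way the OR yields the right value.  */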
3477 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3479 void
3480 alpha_expand_unaligned_store (rtx dst, rtx src,
3481 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3483 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3485 if (TARGET_BWX && size == 2)
3487 if (src != const0_rtx)
3489 dstl = gen_lowpart (QImode, src);
3490 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3491 NULL, 1, OPTAB_LIB_WIDEN);
3492 dsth = gen_lowpart (QImode, dsth);
3494 else
3495 dstl = dsth = const0_rtx;
3497 meml = adjust_address (dst, QImode, ofs);
3498 memh = adjust_address (dst, QImode, ofs+1);
3499 if (BYTES_BIG_ENDIAN)
3500 addr = meml, meml = memh, memh = addr;
3502 emit_move_insn (meml, dstl);
3503 emit_move_insn (memh, dsth);
3504 return;
3507 dstl = gen_reg_rtx (DImode);
3508 dsth = gen_reg_rtx (DImode);
3509 insl = gen_reg_rtx (DImode);
3510 insh = gen_reg_rtx (DImode);
3512 dsta = XEXP (dst, 0);
3513 if (GET_CODE (dsta) == LO_SUM)
3514 dsta = force_reg (Pmode, dsta);
3516 /* AND addresses cannot be in any alias set, since they may implicitly
3517 alias surrounding code. Ideally we'd have some alias set that
3518 covered all types except those with alignment 8 or higher. */
3520 meml = change_address (dst, DImode,
3521 gen_rtx_AND (DImode,
3522 plus_constant (dsta, ofs),
3523 GEN_INT (-8)));
3524 set_mem_alias_set (meml, 0);
3526 memh = change_address (dst, DImode,
3527 gen_rtx_AND (DImode,
3528 plus_constant (dsta, ofs + size - 1),
3529 GEN_INT (-8)));
3530 set_mem_alias_set (memh, 0);
3532 emit_move_insn (dsth, memh);
3533 emit_move_insn (dstl, meml);
3534 if (WORDS_BIG_ENDIAN)
3536 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3538 if (src != const0_rtx)
3540 switch ((int) size)
3542 case 2:
3543 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3544 break;
3545 case 4:
3546 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3547 break;
3548 case 8:
3549 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3550 break;
3552 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3553 GEN_INT (size*8), addr));
3556 switch ((int) size)
3558 case 2:
3559 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3560 break;
3561 case 4:
3563 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3564 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3565 break;
3567 case 8:
3568 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3569 break;
3572 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3574 else
3576 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3578 if (src != CONST0_RTX (GET_MODE (src)))
3580 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3581 GEN_INT (size*8), addr));
3583 switch ((int) size)
3585 case 2:
3586 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3587 break;
3588 case 4:
3589 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3590 break;
3591 case 8:
3592 emit_insn (gen_insql_le (insl, src, addr));
3593 break;
3597 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3599 switch ((int) size)
3601 case 2:
3602 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3603 break;
3604 case 4:
3606 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3607 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3608 break;
3610 case 8:
3611 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3612 break;
3616 if (src != CONST0_RTX (GET_MODE (src)))
3618 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3619 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3622 if (WORDS_BIG_ENDIAN)
3624 emit_move_insn (meml, dstl);
3625 emit_move_insn (memh, dsth);
3627 else
3629 /* Must store high before low for degenerate case of aligned. */
3630 emit_move_insn (memh, dsth);
3631 emit_move_insn (meml, dstl);
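/* The store path above is the dual read-modify-write, shown
   little-endian (illustrative sketch, not part of the original
   source; SHIFT is 8 * (address & 7) and MASK is 0xffff, 0xffffffff
   or ~0 for size 2, 4, 8):

     dstl = low covering quadword;  dsth = high covering quadword
     insl = src << SHIFT                          (inswl/insll/insql)
     insh = the high spill of src                 (insxh)
     dstl &= ~(MASK << SHIFT)                     (mskxl)
     dsth &= the high spill of ~MASK              (mskxh)
     dstl |= insl;  dsth |= insh
     store dsth, then dstl

   Storing high before low matters in the degenerate aligned case,
   where both covering quadwords are the same memory word.  */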
3635 /* The block move code tries to maximize speed by separating loads and
3636 stores at the expense of register pressure: we load all of the data
3637 before we store it back out. There are two secondary effects worth
3638 mentioning: this speeds copying to/from both aligned and unaligned
3639 buffers, and it makes the code significantly easier to write. */
3641 #define MAX_MOVE_WORDS 8
3643 /* Load an integral number of consecutive unaligned quadwords. */
3645 static void
3646 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3647 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3649 rtx const im8 = GEN_INT (-8);
3650 rtx const i64 = GEN_INT (64);
3651 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3652 rtx sreg, areg, tmp, smema;
3653 HOST_WIDE_INT i;
3655 smema = XEXP (smem, 0);
3656 if (GET_CODE (smema) == LO_SUM)
3657 smema = force_reg (Pmode, smema);
3659 /* Generate all the tmp registers we need. */
3660 for (i = 0; i < words; ++i)
3662 data_regs[i] = out_regs[i];
3663 ext_tmps[i] = gen_reg_rtx (DImode);
3665 data_regs[words] = gen_reg_rtx (DImode);
3667 if (ofs != 0)
3668 smem = adjust_address (smem, GET_MODE (smem), ofs);
3670 /* Load up all of the source data. */
3671 for (i = 0; i < words; ++i)
3673 tmp = change_address (smem, DImode,
3674 gen_rtx_AND (DImode,
3675 plus_constant (smema, 8*i),
3676 im8));
3677 set_mem_alias_set (tmp, 0);
3678 emit_move_insn (data_regs[i], tmp);
3681 tmp = change_address (smem, DImode,
3682 gen_rtx_AND (DImode,
3683 plus_constant (smema, 8*words - 1),
3684 im8));
3685 set_mem_alias_set (tmp, 0);
3686 emit_move_insn (data_regs[words], tmp);
3688 /* Extract the half-word fragments. Unfortunately DEC decided to make
3689 extxh with offset zero a noop instead of zeroing the register, so
3690 we must take care of that edge condition ourselves with cmov. */
3692 sreg = copy_addr_to_reg (smema);
3693 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3694 1, OPTAB_WIDEN);
3695 if (WORDS_BIG_ENDIAN)
3696 emit_move_insn (sreg, plus_constant (sreg, 7));
3697 for (i = 0; i < words; ++i)
3699 if (WORDS_BIG_ENDIAN)
3701 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3702 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3704 else
3706 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3707 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3709 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3710 gen_rtx_IF_THEN_ELSE (DImode,
3711 gen_rtx_EQ (DImode, areg,
3712 const0_rtx),
3713 const0_rtx, ext_tmps[i])));
3716 /* Merge the half-words into whole words. */
3717 for (i = 0; i < words; ++i)
3719 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3720 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3724 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3725 may be NULL to store zeros. */
3727 static void
3728 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3729 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3731 rtx const im8 = GEN_INT (-8);
3732 rtx const i64 = GEN_INT (64);
3733 rtx ins_tmps[MAX_MOVE_WORDS];
3734 rtx st_tmp_1, st_tmp_2, dreg;
3735 rtx st_addr_1, st_addr_2, dmema;
3736 HOST_WIDE_INT i;
3738 dmema = XEXP (dmem, 0);
3739 if (GET_CODE (dmema) == LO_SUM)
3740 dmema = force_reg (Pmode, dmema);
3742 /* Generate all the tmp registers we need. */
3743 if (data_regs != NULL)
3744 for (i = 0; i < words; ++i)
3745 ins_tmps[i] = gen_reg_rtx(DImode);
3746 st_tmp_1 = gen_reg_rtx(DImode);
3747 st_tmp_2 = gen_reg_rtx(DImode);
3749 if (ofs != 0)
3750 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3752 st_addr_2 = change_address (dmem, DImode,
3753 gen_rtx_AND (DImode,
3754 plus_constant (dmema, words*8 - 1),
3755 im8));
3756 set_mem_alias_set (st_addr_2, 0);
3758 st_addr_1 = change_address (dmem, DImode,
3759 gen_rtx_AND (DImode, dmema, im8));
3760 set_mem_alias_set (st_addr_1, 0);
3762 /* Load up the destination end bits. */
3763 emit_move_insn (st_tmp_2, st_addr_2);
3764 emit_move_insn (st_tmp_1, st_addr_1);
3766 /* Shift the input data into place. */
3767 dreg = copy_addr_to_reg (dmema);
3768 if (WORDS_BIG_ENDIAN)
3769 emit_move_insn (dreg, plus_constant (dreg, 7));
3770 if (data_regs != NULL)
3772 for (i = words-1; i >= 0; --i)
3774 if (WORDS_BIG_ENDIAN)
3776 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3777 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3779 else
3781 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3782 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3785 for (i = words-1; i > 0; --i)
3787 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3788 ins_tmps[i-1], ins_tmps[i-1], 1,
3789 OPTAB_WIDEN);
3793 /* Split and merge the ends with the destination data. */
3794 if (WORDS_BIG_ENDIAN)
3796 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3797 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3799 else
3801 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3802 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3805 if (data_regs != NULL)
3807 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3808 st_tmp_2, 1, OPTAB_WIDEN);
3809 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3810 st_tmp_1, 1, OPTAB_WIDEN);
3813 /* Store it all. */
3814 if (WORDS_BIG_ENDIAN)
3815 emit_move_insn (st_addr_1, st_tmp_1);
3816 else
3817 emit_move_insn (st_addr_2, st_tmp_2);
3818 for (i = words-1; i > 0; --i)
3820 rtx tmp = change_address (dmem, DImode,
3821 gen_rtx_AND (DImode,
3822 plus_constant(dmema,
3823 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3824 im8));
3825 set_mem_alias_set (tmp, 0);
3826 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3828 if (WORDS_BIG_ENDIAN)
3829 emit_move_insn (st_addr_2, st_tmp_2);
3830 else
3831 emit_move_insn (st_addr_1, st_tmp_1);
3835 /* Expand string/block move operations.
3837 operands[0] is the pointer to the destination.
3838 operands[1] is the pointer to the source.
3839 operands[2] is the number of bytes to move.
3840 operands[3] is the alignment. */
3842 int
3843 alpha_expand_block_move (rtx operands[])
3845 rtx bytes_rtx = operands[2];
3846 rtx align_rtx = operands[3];
3847 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3848 HOST_WIDE_INT bytes = orig_bytes;
3849 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3850 HOST_WIDE_INT dst_align = src_align;
3851 rtx orig_src = operands[1];
3852 rtx orig_dst = operands[0];
3853 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3854 rtx tmp;
3855 unsigned int i, words, ofs, nregs = 0;
3857 if (orig_bytes <= 0)
3858 return 1;
3859 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3860 return 0;
3862 /* Look for additional alignment information from recorded register info. */
3864 tmp = XEXP (orig_src, 0);
3865 if (GET_CODE (tmp) == REG)
3866 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3867 else if (GET_CODE (tmp) == PLUS
3868 && GET_CODE (XEXP (tmp, 0)) == REG
3869 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3871 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3872 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3874 if (a > src_align)
3876 if (a >= 64 && c % 8 == 0)
3877 src_align = 64;
3878 else if (a >= 32 && c % 4 == 0)
3879 src_align = 32;
3880 else if (a >= 16 && c % 2 == 0)
3881 src_align = 16;
3885 tmp = XEXP (orig_dst, 0);
3886 if (GET_CODE (tmp) == REG)
3887 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3888 else if (GET_CODE (tmp) == PLUS
3889 && GET_CODE (XEXP (tmp, 0)) == REG
3890 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3892 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3893 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3895 if (a > dst_align)
3897 if (a >= 64 && c % 8 == 0)
3898 dst_align = 64;
3899 else if (a >= 32 && c % 4 == 0)
3900 dst_align = 32;
3901 else if (a >= 16 && c % 2 == 0)
3902 dst_align = 16;
3906 ofs = 0;
3907 if (src_align >= 64 && bytes >= 8)
3909 words = bytes / 8;
3911 for (i = 0; i < words; ++i)
3912 data_regs[nregs + i] = gen_reg_rtx (DImode);
3914 for (i = 0; i < words; ++i)
3915 emit_move_insn (data_regs[nregs + i],
3916 adjust_address (orig_src, DImode, ofs + i * 8));
3918 nregs += words;
3919 bytes -= words * 8;
3920 ofs += words * 8;
3923 if (src_align >= 32 && bytes >= 4)
3925 words = bytes / 4;
3927 for (i = 0; i < words; ++i)
3928 data_regs[nregs + i] = gen_reg_rtx (SImode);
3930 for (i = 0; i < words; ++i)
3931 emit_move_insn (data_regs[nregs + i],
3932 adjust_address (orig_src, SImode, ofs + i * 4));
3934 nregs += words;
3935 bytes -= words * 4;
3936 ofs += words * 4;
3939 if (bytes >= 8)
3941 words = bytes / 8;
3943 for (i = 0; i < words+1; ++i)
3944 data_regs[nregs + i] = gen_reg_rtx (DImode);
3946 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3947 words, ofs);
3949 nregs += words;
3950 bytes -= words * 8;
3951 ofs += words * 8;
3954 if (! TARGET_BWX && bytes >= 4)
3956 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3957 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3958 bytes -= 4;
3959 ofs += 4;
3962 if (bytes >= 2)
3964 if (src_align >= 16)
3966 do {
3967 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3968 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3969 bytes -= 2;
3970 ofs += 2;
3971 } while (bytes >= 2);
3973 else if (! TARGET_BWX)
3975 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3976 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3977 bytes -= 2;
3978 ofs += 2;
3982 while (bytes > 0)
3984 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3985 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3986 bytes -= 1;
3987 ofs += 1;
3990 if (nregs > ARRAY_SIZE (data_regs))
3991 abort ();
3993 /* Now save it back out again. */
3995 i = 0, ofs = 0;
3997 /* Write out the data in whatever chunks reading the source allowed. */
3998 if (dst_align >= 64)
4000 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4002 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4003 data_regs[i]);
4004 ofs += 8;
4005 i++;
4009 if (dst_align >= 32)
4011 /* If the source has remaining DImode regs, write them out in
4012 two pieces. */
4013 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4015 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4016 NULL_RTX, 1, OPTAB_WIDEN);
4018 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4019 gen_lowpart (SImode, data_regs[i]));
4020 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4021 gen_lowpart (SImode, tmp));
4022 ofs += 8;
4023 i++;
4026 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4028 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4029 data_regs[i]);
4030 ofs += 4;
4031 i++;
4035 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4037 /* Write out a remaining block of words using unaligned methods. */
4039 for (words = 1; i + words < nregs; words++)
4040 if (GET_MODE (data_regs[i + words]) != DImode)
4041 break;
4043 if (words == 1)
4044 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4045 else
4046 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4047 words, ofs);
4049 i += words;
4050 ofs += words * 8;
4053 /* Due to the above, this won't be aligned. */
4054 /* ??? If we have more than one of these, consider constructing full
4055 words in registers and using alpha_expand_unaligned_store_words. */
4056 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4058 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4059 ofs += 4;
4060 i++;
4063 if (dst_align >= 16)
4064 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4066 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4067 i++;
4068 ofs += 2;
4070 else
4071 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4073 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4074 i++;
4075 ofs += 2;
4078 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
4080 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4081 i++;
4082 ofs += 1;
4085 if (i != nregs)
4086 abort ();
4088 return 1;
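/* Example (illustrative only): an 11-byte copy with 32-bit-aligned
   source and destination becomes

     two SImode loads    bytes 0-7
     one HImode load     bytes 8-9
     one QImode load     byte 10

   followed by the matching aligned stores, with every load emitted
   before any store as the comment above describes.  */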
4091 int
4092 alpha_expand_block_clear (rtx operands[])
4094 rtx bytes_rtx = operands[1];
4095 rtx align_rtx = operands[2];
4096 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4097 HOST_WIDE_INT bytes = orig_bytes;
4098 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4099 HOST_WIDE_INT alignofs = 0;
4100 rtx orig_dst = operands[0];
4101 rtx tmp;
4102 int i, words, ofs = 0;
4104 if (orig_bytes <= 0)
4105 return 1;
4106 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4107 return 0;
4109 /* Look for stricter alignment. */
4110 tmp = XEXP (orig_dst, 0);
4111 if (GET_CODE (tmp) == REG)
4112 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4113 else if (GET_CODE (tmp) == PLUS
4114 && GET_CODE (XEXP (tmp, 0)) == REG
4115 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4117 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4118 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4120 if (a > align)
4122 if (a >= 64)
4123 align = a, alignofs = 8 - c % 8;
4124 else if (a >= 32)
4125 align = a, alignofs = 4 - c % 4;
4126 else if (a >= 16)
4127 align = a, alignofs = 2 - c % 2;
4131 /* Handle an unaligned prefix first. */
4133 if (alignofs > 0)
4135 #if HOST_BITS_PER_WIDE_INT >= 64
4136 /* Given that alignofs is bounded by align, the only time BWX could
4137 generate three stores is for a 7 byte fill. Prefer two individual
4138 stores over a load/mask/store sequence. */
4139 if ((!TARGET_BWX || alignofs == 7)
4140 && align >= 32
4141 && !(alignofs == 4 && bytes >= 4))
4143 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4144 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4145 rtx mem, tmp;
4146 HOST_WIDE_INT mask;
4148 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4149 set_mem_alias_set (mem, 0);
4151 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4152 if (bytes < alignofs)
4154 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4155 ofs += bytes;
4156 bytes = 0;
4158 else
4160 bytes -= alignofs;
4161 ofs += alignofs;
4163 alignofs = 0;
4165 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4166 NULL_RTX, 1, OPTAB_WIDEN);
4168 emit_move_insn (mem, tmp);
4170 #endif
4172 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4174 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4175 bytes -= 1;
4176 ofs += 1;
4177 alignofs -= 1;
4179 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4181 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4182 bytes -= 2;
4183 ofs += 2;
4184 alignofs -= 2;
4186 if (alignofs == 4 && bytes >= 4)
4188 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4189 bytes -= 4;
4190 ofs += 4;
4191 alignofs = 0;
4194 /* If we've not used the extra lead alignment information by now,
4195 we won't be able to. Downgrade align to match what's left over. */
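/* ALIGNOFS & -ALIGNOFS isolates the lowest set bit. For example, a
   leftover alignofs of 6 (binary 110) yields 2, so align is capped
   at 16 bits: the destination is now only known to be 2-byte
   aligned. */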
4196 if (alignofs > 0)
4198 alignofs = alignofs & -alignofs;
4199 align = MIN (align, alignofs * BITS_PER_UNIT);
4203 /* Handle a block of contiguous long-words. */
4205 if (align >= 64 && bytes >= 8)
4207 words = bytes / 8;
4209 for (i = 0; i < words; ++i)
4210 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4211 const0_rtx);
4213 bytes -= words * 8;
4214 ofs += words * 8;
4217 /* If the block is large and appropriately aligned, emit a single
4218 store followed by a sequence of stq_u insns. */
4220 if (align >= 32 && bytes > 16)
4222 rtx orig_dsta;
4224 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4225 bytes -= 4;
4226 ofs += 4;
4228 orig_dsta = XEXP (orig_dst, 0);
4229 if (GET_CODE (orig_dsta) == LO_SUM)
4230 orig_dsta = force_reg (Pmode, orig_dsta);
4232 words = bytes / 8;
4233 for (i = 0; i < words; ++i)
4235 rtx mem
4236 = change_address (orig_dst, DImode,
4237 gen_rtx_AND (DImode,
4238 plus_constant (orig_dsta, ofs + i*8),
4239 GEN_INT (-8)));
4240 set_mem_alias_set (mem, 0);
4241 emit_move_insn (mem, const0_rtx);
4244 /* Depending on the alignment, the first stq_u may have overlapped
4245 with the initial stl, which means that the last stq_u didn't
4246 write as much as it would appear. Leave those questionable bytes
4247 unaccounted for. */
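/* For example, if OFS ended up 4 mod 8, each stq_u above really
   starts 4 bytes before OFS + i*8, so the last one only reaches
   OFS + words*8 - 4. Crediting words*8 - 4 bytes is exact in that
   case and merely conservative when OFS was already 8-byte
   aligned. */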
4248 bytes -= words * 8 - 4;
4249 ofs += words * 8 - 4;
4252 /* Handle a smaller block of aligned words. */
4254 if ((align >= 64 && bytes == 4)
4255 || (align == 32 && bytes >= 4))
4257 words = bytes / 4;
4259 for (i = 0; i < words; ++i)
4260 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4261 const0_rtx);
4263 bytes -= words * 4;
4264 ofs += words * 4;
4267 /* An unaligned block uses stq_u stores for as many as possible. */
4269 if (bytes >= 8)
4271 words = bytes / 8;
4273 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4275 bytes -= words * 8;
4276 ofs += words * 8;
4279 /* Next clean up any trailing pieces. */
4281 #if HOST_BITS_PER_WIDE_INT >= 64
4282 /* Count the number of bits in BYTES for which aligned stores could
4283 be emitted. */
4284 words = 0;
4285 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4286 if (bytes & i)
4287 words += 1;
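/* For example, bytes == 7 with BWX and align >= 64 gives words == 3
   (bits 1, 2 and 4 of BYTES are all set), so the test below prefers
   a single masked DImode store to the stb + stw + stl that BWX
   would otherwise emit. */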
4289 /* If we have appropriate alignment (and it wouldn't take too many
4290 instructions otherwise), mask out the bytes we need. */
4291 if (TARGET_BWX ? words > 2 : bytes > 0)
4293 if (align >= 64)
4295 rtx mem, tmp;
4296 HOST_WIDE_INT mask;
4298 mem = adjust_address (orig_dst, DImode, ofs);
4299 set_mem_alias_set (mem, 0);
4301 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4303 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4304 NULL_RTX, 1, OPTAB_WIDEN);
4306 emit_move_insn (mem, tmp);
4307 return 1;
4309 else if (align >= 32 && bytes < 4)
4311 rtx mem, tmp;
4312 HOST_WIDE_INT mask;
4314 mem = adjust_address (orig_dst, SImode, ofs);
4315 set_mem_alias_set (mem, 0);
4317 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4319 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4320 NULL_RTX, 1, OPTAB_WIDEN);
4322 emit_move_insn (mem, tmp);
4323 return 1;
4326 #endif
4328 if (!TARGET_BWX && bytes >= 4)
4330 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4331 bytes -= 4;
4332 ofs += 4;
4335 if (bytes >= 2)
4337 if (align >= 16)
4339 do {
4340 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4341 const0_rtx);
4342 bytes -= 2;
4343 ofs += 2;
4344 } while (bytes >= 2);
4346 else if (! TARGET_BWX)
4348 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4349 bytes -= 2;
4350 ofs += 2;
4354 while (bytes > 0)
4356 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4357 bytes -= 1;
4358 ofs += 1;
4361 return 1;
4364 /* Returns a mask so that zap(x, value) == x & mask. */
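/* For example, __builtin_alpha_zap (x, 0x0f) clears bytes 0-3 of x,
   so for VALUE == 0x0f the mask built below is 0xffffffff00000000:
   0xff in each byte whose bit in VALUE is clear, 0x00 elsewhere. */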
4367 alpha_expand_zap_mask (HOST_WIDE_INT value)
4369 rtx result;
4370 int i;
4372 if (HOST_BITS_PER_WIDE_INT >= 64)
4374 HOST_WIDE_INT mask = 0;
4376 for (i = 7; i >= 0; --i)
4378 mask <<= 8;
4379 if (!((value >> i) & 1))
4380 mask |= 0xff;
4383 result = gen_int_mode (mask, DImode);
4385 else if (HOST_BITS_PER_WIDE_INT == 32)
4387 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4389 for (i = 7; i >= 4; --i)
4391 mask_hi <<= 8;
4392 if (!((value >> i) & 1))
4393 mask_hi |= 0xff;
4396 for (i = 3; i >= 0; --i)
4398 mask_lo <<= 8;
4399 if (!((value >> i) & 1))
4400 mask_lo |= 0xff;
4403 result = immed_double_const (mask_lo, mask_hi, DImode);
4405 else
4406 abort ();
4408 return result;
4411 void
4412 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4413 enum machine_mode mode,
4414 rtx op0, rtx op1, rtx op2)
4416 op0 = gen_lowpart (mode, op0);
4418 if (op1 == const0_rtx)
4419 op1 = CONST0_RTX (mode);
4420 else
4421 op1 = gen_lowpart (mode, op1);
4423 if (op2 == const0_rtx)
4424 op2 = CONST0_RTX (mode);
4425 else
4426 op2 = gen_lowpart (mode, op2);
4428 emit_insn ((*gen) (op0, op1, op2));
4431 /* Adjust the cost of a scheduling dependency. Return the new cost of
4432 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4434 static int
4435 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4437 enum attr_type insn_type, dep_insn_type;
4439 /* If the dependence is an anti-dependence, there is no cost. For an
4440 output dependence, there is sometimes a cost, but it doesn't seem
4441 worth handling those few cases. */
4442 if (REG_NOTE_KIND (link) != 0)
4443 return cost;
4445 /* If we can't recognize the insns, we can't really do anything. */
4446 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4447 return cost;
4449 insn_type = get_attr_type (insn);
4450 dep_insn_type = get_attr_type (dep_insn);
4452 /* Bring in the user-defined memory latency. */
4453 if (dep_insn_type == TYPE_ILD
4454 || dep_insn_type == TYPE_FLD
4455 || dep_insn_type == TYPE_LDSYM)
4456 cost += alpha_memory_latency-1;
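/* With the default -mmemory-latency of 3, this charges two extra
   cycles to an insn that consumes the result of a load. */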
4458 /* Everything else handled in DFA bypasses now. */
4460 return cost;
4463 /* The number of instructions that can be issued per cycle. */
4465 static int
4466 alpha_issue_rate (void)
4468 return (alpha_cpu == PROCESSOR_EV4 ? 2 : 4);
4471 /* How many alternative schedules to try. This should be as wide as the
4472 scheduling freedom in the DFA, but no wider. Making this value too
4473 large results in extra work for the scheduler.
4475 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4476 alternative schedules. For EV5, we can choose between E0/E1 and
4477 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4479 static int
4480 alpha_multipass_dfa_lookahead (void)
4482 return (alpha_cpu == PROCESSOR_EV6 ? 4 : 2);
4485 /* Machine-specific function data. */
4487 struct machine_function GTY(())
4489 /* For unicosmk. */
4490 /* List of call information words for calls from this function. */
4491 struct rtx_def *first_ciw;
4492 struct rtx_def *last_ciw;
4493 int ciw_count;
4495 /* List of deferred case vectors. */
4496 struct rtx_def *addr_list;
4498 /* For OSF. */
4499 const char *some_ld_name;
4501 /* For TARGET_LD_BUGGY_LDGP. */
4502 struct rtx_def *gp_save_rtx;
4505 /* How to allocate a 'struct machine_function'. */
4507 static struct machine_function *
4508 alpha_init_machine_status (void)
4510 return ((struct machine_function *)
4511 ggc_alloc_cleared (sizeof (struct machine_function)));
4514 /* Functions to save and restore alpha_return_addr_rtx. */
4516 /* Start the ball rolling with RETURN_ADDR_RTX. */
4519 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4521 if (count != 0)
4522 return const0_rtx;
4524 return get_hard_reg_initial_val (Pmode, REG_RA);
4527 /* Return or create a memory slot containing the gp value for the current
4528 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4531 alpha_gp_save_rtx (void)
4533 rtx seq, m = cfun->machine->gp_save_rtx;
4535 if (m == NULL)
4537 start_sequence ();
4539 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4540 m = validize_mem (m);
4541 emit_move_insn (m, pic_offset_table_rtx);
4543 seq = get_insns ();
4544 end_sequence ();
4545 emit_insn_after (seq, entry_of_function ());
4547 cfun->machine->gp_save_rtx = m;
4550 return m;
4553 static int
4554 alpha_ra_ever_killed (void)
4556 rtx top;
4558 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4559 return regs_ever_live[REG_RA];
4561 push_topmost_sequence ();
4562 top = get_insns ();
4563 pop_topmost_sequence ();
4565 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4569 /* Return the trap mode suffix applicable to the current
4570 instruction, or NULL. */
4572 static const char *
4573 get_trap_mode_suffix (void)
4575 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4577 switch (s)
4579 case TRAP_SUFFIX_NONE:
4580 return NULL;
4582 case TRAP_SUFFIX_SU:
4583 if (alpha_fptm >= ALPHA_FPTM_SU)
4584 return "su";
4585 return NULL;
4587 case TRAP_SUFFIX_SUI:
4588 if (alpha_fptm >= ALPHA_FPTM_SUI)
4589 return "sui";
4590 return NULL;
4592 case TRAP_SUFFIX_V_SV:
4593 switch (alpha_fptm)
4595 case ALPHA_FPTM_N:
4596 return NULL;
4597 case ALPHA_FPTM_U:
4598 return "v";
4599 case ALPHA_FPTM_SU:
4600 case ALPHA_FPTM_SUI:
4601 return "sv";
4603 break;
4605 case TRAP_SUFFIX_V_SV_SVI:
4606 switch (alpha_fptm)
4608 case ALPHA_FPTM_N:
4609 return NULL;
4610 case ALPHA_FPTM_U:
4611 return "v";
4612 case ALPHA_FPTM_SU:
4613 return "sv";
4614 case ALPHA_FPTM_SUI:
4615 return "svi";
4617 break;
4619 case TRAP_SUFFIX_U_SU_SUI:
4620 switch (alpha_fptm)
4622 case ALPHA_FPTM_N:
4623 return NULL;
4624 case ALPHA_FPTM_U:
4625 return "u";
4626 case ALPHA_FPTM_SU:
4627 return "su";
4628 case ALPHA_FPTM_SUI:
4629 return "sui";
4631 break;
4633 abort ();
4636 /* Return the rounding mode suffix applicable to the current
4637 instruction, or NULL. */
4639 static const char *
4640 get_round_mode_suffix (void)
4642 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4644 switch (s)
4646 case ROUND_SUFFIX_NONE:
4647 return NULL;
4648 case ROUND_SUFFIX_NORMAL:
4649 switch (alpha_fprm)
4651 case ALPHA_FPRM_NORM:
4652 return NULL;
4653 case ALPHA_FPRM_MINF:
4654 return "m";
4655 case ALPHA_FPRM_CHOP:
4656 return "c";
4657 case ALPHA_FPRM_DYN:
4658 return "d";
4660 break;
4662 case ROUND_SUFFIX_C:
4663 return "c";
4665 abort ();
4668 /* Locate some local-dynamic symbol still in use by this function
4669 so that we can print its name in some movdi_er_tlsldm pattern. */
4671 static int
4672 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4674 rtx x = *px;
4676 if (GET_CODE (x) == SYMBOL_REF
4677 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4679 cfun->machine->some_ld_name = XSTR (x, 0);
4680 return 1;
4683 return 0;
4686 static const char *
4687 get_some_local_dynamic_name (void)
4689 rtx insn;
4691 if (cfun->machine->some_ld_name)
4692 return cfun->machine->some_ld_name;
4694 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4695 if (INSN_P (insn)
4696 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4697 return cfun->machine->some_ld_name;
4699 abort ();
4702 /* Print an operand. Recognize special options, documented below. */
4704 void
4705 print_operand (FILE *file, rtx x, int code)
4707 int i;
4709 switch (code)
4711 case '~':
4712 /* Print the assembler name of the current function. */
4713 assemble_name (file, alpha_fnname);
4714 break;
4716 case '&':
4717 assemble_name (file, get_some_local_dynamic_name ());
4718 break;
4720 case '/':
4722 const char *trap = get_trap_mode_suffix ();
4723 const char *round = get_round_mode_suffix ();
4725 if (trap || round)
4726 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4727 (trap ? trap : ""), (round ? round : ""));
4728 break;
4731 case ',':
4732 /* Generates single precision instruction suffix. */
4733 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4734 break;
4736 case '-':
4737 /* Generates double precision instruction suffix. */
4738 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4739 break;
4741 case '+':
4742 /* Generates a nop after a noreturn call at the very end of the
4743 function. */
4744 if (next_real_insn (current_output_insn) == 0)
4745 fprintf (file, "\n\tnop");
4746 break;
4748 case '#':
4749 if (alpha_this_literal_sequence_number == 0)
4750 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4751 fprintf (file, "%d", alpha_this_literal_sequence_number);
4752 break;
4754 case '*':
4755 if (alpha_this_gpdisp_sequence_number == 0)
4756 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
4757 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
4758 break;
4760 case 'H':
4761 if (GET_CODE (x) == HIGH)
4762 output_addr_const (file, XEXP (x, 0));
4763 else
4764 output_operand_lossage ("invalid %%H value");
4765 break;
4767 case 'J':
4769 const char *lituse;
4771 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
4773 x = XVECEXP (x, 0, 0);
4774 lituse = "lituse_tlsgd";
4776 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
4778 x = XVECEXP (x, 0, 0);
4779 lituse = "lituse_tlsldm";
4781 else if (GET_CODE (x) == CONST_INT)
4782 lituse = "lituse_jsr";
4783 else
4785 output_operand_lossage ("invalid %%J value");
4786 break;
4789 if (x != const0_rtx)
4790 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4792 break;
4794 case 'r':
4795 /* If this operand is the constant zero, write it as "$31". */
4796 if (GET_CODE (x) == REG)
4797 fprintf (file, "%s", reg_names[REGNO (x)]);
4798 else if (x == CONST0_RTX (GET_MODE (x)))
4799 fprintf (file, "$31");
4800 else
4801 output_operand_lossage ("invalid %%r value");
4802 break;
4804 case 'R':
4805 /* Similar, but for floating-point. */
4806 if (GET_CODE (x) == REG)
4807 fprintf (file, "%s", reg_names[REGNO (x)]);
4808 else if (x == CONST0_RTX (GET_MODE (x)))
4809 fprintf (file, "$f31");
4810 else
4811 output_operand_lossage ("invalid %%R value");
4812 break;
4814 case 'N':
4815 /* Write the 1's complement of a constant. */
4816 if (GET_CODE (x) != CONST_INT)
4817 output_operand_lossage ("invalid %%N value");
4819 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
4820 break;
4822 case 'P':
4823 /* Write 1 << C, for a constant C. */
4824 if (GET_CODE (x) != CONST_INT)
4825 output_operand_lossage ("invalid %%P value");
4827 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
4828 break;
4830 case 'h':
4831 /* Write the high-order 16 bits of a constant, sign-extended. */
4832 if (GET_CODE (x) != CONST_INT)
4833 output_operand_lossage ("invalid %%h value");
4835 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
4836 break;
4838 case 'L':
4839 /* Write the low-order 16 bits of a constant, sign-extended. */
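/* For example, for INTVAL 0x18000 the low 16 bits are 0x8000, and
   subtracting 2 * 0x8000 sign-extends that to -32768, the value a
   16-bit displacement field would hold. */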
4840 if (GET_CODE (x) != CONST_INT)
4841 output_operand_lossage ("invalid %%L value");
4843 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4844 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
4845 break;
4847 case 'm':
4848 /* Write mask for ZAP insn. */
4849 if (GET_CODE (x) == CONST_DOUBLE)
4851 HOST_WIDE_INT mask = 0;
4852 HOST_WIDE_INT value;
4854 value = CONST_DOUBLE_LOW (x);
4855 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
4856 i++, value >>= 8)
4857 if (value & 0xff)
4858 mask |= (1 << i);
4860 value = CONST_DOUBLE_HIGH (x);
4861 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
4862 i++, value >>= 8)
4863 if (value & 0xff)
4864 mask |= (1 << (i + sizeof (int)));
4866 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
4869 else if (GET_CODE (x) == CONST_INT)
4871 HOST_WIDE_INT mask = 0, value = INTVAL (x);
4873 for (i = 0; i < 8; i++, value >>= 8)
4874 if (value & 0xff)
4875 mask |= (1 << i);
4877 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
4879 else
4880 output_operand_lossage ("invalid %%m value");
4881 break;
4883 case 'M':
4884 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
4885 if (GET_CODE (x) != CONST_INT
4886 || (INTVAL (x) != 8 && INTVAL (x) != 16
4887 && INTVAL (x) != 32 && INTVAL (x) != 64))
4888 output_operand_lossage ("invalid %%M value");
4890 fprintf (file, "%s",
4891 (INTVAL (x) == 8 ? "b"
4892 : INTVAL (x) == 16 ? "w"
4893 : INTVAL (x) == 32 ? "l"
4894 : "q"));
4895 break;
4897 case 'U':
4898 /* Similar, except do it from the mask. */
4899 if (GET_CODE (x) == CONST_INT)
4901 HOST_WIDE_INT value = INTVAL (x);
4903 if (value == 0xff)
4905 fputc ('b', file);
4906 break;
4908 if (value == 0xffff)
4910 fputc ('w', file);
4911 break;
4913 if (value == 0xffffffff)
4915 fputc ('l', file);
4916 break;
4918 if (value == -1)
4920 fputc ('q', file);
4921 break;
4924 else if (HOST_BITS_PER_WIDE_INT == 32
4925 && GET_CODE (x) == CONST_DOUBLE
4926 && CONST_DOUBLE_LOW (x) == 0xffffffff
4927 && CONST_DOUBLE_HIGH (x) == 0)
4929 fputc ('l', file);
4930 break;
4932 output_operand_lossage ("invalid %%U value");
4933 break;
4935 case 's':
4936 /* Write the constant value divided by 8 for little-endian mode or
4937 (56 - value) / 8 for big-endian mode. */
4939 if (GET_CODE (x) != CONST_INT
4940 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
4941 ? 56
4942 : 64)
4943 || (INTVAL (x) & 7) != 0)
4944 output_operand_lossage ("invalid %%s value");
4946 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4947 WORDS_BIG_ENDIAN
4948 ? (56 - INTVAL (x)) / 8
4949 : INTVAL (x) / 8);
4950 break;
4952 case 'S':
4953 /* Same, except compute (64 - c) / 8. */
4955 if (GET_CODE (x) != CONST_INT
4956 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
4957 || (INTVAL (x) & 7) != 0)
4958 output_operand_lossage ("invalid %%S value");
4960 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
4961 break;
4963 case 't':
4965 /* On Unicos/Mk systems: use a DEX expression if the symbol
4966 clashes with a register name. */
4967 int dex = unicosmk_need_dex (x);
4968 if (dex)
4969 fprintf (file, "DEX(%d)", dex);
4970 else
4971 output_addr_const (file, x);
4973 break;
4975 case 'C': case 'D': case 'c': case 'd':
4976 /* Write out comparison name. */
4978 enum rtx_code c = GET_CODE (x);
4980 if (!COMPARISON_P (x))
4981 output_operand_lossage ("invalid %%C value");
4983 else if (code == 'D')
4984 c = reverse_condition (c);
4985 else if (code == 'c')
4986 c = swap_condition (c);
4987 else if (code == 'd')
4988 c = swap_condition (reverse_condition (c));
4990 if (c == LEU)
4991 fprintf (file, "ule");
4992 else if (c == LTU)
4993 fprintf (file, "ult");
4994 else if (c == UNORDERED)
4995 fprintf (file, "un");
4996 else
4997 fprintf (file, "%s", GET_RTX_NAME (c));
4999 break;
5001 case 'E':
5002 /* Write the divide or modulus operator. */
5003 switch (GET_CODE (x))
5005 case DIV:
5006 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5007 break;
5008 case UDIV:
5009 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5010 break;
5011 case MOD:
5012 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5013 break;
5014 case UMOD:
5015 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5016 break;
5017 default:
5018 output_operand_lossage ("invalid %%E value");
5019 break;
5021 break;
5023 case 'A':
5024 /* Write "_u" for unaligned access. */
5025 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5026 fprintf (file, "_u");
5027 break;
5029 case 0:
5030 if (GET_CODE (x) == REG)
5031 fprintf (file, "%s", reg_names[REGNO (x)]);
5032 else if (GET_CODE (x) == MEM)
5033 output_address (XEXP (x, 0));
5034 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5036 switch (XINT (XEXP (x, 0), 1))
5038 case UNSPEC_DTPREL:
5039 case UNSPEC_TPREL:
5040 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5041 break;
5042 default:
5043 output_operand_lossage ("unknown relocation unspec");
5044 break;
5047 else
5048 output_addr_const (file, x);
5049 break;
5051 default:
5052 output_operand_lossage ("invalid %%xn code");
5056 void
5057 print_operand_address (FILE *file, rtx addr)
5059 int basereg = 31;
5060 HOST_WIDE_INT offset = 0;
5062 if (GET_CODE (addr) == AND)
5063 addr = XEXP (addr, 0);
5065 if (GET_CODE (addr) == PLUS
5066 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5068 offset = INTVAL (XEXP (addr, 1));
5069 addr = XEXP (addr, 0);
5072 if (GET_CODE (addr) == LO_SUM)
5074 const char *reloc16, *reloclo;
5075 rtx op1 = XEXP (addr, 1);
5077 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5079 op1 = XEXP (op1, 0);
5080 switch (XINT (op1, 1))
5082 case UNSPEC_DTPREL:
5083 reloc16 = NULL;
5084 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5085 break;
5086 case UNSPEC_TPREL:
5087 reloc16 = NULL;
5088 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5089 break;
5090 default:
5091 output_operand_lossage ("unknown relocation unspec");
5092 return;
5095 output_addr_const (file, XVECEXP (op1, 0, 0));
5097 else
5099 reloc16 = "gprel";
5100 reloclo = "gprellow";
5101 output_addr_const (file, op1);
5104 if (offset)
5105 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5107 addr = XEXP (addr, 0);
5108 if (GET_CODE (addr) == REG)
5109 basereg = REGNO (addr);
5110 else if (GET_CODE (addr) == SUBREG
5111 && GET_CODE (SUBREG_REG (addr)) == REG)
5112 basereg = subreg_regno (addr);
5113 else
5114 abort ();
5116 fprintf (file, "($%d)\t\t!%s", basereg,
5117 (basereg == 29 ? reloc16 : reloclo));
5118 return;
5121 if (GET_CODE (addr) == REG)
5122 basereg = REGNO (addr);
5123 else if (GET_CODE (addr) == SUBREG
5124 && GET_CODE (SUBREG_REG (addr)) == REG)
5125 basereg = subreg_regno (addr);
5126 else if (GET_CODE (addr) == CONST_INT)
5127 offset = INTVAL (addr);
5129 #if TARGET_ABI_OPEN_VMS
5130 else if (GET_CODE (addr) == SYMBOL_REF)
5132 fprintf (file, "%s", XSTR (addr, 0));
5133 return;
5135 else if (GET_CODE (addr) == CONST
5136 && GET_CODE (XEXP (addr, 0)) == PLUS
5137 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
5139 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5140 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5141 INTVAL (XEXP (XEXP (addr, 0), 1)));
5142 return;
5144 #endif
5146 else
5147 abort ();
5149 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5152 /* Emit RTL insns to initialize the variable parts of a trampoline at
5153 TRAMP. FNADDR is an RTX for the address of the function's pure
5154 code. CXT is an RTX for the static chain value for the function.
5156 The three offset parameters are for the individual template's
5157 layout. A JMPOFS < 0 indicates that the trampoline does not
5158 contain instructions at all.
5160 We assume here that a function will be called many more times than
5161 its address is taken (e.g., it might be passed to qsort), so we
5162 take the trouble to initialize the "hint" field in the JMP insn.
5163 Note that the hint field is PC (new) + 4 * bits 13:0. */
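/* That is, the hint which the (currently disabled) code below would
   merge in is
       ((FNADDR - (TRAMP + JMPOFS + 4)) >> 2) & 0x3fff,
   the word displacement from the instruction following the jmp,
   truncated to 14 bits. */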
5165 void
5166 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5167 int fnofs, int cxtofs, int jmpofs)
5169 rtx temp, temp1, addr;
5170 /* VMS really uses DImode pointers in memory at this point. */
5171 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5173 #ifdef POINTERS_EXTEND_UNSIGNED
5174 fnaddr = convert_memory_address (mode, fnaddr);
5175 cxt = convert_memory_address (mode, cxt);
5176 #endif
5178 /* Store function address and CXT. */
5179 addr = memory_address (mode, plus_constant (tramp, fnofs));
5180 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5181 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5182 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5184 /* This has been disabled since the hint only has a 32k range, and in
5185 no existing OS is the stack within 32k of the text segment. */
5186 if (0 && jmpofs >= 0)
5188 /* Compute hint value. */
5189 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5190 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5191 OPTAB_WIDEN);
5192 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5193 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5194 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5195 GEN_INT (0x3fff), 0);
5197 /* Merge in the hint. */
5198 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5199 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5200 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5201 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5202 OPTAB_WIDEN);
5203 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5206 #ifdef ENABLE_EXECUTE_STACK
5207 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5208 0, VOIDmode, 1, tramp, Pmode);
5209 #endif
5211 if (jmpofs >= 0)
5212 emit_insn (gen_imb ());
5215 /* Determine where to put an argument to a function.
5216 Value is zero to push the argument on the stack,
5217 or a hard register in which to store the argument.
5219 MODE is the argument's machine mode.
5220 TYPE is the data type of the argument (as a tree).
5221 This is null for libcalls where that information may
5222 not be available.
5223 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5224 the preceding args and about the function being called.
5225 NAMED is nonzero if this argument is a named parameter
5226 (otherwise it is an extra parameter matching an ellipsis).
5228 On Alpha the first 6 words of args are normally in registers
5229 and the rest are pushed. */
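/* Concretely, on OSF this means integer arguments land in $16-$21
   (basereg 16 below) and floating-point arguments in $f16-$f21
   (basereg 32 + 16, i.e. hard regs 48-53), with the argument number
   selecting the slot. */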
5232 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5233 int named ATTRIBUTE_UNUSED)
5235 int basereg;
5236 int num_args;
5238 /* Don't get confused and pass small structures in FP registers. */
5239 if (type && AGGREGATE_TYPE_P (type))
5240 basereg = 16;
5241 else
5243 #ifdef ENABLE_CHECKING
5244 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5245 values here. */
5246 if (COMPLEX_MODE_P (mode))
5247 abort ();
5248 #endif
5250 /* Set up defaults for FP operands passed in FP registers, and
5251 integral operands passed in integer registers. */
5252 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5253 basereg = 32 + 16;
5254 else
5255 basereg = 16;
5258 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5259 the three platforms, so we can't avoid conditional compilation. */
5260 #if TARGET_ABI_OPEN_VMS
5262 if (mode == VOIDmode)
5263 return alpha_arg_info_reg_val (cum);
5265 num_args = cum.num_args;
5266 if (num_args >= 6
5267 || targetm.calls.must_pass_in_stack (mode, type))
5268 return NULL_RTX;
5270 #elif TARGET_ABI_UNICOSMK
5272 int size;
5274 /* If this is the last argument, generate the call info word (CIW). */
5275 /* ??? We don't include the caller's line number in the CIW because
5276 I don't know how to determine it if debug info is turned off. */
5277 if (mode == VOIDmode)
5279 int i;
5280 HOST_WIDE_INT lo;
5281 HOST_WIDE_INT hi;
5282 rtx ciw;
5284 lo = 0;
5286 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5287 if (cum.reg_args_type[i])
5288 lo |= (1 << (7 - i));
5290 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5291 lo |= 7;
5292 else
5293 lo |= cum.num_reg_words;
5295 #if HOST_BITS_PER_WIDE_INT == 32
5296 hi = (cum.num_args << 20) | cum.num_arg_words;
5297 #else
5298 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5299 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5300 hi = 0;
5301 #endif
5302 ciw = immed_double_const (lo, hi, DImode);
5304 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5305 UNSPEC_UMK_LOAD_CIW);
5308 size = ALPHA_ARG_SIZE (mode, type, named);
5309 num_args = cum.num_reg_words;
5310 if (cum.force_stack
5311 || cum.num_reg_words + size > 6
5312 || targetm.calls.must_pass_in_stack (mode, type))
5313 return NULL_RTX;
5314 else if (type && TYPE_MODE (type) == BLKmode)
5316 rtx reg1, reg2;
5318 reg1 = gen_rtx_REG (DImode, num_args + 16);
5319 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5321 /* The argument fits in two registers. Note that we still need to
5322 reserve a register for empty structures. */
5323 if (size == 0)
5324 return NULL_RTX;
5325 else if (size == 1)
5326 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5327 else
5329 reg2 = gen_rtx_REG (DImode, num_args + 17);
5330 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5331 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5335 #elif TARGET_ABI_OSF
5337 if (cum >= 6)
5338 return NULL_RTX;
5339 num_args = cum;
5341 /* VOID is passed as a special flag for "last argument". */
5342 if (type == void_type_node)
5343 basereg = 16;
5344 else if (targetm.calls.must_pass_in_stack (mode, type))
5345 return NULL_RTX;
5347 #else
5348 #error Unhandled ABI
5349 #endif
5351 return gen_rtx_REG (mode, num_args + basereg);
5354 static int
5355 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5356 enum machine_mode mode ATTRIBUTE_UNUSED,
5357 tree type ATTRIBUTE_UNUSED,
5358 bool named ATTRIBUTE_UNUSED)
5360 int words = 0;
5362 #if TARGET_ABI_OPEN_VMS
5363 if (cum->num_args < 6
5364 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5365 words = 6 - cum->num_args;
5366 #elif TARGET_ABI_UNICOSMK
5367 /* Never any split arguments. */
5368 #elif TARGET_ABI_OSF
5369 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5370 words = 6 - *cum;
5371 #else
5372 #error Unhandled ABI
5373 #endif
5375 return words * UNITS_PER_WORD;
5379 /* Return true if TYPE must be returned in memory, instead of in registers. */
5381 static bool
5382 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5384 enum machine_mode mode = VOIDmode;
5385 int size;
5387 if (type)
5389 mode = TYPE_MODE (type);
5391 /* All aggregates are returned in memory. */
5392 if (AGGREGATE_TYPE_P (type))
5393 return true;
5396 size = GET_MODE_SIZE (mode);
5397 switch (GET_MODE_CLASS (mode))
5399 case MODE_VECTOR_FLOAT:
5400 /* Pass all float vectors in memory, like an aggregate. */
5401 return true;
5403 case MODE_COMPLEX_FLOAT:
5404 /* We judge complex floats on the size of their element,
5405 not the size of the whole type. */
5406 size = GET_MODE_UNIT_SIZE (mode);
5407 break;
5409 case MODE_INT:
5410 case MODE_FLOAT:
5411 case MODE_COMPLEX_INT:
5412 case MODE_VECTOR_INT:
5413 break;
5415 default:
5416 /* ??? We get called on all sorts of random stuff from
5417 aggregate_value_p. We can't abort, but it's not clear
5418 what's safe to return. Pretend it's a struct I guess. */
5419 return true;
5422 /* Otherwise types must fit in one register. */
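/* UNITS_PER_WORD is 8 on Alpha, so e.g. DImode comes back in a
   register while a 16-byte TImode value is returned in memory;
   complex double still qualifies because only its 8-byte element
   size was kept above. */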
5423 return size > UNITS_PER_WORD;
5426 /* Return true if TYPE should be passed by invisible reference. */
5428 static bool
5429 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5430 enum machine_mode mode,
5431 tree type ATTRIBUTE_UNUSED,
5432 bool named ATTRIBUTE_UNUSED)
5434 return mode == TFmode || mode == TCmode;
5437 /* Define how to find the value returned by a function. VALTYPE is the
5438 data type of the value (as a tree). If the precise function being
5439 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5440 MODE is set instead of VALTYPE for libcalls.
5442 On Alpha the value is found in $0 for integer functions and
5443 $f0 for floating-point functions. */
5446 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5447 enum machine_mode mode)
5449 unsigned int regnum, dummy;
5450 enum mode_class class;
5452 #ifdef ENABLE_CHECKING
5453 if (valtype && alpha_return_in_memory (valtype, func))
5454 abort ();
5455 #endif
5457 if (valtype)
5458 mode = TYPE_MODE (valtype);
5460 class = GET_MODE_CLASS (mode);
5461 switch (class)
5463 case MODE_INT:
5464 PROMOTE_MODE (mode, dummy, valtype);
5465 /* FALLTHRU */
5467 case MODE_COMPLEX_INT:
5468 case MODE_VECTOR_INT:
5469 regnum = 0;
5470 break;
5472 case MODE_FLOAT:
5473 regnum = 32;
5474 break;
5476 case MODE_COMPLEX_FLOAT:
5478 enum machine_mode cmode = GET_MODE_INNER (mode);
5480 return gen_rtx_PARALLEL
5481 (VOIDmode,
5482 gen_rtvec (2,
5483 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5484 const0_rtx),
5485 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5486 GEN_INT (GET_MODE_SIZE (cmode)))));
5489 default:
5490 abort ();
5493 return gen_rtx_REG (mode, regnum);
5496 /* TCmode complex values are passed by invisible reference. We
5497 should not split these values. */
5499 static bool
5500 alpha_split_complex_arg (tree type)
5502 return TYPE_MODE (type) != TCmode;
5505 static tree
5506 alpha_build_builtin_va_list (void)
5508 tree base, ofs, space, record, type_decl;
5510 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5511 return ptr_type_node;
5513 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5514 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5515 TREE_CHAIN (record) = type_decl;
5516 TYPE_NAME (record) = type_decl;
5518 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5520 /* Dummy field to prevent alignment warnings. */
5521 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5522 DECL_FIELD_CONTEXT (space) = record;
5523 DECL_ARTIFICIAL (space) = 1;
5524 DECL_IGNORED_P (space) = 1;
5526 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5527 integer_type_node);
5528 DECL_FIELD_CONTEXT (ofs) = record;
5529 TREE_CHAIN (ofs) = space;
5531 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5532 ptr_type_node);
5533 DECL_FIELD_CONTEXT (base) = record;
5534 TREE_CHAIN (base) = ofs;
5536 TYPE_FIELDS (record) = base;
5537 layout_type (record);
5539 return record;
5542 /* Perform any actions needed for a function that is receiving a
5543 variable number of arguments. */
5545 static void
5546 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum,
5547 enum machine_mode mode ATTRIBUTE_UNUSED,
5548 tree type ATTRIBUTE_UNUSED,
5549 int *pretend_size, int no_rtl)
5551 #if TARGET_ABI_UNICOSMK
5552 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5553 arguments on the stack. Unfortunately, it doesn't always store the first
5554 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5555 with stdargs as we always have at least one named argument there. */
5556 int num_reg_words = pcum->num_reg_words;
5557 if (num_reg_words < 6)
5559 if (!no_rtl)
5561 emit_insn (gen_umk_mismatch_args (GEN_INT (num_reg_words + 1)));
5562 emit_insn (gen_arg_home_umk ());
5564 *pretend_size = 0;
5566 #elif TARGET_ABI_OPEN_VMS
5567 /* For VMS, we allocate space for all 6 arg registers plus a count.
5569 However, if NO registers need to be saved, don't allocate any space.
5570 This is not only because we won't need the space, but because AP
5571 includes the current_pretend_args_size and we don't want to mess up
5572 any ap-relative addresses already made. */
5573 if (pcum->num_args < 6)
5575 if (!no_rtl)
5577 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
5578 emit_insn (gen_arg_home ());
5580 *pretend_size = 7 * UNITS_PER_WORD;
5582 #else
5583 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
5584 only push those that are remaining. However, if NO registers need to
5585 be saved, don't allocate any space. This is not only because we won't
5586 need the space, but because AP includes the current_pretend_args_size
5587 and we don't want to mess up any ap-relative addresses already made.
5589 If we are not to use the floating-point registers, save the integer
5590 registers where we would put the floating-point registers. This is
5591 not the most efficient way to implement varargs with just one register
5592 class, but it isn't worth doing anything more efficient in this rare
5593 case. */
5594 CUMULATIVE_ARGS cum = *pcum;
5596 if (cum >= 6)
5597 return;
5599 if (!no_rtl)
5601 int set = get_varargs_alias_set ();
5602 rtx tmp;
5604 tmp = gen_rtx_MEM (BLKmode,
5605 plus_constant (virtual_incoming_args_rtx,
5606 (cum + 6) * UNITS_PER_WORD));
5607 set_mem_alias_set (tmp, set);
5608 move_block_from_reg (16 + cum, tmp, 6 - cum);
5610 tmp = gen_rtx_MEM (BLKmode,
5611 plus_constant (virtual_incoming_args_rtx,
5612 cum * UNITS_PER_WORD));
5613 set_mem_alias_set (tmp, set);
5614 move_block_from_reg (16 + (TARGET_FPREGS ? 32 : 0) + cum, tmp,
5615 6 - cum);
5617 *pretend_size = 12 * UNITS_PER_WORD;
5618 #endif
5621 void
5622 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
5624 HOST_WIDE_INT offset;
5625 tree t, offset_field, base_field;
5627 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
5628 return;
5630 if (TARGET_ABI_UNICOSMK)
5631 std_expand_builtin_va_start (valist, nextarg);
5633 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
5634 up by 48, storing fp arg registers in the first 48 bytes, and the
5635 integer arg registers in the next 48 bytes. This is only done,
5636 however, if any integer registers need to be stored.
5638 If no integer registers need be stored, then we must subtract 48
5639 in order to account for the integer arg registers which are counted
5640 in argsize above, but which are not actually stored on the stack.
5641 Must further be careful here about structures straddling the last
5642 integer argument register; that futzes with pretend_args_size,
5643 which changes the meaning of AP. */
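/* For instance, an OSF stdarg function with NUM_ARGS == 2 takes the
   first branch below: offset = 6*8 = 48, so __base lands just past
   the saved FP argument registers and __offset starts at 2*8 = 16.
   va_arg then reads integer arguments from __base + __offset and,
   while __offset < 48, floating-point arguments from 48 bytes below
   that; see alpha_gimplify_va_arg_1. */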
5645 if (NUM_ARGS <= 6)
5646 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
5647 else
5648 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
5650 if (TARGET_ABI_OPEN_VMS)
5652 nextarg = plus_constant (nextarg, offset);
5653 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
5654 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
5655 make_tree (ptr_type_node, nextarg));
5656 TREE_SIDE_EFFECTS (t) = 1;
5658 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5660 else
5662 base_field = TYPE_FIELDS (TREE_TYPE (valist));
5663 offset_field = TREE_CHAIN (base_field);
5665 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
5666 valist, base_field, NULL_TREE);
5667 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
5668 valist, offset_field, NULL_TREE);
5670 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
5671 t = build (PLUS_EXPR, ptr_type_node, t,
5672 build_int_cst (NULL_TREE, offset));
5673 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
5674 TREE_SIDE_EFFECTS (t) = 1;
5675 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5677 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
5678 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
5679 TREE_SIDE_EFFECTS (t) = 1;
5680 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5684 static tree
5685 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
5687 tree type_size, ptr_type, addend, t, addr, internal_post;
5689 /* If the type could not be passed in registers, skip the block
5690 reserved for the registers. */
5691 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
5693 t = build_int_cst (TREE_TYPE (offset), 6*8);
5694 t = build (MODIFY_EXPR, TREE_TYPE (offset), offset,
5695 build (MAX_EXPR, TREE_TYPE (offset), offset, t));
5696 gimplify_and_add (t, pre_p);
5699 addend = offset;
5700 ptr_type = build_pointer_type (type);
5702 if (TREE_CODE (type) == COMPLEX_TYPE)
5704 tree real_part, imag_part, real_temp;
5706 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
5707 offset, pre_p);
5709 /* Copy the value into a new temporary, lest the formal temporary
5710 be reused out from under us. */
5711 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
5713 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
5714 offset, pre_p);
5716 return build (COMPLEX_EXPR, type, real_temp, imag_part);
5718 else if (TREE_CODE (type) == REAL_TYPE)
5720 tree fpaddend, cond, fourtyeight;
5722 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
5723 fpaddend = fold (build (MINUS_EXPR, TREE_TYPE (addend),
5724 addend, fourtyeight));
5725 cond = fold (build (LT_EXPR, boolean_type_node, addend, fourtyeight));
5726 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
5727 fpaddend, addend));
5730 /* Build the final address and force that value into a temporary. */
5731 addr = build (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
5732 fold_convert (ptr_type, addend));
5733 internal_post = NULL;
5734 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
5735 append_to_statement_list (internal_post, pre_p);
5737 /* Update the offset field. */
5738 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
5739 if (type_size == NULL || TREE_OVERFLOW (type_size))
5740 t = size_zero_node;
5741 else
5743 t = size_binop (PLUS_EXPR, type_size, size_int (7));
5744 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
5745 t = size_binop (MULT_EXPR, t, size_int (8));
5747 t = fold_convert (TREE_TYPE (offset), t);
5748 t = build (MODIFY_EXPR, void_type_node, offset,
5749 build (PLUS_EXPR, TREE_TYPE (offset), offset, t));
5750 gimplify_and_add (t, pre_p);
5752 return build_fold_indirect_ref (addr);
5755 static tree
5756 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5758 tree offset_field, base_field, offset, base, t, r;
5759 bool indirect;
5761 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5762 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5764 base_field = TYPE_FIELDS (va_list_type_node);
5765 offset_field = TREE_CHAIN (base_field);
5766 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
5767 valist, base_field, NULL_TREE);
5768 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
5769 valist, offset_field, NULL_TREE);
5771 /* Pull the fields of the structure out into temporaries. Since we never
5772 modify the base field, we can use a formal temporary. Sign-extend the
5773 offset field so that it's the proper width for pointer arithmetic. */
5774 base = get_formal_tmp_var (base_field, pre_p);
5776 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
5777 offset = get_initialized_tmp_var (t, pre_p, NULL);
5779 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
5780 if (indirect)
5781 type = build_pointer_type (type);
5783 /* Find the value. Note that this will be a stable indirection, or
5784 a composite of stable indirections in the case of complex. */
5785 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
5787 /* Stuff the offset temporary back into its field. */
5788 t = build (MODIFY_EXPR, void_type_node, offset_field,
5789 fold_convert (TREE_TYPE (offset_field), offset));
5790 gimplify_and_add (t, pre_p);
5792 if (indirect)
5793 r = build_fold_indirect_ref (r);
5795 return r;
5798 /* Builtins. */
5800 enum alpha_builtin
5802 ALPHA_BUILTIN_CMPBGE,
5803 ALPHA_BUILTIN_EXTBL,
5804 ALPHA_BUILTIN_EXTWL,
5805 ALPHA_BUILTIN_EXTLL,
5806 ALPHA_BUILTIN_EXTQL,
5807 ALPHA_BUILTIN_EXTWH,
5808 ALPHA_BUILTIN_EXTLH,
5809 ALPHA_BUILTIN_EXTQH,
5810 ALPHA_BUILTIN_INSBL,
5811 ALPHA_BUILTIN_INSWL,
5812 ALPHA_BUILTIN_INSLL,
5813 ALPHA_BUILTIN_INSQL,
5814 ALPHA_BUILTIN_INSWH,
5815 ALPHA_BUILTIN_INSLH,
5816 ALPHA_BUILTIN_INSQH,
5817 ALPHA_BUILTIN_MSKBL,
5818 ALPHA_BUILTIN_MSKWL,
5819 ALPHA_BUILTIN_MSKLL,
5820 ALPHA_BUILTIN_MSKQL,
5821 ALPHA_BUILTIN_MSKWH,
5822 ALPHA_BUILTIN_MSKLH,
5823 ALPHA_BUILTIN_MSKQH,
5824 ALPHA_BUILTIN_UMULH,
5825 ALPHA_BUILTIN_ZAP,
5826 ALPHA_BUILTIN_ZAPNOT,
5827 ALPHA_BUILTIN_AMASK,
5828 ALPHA_BUILTIN_IMPLVER,
5829 ALPHA_BUILTIN_RPCC,
5830 ALPHA_BUILTIN_THREAD_POINTER,
5831 ALPHA_BUILTIN_SET_THREAD_POINTER,
5833 /* TARGET_MAX */
5834 ALPHA_BUILTIN_MINUB8,
5835 ALPHA_BUILTIN_MINSB8,
5836 ALPHA_BUILTIN_MINUW4,
5837 ALPHA_BUILTIN_MINSW4,
5838 ALPHA_BUILTIN_MAXUB8,
5839 ALPHA_BUILTIN_MAXSB8,
5840 ALPHA_BUILTIN_MAXUW4,
5841 ALPHA_BUILTIN_MAXSW4,
5842 ALPHA_BUILTIN_PERR,
5843 ALPHA_BUILTIN_PKLB,
5844 ALPHA_BUILTIN_PKWB,
5845 ALPHA_BUILTIN_UNPKBL,
5846 ALPHA_BUILTIN_UNPKBW,
5848 /* TARGET_CIX */
5849 ALPHA_BUILTIN_CTTZ,
5850 ALPHA_BUILTIN_CTLZ,
5851 ALPHA_BUILTIN_CTPOP,
5853 ALPHA_BUILTIN_max
5856 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
5857 CODE_FOR_builtin_cmpbge,
5858 CODE_FOR_builtin_extbl,
5859 CODE_FOR_builtin_extwl,
5860 CODE_FOR_builtin_extll,
5861 CODE_FOR_builtin_extql,
5862 CODE_FOR_builtin_extwh,
5863 CODE_FOR_builtin_extlh,
5864 CODE_FOR_builtin_extqh,
5865 CODE_FOR_builtin_insbl,
5866 CODE_FOR_builtin_inswl,
5867 CODE_FOR_builtin_insll,
5868 CODE_FOR_builtin_insql,
5869 CODE_FOR_builtin_inswh,
5870 CODE_FOR_builtin_inslh,
5871 CODE_FOR_builtin_insqh,
5872 CODE_FOR_builtin_mskbl,
5873 CODE_FOR_builtin_mskwl,
5874 CODE_FOR_builtin_mskll,
5875 CODE_FOR_builtin_mskql,
5876 CODE_FOR_builtin_mskwh,
5877 CODE_FOR_builtin_msklh,
5878 CODE_FOR_builtin_mskqh,
5879 CODE_FOR_umuldi3_highpart,
5880 CODE_FOR_builtin_zap,
5881 CODE_FOR_builtin_zapnot,
5882 CODE_FOR_builtin_amask,
5883 CODE_FOR_builtin_implver,
5884 CODE_FOR_builtin_rpcc,
5885 CODE_FOR_load_tp,
5886 CODE_FOR_set_tp,
5888 /* TARGET_MAX */
5889 CODE_FOR_builtin_minub8,
5890 CODE_FOR_builtin_minsb8,
5891 CODE_FOR_builtin_minuw4,
5892 CODE_FOR_builtin_minsw4,
5893 CODE_FOR_builtin_maxub8,
5894 CODE_FOR_builtin_maxsb8,
5895 CODE_FOR_builtin_maxuw4,
5896 CODE_FOR_builtin_maxsw4,
5897 CODE_FOR_builtin_perr,
5898 CODE_FOR_builtin_pklb,
5899 CODE_FOR_builtin_pkwb,
5900 CODE_FOR_builtin_unpkbl,
5901 CODE_FOR_builtin_unpkbw,
5903 /* TARGET_CIX */
5904 CODE_FOR_builtin_cttz,
5905 CODE_FOR_builtin_ctlz,
5906 CODE_FOR_builtin_ctpop
5909 struct alpha_builtin_def
5911 const char *name;
5912 enum alpha_builtin code;
5913 unsigned int target_mask;
5916 static struct alpha_builtin_def const zero_arg_builtins[] = {
5917 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0 },
5918 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0 }
5921 static struct alpha_builtin_def const one_arg_builtins[] = {
5922 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0 },
5923 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX },
5924 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX },
5925 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX },
5926 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX },
5927 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX },
5928 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX },
5929 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX }
5932 static struct alpha_builtin_def const two_arg_builtins[] = {
5933 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0 },
5934 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0 },
5935 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0 },
5936 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0 },
5937 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0 },
5938 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0 },
5939 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0 },
5940 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0 },
5941 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0 },
5942 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0 },
5943 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0 },
5944 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0 },
5945 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0 },
5946 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0 },
5947 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0 },
5948 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0 },
5949 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0 },
5950 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0 },
5951 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0 },
5952 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0 },
5953 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0 },
5954 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0 },
5955 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0 },
5956 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0 },
5957 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0 },
5958 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX },
5959 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX },
5960 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX },
5961 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX },
5962 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX },
5963 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX },
5964 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX },
5965 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX },
5966 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX }
5969 static void
5970 alpha_init_builtins (void)
5972 const struct alpha_builtin_def *p;
5973 tree ftype;
5974 size_t i;
5976 ftype = build_function_type (long_integer_type_node, void_list_node);
5978 p = zero_arg_builtins;
5979 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
5980 if ((target_flags & p->target_mask) == p->target_mask)
5981 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
5982 NULL, NULL_TREE);
5984 ftype = build_function_type_list (long_integer_type_node,
5985 long_integer_type_node, NULL_TREE);
5987 p = one_arg_builtins;
5988 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
5989 if ((target_flags & p->target_mask) == p->target_mask)
5990 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
5991 NULL, NULL_TREE);
5993 ftype = build_function_type_list (long_integer_type_node,
5994 long_integer_type_node,
5995 long_integer_type_node, NULL_TREE);
5997 p = two_arg_builtins;
5998 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
5999 if ((target_flags & p->target_mask) == p->target_mask)
6000 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6001 NULL, NULL_TREE);
6003 ftype = build_function_type (ptr_type_node, void_list_node);
6004 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6005 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6006 NULL, NULL_TREE);
6008 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6009 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6010 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6011 NULL, NULL_TREE);
6014 /* Expand an expression EXP that calls a built-in function,
6015 with result going to TARGET if that's convenient
6016 (and in mode MODE if that's convenient).
6017 SUBTARGET may be used as the target for computing one of EXP's operands.
6018 IGNORE is nonzero if the value is to be ignored. */
6020 static rtx
6021 alpha_expand_builtin (tree exp, rtx target,
6022 rtx subtarget ATTRIBUTE_UNUSED,
6023 enum machine_mode mode ATTRIBUTE_UNUSED,
6024 int ignore ATTRIBUTE_UNUSED)
6026 #define MAX_ARGS 2
6028 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6029 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6030 tree arglist = TREE_OPERAND (exp, 1);
6031 enum insn_code icode;
6032 rtx op[MAX_ARGS], pat;
6033 int arity;
6034 bool nonvoid;
6036 if (fcode >= ALPHA_BUILTIN_max)
6037 internal_error ("bad builtin fcode");
6038 icode = code_for_builtin[fcode];
6039 if (icode == 0)
6040 internal_error ("bad builtin fcode");
6042 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6044 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6045 arglist;
6046 arglist = TREE_CHAIN (arglist), arity++)
6048 const struct insn_operand_data *insn_op;
6050 tree arg = TREE_VALUE (arglist);
6051 if (arg == error_mark_node)
6052 return NULL_RTX;
6053 if (arity > MAX_ARGS)
6054 return NULL_RTX;
6056 insn_op = &insn_data[icode].operand[arity + nonvoid];
6058 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6060 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6061 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6064 if (nonvoid)
6066 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6067 if (!target
6068 || GET_MODE (target) != tmode
6069 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6070 target = gen_reg_rtx (tmode);
6073 switch (arity)
6075 case 0:
6076 pat = GEN_FCN (icode) (target);
6077 break;
6078 case 1:
6079 if (nonvoid)
6080 pat = GEN_FCN (icode) (target, op[0]);
6081 else
6082 pat = GEN_FCN (icode) (op[0]);
6083 break;
6084 case 2:
6085 pat = GEN_FCN (icode) (target, op[0], op[1]);
6086 break;
6087 default:
6088 abort ();
6090 if (!pat)
6091 return NULL_RTX;
6092 emit_insn (pat);
6094 if (nonvoid)
6095 return target;
6096 else
6097 return const0_rtx;
6100 /* This page contains routines that are used to determine what the function
6101 prologue and epilogue code will do and write them out. */
6103 /* Compute the size of the save area in the stack. */
6105 /* These variables are used for communication between the following functions.
6106 They indicate various things about the current function being compiled
6107 that are used to tell what kind of prologue, epilogue and procedure
6108 descriptor to generate. */
6110 /* Nonzero if we need a stack procedure. */
6111 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
6112 static enum alpha_procedure_types alpha_procedure_type;
6114 /* Register number (either FP or SP) that is used to unwind the frame. */
6115 static int vms_unwind_regno;
6117 /* Register number used to save FP. We need not have one for RA since
6118 we don't modify it for register procedures. This is only defined
6119 for register frame procedures. */
6120 static int vms_save_fp_regno;
6122 /* Register number used to reference objects off our PV. */
6123 static int vms_base_regno;
6125 /* Compute register masks for saved registers. */
6127 static void
6128 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
6130 unsigned long imask = 0;
6131 unsigned long fmask = 0;
6132 unsigned int i;
6134 /* When outputting a thunk, we don't have valid register life info,
6135 but assemble_start_function wants to output .frame and .mask
6136 directives. */
6137 if (current_function_is_thunk)
6139 *imaskP = 0;
6140 *fmaskP = 0;
6141 return;
6144 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
6145 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
6147 /* One for every register we have to save. */
6148 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6149 if (! fixed_regs[i] && ! call_used_regs[i]
6150 && regs_ever_live[i] && i != REG_RA
6151 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
6153 if (i < 32)
6154 imask |= (1UL << i);
6155 else
6156 fmask |= (1UL << (i - 32));
6159 /* We need to restore these for the handler. */
6160 if (current_function_calls_eh_return)
6162 for (i = 0; ; ++i)
6164 unsigned regno = EH_RETURN_DATA_REGNO (i);
6165 if (regno == INVALID_REGNUM)
6166 break;
6167 imask |= 1UL << regno;
6171 /* If any register spilled, then spill the return address also. */
6172 /* ??? This is required by the Digital stack unwind specification
6173 and isn't needed if we're doing Dwarf2 unwinding. */
6174 if (imask || fmask || alpha_ra_ever_killed ())
6175 imask |= (1UL << REG_RA);
6177 *imaskP = imask;
6178 *fmaskP = fmask;
6182 alpha_sa_size (void)
6184 unsigned long mask[2];
6185 int sa_size = 0;
6186 int i, j;
6188 alpha_sa_mask (&mask[0], &mask[1]);
6190 if (TARGET_ABI_UNICOSMK)
6192 if (mask[0] || mask[1])
6193 sa_size = 14;
6195 else
6197 for (j = 0; j < 2; ++j)
6198 for (i = 0; i < 32; ++i)
6199 if ((mask[j] >> i) & 1)
6200 sa_size++;
6203 if (TARGET_ABI_UNICOSMK)
6205 /* We might not need to generate a frame if we don't make any calls
6206 (including calls to __T3E_MISMATCH if this is a vararg function),
6207 don't have any local variables which require stack slots, don't
6208 use alloca and have not determined that we need a frame for other
6209 reasons. */
6211 alpha_procedure_type
6212 = (sa_size || get_frame_size() != 0
6213 || current_function_outgoing_args_size
6214 || current_function_stdarg || current_function_calls_alloca
6215 || frame_pointer_needed)
6216 ? PT_STACK : PT_REGISTER;
6218 /* Always reserve space for saving callee-saved registers if we
6219 need a frame as required by the calling convention. */
6220 if (alpha_procedure_type == PT_STACK)
6221 sa_size = 14;
6223 else if (TARGET_ABI_OPEN_VMS)
6225 /* Start by assuming we can use a register procedure if we don't
6226 make any calls (REG_RA not used) or need to save any
6227 registers, and a stack procedure if we do. */
6228 if ((mask[0] >> REG_RA) & 1)
6229 alpha_procedure_type = PT_STACK;
6230 else if (get_frame_size() != 0)
6231 alpha_procedure_type = PT_REGISTER;
6232 else
6233 alpha_procedure_type = PT_NULL;
6235 /* Don't reserve space for saving FP & RA yet. Do that later after we've
6236 made the final decision on stack procedure vs register procedure. */
6237 if (alpha_procedure_type == PT_STACK)
6238 sa_size -= 2;
6240 /* Decide whether to refer to objects off our PV via FP or PV.
6241 If we need FP for something else or if we receive a nonlocal
6242 goto (which expects PV to contain the value), we must use PV.
6243 Otherwise, start by assuming we can use FP. */
6245 vms_base_regno
6246 = (frame_pointer_needed
6247 || current_function_has_nonlocal_label
6248 || alpha_procedure_type == PT_STACK
6249 || current_function_outgoing_args_size)
6250 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
6252 /* If we want to copy PV into FP, we need to find some register
6253 in which to save FP. */
6255 vms_save_fp_regno = -1;
6256 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
6257 for (i = 0; i < 32; i++)
6258 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
6259 vms_save_fp_regno = i;
6261 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
6262 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
6263 else if (alpha_procedure_type == PT_NULL)
6264 vms_base_regno = REG_PV;
6266 /* Stack unwinding should be done via FP unless we use it for PV. */
6267 vms_unwind_regno = (vms_base_regno == REG_PV
6268 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
6270 /* If this is a stack procedure, allow space for saving FP and RA. */
6271 if (alpha_procedure_type == PT_STACK)
6272 sa_size += 2;
6274 else
6276 /* Our size must be even (multiple of 16 bytes). */
6277 if (sa_size & 1)
6278 sa_size++;
6281 return sa_size * 8;
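/* A minimal sketch (ours, not from the source) of the rounding rule
   visible above: the register count is kept even so that the byte
   size returned is a multiple of 16, the Alpha stack alignment.
   E.g. 5 saved registers become 6 slots, i.e. 48 bytes.  */

static int
example_round_sa_size (int reg_count)
{
  if (reg_count & 1)		/* keep an even number of 8-byte slots */
    reg_count++;
  return reg_count * 8;		/* always a multiple of 16 */
}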
6284 /* Define the offset between two registers, one to be eliminated,
6285 and the other its replacement, at the start of a routine. */
6287 HOST_WIDE_INT
6288 alpha_initial_elimination_offset (unsigned int from,
6289 unsigned int to ATTRIBUTE_UNUSED)
6291 HOST_WIDE_INT ret;
6293 ret = alpha_sa_size ();
6294 ret += ALPHA_ROUND (current_function_outgoing_args_size);
6296 if (from == FRAME_POINTER_REGNUM)
6298 else if (from == ARG_POINTER_REGNUM)
6299 ret += (ALPHA_ROUND (get_frame_size ()
6300 + current_function_pretend_args_size)
6301 - current_function_pretend_args_size);
6302 else
6303 abort ();
6305 return ret;
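/* A worked example of the computation above, with made-up sizes and
   assuming ALPHA_ROUND rounds up to a 16-byte multiple: sa_size = 48,
   outgoing args = 40 (rounds to 48), frame size = 20, pretend = 16.
   Eliminating FRAME_POINTER_REGNUM gives 48 + 48 = 96; eliminating
   ARG_POINTER_REGNUM gives 96 + ALPHA_ROUND (20 + 16) - 16
   = 96 + 48 - 16 = 128.  */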
6308 int
6309 alpha_pv_save_size (void)
6311 alpha_sa_size ();
6312 return alpha_procedure_type == PT_STACK ? 8 : 0;
6315 int
6316 alpha_using_fp (void)
6318 alpha_sa_size ();
6319 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
6322 #if TARGET_ABI_OPEN_VMS
6324 const struct attribute_spec vms_attribute_table[] =
6326 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
6327 { "overlaid", 0, 0, true, false, false, NULL },
6328 { "global", 0, 0, true, false, false, NULL },
6329 { "initialize", 0, 0, true, false, false, NULL },
6330 { NULL, 0, 0, false, false, false, NULL }
6333 #endif
6335 static int
6336 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
6338 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
6341 int
6342 alpha_find_lo_sum_using_gp (rtx insn)
6344 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
6347 static int
6348 alpha_does_function_need_gp (void)
6350 rtx insn;
6352 /* The GP being variable is an OSF ABI thing. */
6353 if (! TARGET_ABI_OSF)
6354 return 0;
6356 /* We need the gp to load the address of __mcount. */
6357 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
6358 return 1;
6360 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
6361 if (current_function_is_thunk)
6362 return 1;
6364 /* The nonlocal receiver pattern assumes that the gp is valid for
6365 the nested function. Reasonable because it's almost always set
6366 correctly already. For the cases where that's wrong, make sure
6367 the nested function loads its gp on entry. */
6368 if (current_function_has_nonlocal_goto)
6369 return 1;
6371 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
6372 Even if we are a static function, we still need to do this in case
6373 our address is taken and passed to something like qsort. */
6375 push_topmost_sequence ();
6376 insn = get_insns ();
6377 pop_topmost_sequence ();
6379 for (; insn; insn = NEXT_INSN (insn))
6380 if (INSN_P (insn)
6381 && GET_CODE (PATTERN (insn)) != USE
6382 && GET_CODE (PATTERN (insn)) != CLOBBER
6383 && get_attr_usegp (insn))
6384 return 1;
6386 return 0;
6390 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
6391 sequences. */
6393 static rtx
6394 set_frame_related_p (void)
6396 rtx seq = get_insns ();
6397 rtx insn;
6399 end_sequence ();
6401 if (!seq)
6402 return NULL_RTX;
6404 if (INSN_P (seq))
6406 insn = seq;
6407 while (insn != NULL_RTX)
6409 RTX_FRAME_RELATED_P (insn) = 1;
6410 insn = NEXT_INSN (insn);
6412 seq = emit_insn (seq);
6414 else
6416 seq = emit_insn (seq);
6417 RTX_FRAME_RELATED_P (seq) = 1;
6419 return seq;
6422 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
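/* Usage note (ours): FRP wraps a single emission so that everything
   the expression emits is collected into a sequence and marked
   RTX_FRAME_RELATED_P by set_frame_related_p above, e.g.
   FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));  */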
6424 /* Generates a store with the proper unwind info attached. VALUE is
6425 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
6426 contains SP+FRAME_BIAS, and that is the unwind info that should be
6427 generated. If FRAME_REG != VALUE, then VALUE is being stored on
6428 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
6430 static void
6431 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
6432 HOST_WIDE_INT base_ofs, rtx frame_reg)
6434 rtx addr, mem, insn;
6436 addr = plus_constant (base_reg, base_ofs);
6437 mem = gen_rtx_MEM (DImode, addr);
6438 set_mem_alias_set (mem, alpha_sr_alias_set);
6440 insn = emit_move_insn (mem, value);
6441 RTX_FRAME_RELATED_P (insn) = 1;
6443 if (frame_bias || value != frame_reg)
6445 if (frame_bias)
6447 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
6448 mem = gen_rtx_MEM (DImode, addr);
6451 REG_NOTES (insn)
6452 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6453 gen_rtx_SET (VOIDmode, mem, frame_reg),
6454 REG_NOTES (insn));
6458 static void
6459 emit_frame_store (unsigned int regno, rtx base_reg,
6460 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
6462 rtx reg = gen_rtx_REG (DImode, regno);
6463 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
6466 /* Write function prologue. */
6468 /* On VMS we have two kinds of functions:
6470 - stack frame (PROC_STACK)
6471 these are 'normal' functions with local vars and which
6472 call other functions
6473 - register frame (PROC_REGISTER)
6474 keeps all data in registers, needs no stack
6476 We must pass this to the assembler so it can generate the
6477 proper pdsc (procedure descriptor).
6478 This is done with the '.pdesc' command.
6480 On non-VMS targets, we don't really differentiate between the two,
6481 as we can simply allocate stack without saving registers. */
6483 void
6484 alpha_expand_prologue (void)
6486 /* Registers to save. */
6487 unsigned long imask = 0;
6488 unsigned long fmask = 0;
6489 /* Stack space needed for pushing registers clobbered by us. */
6490 HOST_WIDE_INT sa_size;
6491 /* Complete stack size needed. */
6492 HOST_WIDE_INT frame_size;
6493 /* Offset from base reg to register save area. */
6494 HOST_WIDE_INT reg_offset;
6495 rtx sa_reg;
6496 int i;
6498 sa_size = alpha_sa_size ();
6500 frame_size = get_frame_size ();
6501 if (TARGET_ABI_OPEN_VMS)
6502 frame_size = ALPHA_ROUND (sa_size
6503 + (alpha_procedure_type == PT_STACK ? 8 : 0)
6504 + frame_size
6505 + current_function_pretend_args_size);
6506 else if (TARGET_ABI_UNICOSMK)
6507 /* We have to allocate space for the DSIB if we generate a frame. */
6508 frame_size = ALPHA_ROUND (sa_size
6509 + (alpha_procedure_type == PT_STACK ? 48 : 0))
6510 + ALPHA_ROUND (frame_size
6511 + current_function_outgoing_args_size);
6512 else
6513 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
6514 + sa_size
6515 + ALPHA_ROUND (frame_size
6516 + current_function_pretend_args_size));
6518 if (TARGET_ABI_OPEN_VMS)
6519 reg_offset = 8;
6520 else
6521 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
6523 alpha_sa_mask (&imask, &fmask);
6525 /* Emit an insn to reload GP, if needed. */
6526 if (TARGET_ABI_OSF)
6528 alpha_function_needs_gp = alpha_does_function_need_gp ();
6529 if (alpha_function_needs_gp)
6530 emit_insn (gen_prologue_ldgp ());
6533 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
6534 the call to mcount ourselves, rather than having the linker do it
6535 magically in response to -pg. Since _mcount has special linkage,
6536 don't represent the call as a call. */
6537 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
6538 emit_insn (gen_prologue_mcount ());
6540 if (TARGET_ABI_UNICOSMK)
6541 unicosmk_gen_dsib (&imask);
6543 /* Adjust the stack by the frame size. If the frame size is > 4096
6544 bytes, we need to be sure we probe somewhere in the first and last
6545 4096 bytes (we can probably get away without the latter test) and
6546 every 8192 bytes in between. If the frame size is > 32768, we
6547 do this in a loop. Otherwise, we generate the explicit probe
6548 instructions.
6550 Note that we are only allowed to adjust sp once in the prologue. */
6552 if (frame_size <= 32768)
6554 if (frame_size > 4096)
6556 int probed = 4096;
6559 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
6560 ? -probed + 64
6561 : -probed)));
6562 while ((probed += 8192) < frame_size);
6564 /* We only have to do this probe if we aren't saving registers. */
6565 if (sa_size == 0 && frame_size > probed - 4096)
6566 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
6569 if (frame_size != 0)
6570 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
6571 GEN_INT (TARGET_ABI_UNICOSMK
6572 ? -frame_size + 64
6573 : -frame_size))));
6575 else
6577 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
6578 number of 8192 byte blocks to probe. We then probe each block
6579 in the loop and then set SP to the proper location. If the
6580 amount remaining is > 4096, we have to do one more probe if we
6581 are not saving any registers. */
6583 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
6584 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
6585 rtx ptr = gen_rtx_REG (DImode, 22);
6586 rtx count = gen_rtx_REG (DImode, 23);
6587 rtx seq;
6589 emit_move_insn (count, GEN_INT (blocks));
6590 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
6591 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
6593 /* Because of the difficulty in emitting a new basic block this
6594 late in the compilation, generate the loop as a single insn. */
6595 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
6597 if (leftover > 4096 && sa_size == 0)
6599 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
6600 MEM_VOLATILE_P (last) = 1;
6601 emit_move_insn (last, const0_rtx);
6604 if (TARGET_ABI_WINDOWS_NT)
6606 /* For NT stack unwind (done by 'reverse execution'), it's
6607 not OK to take the result of a loop, even though the value
6608 is already in ptr, so we reload it via a single operation
6609 and subtract it from sp.
6611 Yes, that's correct -- we have to reload the whole constant
6612 into a temporary via ldah+lda then subtract from sp. */
6614 HOST_WIDE_INT lo, hi;
6615 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
6616 hi = frame_size - lo;
6618 emit_move_insn (ptr, GEN_INT (hi));
6619 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
6620 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
6621 ptr));
6623 else
6625 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
6626 GEN_INT (-leftover)));
6629 /* This alternative is special, because the DWARF code cannot
6630 possibly intuit through the loop above. So we invent this
6631 note for it to look at instead. */
6632 RTX_FRAME_RELATED_P (seq) = 1;
6633 REG_NOTES (seq)
6634 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6635 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6636 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
6637 GEN_INT (TARGET_ABI_UNICOSMK
6638 ? -frame_size + 64
6639 : -frame_size))),
6640 REG_NOTES (seq));
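/* Worked example of the probe placement above (ours; the UNICOSMK
   +64 bias is ignored).  A 20000-byte frame takes the straight-line
   path: probes at SP-4096 and SP-12288, plus one at SP-20000 when no
   registers are saved.  A 100000-byte frame takes the loop, with
   blocks = (100000 + 4096) / 8192 = 12 and
   leftover = 104096 - 12 * 8192 = 5792.  */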
6643 if (!TARGET_ABI_UNICOSMK)
6645 HOST_WIDE_INT sa_bias = 0;
6647 /* Cope with very large offsets to the register save area. */
6648 sa_reg = stack_pointer_rtx;
6649 if (reg_offset + sa_size > 0x8000)
6651 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
6652 rtx sa_bias_rtx;
6654 if (low + sa_size <= 0x8000)
6655 sa_bias = reg_offset - low, reg_offset = low;
6656 else
6657 sa_bias = reg_offset, reg_offset = 0;
6659 sa_reg = gen_rtx_REG (DImode, 24);
6660 sa_bias_rtx = GEN_INT (sa_bias);
6662 if (add_operand (sa_bias_rtx, DImode))
6663 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
6664 else
6666 emit_move_insn (sa_reg, sa_bias_rtx);
6667 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
6671 /* Save registers in stack order, beginning with the VMS PV. */
6672 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
6673 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
6675 /* Save register RA next. */
6676 if (imask & (1UL << REG_RA))
6678 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
6679 imask &= ~(1UL << REG_RA);
6680 reg_offset += 8;
6683 /* Now save any other registers required to be saved. */
6684 for (i = 0; i < 31; i++)
6685 if (imask & (1UL << i))
6687 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
6688 reg_offset += 8;
6691 for (i = 0; i < 31; i++)
6692 if (fmask & (1UL << i))
6694 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
6695 reg_offset += 8;
6698 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
6700 /* The standard frame on the T3E includes space for saving registers.
6701 We just have to use it. We don't have to save the return address and
6702 the old frame pointer here - they are saved in the DSIB. */
6704 reg_offset = -56;
6705 for (i = 9; i < 15; i++)
6706 if (imask & (1UL << i))
6708 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
6709 reg_offset -= 8;
6711 for (i = 2; i < 10; i++)
6712 if (fmask & (1UL << i))
6714 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
6715 reg_offset -= 8;
6719 if (TARGET_ABI_OPEN_VMS)
6721 if (alpha_procedure_type == PT_REGISTER)
6722 /* Register frame procedures save the fp.
6723 ??? Ought to have a dwarf2 save for this. */
6724 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
6725 hard_frame_pointer_rtx);
6727 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
6728 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
6729 gen_rtx_REG (DImode, REG_PV)));
6731 if (alpha_procedure_type != PT_NULL
6732 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
6733 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
6735 /* If we have to allocate space for outgoing args, do it now. */
6736 if (current_function_outgoing_args_size != 0)
6738 rtx seq
6739 = emit_move_insn (stack_pointer_rtx,
6740 plus_constant
6741 (hard_frame_pointer_rtx,
6742 - (ALPHA_ROUND
6743 (current_function_outgoing_args_size))));
6745 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
6746 if ! frame_pointer_needed. Setting the bit will change the CFA
6747 computation rule to use sp again, which would be wrong if we had
6748 frame_pointer_needed, as this means sp might move unpredictably
6749 later on.
6751 Also, note that
6752 frame_pointer_needed
6753 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
6755 current_function_outgoing_args_size != 0
6756 => alpha_procedure_type != PT_NULL,
6758 so when we are not setting the bit here, we are guaranteed to
6759 have emitted an FRP frame pointer update just before. */
6760 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
6763 else if (!TARGET_ABI_UNICOSMK)
6765 /* If we need a frame pointer, set it from the stack pointer. */
6766 if (frame_pointer_needed)
6768 if (TARGET_CAN_FAULT_IN_PROLOGUE)
6769 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
6770 else
6771 /* This must always be the last instruction in the
6772 prologue, thus we emit a special move + clobber. */
6773 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
6774 stack_pointer_rtx, sa_reg)));
6778 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
6779 the prologue, for exception handling reasons, we cannot do this for
6780 any insn that might fault. We could prevent this for mems with a
6781 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
6782 have to prevent all such scheduling with a blockage.
6784 Linux, on the other hand, never bothered to implement OSF/1's
6785 exception handling, and so doesn't care about such things. Anyone
6786 planning to use dwarf2 frame-unwind info can also omit the blockage. */
6788 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
6789 emit_insn (gen_blockage ());
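/* Illustrative sketch (ours) of the LO/HI decomposition used above
   for the NT path and the save-area bias: LO is the sign-extended
   low 16 bits, so VALUE == HI + LO with LO in [-32768, 32767] and HI
   a multiple of 0x10000 -- exactly what an ldah+lda pair can
   materialize.  E.g. 0x12345678 splits into HI = 0x12340000,
   LO = 0x5678, while 0x18000 splits into HI = 0x20000, LO = -0x8000.  */

static void
example_lo_hi_split (long value, long *lo, long *hi)
{
  *lo = ((value & 0xffff) ^ 0x8000) - 0x8000;
  *hi = value - *lo;
}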
6792 /* Count the number of .file directives, so that .loc is up to date. */
6793 int num_source_filenames = 0;
6795 /* Output the textual info surrounding the prologue. */
6797 void
6798 alpha_start_function (FILE *file, const char *fnname,
6799 tree decl ATTRIBUTE_UNUSED)
6801 unsigned long imask = 0;
6802 unsigned long fmask = 0;
6803 /* Stack space needed for pushing registers clobbered by us. */
6804 HOST_WIDE_INT sa_size;
6805 /* Complete stack size needed. */
6806 unsigned HOST_WIDE_INT frame_size;
6807 /* Offset from base reg to register save area. */
6808 HOST_WIDE_INT reg_offset;
6809 char *entry_label = (char *) alloca (strlen (fnname) + 6);
6810 int i;
6812 /* Don't emit an extern directive for functions defined in the same file. */
6813 if (TARGET_ABI_UNICOSMK)
6815 tree name_tree;
6816 name_tree = get_identifier (fnname);
6817 TREE_ASM_WRITTEN (name_tree) = 1;
6820 alpha_fnname = fnname;
6821 sa_size = alpha_sa_size ();
6823 frame_size = get_frame_size ();
6824 if (TARGET_ABI_OPEN_VMS)
6825 frame_size = ALPHA_ROUND (sa_size
6826 + (alpha_procedure_type == PT_STACK ? 8 : 0)
6827 + frame_size
6828 + current_function_pretend_args_size);
6829 else if (TARGET_ABI_UNICOSMK)
6830 frame_size = ALPHA_ROUND (sa_size
6831 + (alpha_procedure_type == PT_STACK ? 48 : 0))
6832 + ALPHA_ROUND (frame_size
6833 + current_function_outgoing_args_size);
6834 else
6835 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
6836 + sa_size
6837 + ALPHA_ROUND (frame_size
6838 + current_function_pretend_args_size));
6840 if (TARGET_ABI_OPEN_VMS)
6841 reg_offset = 8;
6842 else
6843 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
6845 alpha_sa_mask (&imask, &fmask);
6847 /* Ecoff can handle multiple .file directives, so put out file and lineno.
6848 We have to do that before the .ent directive as we cannot switch
6849 files within procedures with native ecoff because line numbers are
6850 linked to procedure descriptors.
6851 Outputting the lineno helps debugging of one line functions as they
6852 would otherwise get no line number at all. Please note that we would
6853 like to put out last_linenum from final.c, but it is not accessible. */
6855 if (write_symbols == SDB_DEBUG)
6857 #ifdef ASM_OUTPUT_SOURCE_FILENAME
6858 ASM_OUTPUT_SOURCE_FILENAME (file,
6859 DECL_SOURCE_FILE (current_function_decl));
6860 #endif
6861 #ifdef SDB_OUTPUT_SOURCE_LINE
6862 if (debug_info_level != DINFO_LEVEL_TERSE)
6863 SDB_OUTPUT_SOURCE_LINE (file,
6864 DECL_SOURCE_LINE (current_function_decl));
6865 #endif
6868 /* Issue function start and label. */
6869 if (TARGET_ABI_OPEN_VMS
6870 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
6872 fputs ("\t.ent ", file);
6873 assemble_name (file, fnname);
6874 putc ('\n', file);
6876 /* If the function needs GP, we'll write the "..ng" label there.
6877 Otherwise, do it here. */
6878 if (TARGET_ABI_OSF
6879 && ! alpha_function_needs_gp
6880 && ! current_function_is_thunk)
6882 putc ('$', file);
6883 assemble_name (file, fnname);
6884 fputs ("..ng:\n", file);
6888 strcpy (entry_label, fnname);
6889 if (TARGET_ABI_OPEN_VMS)
6890 strcat (entry_label, "..en");
6892 /* For public functions, the label must be globalized by appending an
6893 additional colon. */
6894 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
6895 strcat (entry_label, ":");
6897 ASM_OUTPUT_LABEL (file, entry_label);
6898 inside_function = TRUE;
6900 if (TARGET_ABI_OPEN_VMS)
6901 fprintf (file, "\t.base $%d\n", vms_base_regno);
6903 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
6904 && !flag_inhibit_size_directive)
6906 /* Set flags in procedure descriptor to request IEEE-conformant
6907 math-library routines. The value we set it to is PDSC_EXC_IEEE
6908 (/usr/include/pdsc.h). */
6909 fputs ("\t.eflag 48\n", file);
6912 /* Set up offsets to alpha virtual arg/local debugging pointer. */
6913 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
6914 alpha_arg_offset = -frame_size + 48;
6916 /* Describe our frame. If the frame size is larger than an integer,
6917 print it as zero to avoid an assembler error. We won't be
6918 properly describing such a frame, but that's the best we can do. */
6919 if (TARGET_ABI_UNICOSMK)
6921 else if (TARGET_ABI_OPEN_VMS)
6922 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
6923 HOST_WIDE_INT_PRINT_DEC "\n",
6924 vms_unwind_regno,
6925 frame_size >= (1UL << 31) ? 0 : frame_size,
6926 reg_offset);
6927 else if (!flag_inhibit_size_directive)
6928 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
6929 (frame_pointer_needed
6930 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
6931 frame_size >= (1UL << 31) ? 0 : frame_size,
6932 current_function_pretend_args_size);
6934 /* Describe which registers were spilled. */
6935 if (TARGET_ABI_UNICOSMK)
6937 else if (TARGET_ABI_OPEN_VMS)
6939 if (imask)
6940 /* ??? Does VMS care if mask contains ra? The old code didn't
6941 set it, so I don't set it here. */
6942 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
6943 if (fmask)
6944 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
6945 if (alpha_procedure_type == PT_REGISTER)
6946 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
6948 else if (!flag_inhibit_size_directive)
6950 if (imask)
6952 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
6953 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
6955 for (i = 0; i < 32; ++i)
6956 if (imask & (1UL << i))
6957 reg_offset += 8;
6960 if (fmask)
6961 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
6962 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
6965 #if TARGET_ABI_OPEN_VMS
6966 /* Ifdef'ed because link sections are only available then. */
6967 readonly_data_section ();
6968 fprintf (file, "\t.align 3\n");
6969 assemble_name (file, fnname); fputs ("..na:\n", file);
6970 fputs ("\t.ascii \"", file);
6971 assemble_name (file, fnname);
6972 fputs ("\\0\"\n", file);
6973 alpha_need_linkage (fnname, 1);
6974 text_section ();
6975 #endif
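/* For orientation (ours, with made-up numbers): for a simple OSF
   function "foo" with a 64-byte frame that saves $26 and $9, the
   routine above would emit roughly

	.ent foo
   foo:
	.frame $30,64,$26,0
	.mask 0x4000200,-64  */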
6978 /* Emit the .prologue note at the scheduled end of the prologue. */
6980 static void
6981 alpha_output_function_end_prologue (FILE *file)
6983 if (TARGET_ABI_UNICOSMK)
6985 else if (TARGET_ABI_OPEN_VMS)
6986 fputs ("\t.prologue\n", file);
6987 else if (TARGET_ABI_WINDOWS_NT)
6988 fputs ("\t.prologue 0\n", file);
6989 else if (!flag_inhibit_size_directive)
6990 fprintf (file, "\t.prologue %d\n",
6991 alpha_function_needs_gp || current_function_is_thunk);
6994 /* Write function epilogue. */
6996 /* ??? At some point we will want to support full unwind, and so will
6997 need to mark the epilogue as well. At the moment, we just confuse
6998 dwarf2out. */
6999 #undef FRP
7000 #define FRP(exp) exp
7002 void
7003 alpha_expand_epilogue (void)
7005 /* Registers to save. */
7006 unsigned long imask = 0;
7007 unsigned long fmask = 0;
7008 /* Stack space needed for pushing registers clobbered by us. */
7009 HOST_WIDE_INT sa_size;
7010 /* Complete stack size needed. */
7011 HOST_WIDE_INT frame_size;
7012 /* Offset from base reg to register save area. */
7013 HOST_WIDE_INT reg_offset;
7014 int fp_is_frame_pointer, fp_offset;
7015 rtx sa_reg, sa_reg_exp = NULL;
7016 rtx sp_adj1, sp_adj2, mem;
7017 rtx eh_ofs;
7018 int i;
7020 sa_size = alpha_sa_size ();
7022 frame_size = get_frame_size ();
7023 if (TARGET_ABI_OPEN_VMS)
7024 frame_size = ALPHA_ROUND (sa_size
7025 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7026 + frame_size
7027 + current_function_pretend_args_size);
7028 else if (TARGET_ABI_UNICOSMK)
7029 frame_size = ALPHA_ROUND (sa_size
7030 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7031 + ALPHA_ROUND (frame_size
7032 + current_function_outgoing_args_size);
7033 else
7034 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7035 + sa_size
7036 + ALPHA_ROUND (frame_size
7037 + current_function_pretend_args_size));
7039 if (TARGET_ABI_OPEN_VMS)
7041 if (alpha_procedure_type == PT_STACK)
7042 reg_offset = 8;
7043 else
7044 reg_offset = 0;
7046 else
7047 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7049 alpha_sa_mask (&imask, &fmask);
7051 fp_is_frame_pointer
7052 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7053 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
7054 fp_offset = 0;
7055 sa_reg = stack_pointer_rtx;
7057 if (current_function_calls_eh_return)
7058 eh_ofs = EH_RETURN_STACKADJ_RTX;
7059 else
7060 eh_ofs = NULL_RTX;
7062 if (!TARGET_ABI_UNICOSMK && sa_size)
7064 /* If we have a frame pointer, restore SP from it. */
7065 if ((TARGET_ABI_OPEN_VMS
7066 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7067 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
7068 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
7070 /* Cope with very large offsets to the register save area. */
7071 if (reg_offset + sa_size > 0x8000)
7073 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7074 HOST_WIDE_INT bias;
7076 if (low + sa_size <= 0x8000)
7077 bias = reg_offset - low, reg_offset = low;
7078 else
7079 bias = reg_offset, reg_offset = 0;
7081 sa_reg = gen_rtx_REG (DImode, 22);
7082 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
7084 FRP (emit_move_insn (sa_reg, sa_reg_exp));
7087 /* Restore registers in order, excepting a true frame pointer. */
7089 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7090 if (! eh_ofs)
7091 set_mem_alias_set (mem, alpha_sr_alias_set);
7092 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7094 reg_offset += 8;
7095 imask &= ~(1UL << REG_RA);
7097 for (i = 0; i < 31; ++i)
7098 if (imask & (1UL << i))
7100 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
7101 fp_offset = reg_offset;
7102 else
7104 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
7105 set_mem_alias_set (mem, alpha_sr_alias_set);
7106 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7108 reg_offset += 8;
7111 for (i = 0; i < 31; ++i)
7112 if (fmask & (1UL << i))
7114 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
7115 set_mem_alias_set (mem, alpha_sr_alias_set);
7116 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7117 reg_offset += 8;
7120 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7122 /* Restore callee-saved general-purpose registers. */
7124 reg_offset = -56;
7126 for (i = 9; i < 15; i++)
7127 if (imask & (1UL << i))
7129 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
7130 reg_offset));
7131 set_mem_alias_set (mem, alpha_sr_alias_set);
7132 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7133 reg_offset -= 8;
7136 for (i = 2; i < 10; i++)
7137 if (fmask & (1UL << i))
7139 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
7140 reg_offset));
7141 set_mem_alias_set (mem, alpha_sr_alias_set);
7142 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7143 reg_offset -= 8;
7146 /* Restore the return address from the DSIB. */
7148 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
7149 set_mem_alias_set (mem, alpha_sr_alias_set);
7150 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7153 if (frame_size || eh_ofs)
7155 sp_adj1 = stack_pointer_rtx;
7157 if (eh_ofs)
7159 sp_adj1 = gen_rtx_REG (DImode, 23);
7160 emit_move_insn (sp_adj1,
7161 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
7164 /* If the stack size is large, begin computation into a temporary
7165 register so as not to interfere with a potential fp restore,
7166 which must be consecutive with an SP restore. */
7167 if (frame_size < 32768
7168 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
7169 sp_adj2 = GEN_INT (frame_size);
7170 else if (TARGET_ABI_UNICOSMK)
7172 sp_adj1 = gen_rtx_REG (DImode, 23);
7173 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
7174 sp_adj2 = const0_rtx;
7176 else if (frame_size < 0x40007fffL)
7178 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7180 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
7181 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
7182 sp_adj1 = sa_reg;
7183 else
7185 sp_adj1 = gen_rtx_REG (DImode, 23);
7186 FRP (emit_move_insn (sp_adj1, sp_adj2));
7188 sp_adj2 = GEN_INT (low);
7190 else
7192 rtx tmp = gen_rtx_REG (DImode, 23);
7193 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
7194 3, false));
7195 if (!sp_adj2)
7197 /* We can't drop new things to memory this late, as far as
7198 we know, so build the value up in pieces. */
7199 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
7200 -(frame_size < 0)));
7201 if (!sp_adj2)
7202 abort ();
7206 /* From now on, things must be in order. So emit blockages. */
7208 /* Restore the frame pointer. */
7209 if (TARGET_ABI_UNICOSMK)
7211 emit_insn (gen_blockage ());
7212 mem = gen_rtx_MEM (DImode,
7213 plus_constant (hard_frame_pointer_rtx, -16));
7214 set_mem_alias_set (mem, alpha_sr_alias_set);
7215 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7217 else if (fp_is_frame_pointer)
7219 emit_insn (gen_blockage ());
7220 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
7221 set_mem_alias_set (mem, alpha_sr_alias_set);
7222 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7224 else if (TARGET_ABI_OPEN_VMS)
7226 emit_insn (gen_blockage ());
7227 FRP (emit_move_insn (hard_frame_pointer_rtx,
7228 gen_rtx_REG (DImode, vms_save_fp_regno)));
7231 /* Restore the stack pointer. */
7232 emit_insn (gen_blockage ());
7233 if (sp_adj2 == const0_rtx)
7234 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
7235 else
7236 FRP (emit_move_insn (stack_pointer_rtx,
7237 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
7239 else
7241 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
7243 emit_insn (gen_blockage ());
7244 FRP (emit_move_insn (hard_frame_pointer_rtx,
7245 gen_rtx_REG (DImode, vms_save_fp_regno)));
7247 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
7249 /* Decrement the frame pointer if the function does not have a
7250 frame. */
7252 emit_insn (gen_blockage ());
7253 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
7254 hard_frame_pointer_rtx, constm1_rtx)));
7259 /* Output the rest of the textual info surrounding the epilogue. */
7261 void
7262 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
7264 #if TARGET_ABI_OPEN_VMS
7265 alpha_write_linkage (file, fnname, decl);
7266 #endif
7268 /* End the function. */
7269 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
7271 fputs ("\t.end ", file);
7272 assemble_name (file, fnname);
7273 putc ('\n', file);
7275 inside_function = FALSE;
7277 /* Output jump tables and the static subroutine information block. */
7278 if (TARGET_ABI_UNICOSMK)
7280 unicosmk_output_ssib (file, fnname);
7281 unicosmk_output_deferred_case_vectors (file);
7285 #if TARGET_ABI_OSF
7286 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
7288 In order to avoid the hordes of differences between generated code
7289 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
7290 lots of code loading up large constants, generate rtl and emit it
7291 instead of going straight to text.
7293 Not sure why this idea hasn't been explored before... */
7295 static void
7296 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7297 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7298 tree function)
7300 HOST_WIDE_INT hi, lo;
7301 rtx this, insn, funexp;
7303 reset_block_changes ();
7305 /* We always require a valid GP. */
7306 emit_insn (gen_prologue_ldgp ());
7307 emit_note (NOTE_INSN_PROLOGUE_END);
7309 /* Find the "this" pointer. If the function returns a structure,
7310 the structure return pointer is in $16. */
7311 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7312 this = gen_rtx_REG (Pmode, 17);
7313 else
7314 this = gen_rtx_REG (Pmode, 16);
7316 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
7317 entire constant for the add. */
7318 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
7319 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7320 if (hi + lo == delta)
7322 if (hi)
7323 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
7324 if (lo)
7325 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
7327 else
7329 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
7330 delta, -(delta < 0));
7331 emit_insn (gen_adddi3 (this, this, tmp));
7334 /* Add a delta stored in the vtable at VCALL_OFFSET. */
7335 if (vcall_offset)
7337 rtx tmp, tmp2;
7339 tmp = gen_rtx_REG (Pmode, 0);
7340 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
7342 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
7343 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7344 if (hi + lo == vcall_offset)
7346 if (hi)
7347 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
7349 else
7351 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
7352 vcall_offset, -(vcall_offset < 0));
7353 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
7354 lo = 0;
7356 if (lo)
7357 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
7358 else
7359 tmp2 = tmp;
7360 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
7362 emit_insn (gen_adddi3 (this, this, tmp));
7365 /* Generate a tail call to the target function. */
7366 if (! TREE_USED (function))
7368 assemble_external (function);
7369 TREE_USED (function) = 1;
7371 funexp = XEXP (DECL_RTL (function), 0);
7372 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
7373 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
7374 SIBLING_CALL_P (insn) = 1;
7376 /* Run just enough of rest_of_compilation to get the insns emitted.
7377 There's not really enough bulk here to make other passes such as
7378 instruction scheduling worthwhile. Note that use_thunk calls
7379 assemble_start_function and assemble_end_function. */
7380 insn = get_insns ();
7381 insn_locators_initialize ();
7382 shorten_branches (insn);
7383 final_start_function (insn, file, 1);
7384 final (insn, file, 1, 0);
7385 final_end_function ();
7387 #endif /* TARGET_ABI_OSF */
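/* A C sketch (ours, hypothetical) of the adjustment the emitted thunk
   performs: add DELTA to the incoming this pointer and, when
   VCALL_OFFSET is nonzero, also add the value stored in the vtable at
   offset VCALL_OFFSET, before tail calling FUNCTION.  */

static char *
example_thunk_adjust (char *this_ptr, long delta, long vcall_offset)
{
  this_ptr += delta;
  if (vcall_offset)
    {
      char *vtable = *(char **) this_ptr;
      this_ptr += *(long *) (vtable + vcall_offset);
    }
  return this_ptr;
}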
7389 /* Debugging support. */
7391 #include "gstab.h"
7393 /* Count the number of sdb-related labels that are generated (to
7394 find block start and end boundaries). */
7396 int sdb_label_count = 0;
7398 /* Name of the file containing the current function. */
7400 static const char *current_function_file = "";
7402 /* Offsets to alpha virtual arg/local debugging pointers. */
7404 long alpha_arg_offset;
7405 long alpha_auto_offset;
7407 /* Emit a new filename to a stream. */
7409 void
7410 alpha_output_filename (FILE *stream, const char *name)
7412 static int first_time = TRUE;
7414 if (first_time)
7416 first_time = FALSE;
7417 ++num_source_filenames;
7418 current_function_file = name;
7419 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7420 output_quoted_string (stream, name);
7421 fprintf (stream, "\n");
7422 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
7423 fprintf (stream, "\t#@stabs\n");
7426 else if (write_symbols == DBX_DEBUG)
7427 /* dbxout.c will emit an appropriate .stabs directive. */
7428 return;
7430 else if (name != current_function_file
7431 && strcmp (name, current_function_file) != 0)
7433 if (inside_function && ! TARGET_GAS)
7434 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
7435 else
7437 ++num_source_filenames;
7438 current_function_file = name;
7439 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7442 output_quoted_string (stream, name);
7443 fprintf (stream, "\n");
7447 /* Structure to show the current status of registers and memory. */
7449 struct shadow_summary
7451 struct {
7452 unsigned int i : 31; /* Mask of int regs */
7453 unsigned int fp : 31; /* Mask of fp regs */
7454 unsigned int mem : 1; /* mem == imem | fpmem */
7455 } used, defd;
7458 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
7459 to the summary structure. SET is nonzero if the insn is setting the
7460 object, otherwise zero. */
7462 static void
7463 summarize_insn (rtx x, struct shadow_summary *sum, int set)
7465 const char *format_ptr;
7466 int i, j;
7468 if (x == 0)
7469 return;
7471 switch (GET_CODE (x))
7473 /* ??? Note that this case would be incorrect if the Alpha had a
7474 ZERO_EXTRACT in SET_DEST. */
7475 case SET:
7476 summarize_insn (SET_SRC (x), sum, 0);
7477 summarize_insn (SET_DEST (x), sum, 1);
7478 break;
7480 case CLOBBER:
7481 summarize_insn (XEXP (x, 0), sum, 1);
7482 break;
7484 case USE:
7485 summarize_insn (XEXP (x, 0), sum, 0);
7486 break;
7488 case ASM_OPERANDS:
7489 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
7490 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
7491 break;
7493 case PARALLEL:
7494 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7495 summarize_insn (XVECEXP (x, 0, i), sum, 0);
7496 break;
7498 case SUBREG:
7499 summarize_insn (SUBREG_REG (x), sum, 0);
7500 break;
7502 case REG:
7504 int regno = REGNO (x);
7505 unsigned long mask = ((unsigned long) 1) << (regno % 32);
7507 if (regno == 31 || regno == 63)
7508 break;
7510 if (set)
7512 if (regno < 32)
7513 sum->defd.i |= mask;
7514 else
7515 sum->defd.fp |= mask;
7517 else
7519 if (regno < 32)
7520 sum->used.i |= mask;
7521 else
7522 sum->used.fp |= mask;
7525 break;
7527 case MEM:
7528 if (set)
7529 sum->defd.mem = 1;
7530 else
7531 sum->used.mem = 1;
7533 /* Find the regs used in memory address computation: */
7534 summarize_insn (XEXP (x, 0), sum, 0);
7535 break;
7537 case CONST_INT: case CONST_DOUBLE:
7538 case SYMBOL_REF: case LABEL_REF: case CONST:
7539 case SCRATCH: case ASM_INPUT:
7540 break;
7542 /* Handle common unary and binary ops for efficiency. */
7543 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
7544 case MOD: case UDIV: case UMOD: case AND: case IOR:
7545 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
7546 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
7547 case NE: case EQ: case GE: case GT: case LE:
7548 case LT: case GEU: case GTU: case LEU: case LTU:
7549 summarize_insn (XEXP (x, 0), sum, 0);
7550 summarize_insn (XEXP (x, 1), sum, 0);
7551 break;
7553 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
7554 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
7555 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
7556 case SQRT: case FFS:
7557 summarize_insn (XEXP (x, 0), sum, 0);
7558 break;
7560 default:
7561 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
7562 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7563 switch (format_ptr[i])
7565 case 'e':
7566 summarize_insn (XEXP (x, i), sum, 0);
7567 break;
7569 case 'E':
7570 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7571 summarize_insn (XVECEXP (x, i, j), sum, 0);
7572 break;
7574 case 'i':
7575 break;
7577 default:
7578 abort ();
7583 /* Ensure a sufficient number of `trapb' insns are in the code when
7584 the user requests code with a trap precision of functions or
7585 instructions.
7587 In naive mode, when the user requests a trap-precision of
7588 "instruction", a trapb is needed after every instruction that may
7589 generate a trap. This ensures that the code is resumption-safe,
7590 but it is also slow.
7592 When optimizations are turned on, we delay issuing a trapb as long
7593 as possible. In this context, a trap shadow is the sequence of
7594 instructions that starts with a (potentially) trap generating
7595 instruction and extends to the next trapb or call_pal instruction
7596 (but GCC never generates call_pal by itself). We can delay (and
7597 therefore sometimes omit) a trapb subject to the following
7598 conditions:
7600 (a) On entry to the trap shadow, if any Alpha register or memory
7601 location contains a value that is used as an operand value by some
7602 instruction in the trap shadow (live on entry), then no instruction
7603 in the trap shadow may modify the register or memory location.
7605 (b) Within the trap shadow, the computation of the base register
7606 for a memory load or store instruction may not involve using the
7607 result of an instruction that might generate an UNPREDICTABLE
7608 result.
7610 (c) Within the trap shadow, no register may be used more than once
7611 as a destination register. (This is to make life easier for the
7612 trap-handler.)
7614 (d) The trap shadow may not include any branch instructions. */
7616 static void
7617 alpha_handle_trap_shadows (void)
7619 struct shadow_summary shadow;
7620 int trap_pending, exception_nesting;
7621 rtx i, n;
7623 trap_pending = 0;
7624 exception_nesting = 0;
7625 shadow.used.i = 0;
7626 shadow.used.fp = 0;
7627 shadow.used.mem = 0;
7628 shadow.defd = shadow.used;
7630 for (i = get_insns (); i ; i = NEXT_INSN (i))
7632 if (GET_CODE (i) == NOTE)
7634 switch (NOTE_LINE_NUMBER (i))
7636 case NOTE_INSN_EH_REGION_BEG:
7637 exception_nesting++;
7638 if (trap_pending)
7639 goto close_shadow;
7640 break;
7642 case NOTE_INSN_EH_REGION_END:
7643 exception_nesting--;
7644 if (trap_pending)
7645 goto close_shadow;
7646 break;
7648 case NOTE_INSN_EPILOGUE_BEG:
7649 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
7650 goto close_shadow;
7651 break;
7654 else if (trap_pending)
7656 if (alpha_tp == ALPHA_TP_FUNC)
7658 if (GET_CODE (i) == JUMP_INSN
7659 && GET_CODE (PATTERN (i)) == RETURN)
7660 goto close_shadow;
7662 else if (alpha_tp == ALPHA_TP_INSN)
7664 if (optimize > 0)
7666 struct shadow_summary sum;
7668 sum.used.i = 0;
7669 sum.used.fp = 0;
7670 sum.used.mem = 0;
7671 sum.defd = sum.used;
7673 switch (GET_CODE (i))
7675 case INSN:
7676 /* Annoyingly, get_attr_trap will abort on these. */
7677 if (GET_CODE (PATTERN (i)) == USE
7678 || GET_CODE (PATTERN (i)) == CLOBBER)
7679 break;
7681 summarize_insn (PATTERN (i), &sum, 0);
7683 if ((sum.defd.i & shadow.defd.i)
7684 || (sum.defd.fp & shadow.defd.fp))
7686 /* (c) would be violated */
7687 goto close_shadow;
7690 /* Combine shadow with summary of current insn: */
7691 shadow.used.i |= sum.used.i;
7692 shadow.used.fp |= sum.used.fp;
7693 shadow.used.mem |= sum.used.mem;
7694 shadow.defd.i |= sum.defd.i;
7695 shadow.defd.fp |= sum.defd.fp;
7696 shadow.defd.mem |= sum.defd.mem;
7698 if ((sum.defd.i & shadow.used.i)
7699 || (sum.defd.fp & shadow.used.fp)
7700 || (sum.defd.mem & shadow.used.mem))
7702 /* (a) would be violated (also takes care of (b)) */
7703 if (get_attr_trap (i) == TRAP_YES
7704 && ((sum.defd.i & sum.used.i)
7705 || (sum.defd.fp & sum.used.fp)))
7706 abort ();
7708 goto close_shadow;
7710 break;
7712 case JUMP_INSN:
7713 case CALL_INSN:
7714 case CODE_LABEL:
7715 goto close_shadow;
7717 default:
7718 abort ();
7721 else
7723 close_shadow:
7724 n = emit_insn_before (gen_trapb (), i);
7725 PUT_MODE (n, TImode);
7726 PUT_MODE (i, TImode);
7727 trap_pending = 0;
7728 shadow.used.i = 0;
7729 shadow.used.fp = 0;
7730 shadow.used.mem = 0;
7731 shadow.defd = shadow.used;
7736 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
7737 && GET_CODE (i) == INSN
7738 && GET_CODE (PATTERN (i)) != USE
7739 && GET_CODE (PATTERN (i)) != CLOBBER
7740 && get_attr_trap (i) == TRAP_YES)
7742 if (optimize && !trap_pending)
7743 summarize_insn (PATTERN (i), &shadow, 0);
7744 trap_pending = 1;
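/* Illustrative sketch (ours) of how the USED/DEFD summaries enforce
   conditions (a) and (c) from the comment above: a pending shadow
   must be closed with a trapb when the next insn writes a register
   the shadow already wrote (c), or writes something the shadow has
   used (a).  */

static int
example_must_close_shadow (const struct shadow_summary *shadow,
			   const struct shadow_summary *insn)
{
  if ((insn->defd.i & shadow->defd.i)
      || (insn->defd.fp & shadow->defd.fp))
    return 1;			/* condition (c) would be violated */
  if ((insn->defd.i & shadow->used.i)
      || (insn->defd.fp & shadow->used.fp)
      || (insn->defd.mem & shadow->used.mem))
    return 1;			/* condition (a) would be violated */
  return 0;
}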
7749 /* Alpha can only issue instruction groups simultaneously if they are
7750 suitably aligned. This is very processor-specific. */
7752 enum alphaev4_pipe {
7753 EV4_STOP = 0,
7754 EV4_IB0 = 1,
7755 EV4_IB1 = 2,
7756 EV4_IBX = 4
7759 enum alphaev5_pipe {
7760 EV5_STOP = 0,
7761 EV5_NONE = 1,
7762 EV5_E01 = 2,
7763 EV5_E0 = 4,
7764 EV5_E1 = 8,
7765 EV5_FAM = 16,
7766 EV5_FA = 32,
7767 EV5_FM = 64
7770 static enum alphaev4_pipe
7771 alphaev4_insn_pipe (rtx insn)
7773 if (recog_memoized (insn) < 0)
7774 return EV4_STOP;
7775 if (get_attr_length (insn) != 4)
7776 return EV4_STOP;
7778 switch (get_attr_type (insn))
7780 case TYPE_ILD:
7781 case TYPE_FLD:
7782 return EV4_IBX;
7784 case TYPE_LDSYM:
7785 case TYPE_IADD:
7786 case TYPE_ILOG:
7787 case TYPE_ICMOV:
7788 case TYPE_ICMP:
7789 case TYPE_IST:
7790 case TYPE_FST:
7791 case TYPE_SHIFT:
7792 case TYPE_IMUL:
7793 case TYPE_FBR:
7794 return EV4_IB0;
7796 case TYPE_MISC:
7797 case TYPE_IBR:
7798 case TYPE_JSR:
7799 case TYPE_CALLPAL:
7800 case TYPE_FCPYS:
7801 case TYPE_FCMOV:
7802 case TYPE_FADD:
7803 case TYPE_FDIV:
7804 case TYPE_FMUL:
7805 return EV4_IB1;
7807 default:
7808 abort ();
7812 static enum alphaev5_pipe
7813 alphaev5_insn_pipe (rtx insn)
7815 if (recog_memoized (insn) < 0)
7816 return EV5_STOP;
7817 if (get_attr_length (insn) != 4)
7818 return EV5_STOP;
7820 switch (get_attr_type (insn))
7822 case TYPE_ILD:
7823 case TYPE_FLD:
7824 case TYPE_LDSYM:
7825 case TYPE_IADD:
7826 case TYPE_ILOG:
7827 case TYPE_ICMOV:
7828 case TYPE_ICMP:
7829 return EV5_E01;
7831 case TYPE_IST:
7832 case TYPE_FST:
7833 case TYPE_SHIFT:
7834 case TYPE_IMUL:
7835 case TYPE_MISC:
7836 case TYPE_MVI:
7837 return EV5_E0;
7839 case TYPE_IBR:
7840 case TYPE_JSR:
7841 case TYPE_CALLPAL:
7842 return EV5_E1;
7844 case TYPE_FCPYS:
7845 return EV5_FAM;
7847 case TYPE_FBR:
7848 case TYPE_FCMOV:
7849 case TYPE_FADD:
7850 case TYPE_FDIV:
7851 return EV5_FA;
7853 case TYPE_FMUL:
7854 return EV5_FM;
7856 default:
7857 abort();
7861 /* IN_USE is a mask of the slots currently filled within the insn group.
7862 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
7863 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
7865 LEN is, of course, the length of the group in bytes. */
7867 static rtx
7868 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
7870 int len, in_use;
7872 len = in_use = 0;
7874 if (! INSN_P (insn)
7875 || GET_CODE (PATTERN (insn)) == CLOBBER
7876 || GET_CODE (PATTERN (insn)) == USE)
7877 goto next_and_done;
7879 while (1)
7881 enum alphaev4_pipe pipe;
7883 pipe = alphaev4_insn_pipe (insn);
7884 switch (pipe)
7886 case EV4_STOP:
7887 /* Force complex instructions to start new groups. */
7888 if (in_use)
7889 goto done;
7891 /* If this is a completely unrecognized insn, it's an asm.
7892 We don't know how long it is, so record length as -1 to
7893 signal a needed realignment. */
7894 if (recog_memoized (insn) < 0)
7895 len = -1;
7896 else
7897 len = get_attr_length (insn);
7898 goto next_and_done;
7900 case EV4_IBX:
7901 if (in_use & EV4_IB0)
7903 if (in_use & EV4_IB1)
7904 goto done;
7905 in_use |= EV4_IB1;
7907 else
7908 in_use |= EV4_IB0 | EV4_IBX;
7909 break;
7911 case EV4_IB0:
7912 if (in_use & EV4_IB0)
7914 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
7915 goto done;
7916 in_use |= EV4_IB1;
7918 in_use |= EV4_IB0;
7919 break;
7921 case EV4_IB1:
7922 if (in_use & EV4_IB1)
7923 goto done;
7924 in_use |= EV4_IB1;
7925 break;
7927 default:
7928 abort();
7930 len += 4;
7932 /* Haifa doesn't do well scheduling branches. */
7933 if (GET_CODE (insn) == JUMP_INSN)
7934 goto next_and_done;
7936 next:
7937 insn = next_nonnote_insn (insn);
7939 if (!insn || ! INSN_P (insn))
7940 goto done;
7942 /* Let Haifa tell us where it thinks insn group boundaries are. */
7943 if (GET_MODE (insn) == TImode)
7944 goto done;
7946 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
7947 goto next;
7950 next_and_done:
7951 insn = next_nonnote_insn (insn);
7953 done:
7954 *plen = len;
7955 *pin_use = in_use;
7956 return insn;
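/* Worked trace of the EV4 slotting above (ours): a group starts with
   in_use == 0.  An FLD (EV4_IBX) takes IB0 and sets IBX; a following
   IADD (EV4_IB0) finds IB0 busy but IBX set, so the load is assumed
   swapped into IB1 and the IADD takes IB0; a third IB0 insn then
   closes the group.  */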
7959 /* IN_USE is a mask of the slots currently filled within the insn group.
7960 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
7961 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
7963 LEN is, of course, the length of the group in bytes. */
7965 static rtx
7966 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
7968 int len, in_use;
7970 len = in_use = 0;
7972 if (! INSN_P (insn)
7973 || GET_CODE (PATTERN (insn)) == CLOBBER
7974 || GET_CODE (PATTERN (insn)) == USE)
7975 goto next_and_done;
7977 while (1)
7979 enum alphaev5_pipe pipe;
7981 pipe = alphaev5_insn_pipe (insn);
7982 switch (pipe)
7984 case EV5_STOP:
7985 /* Force complex instructions to start new groups. */
7986 if (in_use)
7987 goto done;
7989 /* If this is a completely unrecognized insn, it's an asm.
7990 We don't know how long it is, so record length as -1 to
7991 signal a needed realignment. */
7992 if (recog_memoized (insn) < 0)
7993 len = -1;
7994 else
7995 len = get_attr_length (insn);
7996 goto next_and_done;
7998 /* ??? Most of the places below, we would like to abort, as
7999 it would indicate an error either in Haifa, or in the
8000 scheduling description. Unfortunately, Haifa never
8001 schedules the last instruction of the BB, so we don't
8002 have an accurate TI bit to go off. */
8003 case EV5_E01:
8004 if (in_use & EV5_E0)
8006 if (in_use & EV5_E1)
8007 goto done;
8008 in_use |= EV5_E1;
8010 else
8011 in_use |= EV5_E0 | EV5_E01;
8012 break;
8014 case EV5_E0:
8015 if (in_use & EV5_E0)
8017 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8018 goto done;
8019 in_use |= EV5_E1;
8021 in_use |= EV5_E0;
8022 break;
8024 case EV5_E1:
8025 if (in_use & EV5_E1)
8026 goto done;
8027 in_use |= EV5_E1;
8028 break;
8030 case EV5_FAM:
8031 if (in_use & EV5_FA)
8033 if (in_use & EV5_FM)
8034 goto done;
8035 in_use |= EV5_FM;
8037 else
8038 in_use |= EV5_FA | EV5_FAM;
8039 break;
8041 case EV5_FA:
8042 if (in_use & EV5_FA)
8043 goto done;
8044 in_use |= EV5_FA;
8045 break;
8047 case EV5_FM:
8048 if (in_use & EV5_FM)
8049 goto done;
8050 in_use |= EV5_FM;
8051 break;
8053 case EV5_NONE:
8054 break;
8056 default:
8057 abort();
8059 len += 4;
8061 /* Haifa doesn't do well scheduling branches. */
8062 /* ??? If this is predicted not-taken, slotting continues, except
8063 that no more IBR, FBR, or JSR insns may be slotted. */
8064 if (GET_CODE (insn) == JUMP_INSN)
8065 goto next_and_done;
8067 next:
8068 insn = next_nonnote_insn (insn);
8070 if (!insn || ! INSN_P (insn))
8071 goto done;
8073 /* Let Haifa tell us where it thinks insn group boundaries are. */
8074 if (GET_MODE (insn) == TImode)
8075 goto done;
8077 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8078 goto next;
8081 next_and_done:
8082 insn = next_nonnote_insn (insn);
8084 done:
8085 *plen = len;
8086 *pin_use = in_use;
8087 return insn;
8090 static rtx
8091 alphaev4_next_nop (int *pin_use)
8093 int in_use = *pin_use;
8094 rtx nop;
8096 if (!(in_use & EV4_IB0))
8098 in_use |= EV4_IB0;
8099 nop = gen_nop ();
8101 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
8103 in_use |= EV4_IB1;
8104 nop = gen_nop ();
8106 else if (TARGET_FP && !(in_use & EV4_IB1))
8108 in_use |= EV4_IB1;
8109 nop = gen_fnop ();
8111 else
8112 nop = gen_unop ();
8114 *pin_use = in_use;
8115 return nop;
8118 static rtx
8119 alphaev5_next_nop (int *pin_use)
8121 int in_use = *pin_use;
8122 rtx nop;
8124 if (!(in_use & EV5_E1))
8126 in_use |= EV5_E1;
8127 nop = gen_nop ();
8129 else if (TARGET_FP && !(in_use & EV5_FA))
8131 in_use |= EV5_FA;
8132 nop = gen_fnop ();
8134 else if (TARGET_FP && !(in_use & EV5_FM))
8136 in_use |= EV5_FM;
8137 nop = gen_fnop ();
8139 else
8140 nop = gen_unop ();
8142 *pin_use = in_use;
8143 return nop;
8146 /* The instruction group alignment main loop. */
8148 static void
8149 alpha_align_insns (unsigned int max_align,
8150 rtx (*next_group) (rtx, int *, int *),
8151 rtx (*next_nop) (int *))
8153 /* ALIGN is the known alignment for the insn group. */
8154 unsigned int align;
8155 /* OFS is the offset of the current insn in the insn group. */
8156 int ofs;
8157 int prev_in_use, in_use, len;
8158 rtx i, next;
8160 /* Let shorten branches care for assigning alignments to code labels. */
8161 shorten_branches (get_insns ());
8163 if (align_functions < 4)
8164 align = 4;
8165 else if ((unsigned int) align_functions < max_align)
8166 align = align_functions;
8167 else
8168 align = max_align;
8170 ofs = prev_in_use = 0;
8171 i = get_insns ();
8172 if (GET_CODE (i) == NOTE)
8173 i = next_nonnote_insn (i);
8175 while (i)
8177 next = (*next_group) (i, &in_use, &len);
8179 /* When we see a label, resync alignment etc. */
8180 if (GET_CODE (i) == CODE_LABEL)
8182 unsigned int new_align = 1 << label_to_alignment (i);
8184 if (new_align >= align)
8186 align = new_align < max_align ? new_align : max_align;
8187 ofs = 0;
8190 else if (ofs & (new_align-1))
8191 ofs = (ofs | (new_align-1)) + 1;
8192 if (len != 0)
8193 abort();
8196 /* Handle complex instructions specially. */
8197 else if (in_use == 0)
8199 /* Asms will have length < 0. This is a signal that we have
8200 lost alignment knowledge. Assume, however, that the asm
8201 will not mis-align instructions. */
8202 if (len < 0)
8204 ofs = 0;
8205 align = 4;
8206 len = 0;
8210 /* If the known alignment is smaller than the recognized insn group,
8211 realign the output. */
8212 else if ((int) align < len)
8214 unsigned int new_log_align = len > 8 ? 4 : 3;
8215 rtx prev, where;
8217 where = prev = prev_nonnote_insn (i);
8218 if (!where || GET_CODE (where) != CODE_LABEL)
8219 where = i;
8221 /* Can't realign between a call and its gp reload. */
8222 if (! (TARGET_EXPLICIT_RELOCS
8223 && prev && GET_CODE (prev) == CALL_INSN))
8225 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
8226 align = 1 << new_log_align;
8227 ofs = 0;
8231 /* If the group won't fit in the same INT16 as the previous,
8232 we need to add padding to keep the group together. Rather
8233 than simply leaving the insn filling to the assembler, we
8234 can make use of the knowledge of what sorts of instructions
8235 were issued in the previous group to make sure that all of
8236 the added nops are really free. */
8237 else if (ofs + len > (int) align)
8239 int nop_count = (align - ofs) / 4;
8240 rtx where;
8242 /* Insert nops before labels, branches, and calls to truly merge
8243 the execution of the nops with the previous instruction group. */
8244 where = prev_nonnote_insn (i);
8245 if (where)
8247 if (GET_CODE (where) == CODE_LABEL)
8249 rtx where2 = prev_nonnote_insn (where);
8250 if (where2 && GET_CODE (where2) == JUMP_INSN)
8251 where = where2;
8253 else if (GET_CODE (where) == INSN)
8254 where = i;
8256 else
8257 where = i;
8260 emit_insn_before ((*next_nop)(&prev_in_use), where);
8261 while (--nop_count);
8262 ofs = 0;
8265 ofs = (ofs + len) & (align - 1);
8266 prev_in_use = in_use;
8267 i = next;
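/* A minimal sketch (ours) of the padding bookkeeping above: OFS is
   the byte offset of the next insn within an ALIGN-byte fetch block,
   and a LEN-byte group that would straddle the block boundary gets
   (align - ofs) / 4 nops in front of it.  E.g. align = 16, ofs = 12,
   len = 8 needs one nop.  */

static int
example_nop_fill (int align, int ofs, int len)
{
  if (ofs + len > align)
    return (align - ofs) / 4;	/* nops needed to reach the boundary */
  return 0;
}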
8271 /* Machine dependent reorg pass. */
8273 static void
8274 alpha_reorg (void)
8276 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
8277 alpha_handle_trap_shadows ();
8279 /* Due to the number of extra trapb insns, don't bother fixing up
8280 alignment when trap precision is instruction. Moreover, we can
8281 only do our job when sched2 is run. */
8282 if (optimize && !optimize_size
8283 && alpha_tp != ALPHA_TP_INSN
8284 && flag_schedule_insns_after_reload)
8286 if (alpha_cpu == PROCESSOR_EV4)
8287 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
8288 else if (alpha_cpu == PROCESSOR_EV5)
8289 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
8293 #if !TARGET_ABI_UNICOSMK
8295 #ifdef HAVE_STAMP_H
8296 #include <stamp.h>
8297 #endif
8299 static void
8300 alpha_file_start (void)
8301 {
8302 #ifdef OBJECT_FORMAT_ELF
8303 /* If emitting dwarf2 debug information, we cannot generate a .file
8304 directive to start the file, as it will conflict with dwarf2out
8305 file numbers. So it's only useful when emitting mdebug output. */
8306 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
8307 #endif
8309 default_file_start ();
8310 #ifdef MS_STAMP
8311 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
8312 #endif
8314 fputs ("\t.set noreorder\n", asm_out_file);
8315 fputs ("\t.set volatile\n", asm_out_file);
8316 if (!TARGET_ABI_OPEN_VMS)
8317 fputs ("\t.set noat\n", asm_out_file);
8318 if (TARGET_EXPLICIT_RELOCS)
8319 fputs ("\t.set nomacro\n", asm_out_file);
8320 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
8321 fprintf (asm_out_file,
8322 "\t.arch %s\n",
8323 TARGET_CPU_EV6 ? "ev6"
8324 : (TARGET_CPU_EV5
8325 ? (TARGET_MAX ? "pca56" : TARGET_BWX ? "ev56" : "ev5")
8326 : "ev4"));
8327 }
8328 #endif
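/* For example, on an ELF target with -mcpu=ev56 (BWX but not MAX) and
   explicit relocations enabled, the preamble above comes out as

        .set noreorder
        .set volatile
        .set noat
        .set nomacro
        .arch ev56  */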
8330 #ifdef OBJECT_FORMAT_ELF
8332 /* Switch to the section to which we should output X. The only thing
8333 special we do here is to honor small data. */
8335 static void
8336 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
8337 unsigned HOST_WIDE_INT align)
8338 {
8339 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
8340 /* ??? Consider using mergeable sdata sections. */
8341 sdata_section ();
8342 else
8343 default_elf_select_rtx_section (mode, x, align);
8344 }
8346 #endif /* OBJECT_FORMAT_ELF */
8348 /* Structure to collect function names for final output in link section. */
8349 /* Note that items marked with GTY can't be ifdef'ed out. */
8351 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
8352 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
8354 struct alpha_links GTY(())
8355 {
8356 int num;
8357 rtx linkage;
8358 enum links_kind lkind;
8359 enum reloc_kind rkind;
8360 };
8362 struct alpha_funcs GTY(())
8363 {
8364 int num;
8365 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
8366 links;
8367 };
8369 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
8370 splay_tree alpha_links_tree;
8371 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
8372 splay_tree alpha_funcs_tree;
8374 static GTY(()) int alpha_funcs_num;
8376 #if TARGET_ABI_OPEN_VMS
8378 /* Return the VMS argument type corresponding to MODE. */
8380 enum avms_arg_type
8381 alpha_arg_type (enum machine_mode mode)
8382 {
8383 switch (mode)
8384 {
8385 case SFmode:
8386 return TARGET_FLOAT_VAX ? FF : FS;
8387 case DFmode:
8388 return TARGET_FLOAT_VAX ? FD : FT;
8389 default:
8390 return I64;
8391 }
8392 }
8394 /* Return an rtx for an integer representing the VMS Argument Information
8395 register value. */
8397 rtx
8398 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
8399 {
8400 unsigned HOST_WIDE_INT regval = cum.num_args;
8401 int i;
8403 for (i = 0; i < 6; i++)
8404 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
8406 return GEN_INT (regval);
8407 }
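/* For example, a call passing a single-float followed by an integer has
   cum.num_args == 2 and yields

        regval = 2 | (FS << 8) | (I64 << 11)

   (FF rather than FS when TARGET_FLOAT_VAX); the remaining atypes slots
   occupy bits 14, 17, 20 and 23.  */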
8409 /* Make (or fake) .linkage entry for function call.
8411 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
8413 Return a SYMBOL_REF rtx for the linkage. */
8415 rtx
8416 alpha_need_linkage (const char *name, int is_local)
8417 {
8418 splay_tree_node node;
8419 struct alpha_links *al;
8421 if (name[0] == '*')
8422 name++;
8424 if (is_local)
8425 {
8426 struct alpha_funcs *cfaf;
8428 if (!alpha_funcs_tree)
8429 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
8430 splay_tree_compare_pointers);
8432 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
8434 cfaf->links = 0;
8435 cfaf->num = ++alpha_funcs_num;
8437 splay_tree_insert (alpha_funcs_tree,
8438 (splay_tree_key) current_function_decl,
8439 (splay_tree_value) cfaf);
8440 }
8442 if (alpha_links_tree)
8443 {
8444 /* Is this name already defined? */
8446 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
8447 if (node)
8448 {
8449 al = (struct alpha_links *) node->value;
8450 if (is_local)
8451 {
8452 /* Defined here but external assumed. */
8453 if (al->lkind == KIND_EXTERN)
8454 al->lkind = KIND_LOCAL;
8455 }
8456 else
8457 {
8458 /* Used here but unused assumed. */
8459 if (al->lkind == KIND_UNUSED)
8460 al->lkind = KIND_LOCAL;
8461 }
8462 return al->linkage;
8463 }
8464 }
8465 else
8466 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
8468 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
8469 name = ggc_strdup (name);
8471 /* Assume external if no definition. */
8472 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
8474 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
8475 get_identifier (name);
8477 /* Construct a SYMBOL_REF for us to call. */
8478 {
8479 size_t name_len = strlen (name);
8480 char *linksym = alloca (name_len + 6);
8481 linksym[0] = '$';
8482 memcpy (linksym + 1, name, name_len);
8483 memcpy (linksym + 1 + name_len, "..lk", 5);
8484 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
8485 ggc_alloc_string (linksym, name_len + 5));
8486 }
8488 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
8489 (splay_tree_value) al);
8491 return al->linkage;
8492 }
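/* A call to "foo" is thus made through the linkage symbol "$foo..lk",
   and the splay tree keeps the entry unique no matter how many times
   the name is referenced or defined.  */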
8494 rtx
8495 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
8496 {
8497 splay_tree_node cfunnode;
8498 struct alpha_funcs *cfaf;
8499 struct alpha_links *al;
8500 const char *name = XSTR (linkage, 0);
8502 cfaf = (struct alpha_funcs *) 0;
8503 al = (struct alpha_links *) 0;
8505 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
8506 cfaf = (struct alpha_funcs *) cfunnode->value;
8508 if (cfaf->links)
8509 {
8510 splay_tree_node lnode;
8512 /* Is this name already defined? */
8514 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
8515 if (lnode)
8516 al = (struct alpha_links *) lnode->value;
8517 }
8518 else
8519 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
8521 if (!al)
8522 {
8523 size_t name_len;
8524 size_t buflen;
8525 char buf [512];
8526 char *linksym;
8527 splay_tree_node node = 0;
8528 struct alpha_links *anl;
8530 if (name[0] == '*')
8531 name++;
8533 name_len = strlen (name);
8535 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
8536 al->num = cfaf->num;
8538 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
8539 if (node)
8540 {
8541 anl = (struct alpha_links *) node->value;
8542 al->lkind = anl->lkind;
8543 }
8545 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
8546 buflen = strlen (buf);
8547 linksym = alloca (buflen + 1);
8548 memcpy (linksym, buf, buflen + 1);
8550 al->linkage = gen_rtx_SYMBOL_REF
8551 (Pmode, ggc_alloc_string (linksym, buflen + 1));
8553 splay_tree_insert (cfaf->links, (splay_tree_key) name,
8554 (splay_tree_value) al);
8555 }
8557 if (rflag)
8558 al->rkind = KIND_CODEADDR;
8559 else
8560 al->rkind = KIND_LINKAGE;
8562 if (lflag)
8563 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
8564 else
8565 return al->linkage;
8566 }
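/* Here the linkage symbol is additionally qualified with the calling
   function's number: a reference to "bar" from function number 3 uses
   "$3..bar..lk".  With LFLAG set, the returned MEM addresses the
   quadword at offset 8, the second element of the linkage pair emitted
   by alpha_write_one_linkage below.  */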
8568 static int
8569 alpha_write_one_linkage (splay_tree_node node, void *data)
8570 {
8571 const char *const name = (const char *) node->key;
8572 struct alpha_links *link = (struct alpha_links *) node->value;
8573 FILE *stream = (FILE *) data;
8575 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
8576 if (link->rkind == KIND_CODEADDR)
8577 {
8578 if (link->lkind == KIND_LOCAL)
8579 {
8580 /* Local and used */
8581 fprintf (stream, "\t.quad %s..en\n", name);
8582 }
8583 else
8584 {
8585 /* External and used, request code address. */
8586 fprintf (stream, "\t.code_address %s\n", name);
8587 }
8588 }
8589 else
8590 {
8591 if (link->lkind == KIND_LOCAL)
8592 {
8593 /* Local and used, build linkage pair. */
8594 fprintf (stream, "\t.quad %s..en\n", name);
8595 fprintf (stream, "\t.quad %s\n", name);
8596 }
8597 else
8598 {
8599 /* External and used, request linkage pair. */
8600 fprintf (stream, "\t.linkage %s\n", name);
8601 }
8602 }
8604 return 0;
8605 }
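/* For a linkage from function number 3 to "foo", a function defined in
   this unit, this emits

        $3..foo..lk:
                .quad foo..en
                .quad foo

   whereas an external function collapses to a single ".linkage foo"
   request for the linker to fill in.  */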
8607 static void
8608 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
8609 {
8610 splay_tree_node node;
8611 struct alpha_funcs *func;
8613 link_section ();
8614 fprintf (stream, "\t.align 3\n");
8615 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
8616 func = (struct alpha_funcs *) node->value;
8618 fputs ("\t.name ", stream);
8619 assemble_name (stream, funname);
8620 fputs ("..na\n", stream);
8621 ASM_OUTPUT_LABEL (stream, funname);
8622 fprintf (stream, "\t.pdesc ");
8623 assemble_name (stream, funname);
8624 fprintf (stream, "..en,%s\n",
8625 alpha_procedure_type == PT_STACK ? "stack"
8626 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
8628 if (func->links)
8629 {
8630 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
8631 /* splay_tree_delete (func->links); */
8632 }
8633 }
8635 /* Given a decl, a section name, and whether the decl initializer
8636 has relocs, choose attributes for the section. */
8638 #define SECTION_VMS_OVERLAY SECTION_FORGET
8639 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
8640 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
8642 static unsigned int
8643 vms_section_type_flags (tree decl, const char *name, int reloc)
8644 {
8645 unsigned int flags = default_section_type_flags (decl, name, reloc);
8647 if (decl && DECL_ATTRIBUTES (decl)
8648 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
8649 flags |= SECTION_VMS_OVERLAY;
8650 if (decl && DECL_ATTRIBUTES (decl)
8651 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
8652 flags |= SECTION_VMS_GLOBAL;
8653 if (decl && DECL_ATTRIBUTES (decl)
8654 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
8655 flags |= SECTION_VMS_INITIALIZE;
8657 return flags;
8658 }
8660 /* Switch to an arbitrary section NAME with attributes as specified
8661 by FLAGS. ALIGN specifies any known alignment requirements for
8662 the section; 0 if the default should be used. */
8664 static void
8665 vms_asm_named_section (const char *name, unsigned int flags,
8666 tree decl ATTRIBUTE_UNUSED)
8667 {
8668 fputc ('\n', asm_out_file);
8669 fprintf (asm_out_file, ".section\t%s", name);
8671 if (flags & SECTION_VMS_OVERLAY)
8672 fprintf (asm_out_file, ",OVR");
8673 if (flags & SECTION_VMS_GLOBAL)
8674 fprintf (asm_out_file, ",GBL");
8675 if (flags & SECTION_VMS_INITIALIZE)
8676 fprintf (asm_out_file, ",NOMOD");
8677 if (flags & SECTION_DEBUG)
8678 fprintf (asm_out_file, ",NOWRT");
8680 fputc ('\n', asm_out_file);
8681 }
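/* A section carrying the "global" and "initialize" attributes is thus
   announced as, e.g.,

        .section        foo,GBL,NOMOD  */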
8683 /* Record an element in the table of global constructors. SYMBOL is
8684 a SYMBOL_REF of the function to be called; PRIORITY is a number
8685 between 0 and MAX_INIT_PRIORITY.
8687 Differs from default_ctors_section_asm_out_constructor in that the
8688 width of the .ctors entry is always 64 bits, rather than the 32 bits
8689 used by a normal pointer. */
8691 static void
8692 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
8693 {
8694 ctors_section ();
8695 assemble_align (BITS_PER_WORD);
8696 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
8697 }
8699 static void
8700 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
8701 {
8702 dtors_section ();
8703 assemble_align (BITS_PER_WORD);
8704 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
8705 }
8706 #else
8708 rtx
8709 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
8710 int is_local ATTRIBUTE_UNUSED)
8711 {
8712 return NULL_RTX;
8713 }
8715 rtx
8716 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
8717 tree cfundecl ATTRIBUTE_UNUSED,
8718 int lflag ATTRIBUTE_UNUSED,
8719 int rflag ATTRIBUTE_UNUSED)
8720 {
8721 return NULL_RTX;
8722 }
8724 #endif /* TARGET_ABI_OPEN_VMS */
8726 #if TARGET_ABI_UNICOSMK
8728 /* This evaluates to true if we do not know how to pass TYPE solely in
8729 registers. This is the case for all arguments that do not fit in two
8730 registers. */
8732 static bool
8733 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
8734 {
8735 if (type == NULL)
8736 return false;
8738 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8739 return true;
8740 if (TREE_ADDRESSABLE (type))
8741 return true;
8743 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
8744 }
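/* For example, a 24-byte structure occupies three 8-byte argument slots
   (ALPHA_ARG_SIZE == 3) and therefore goes on the stack, while a
   16-byte one still fits in two registers.  */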
8746 /* Define the offset between two registers, one to be eliminated, and the
8747 other its replacement, at the start of a routine. */
8749 int
8750 unicosmk_initial_elimination_offset (int from, int to)
8751 {
8752 int fixed_size;
8754 fixed_size = alpha_sa_size();
8755 if (fixed_size != 0)
8756 fixed_size += 48;
8758 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8759 return -fixed_size;
8760 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8761 return 0;
8762 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8763 return (ALPHA_ROUND (current_function_outgoing_args_size)
8764 + ALPHA_ROUND (get_frame_size()));
8765 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8766 return (ALPHA_ROUND (fixed_size)
8767 + ALPHA_ROUND (get_frame_size()
8768 + current_function_outgoing_args_size));
8769 else
8770 abort ();
8771 }
8773 /* Output the module name for .ident and .end directives. We have to strip
8774 the directory part and make sure that the module name starts with a letter
8775 or '$'. */
8777 static void
8778 unicosmk_output_module_name (FILE *file)
8779 {
8780 const char *name = lbasename (main_input_filename);
8781 unsigned len = strlen (name);
8782 char *clean_name = alloca (len + 2);
8783 char *ptr = clean_name;
8785 /* CAM only accepts module names that start with a letter or '$'. We
8786 prefix the module name with a '$' if necessary. */
8788 if (!ISALPHA (*name))
8789 *ptr++ = '$';
8790 memcpy (ptr, name, len + 1);
8791 clean_symbol_name (clean_name);
8792 fputs (clean_name, file);
8793 }
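/* A main input file of "sub/0main.c" therefore becomes the module name
   "$0main_c": the directory is dropped, '$' is prepended because the
   base name starts with a digit, and clean_symbol_name is assumed to
   rewrite the '.' as an underscore.  */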
8795 /* Output the definition of a common variable. */
8797 void
8798 unicosmk_output_common (FILE *file, const char *name, int size, int align)
8799 {
8800 tree name_tree;
8801 printf ("T3E__: common %s\n", name);
8803 common_section ();
8804 fputs("\t.endp\n\n\t.psect ", file);
8805 assemble_name(file, name);
8806 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
8807 fprintf(file, "\t.byte\t0:%d\n", size);
8809 /* Mark the symbol as defined in this module. */
8810 name_tree = get_identifier (name);
8811 TREE_ASM_WRITTEN (name_tree) = 1;
8812 }
8814 #define SECTION_PUBLIC SECTION_MACH_DEP
8815 #define SECTION_MAIN (SECTION_PUBLIC << 1)
8816 static int current_section_align;
8818 static unsigned int
8819 unicosmk_section_type_flags (tree decl, const char *name,
8820 int reloc ATTRIBUTE_UNUSED)
8821 {
8822 unsigned int flags = default_section_type_flags (decl, name, reloc);
8824 if (!decl)
8825 return flags;
8827 if (TREE_CODE (decl) == FUNCTION_DECL)
8828 {
8829 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8830 if (align_functions_log > current_section_align)
8831 current_section_align = align_functions_log;
8833 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
8834 flags |= SECTION_MAIN;
8835 }
8836 else
8837 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
8839 if (TREE_PUBLIC (decl))
8840 flags |= SECTION_PUBLIC;
8842 return flags;
8843 }
8845 /* Generate a section name for decl and associate it with the
8846 declaration. */
8848 static void
8849 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
8850 {
8851 const char *name;
8852 int len;
8854 if (!decl)
8855 abort ();
8857 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
8858 name = default_strip_name_encoding (name);
8859 len = strlen (name);
8861 if (TREE_CODE (decl) == FUNCTION_DECL)
8862 {
8863 char *string;
8865 /* It is essential that we prefix the section name here because
8866 otherwise the section names generated for constructors and
8867 destructors confuse collect2. */
8869 string = alloca (len + 6);
8870 sprintf (string, "code@%s", name);
8871 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
8872 }
8873 else if (TREE_PUBLIC (decl))
8874 DECL_SECTION_NAME (decl) = build_string (len, name);
8875 else
8876 {
8877 char *string;
8879 string = alloca (len + 6);
8880 sprintf (string, "data@%s", name);
8881 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
8882 }
8883 }
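/* A function "foo" thus lands in section "code@foo" and a static
   variable "bar" in "data@bar", while a public variable simply uses
   its own name as the section name.  */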
8885 /* Switch to an arbitrary section NAME with attributes as specified
8886 by FLAGS. ALIGN specifies any known alignment requirements for
8887 the section; 0 if the default should be used. */
8889 static void
8890 unicosmk_asm_named_section (const char *name, unsigned int flags,
8891 tree decl ATTRIBUTE_UNUSED)
8892 {
8893 const char *kind;
8895 /* Close the previous section. */
8897 fputs ("\t.endp\n\n", asm_out_file);
8899 /* Find out what kind of section we are opening. */
8901 if (flags & SECTION_MAIN)
8902 fputs ("\t.start\tmain\n", asm_out_file);
8904 if (flags & SECTION_CODE)
8905 kind = "code";
8906 else if (flags & SECTION_PUBLIC)
8907 kind = "common";
8908 else
8909 kind = "data";
8911 if (current_section_align != 0)
8912 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
8913 current_section_align, kind);
8914 else
8915 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
8916 }
8918 static void
8919 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
8920 {
8921 if (DECL_P (decl)
8922 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
8923 unicosmk_unique_section (decl, 0);
8924 }
8926 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
8927 in code sections because .align fills unused space with zeroes. */
8929 void
8930 unicosmk_output_align (FILE *file, int align)
8931 {
8932 if (inside_function)
8933 fprintf (file, "\tgcc@code@align\t%d\n", align);
8934 else
8935 fprintf (file, "\t.align\t%d\n", align);
8936 }
8938 /* Add a case vector to the current function's list of deferred case
8939 vectors. Case vectors have to be put into a separate section because CAM
8940 does not allow data definitions in code sections. */
8942 void
8943 unicosmk_defer_case_vector (rtx lab, rtx vec)
8944 {
8945 struct machine_function *machine = cfun->machine;
8947 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8948 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
8949 machine->addr_list);
8950 }
8952 /* Output a case vector. */
8954 static void
8955 unicosmk_output_addr_vec (FILE *file, rtx vec)
8956 {
8957 rtx lab = XEXP (vec, 0);
8958 rtx body = XEXP (vec, 1);
8959 int vlen = XVECLEN (body, 0);
8960 int idx;
8962 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
8964 for (idx = 0; idx < vlen; idx++)
8965 {
8966 ASM_OUTPUT_ADDR_VEC_ELT
8967 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8968 }
8969 }
8971 /* Output current function's deferred case vectors. */
8973 static void
8974 unicosmk_output_deferred_case_vectors (FILE *file)
8975 {
8976 struct machine_function *machine = cfun->machine;
8977 rtx t;
8979 if (machine->addr_list == NULL_RTX)
8980 return;
8982 data_section ();
8983 for (t = machine->addr_list; t; t = XEXP (t, 1))
8984 unicosmk_output_addr_vec (file, XEXP (t, 0));
8985 }
8987 /* Generate the name of the SSIB section for the current function. */
8989 #define SSIB_PREFIX "__SSIB_"
8990 #define SSIB_PREFIX_LEN 7
8992 static const char *
8993 unicosmk_ssib_name (void)
8994 {
8995 /* This is ok since CAM won't be able to deal with names longer than that
8996 anyway. */
8998 static char name[256];
9000 rtx x;
9001 const char *fnname;
9002 int len;
9004 x = DECL_RTL (cfun->decl);
9005 if (GET_CODE (x) != MEM)
9006 abort ();
9007 x = XEXP (x, 0);
9008 if (GET_CODE (x) != SYMBOL_REF)
9009 abort ();
9010 fnname = XSTR (x, 0);
9012 len = strlen (fnname);
9013 if (len + SSIB_PREFIX_LEN > 255)
9014 len = 255 - SSIB_PREFIX_LEN;
9016 strcpy (name, SSIB_PREFIX);
9017 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
9018 name[len + SSIB_PREFIX_LEN] = 0;
9020 return name;
9021 }
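/* A function "calc" gets the SSIB name "__SSIB_calc"; anything long
   enough to overflow the 256-byte buffer is silently truncated, on the
   grounds that CAM cannot handle such names anyway.  */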
9023 /* Set up the dynamic subprogram information block (DSIB) and update the
9024 frame pointer register ($15) for subroutines which have a frame. If the
9025 subroutine doesn't have a frame, simply increment $15. */
9027 static void
9028 unicosmk_gen_dsib (unsigned long *imaskP)
9029 {
9030 if (alpha_procedure_type == PT_STACK)
9031 {
9032 const char *ssib_name;
9033 rtx mem;
9035 /* Allocate 64 bytes for the DSIB. */
9037 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
9038 GEN_INT (-64))));
9039 emit_insn (gen_blockage ());
9041 /* Save the return address. */
9043 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
9044 set_mem_alias_set (mem, alpha_sr_alias_set);
9045 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
9046 (*imaskP) &= ~(1UL << REG_RA);
9048 /* Save the old frame pointer. */
9050 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
9051 set_mem_alias_set (mem, alpha_sr_alias_set);
9052 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
9053 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
9055 emit_insn (gen_blockage ());
9057 /* Store the SSIB pointer. */
9059 ssib_name = ggc_strdup (unicosmk_ssib_name ());
9060 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
9061 set_mem_alias_set (mem, alpha_sr_alias_set);
9063 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
9064 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
9065 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
9067 /* Save the CIW index. */
9069 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
9070 set_mem_alias_set (mem, alpha_sr_alias_set);
9071 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
9073 emit_insn (gen_blockage ());
9075 /* Set the new frame pointer. */
9077 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9078 stack_pointer_rtx, GEN_INT (64))));
9079 }
9081 else
9082 {
9083 /* Increment the frame pointer register to indicate that we do not
9084 have a frame. */
9086 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9087 hard_frame_pointer_rtx, const1_rtx)));
9088 }
9089 }
9091 /* Output the static subroutine information block for the current
9092 function. */
9094 static void
9095 unicosmk_output_ssib (FILE *file, const char *fnname)
9096 {
9097 int len;
9098 int i;
9099 rtx x;
9100 rtx ciw;
9101 struct machine_function *machine = cfun->machine;
9103 ssib_section ();
9104 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
9105 unicosmk_ssib_name ());
9107 /* Some required stuff and the function name length. */
9109 len = strlen (fnname);
9110 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
9112 /* Saved registers
9113 ??? We don't do that yet. */
9115 fputs ("\t.quad\t0\n", file);
9117 /* Function address. */
9119 fputs ("\t.quad\t", file);
9120 assemble_name (file, fnname);
9121 putc ('\n', file);
9123 fputs ("\t.quad\t0\n", file);
9124 fputs ("\t.quad\t0\n", file);
9126 /* Function name.
9127 ??? We do it the same way Cray CC does it but this could be
9128 simplified. */
9130 for (i = 0; i < len; i++)
9131 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
9132 if ((len % 8) == 0)
9133 fputs ("\t.quad\t0\n", file);
9134 else
9135 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
9137 /* All call information words used in the function. */
9139 for (x = machine->first_ciw; x; x = XEXP (x, 1))
9140 {
9141 ciw = XEXP (x, 0);
9142 #if HOST_BITS_PER_WIDE_INT == 32
9143 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
9144 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
9145 #else
9146 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
9147 #endif
9148 }
9149 }
9151 /* Add a call information word (CIW) to the list of the current function's
9152 CIWs and return its index.
9154 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
9156 rtx
9157 unicosmk_add_call_info_word (rtx x)
9158 {
9159 rtx node;
9160 struct machine_function *machine = cfun->machine;
9162 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
9163 if (machine->first_ciw == NULL_RTX)
9164 machine->first_ciw = node;
9165 else
9166 XEXP (machine->last_ciw, 1) = node;
9168 machine->last_ciw = node;
9169 ++machine->ciw_count;
9171 return GEN_INT (machine->ciw_count
9172 + strlen (current_function_name ())/8 + 5);
9173 }
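/* The result is the CIW's quadword index within the SSIB: five fixed
   quadwords (header, saved-register word, function address and two
   zeros) plus strlen/8 + 1 quadwords of name precede the list, so the
   first CIW of "main" (ciw_count == 1, strlen == 4) is assigned
   1 + 4/8 + 5 == 6, the seventh quadword.  */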
9175 static char unicosmk_section_buf[100];
9177 char *
9178 unicosmk_text_section (void)
9179 {
9180 static int count = 0;
9181 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
9182 count++);
9183 return unicosmk_section_buf;
9184 }
9186 char *
9187 unicosmk_data_section (void)
9188 {
9189 static int count = 1;
9190 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
9191 count++);
9192 return unicosmk_section_buf;
9193 }
9195 /* The Cray assembler doesn't accept extern declarations for symbols which
9196 are defined in the same file. We have to keep track of all global
9197 symbols which are referenced and/or defined in a source file and output
9198 extern declarations for those which are referenced but not defined at
9199 the end of file. */
9201 /* List of identifiers for which an extern declaration might have to be
9202 emitted. */
9203 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9205 struct unicosmk_extern_list
9206 {
9207 struct unicosmk_extern_list *next;
9208 const char *name;
9209 };
9211 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
9213 /* Output extern declarations which are required for every asm file. */
9215 static void
9216 unicosmk_output_default_externs (FILE *file)
9217 {
9218 static const char *const externs[] =
9219 { "__T3E_MISMATCH" };
9221 int i;
9222 int n;
9224 n = ARRAY_SIZE (externs);
9226 for (i = 0; i < n; i++)
9227 fprintf (file, "\t.extern\t%s\n", externs[i]);
9228 }
9230 /* Output extern declarations for global symbols which have been
9231 referenced but not defined. */
9233 static void
9234 unicosmk_output_externs (FILE *file)
9235 {
9236 struct unicosmk_extern_list *p;
9237 const char *real_name;
9238 int len;
9239 tree name_tree;
9241 len = strlen (user_label_prefix);
9242 for (p = unicosmk_extern_head; p != 0; p = p->next)
9243 {
9244 /* We have to strip the encoding and possibly remove user_label_prefix
9245 from the identifier in order to handle -fleading-underscore and
9246 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
9247 real_name = default_strip_name_encoding (p->name);
9248 if (len && p->name[0] == '*'
9249 && !memcmp (real_name, user_label_prefix, len))
9250 real_name += len;
9252 name_tree = get_identifier (real_name);
9253 if (! TREE_ASM_WRITTEN (name_tree))
9254 {
9255 TREE_ASM_WRITTEN (name_tree) = 1;
9256 fputs ("\t.extern\t", file);
9257 assemble_name (file, p->name);
9258 putc ('\n', file);
9259 }
9260 }
9261 }
9263 /* Record an extern. */
9265 void
9266 unicosmk_add_extern (const char *name)
9267 {
9268 struct unicosmk_extern_list *p;
9270 p = (struct unicosmk_extern_list *)
9271 xmalloc (sizeof (struct unicosmk_extern_list));
9272 p->next = unicosmk_extern_head;
9273 p->name = name;
9274 unicosmk_extern_head = p;
9275 }
9277 /* The Cray assembler generates incorrect code if identifiers which
9278 conflict with register names are used as instruction operands. We have
9279 to replace such identifiers with DEX expressions. */
9281 /* Structure to collect identifiers which have been replaced by DEX
9282 expressions. */
9283 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9285 struct unicosmk_dex {
9286 struct unicosmk_dex *next;
9287 const char *name;
9288 };
9290 /* List of identifiers which have been replaced by DEX expressions. The DEX
9291 number is determined by the position in the list. */
9293 static struct unicosmk_dex *unicosmk_dex_list = NULL;
9295 /* The number of elements in the DEX list. */
9297 static int unicosmk_dex_count = 0;
9299 /* Check if NAME must be replaced by a DEX expression. */
9301 static int
9302 unicosmk_special_name (const char *name)
9303 {
9304 if (name[0] == '*')
9305 ++name;
9307 if (name[0] == '$')
9308 ++name;
9310 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
9311 return 0;
9313 switch (name[1])
9314 {
9315 case '1': case '2':
9316 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
9318 case '3':
9319 return (name[2] == '\0'
9320 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
9322 default:
9323 return (ISDIGIT (name[1]) && name[2] == '\0');
9324 }
9325 }
9327 /* Return the DEX number if X must be replaced by a DEX expression and 0
9328 otherwise. */
9330 static int
9331 unicosmk_need_dex (rtx x)
9332 {
9333 struct unicosmk_dex *dex;
9334 const char *name;
9335 int i;
9337 if (GET_CODE (x) != SYMBOL_REF)
9338 return 0;
9340 name = XSTR (x, 0);
9341 if (! unicosmk_special_name (name))
9342 return 0;
9344 i = unicosmk_dex_count;
9345 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9346 {
9347 if (! strcmp (name, dex->name))
9348 return i;
9349 --i;
9350 }
9352 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
9353 dex->name = name;
9354 dex->next = unicosmk_dex_list;
9355 unicosmk_dex_list = dex;
9357 ++unicosmk_dex_count;
9358 return unicosmk_dex_count;
9359 }
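/* So a global named "r10" or "$f3", which CAM would read as a register,
   gets a DEX slot, while "r32" or "reg" passes through untouched.  */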
9361 /* Output the DEX definitions for this file. */
9363 static void
9364 unicosmk_output_dex (FILE *file)
9365 {
9366 struct unicosmk_dex *dex;
9367 int i;
9369 if (unicosmk_dex_list == NULL)
9370 return;
9372 fprintf (file, "\t.dexstart\n");
9374 i = unicosmk_dex_count;
9375 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9376 {
9377 fprintf (file, "\tDEX (%d) = ", i);
9378 assemble_name (file, dex->name);
9379 putc ('\n', file);
9380 --i;
9381 }
9383 fprintf (file, "\t.dexend\n");
9384 }
9386 /* Output text to appear at the beginning of an assembler file. */
9388 static void
9389 unicosmk_file_start (void)
9390 {
9391 int i;
9393 fputs ("\t.ident\t", asm_out_file);
9394 unicosmk_output_module_name (asm_out_file);
9395 fputs ("\n\n", asm_out_file);
9397 /* The Unicos/Mk assembler uses different register names. Instead of trying
9398 to support them, we simply use micro definitions. */
9400 /* CAM has different register names: rN for the integer register N and fN
9401 for the floating-point register N. Instead of trying to use these in
9402 alpha.md, we define the symbols $N and $fN to refer to the appropriate
9403 register. */
9405 for (i = 0; i < 32; ++i)
9406 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
9408 for (i = 0; i < 32; ++i)
9409 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
9411 putc ('\n', asm_out_file);
9413 /* The .align directive fills unused space with zeroes, which does not work
9414 in code sections. We define the macro 'gcc@code@align' which uses nops
9415 instead. Note that it assumes that code sections always have the
9416 biggest possible alignment since . refers to the current offset from
9417 the beginning of the section. */
9419 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
9420 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
9421 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
9422 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
9423 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
9424 fputs ("\tbis r31,r31,r31\n", asm_out_file);
9425 fputs ("\t.endr\n", asm_out_file);
9426 fputs ("\t.endif\n", asm_out_file);
9427 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
9429 /* Output extern declarations which should always be visible. */
9430 unicosmk_output_default_externs (asm_out_file);
9432 /* Open a dummy section. We always need to be inside a section for the
9433 section-switching code to work correctly.
9434 ??? This should be a module id or something like that. I still have to
9435 figure out what the rules for those are. */
9436 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
9437 }
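/* As a sketch of the macro's effect: "gcc@code@align 4" measures the
   distance from the current offset to the next 16-byte boundary and
   covers it by repeating the canonical nop "bis r31,r31,r31" (4 bytes
   each), something the zero-filling .align cannot do inside a code
   psect.  */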
9439 /* Output text to appear at the end of an assembler file. This includes all
9440 pending extern declarations and DEX expressions. */
9442 static void
9443 unicosmk_file_end (void)
9444 {
9445 fputs ("\t.endp\n\n", asm_out_file);
9447 /* Output all pending externs. */
9449 unicosmk_output_externs (asm_out_file);
9451 /* Output dex definitions used for functions whose names conflict with
9452 register names. */
9454 unicosmk_output_dex (asm_out_file);
9456 fputs ("\t.end\t", asm_out_file);
9457 unicosmk_output_module_name (asm_out_file);
9458 putc ('\n', asm_out_file);
9459 }
9461 #else
9463 static void
9464 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
9465 {}
9467 static void
9468 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
9469 {}
9471 static void
9472 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
9473 const char * fnname ATTRIBUTE_UNUSED)
9474 {}
9476 rtx
9477 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
9478 {
9479 return NULL_RTX;
9480 }
9482 static int
9483 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
9484 {
9485 return 0;
9486 }
9488 #endif /* TARGET_ABI_UNICOSMK */
9490 static void
9491 alpha_init_libfuncs (void)
9492 {
9493 if (TARGET_ABI_UNICOSMK)
9494 {
9495 /* Prevent gcc from generating calls to __divsi3. */
9496 set_optab_libfunc (sdiv_optab, SImode, 0);
9497 set_optab_libfunc (udiv_optab, SImode, 0);
9499 /* Use the functions provided by the system library
9500 for DImode integer division. */
9501 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
9502 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
9503 }
9504 else if (TARGET_ABI_OPEN_VMS)
9505 {
9506 /* Use the VMS runtime library functions for division and
9507 remainder. */
9508 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9509 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9510 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9511 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9512 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9513 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9514 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9515 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9516 }
9517 }
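/* So on VMS a 32-bit signed division becomes a call to OTS$DIV_I, and
   on Unicos/Mk a 64-bit unsigned division calls the system's $uldiv;
   the SImode entries there were cleared above precisely so that no
   __divsi3 call is ever emitted.  */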
9520 /* Initialize the GCC target structure. */
9521 #if TARGET_ABI_OPEN_VMS
9522 # undef TARGET_ATTRIBUTE_TABLE
9523 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9524 # undef TARGET_SECTION_TYPE_FLAGS
9525 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
9526 #endif
9528 #undef TARGET_IN_SMALL_DATA_P
9529 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9531 #if TARGET_ABI_UNICOSMK
9532 # undef TARGET_INSERT_ATTRIBUTES
9533 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
9534 # undef TARGET_SECTION_TYPE_FLAGS
9535 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
9536 # undef TARGET_ASM_UNIQUE_SECTION
9537 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
9538 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
9539 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
9540 # undef TARGET_ASM_GLOBALIZE_LABEL
9541 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
9542 # undef TARGET_MUST_PASS_IN_STACK
9543 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
9544 #endif
9546 #undef TARGET_ASM_ALIGNED_HI_OP
9547 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9548 #undef TARGET_ASM_ALIGNED_DI_OP
9549 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9551 /* Default unaligned ops are provided for ELF systems. To get unaligned
9552 data for non-ELF systems, we have to turn off auto alignment. */
9553 #ifndef OBJECT_FORMAT_ELF
9554 #undef TARGET_ASM_UNALIGNED_HI_OP
9555 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9556 #undef TARGET_ASM_UNALIGNED_SI_OP
9557 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9558 #undef TARGET_ASM_UNALIGNED_DI_OP
9559 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9560 #endif
9562 #ifdef OBJECT_FORMAT_ELF
9563 #undef TARGET_ASM_SELECT_RTX_SECTION
9564 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9565 #endif
9567 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9568 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9570 #undef TARGET_INIT_LIBFUNCS
9571 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9573 #if TARGET_ABI_UNICOSMK
9574 #undef TARGET_ASM_FILE_START
9575 #define TARGET_ASM_FILE_START unicosmk_file_start
9576 #undef TARGET_ASM_FILE_END
9577 #define TARGET_ASM_FILE_END unicosmk_file_end
9578 #else
9579 #undef TARGET_ASM_FILE_START
9580 #define TARGET_ASM_FILE_START alpha_file_start
9581 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
9582 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
9583 #endif
9585 #undef TARGET_SCHED_ADJUST_COST
9586 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9587 #undef TARGET_SCHED_ISSUE_RATE
9588 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
9589 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9590 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9591 alpha_multipass_dfa_lookahead
9593 #undef TARGET_HAVE_TLS
9594 #define TARGET_HAVE_TLS HAVE_AS_TLS
9596 #undef TARGET_INIT_BUILTINS
9597 #define TARGET_INIT_BUILTINS alpha_init_builtins
9598 #undef TARGET_EXPAND_BUILTIN
9599 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
9601 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
9602 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
9603 #undef TARGET_CANNOT_COPY_INSN_P
9604 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
9605 #undef TARGET_CANNOT_FORCE_CONST_MEM
9606 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
9608 #if TARGET_ABI_OSF
9609 #undef TARGET_ASM_OUTPUT_MI_THUNK
9610 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
9611 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
9612 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
9613 #endif
9615 #undef TARGET_RTX_COSTS
9616 #define TARGET_RTX_COSTS alpha_rtx_costs
9617 #undef TARGET_ADDRESS_COST
9618 #define TARGET_ADDRESS_COST hook_int_rtx_0
9620 #undef TARGET_MACHINE_DEPENDENT_REORG
9621 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
9623 #undef TARGET_PROMOTE_FUNCTION_ARGS
9624 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
9625 #undef TARGET_PROMOTE_FUNCTION_RETURN
9626 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
9627 #undef TARGET_PROMOTE_PROTOTYPES
9628 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
9629 #undef TARGET_RETURN_IN_MEMORY
9630 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
9631 #undef TARGET_PASS_BY_REFERENCE
9632 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
9633 #undef TARGET_SETUP_INCOMING_VARARGS
9634 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
9635 #undef TARGET_STRICT_ARGUMENT_NAMING
9636 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
9637 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
9638 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
9639 #undef TARGET_SPLIT_COMPLEX_ARG
9640 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
9641 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
9642 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
9643 #undef TARGET_ARG_PARTIAL_BYTES
9644 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
9646 #undef TARGET_SCALAR_MODE_SUPPORTED_P
9647 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
9648 #undef TARGET_VECTOR_MODE_SUPPORTED_P
9649 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
9651 #undef TARGET_BUILD_BUILTIN_VA_LIST
9652 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
9654 /* The Alpha architecture does not require sequential consistency. See
9655 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
9656 for an example of how it can be violated in practice. */
9657 #undef TARGET_RELAXED_ORDERING
9658 #define TARGET_RELAXED_ORDERING true
9660 struct gcc_target targetm = TARGET_INITIALIZER;
9663 #include "gt-alpha.h"