1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
59 /* Specify which cpu to schedule for. */
60 enum processor_type alpha_tune;
62 /* Which cpu we're generating code for. */
63 enum processor_type alpha_cpu;
65 static const char * const alpha_cpu_name[] =
67 "ev4", "ev5", "ev6"
70 /* Specify how accurate floating-point traps need to be. */
72 enum alpha_trap_precision alpha_tp;
74 /* Specify the floating-point rounding mode. */
76 enum alpha_fp_rounding_mode alpha_fprm;
78 /* Specify which things cause traps. */
80 enum alpha_fp_trap_mode alpha_fptm;
82 /* Save information from a "cmpxx" operation until the branch or scc is
83 emitted. */
85 struct alpha_compare alpha_compare;
87 /* Nonzero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
90 static int inside_function = FALSE;
92 /* The number of cycles of latency we should assume on memory reads. */
94 int alpha_memory_latency = 3;
96 /* Whether the function needs the GP. */
98 static int alpha_function_needs_gp;
100 /* The alias set for prologue/epilogue register save/restore. */
102 static GTY(()) int alpha_sr_alias_set;
104 /* The assembler name of the current function. */
106 static const char *alpha_fnname;
108 /* The next explicit relocation sequence number. */
109 extern GTY(()) int alpha_next_sequence_number;
110 int alpha_next_sequence_number = 1;
112 /* The literal and gpdisp sequence numbers for this insn, as printed
113 by %# and %* respectively. */
114 extern GTY(()) int alpha_this_literal_sequence_number;
115 extern GTY(()) int alpha_this_gpdisp_sequence_number;
116 int alpha_this_literal_sequence_number;
117 int alpha_this_gpdisp_sequence_number;
119 /* Costs of various operations on the different architectures. */
121 struct alpha_rtx_cost_data
123 unsigned char fp_add;
124 unsigned char fp_mult;
125 unsigned char fp_div_sf;
126 unsigned char fp_div_df;
127 unsigned char int_mult_si;
128 unsigned char int_mult_di;
129 unsigned char int_shift;
130 unsigned char int_cmov;
131 unsigned short int_div;
134 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
136 { /* EV4 */
137 COSTS_N_INSNS (6), /* fp_add */
138 COSTS_N_INSNS (6), /* fp_mult */
139 COSTS_N_INSNS (34), /* fp_div_sf */
140 COSTS_N_INSNS (63), /* fp_div_df */
141 COSTS_N_INSNS (23), /* int_mult_si */
142 COSTS_N_INSNS (23), /* int_mult_di */
143 COSTS_N_INSNS (2), /* int_shift */
144 COSTS_N_INSNS (2), /* int_cmov */
145 COSTS_N_INSNS (97), /* int_div */
147 { /* EV5 */
148 COSTS_N_INSNS (4), /* fp_add */
149 COSTS_N_INSNS (4), /* fp_mult */
150 COSTS_N_INSNS (15), /* fp_div_sf */
151 COSTS_N_INSNS (22), /* fp_div_df */
152 COSTS_N_INSNS (8), /* int_mult_si */
153 COSTS_N_INSNS (12), /* int_mult_di */
154 COSTS_N_INSNS (1) + 1, /* int_shift */
155 COSTS_N_INSNS (1), /* int_cmov */
156 COSTS_N_INSNS (83), /* int_div */
158 { /* EV6 */
159 COSTS_N_INSNS (4), /* fp_add */
160 COSTS_N_INSNS (4), /* fp_mult */
161 COSTS_N_INSNS (12), /* fp_div_sf */
162 COSTS_N_INSNS (15), /* fp_div_df */
163 COSTS_N_INSNS (7), /* int_mult_si */
164 COSTS_N_INSNS (7), /* int_mult_di */
165 COSTS_N_INSNS (1), /* int_shift */
166 COSTS_N_INSNS (2), /* int_cmov */
167 COSTS_N_INSNS (86), /* int_div */
171 /* Similar but tuned for code size instead of execution latency. The
172 extra +N is fractional cost tuning based on latency. It's used to
173 encourage use of cheaper insns like shift, but only if there's just
174 one of them. */
176 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
178 COSTS_N_INSNS (1), /* fp_add */
179 COSTS_N_INSNS (1), /* fp_mult */
180 COSTS_N_INSNS (1), /* fp_div_sf */
181 COSTS_N_INSNS (1) + 1, /* fp_div_df */
182 COSTS_N_INSNS (1) + 1, /* int_mult_si */
183 COSTS_N_INSNS (1) + 2, /* int_mult_di */
184 COSTS_N_INSNS (1), /* int_shift */
185 COSTS_N_INSNS (1), /* int_cmov */
186 COSTS_N_INSNS (6), /* int_div */
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
192 #else
193 #define NUM_ARGS current_function_args_info
194 #endif
196 #define REG_PV 27
197 #define REG_RA 26
199 /* Declarations of static functions. */
200 static struct machine_function *alpha_init_machine_status (void);
201 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
203 #if TARGET_ABI_OPEN_VMS
204 static void alpha_write_linkage (FILE *, const char *, tree);
205 #endif
207 static void unicosmk_output_deferred_case_vectors (FILE *);
208 static void unicosmk_gen_dsib (unsigned long *);
209 static void unicosmk_output_ssib (FILE *, const char *);
210 static int unicosmk_need_dex (rtx);
212 /* Implement TARGET_HANDLE_OPTION. */
214 static bool
215 alpha_handle_option (size_t code, const char *arg, int value)
217 switch (code)
219 case OPT_mfp_regs:
220 if (value == 0)
221 target_flags |= MASK_SOFT_FP;
222 break;
224 case OPT_mieee:
225 case OPT_mieee_with_inexact:
226 target_flags |= MASK_IEEE_CONFORMANT;
227 break;
229 case OPT_mtls_size_:
230 if (value != 16 && value != 32 && value != 64)
231 error ("bad value %qs for -mtls-size switch", arg);
232 break;
235 return true;
238 /* Parse target option strings. */
240 void
241 override_options (void)
243 static const struct cpu_table {
244 const char *const name;
245 const enum processor_type processor;
246 const int flags;
247 } cpu_table[] = {
248 { "ev4", PROCESSOR_EV4, 0 },
249 { "ev45", PROCESSOR_EV4, 0 },
250 { "21064", PROCESSOR_EV4, 0 },
251 { "ev5", PROCESSOR_EV5, 0 },
252 { "21164", PROCESSOR_EV5, 0 },
253 { "ev56", PROCESSOR_EV5, MASK_BWX },
254 { "21164a", PROCESSOR_EV5, MASK_BWX },
255 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
256 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
257 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
258 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
259 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
260 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
261 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
262 { 0, 0, 0 }
265 int i;
267 /* Unicos/Mk doesn't have shared libraries. */
268 if (TARGET_ABI_UNICOSMK && flag_pic)
270 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
271 (flag_pic > 1) ? "PIC" : "pic");
272 flag_pic = 0;
 275 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
276 floating-point instructions. Make that the default for this target. */
277 if (TARGET_ABI_UNICOSMK)
278 alpha_fprm = ALPHA_FPRM_DYN;
279 else
280 alpha_fprm = ALPHA_FPRM_NORM;
282 alpha_tp = ALPHA_TP_PROG;
283 alpha_fptm = ALPHA_FPTM_N;
285 /* We cannot use su and sui qualifiers for conversion instructions on
286 Unicos/Mk. I'm not sure if this is due to assembler or hardware
287 limitations. Right now, we issue a warning if -mieee is specified
288 and then ignore it; eventually, we should either get it right or
289 disable the option altogether. */
291 if (TARGET_IEEE)
293 if (TARGET_ABI_UNICOSMK)
294 warning (0, "-mieee not supported on Unicos/Mk");
295 else
297 alpha_tp = ALPHA_TP_INSN;
298 alpha_fptm = ALPHA_FPTM_SU;
302 if (TARGET_IEEE_WITH_INEXACT)
304 if (TARGET_ABI_UNICOSMK)
305 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
306 else
308 alpha_tp = ALPHA_TP_INSN;
309 alpha_fptm = ALPHA_FPTM_SUI;
313 if (alpha_tp_string)
315 if (! strcmp (alpha_tp_string, "p"))
316 alpha_tp = ALPHA_TP_PROG;
317 else if (! strcmp (alpha_tp_string, "f"))
318 alpha_tp = ALPHA_TP_FUNC;
319 else if (! strcmp (alpha_tp_string, "i"))
320 alpha_tp = ALPHA_TP_INSN;
321 else
322 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
325 if (alpha_fprm_string)
327 if (! strcmp (alpha_fprm_string, "n"))
328 alpha_fprm = ALPHA_FPRM_NORM;
329 else if (! strcmp (alpha_fprm_string, "m"))
330 alpha_fprm = ALPHA_FPRM_MINF;
331 else if (! strcmp (alpha_fprm_string, "c"))
332 alpha_fprm = ALPHA_FPRM_CHOP;
333 else if (! strcmp (alpha_fprm_string,"d"))
334 alpha_fprm = ALPHA_FPRM_DYN;
335 else
336 error ("bad value %qs for -mfp-rounding-mode switch",
337 alpha_fprm_string);
340 if (alpha_fptm_string)
342 if (strcmp (alpha_fptm_string, "n") == 0)
343 alpha_fptm = ALPHA_FPTM_N;
344 else if (strcmp (alpha_fptm_string, "u") == 0)
345 alpha_fptm = ALPHA_FPTM_U;
346 else if (strcmp (alpha_fptm_string, "su") == 0)
347 alpha_fptm = ALPHA_FPTM_SU;
348 else if (strcmp (alpha_fptm_string, "sui") == 0)
349 alpha_fptm = ALPHA_FPTM_SUI;
350 else
351 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
354 if (alpha_cpu_string)
356 for (i = 0; cpu_table [i].name; i++)
357 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
359 alpha_tune = alpha_cpu = cpu_table [i].processor;
360 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
361 target_flags |= cpu_table [i].flags;
362 break;
364 if (! cpu_table [i].name)
365 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
368 if (alpha_tune_string)
370 for (i = 0; cpu_table [i].name; i++)
371 if (! strcmp (alpha_tune_string, cpu_table [i].name))
373 alpha_tune = cpu_table [i].processor;
374 break;
376 if (! cpu_table [i].name)
377 error ("bad value %qs for -mcpu switch", alpha_tune_string);
380 /* Do some sanity checks on the above options. */
382 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
384 warning (0, "trap mode not supported on Unicos/Mk");
385 alpha_fptm = ALPHA_FPTM_N;
388 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
389 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
391 warning (0, "fp software completion requires -mtrap-precision=i");
392 alpha_tp = ALPHA_TP_INSN;
395 if (alpha_cpu == PROCESSOR_EV6)
397 /* Except for EV6 pass 1 (not released), we always have precise
398 arithmetic traps. Which means we can do software completion
399 without minding trap shadows. */
400 alpha_tp = ALPHA_TP_PROG;
403 if (TARGET_FLOAT_VAX)
405 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
407 warning (0, "rounding mode not supported for VAX floats");
408 alpha_fprm = ALPHA_FPRM_NORM;
410 if (alpha_fptm == ALPHA_FPTM_SUI)
412 warning (0, "trap mode not supported for VAX floats");
413 alpha_fptm = ALPHA_FPTM_SU;
415 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
416 warning (0, "128-bit long double not supported for VAX floats");
417 target_flags &= ~MASK_LONG_DOUBLE_128;
421 char *end;
422 int lat;
424 if (!alpha_mlat_string)
425 alpha_mlat_string = "L1";
427 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
428 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
430 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
431 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
432 && alpha_mlat_string[2] == '\0')
434 static int const cache_latency[][4] =
436 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
437 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
438 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
441 lat = alpha_mlat_string[1] - '0';
442 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
444 warning (0, "L%d cache latency unknown for %s",
445 lat, alpha_cpu_name[alpha_tune]);
446 lat = 3;
448 else
449 lat = cache_latency[alpha_tune][lat-1];
451 else if (! strcmp (alpha_mlat_string, "main"))
453 /* Most current memories have about 370ns latency. This is
454 a reasonable guess for a fast cpu. */
455 lat = 150;
457 else
459 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
460 lat = 3;
463 alpha_memory_latency = lat;
466 /* Default the definition of "small data" to 8 bytes. */
467 if (!g_switch_set)
468 g_switch_value = 8;
470 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
471 if (flag_pic == 1)
472 target_flags |= MASK_SMALL_DATA;
473 else if (flag_pic == 2)
474 target_flags &= ~MASK_SMALL_DATA;
476 /* Align labels and loops for optimal branching. */
477 /* ??? Kludge these by not doing anything if we don't optimize and also if
478 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
479 if (optimize > 0 && write_symbols != SDB_DEBUG)
481 if (align_loops <= 0)
482 align_loops = 16;
483 if (align_jumps <= 0)
484 align_jumps = 16;
486 if (align_functions <= 0)
487 align_functions = 16;
489 /* Acquire a unique set number for our register saves and restores. */
490 alpha_sr_alias_set = new_alias_set ();
492 /* Register variables and functions with the garbage collector. */
494 /* Set up function hooks. */
495 init_machine_status = alpha_init_machine_status;
497 /* Tell the compiler when we're using VAX floating point. */
498 if (TARGET_FLOAT_VAX)
500 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
501 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
502 REAL_MODE_FORMAT (TFmode) = NULL;
506 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
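/* For example, 0xffffffff00000000 and 0x00ff00ff00ff00ff are valid ZAP
   masks (every byte is either 0x00 or 0xff), while 0x00000000000000f0 is
   not, since its low byte mixes set and clear bits.  */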
509 zap_mask (HOST_WIDE_INT value)
511 int i;
513 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
514 i++, value >>= 8)
515 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
516 return 0;
518 return 1;
521 /* Return true if OP is valid for a particular TLS relocation.
522 We are already guaranteed that OP is a CONST. */
525 tls_symbolic_operand_1 (rtx op, int size, int unspec)
527 op = XEXP (op, 0);
529 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
530 return 0;
531 op = XVECEXP (op, 0, 0);
533 if (GET_CODE (op) != SYMBOL_REF)
534 return 0;
536 if (SYMBOL_REF_LOCAL_P (op))
538 if (alpha_tls_size > size)
539 return 0;
541 else
543 if (size != 64)
544 return 0;
547 switch (SYMBOL_REF_TLS_MODEL (op))
549 case TLS_MODEL_LOCAL_DYNAMIC:
550 return unspec == UNSPEC_DTPREL;
551 case TLS_MODEL_INITIAL_EXEC:
552 return unspec == UNSPEC_TPREL && size == 64;
553 case TLS_MODEL_LOCAL_EXEC:
554 return unspec == UNSPEC_TPREL;
555 default:
556 gcc_unreachable ();
560 /* Used by aligned_memory_operand and unaligned_memory_operand to
561 resolve what reload is going to do with OP if it's a register. */
564 resolve_reload_operand (rtx op)
566 if (reload_in_progress)
568 rtx tmp = op;
569 if (GET_CODE (tmp) == SUBREG)
570 tmp = SUBREG_REG (tmp);
571 if (GET_CODE (tmp) == REG
572 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
574 op = reg_equiv_memory_loc[REGNO (tmp)];
575 if (op == 0)
576 return 0;
579 return op;
582 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
583 the range defined for C in [I-P]. */
585 bool
586 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
588 switch (c)
590 case 'I':
591 /* An unsigned 8 bit constant. */
592 return (unsigned HOST_WIDE_INT) value < 0x100;
593 case 'J':
594 /* The constant zero. */
595 return value == 0;
596 case 'K':
597 /* A signed 16 bit constant. */
598 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
599 case 'L':
600 /* A shifted signed 16 bit constant appropriate for LDAH. */
601 return ((value & 0xffff) == 0
602 && ((value) >> 31 == -1 || value >> 31 == 0));
603 case 'M':
604 /* A constant that can be AND'ed with using a ZAP insn. */
605 return zap_mask (value);
606 case 'N':
607 /* A complemented unsigned 8 bit constant. */
608 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
609 case 'O':
610 /* A negated unsigned 8 bit constant. */
611 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
612 case 'P':
613 /* The constant 1, 2 or 3. */
614 return value == 1 || value == 2 || value == 3;
616 default:
617 return false;
621 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
622 matches for C in [GH]. */
624 bool
625 alpha_const_double_ok_for_letter_p (rtx value, int c)
627 switch (c)
629 case 'G':
630 /* The floating point zero constant. */
631 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
632 && value == CONST0_RTX (GET_MODE (value)));
634 case 'H':
635 /* A valid operand of a ZAP insn. */
636 return (GET_MODE (value) == VOIDmode
637 && zap_mask (CONST_DOUBLE_LOW (value))
638 && zap_mask (CONST_DOUBLE_HIGH (value)));
640 default:
641 return false;
645 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
646 matches for C. */
648 bool
649 alpha_extra_constraint (rtx value, int c)
651 switch (c)
653 case 'Q':
654 return normal_memory_operand (value, VOIDmode);
655 case 'R':
656 return direct_call_operand (value, Pmode);
657 case 'S':
658 return (GET_CODE (value) == CONST_INT
659 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
660 case 'T':
661 return GET_CODE (value) == HIGH;
662 case 'U':
663 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
664 case 'W':
665 return (GET_CODE (value) == CONST_VECTOR
666 && value == CONST0_RTX (GET_MODE (value)));
667 default:
668 return false;
 672 /* The scalar modes supported differ from the default check-what-c-supports
673 version in that sometimes TFmode is available even when long double
674 indicates only DFmode. On unicosmk, we have the situation that HImode
675 doesn't map to any C type, but of course we still support that. */
677 static bool
678 alpha_scalar_mode_supported_p (enum machine_mode mode)
680 switch (mode)
682 case QImode:
683 case HImode:
684 case SImode:
685 case DImode:
686 case TImode: /* via optabs.c */
687 return true;
689 case SFmode:
690 case DFmode:
691 return true;
693 case TFmode:
694 return TARGET_HAS_XFLOATING_LIBS;
696 default:
697 return false;
701 /* Alpha implements a couple of integer vector mode operations when
702 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
703 which allows the vectorizer to operate on e.g. move instructions,
704 or when expand_vector_operations can do something useful. */
706 static bool
707 alpha_vector_mode_supported_p (enum machine_mode mode)
709 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
712 /* Return 1 if this function can directly return via $26. */
715 direct_return (void)
717 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
718 && reload_completed
719 && alpha_sa_size () == 0
720 && get_frame_size () == 0
721 && current_function_outgoing_args_size == 0
722 && current_function_pretend_args_size == 0);
725 /* Return the ADDR_VEC associated with a tablejump insn. */
728 alpha_tablejump_addr_vec (rtx insn)
730 rtx tmp;
732 tmp = JUMP_LABEL (insn);
733 if (!tmp)
734 return NULL_RTX;
735 tmp = NEXT_INSN (tmp);
736 if (!tmp)
737 return NULL_RTX;
738 if (GET_CODE (tmp) == JUMP_INSN
739 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
740 return PATTERN (tmp);
741 return NULL_RTX;
744 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
747 alpha_tablejump_best_label (rtx insn)
749 rtx jump_table = alpha_tablejump_addr_vec (insn);
750 rtx best_label = NULL_RTX;
752 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
753 there for edge frequency counts from profile data. */
755 if (jump_table)
757 int n_labels = XVECLEN (jump_table, 1);
758 int best_count = -1;
759 int i, j;
761 for (i = 0; i < n_labels; i++)
763 int count = 1;
765 for (j = i + 1; j < n_labels; j++)
766 if (XEXP (XVECEXP (jump_table, 1, i), 0)
767 == XEXP (XVECEXP (jump_table, 1, j), 0))
768 count++;
770 if (count > best_count)
771 best_count = count, best_label = XVECEXP (jump_table, 1, i);
775 return best_label ? best_label : const0_rtx;
778 /* Return the TLS model to use for SYMBOL. */
780 static enum tls_model
781 tls_symbolic_operand_type (rtx symbol)
783 enum tls_model model;
785 if (GET_CODE (symbol) != SYMBOL_REF)
786 return 0;
787 model = SYMBOL_REF_TLS_MODEL (symbol);
789 /* Local-exec with a 64-bit size is the same code as initial-exec. */
790 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
791 model = TLS_MODEL_INITIAL_EXEC;
793 return model;
796 /* Return true if the function DECL will share the same GP as any
797 function in the current unit of translation. */
799 static bool
800 decl_has_samegp (tree decl)
802 /* Functions that are not local can be overridden, and thus may
803 not share the same gp. */
804 if (!(*targetm.binds_local_p) (decl))
805 return false;
807 /* If -msmall-data is in effect, assume that there is only one GP
808 for the module, and so any local symbol has this property. We
809 need explicit relocations to be able to enforce this for symbols
810 not defined in this unit of translation, however. */
811 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
812 return true;
814 /* Functions that are not external are defined in this UoT. */
815 /* ??? Irritatingly, static functions not yet emitted are still
816 marked "external". Apply this to non-static functions only. */
817 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
820 /* Return true if EXP should be placed in the small data section. */
822 static bool
823 alpha_in_small_data_p (tree exp)
825 /* We want to merge strings, so we never consider them small data. */
826 if (TREE_CODE (exp) == STRING_CST)
827 return false;
829 /* Functions are never in the small data area. Duh. */
830 if (TREE_CODE (exp) == FUNCTION_DECL)
831 return false;
833 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
835 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
836 if (strcmp (section, ".sdata") == 0
837 || strcmp (section, ".sbss") == 0)
838 return true;
840 else
842 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
844 /* If this is an incomplete type with size 0, then we can't put it
845 in sdata because it might be too big when completed. */
846 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
847 return true;
850 return false;
853 #if TARGET_ABI_OPEN_VMS
854 static bool
855 alpha_linkage_symbol_p (const char *symname)
857 int symlen = strlen (symname);
859 if (symlen > 4)
860 return strcmp (&symname [symlen - 4], "..lk") == 0;
862 return false;
865 #define LINKAGE_SYMBOL_REF_P(X) \
866 ((GET_CODE (X) == SYMBOL_REF \
867 && alpha_linkage_symbol_p (XSTR (X, 0))) \
868 || (GET_CODE (X) == CONST \
869 && GET_CODE (XEXP (X, 0)) == PLUS \
870 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
871 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
872 #endif
874 /* legitimate_address_p recognizes an RTL expression that is a valid
875 memory address for an instruction. The MODE argument is the
876 machine mode for the MEM expression that wants to use this address.
878 For Alpha, we have either a constant address or the sum of a
879 register and a constant address, or just a register. For DImode,
 880    any of those forms can be surrounded with an AND that clears the
881 low-order three bits; this is an "unaligned" access. */
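/* A rough sketch of the accepted forms, for illustration only:
     (reg $16)                               plain base register
     (plus (reg $16) (const_int 4))          base register plus 16-bit offset
     (and (plus (reg $16) (const_int 4))
          (const_int -8))                    DImode "unaligned" ldq_u form
   and, with explicit relocations, LO_SUM addresses and small-data symbols.  */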
883 bool
884 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
886 /* If this is an ldq_u type address, discard the outer AND. */
887 if (mode == DImode
888 && GET_CODE (x) == AND
889 && GET_CODE (XEXP (x, 1)) == CONST_INT
890 && INTVAL (XEXP (x, 1)) == -8)
891 x = XEXP (x, 0);
893 /* Discard non-paradoxical subregs. */
894 if (GET_CODE (x) == SUBREG
895 && (GET_MODE_SIZE (GET_MODE (x))
896 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
897 x = SUBREG_REG (x);
899 /* Unadorned general registers are valid. */
900 if (REG_P (x)
901 && (strict
902 ? STRICT_REG_OK_FOR_BASE_P (x)
903 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
904 return true;
906 /* Constant addresses (i.e. +/- 32k) are valid. */
907 if (CONSTANT_ADDRESS_P (x))
908 return true;
910 #if TARGET_ABI_OPEN_VMS
911 if (LINKAGE_SYMBOL_REF_P (x))
912 return true;
913 #endif
915 /* Register plus a small constant offset is valid. */
916 if (GET_CODE (x) == PLUS)
918 rtx ofs = XEXP (x, 1);
919 x = XEXP (x, 0);
921 /* Discard non-paradoxical subregs. */
922 if (GET_CODE (x) == SUBREG
923 && (GET_MODE_SIZE (GET_MODE (x))
924 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
925 x = SUBREG_REG (x);
927 if (REG_P (x))
929 if (! strict
930 && NONSTRICT_REG_OK_FP_BASE_P (x)
931 && GET_CODE (ofs) == CONST_INT)
932 return true;
933 if ((strict
934 ? STRICT_REG_OK_FOR_BASE_P (x)
935 : NONSTRICT_REG_OK_FOR_BASE_P (x))
936 && CONSTANT_ADDRESS_P (ofs))
937 return true;
941 /* If we're managing explicit relocations, LO_SUM is valid, as
942 are small data symbols. */
943 else if (TARGET_EXPLICIT_RELOCS)
945 if (small_symbolic_operand (x, Pmode))
946 return true;
948 if (GET_CODE (x) == LO_SUM)
950 rtx ofs = XEXP (x, 1);
951 x = XEXP (x, 0);
953 /* Discard non-paradoxical subregs. */
954 if (GET_CODE (x) == SUBREG
955 && (GET_MODE_SIZE (GET_MODE (x))
956 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
957 x = SUBREG_REG (x);
959 /* Must have a valid base register. */
960 if (! (REG_P (x)
961 && (strict
962 ? STRICT_REG_OK_FOR_BASE_P (x)
963 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
964 return false;
966 /* The symbol must be local. */
967 if (local_symbolic_operand (ofs, Pmode)
968 || dtp32_symbolic_operand (ofs, Pmode)
969 || tp32_symbolic_operand (ofs, Pmode))
970 return true;
974 return false;
977 /* Build the SYMBOL_REF for __tls_get_addr. */
979 static GTY(()) rtx tls_get_addr_libfunc;
981 static rtx
982 get_tls_get_addr (void)
984 if (!tls_get_addr_libfunc)
985 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
986 return tls_get_addr_libfunc;
989 /* Try machine-dependent ways of modifying an illegitimate address
990 to be legitimate. If we find one, return the new, valid address. */
993 alpha_legitimize_address (rtx x, rtx scratch,
994 enum machine_mode mode ATTRIBUTE_UNUSED)
996 HOST_WIDE_INT addend;
998 /* If the address is (plus reg const_int) and the CONST_INT is not a
999 valid offset, compute the high part of the constant and add it to
1000 the register. Then our address is (plus temp low-part-const). */
1001 if (GET_CODE (x) == PLUS
1002 && GET_CODE (XEXP (x, 0)) == REG
1003 && GET_CODE (XEXP (x, 1)) == CONST_INT
1004 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1006 addend = INTVAL (XEXP (x, 1));
1007 x = XEXP (x, 0);
1008 goto split_addend;
1011 /* If the address is (const (plus FOO const_int)), find the low-order
1012 part of the CONST_INT. Then load FOO plus any high-order part of the
1013 CONST_INT into a register. Our address is (plus reg low-part-const).
1014 This is done to reduce the number of GOT entries. */
1015 if (!no_new_pseudos
1016 && GET_CODE (x) == CONST
1017 && GET_CODE (XEXP (x, 0)) == PLUS
1018 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1020 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1021 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1022 goto split_addend;
1025 /* If we have a (plus reg const), emit the load as in (2), then add
1026 the two registers, and finally generate (plus reg low-part-const) as
1027 our address. */
1028 if (!no_new_pseudos
1029 && GET_CODE (x) == PLUS
1030 && GET_CODE (XEXP (x, 0)) == REG
1031 && GET_CODE (XEXP (x, 1)) == CONST
1032 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1033 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1035 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1036 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1037 XEXP (XEXP (XEXP (x, 1), 0), 0),
1038 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1039 goto split_addend;
1042 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1043 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1045 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1047 switch (tls_symbolic_operand_type (x))
1049 case TLS_MODEL_NONE:
1050 break;
1052 case TLS_MODEL_GLOBAL_DYNAMIC:
1053 start_sequence ();
1055 r0 = gen_rtx_REG (Pmode, 0);
1056 r16 = gen_rtx_REG (Pmode, 16);
1057 tga = get_tls_get_addr ();
1058 dest = gen_reg_rtx (Pmode);
1059 seq = GEN_INT (alpha_next_sequence_number++);
1061 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1062 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1063 insn = emit_call_insn (insn);
1064 CONST_OR_PURE_CALL_P (insn) = 1;
1065 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1067 insn = get_insns ();
1068 end_sequence ();
1070 emit_libcall_block (insn, dest, r0, x);
1071 return dest;
1073 case TLS_MODEL_LOCAL_DYNAMIC:
1074 start_sequence ();
1076 r0 = gen_rtx_REG (Pmode, 0);
1077 r16 = gen_rtx_REG (Pmode, 16);
1078 tga = get_tls_get_addr ();
1079 scratch = gen_reg_rtx (Pmode);
1080 seq = GEN_INT (alpha_next_sequence_number++);
1082 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1083 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1084 insn = emit_call_insn (insn);
1085 CONST_OR_PURE_CALL_P (insn) = 1;
1086 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1088 insn = get_insns ();
1089 end_sequence ();
1091 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1092 UNSPEC_TLSLDM_CALL);
1093 emit_libcall_block (insn, scratch, r0, eqv);
1095 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1096 eqv = gen_rtx_CONST (Pmode, eqv);
1098 if (alpha_tls_size == 64)
1100 dest = gen_reg_rtx (Pmode);
1101 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1102 emit_insn (gen_adddi3 (dest, dest, scratch));
1103 return dest;
1105 if (alpha_tls_size == 32)
1107 insn = gen_rtx_HIGH (Pmode, eqv);
1108 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1109 scratch = gen_reg_rtx (Pmode);
1110 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1112 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1114 case TLS_MODEL_INITIAL_EXEC:
1115 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1116 eqv = gen_rtx_CONST (Pmode, eqv);
1117 tp = gen_reg_rtx (Pmode);
1118 scratch = gen_reg_rtx (Pmode);
1119 dest = gen_reg_rtx (Pmode);
1121 emit_insn (gen_load_tp (tp));
1122 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1123 emit_insn (gen_adddi3 (dest, tp, scratch));
1124 return dest;
1126 case TLS_MODEL_LOCAL_EXEC:
1127 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1128 eqv = gen_rtx_CONST (Pmode, eqv);
1129 tp = gen_reg_rtx (Pmode);
1131 emit_insn (gen_load_tp (tp));
1132 if (alpha_tls_size == 32)
1134 insn = gen_rtx_HIGH (Pmode, eqv);
1135 insn = gen_rtx_PLUS (Pmode, tp, insn);
1136 tp = gen_reg_rtx (Pmode);
1137 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1139 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1141 default:
1142 gcc_unreachable ();
1145 if (local_symbolic_operand (x, Pmode))
1147 if (small_symbolic_operand (x, Pmode))
1148 return x;
1149 else
1151 if (!no_new_pseudos)
1152 scratch = gen_reg_rtx (Pmode);
1153 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1154 gen_rtx_HIGH (Pmode, x)));
1155 return gen_rtx_LO_SUM (Pmode, scratch, x);
1160 return NULL;
1162 split_addend:
1164 HOST_WIDE_INT low, high;
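  /* Worked example: an addend of 0x1234abcd splits into low = -0x5433
     (the sign-extended low 16 bits) and high = 0x12350000, so a single
     ldah covers the high part and the low part fits in the 16-bit
     displacement field.  */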
1166 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1167 addend -= low;
1168 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1169 addend -= high;
1171 if (addend)
1172 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1173 (no_new_pseudos ? scratch : NULL_RTX),
1174 1, OPTAB_LIB_WIDEN);
1175 if (high)
1176 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1177 (no_new_pseudos ? scratch : NULL_RTX),
1178 1, OPTAB_LIB_WIDEN);
1180 return plus_constant (x, low);
1184 /* Primarily this is required for TLS symbols, but given that our move
1185 patterns *ought* to be able to handle any symbol at any time, we
1186 should never be spilling symbolic operands to the constant pool, ever. */
1188 static bool
1189 alpha_cannot_force_const_mem (rtx x)
1191 enum rtx_code code = GET_CODE (x);
1192 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1195 /* We do not allow indirect calls to be optimized into sibling calls, nor
1196 can we allow a call to a function with a different GP to be optimized
1197 into a sibcall. */
1199 static bool
1200 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1202 /* Can't do indirect tail calls, since we don't know if the target
1203 uses the same GP. */
1204 if (!decl)
1205 return false;
1207 /* Otherwise, we can make a tail call if the target function shares
1208 the same GP. */
1209 return decl_has_samegp (decl);
1213 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1215 rtx x = *px;
1217 /* Don't re-split. */
1218 if (GET_CODE (x) == LO_SUM)
1219 return -1;
1221 return small_symbolic_operand (x, Pmode) != 0;
1224 static int
1225 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1227 rtx x = *px;
1229 /* Don't re-split. */
1230 if (GET_CODE (x) == LO_SUM)
1231 return -1;
1233 if (small_symbolic_operand (x, Pmode))
1235 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1236 *px = x;
1237 return -1;
1240 return 0;
1244 split_small_symbolic_operand (rtx x)
1246 x = copy_insn (x);
1247 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1248 return x;
1251 /* Indicate that INSN cannot be duplicated. This is true for any insn
1252 that we've marked with gpdisp relocs, since those have to stay in
1253 1-1 correspondence with one another.
1255 Technically we could copy them if we could set up a mapping from one
1256 sequence number to another, across the set of insns to be duplicated.
1257 This seems overly complicated and error-prone since interblock motion
1258 from sched-ebb could move one of the pair of insns to a different block.
1260 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1261 then they'll be in a different block from their ldgp. Which could lead
1262 the bb reorder code to think that it would be ok to copy just the block
1263 containing the call and branch to the block containing the ldgp. */
1265 static bool
1266 alpha_cannot_copy_insn_p (rtx insn)
1268 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1269 return false;
1270 if (recog_memoized (insn) >= 0)
1271 return get_attr_cannot_copy (insn);
1272 else
1273 return false;
1277 /* Try a machine-dependent way of reloading an illegitimate address
1278 operand. If we find one, push the reload and return the new rtx. */
1281 alpha_legitimize_reload_address (rtx x,
1282 enum machine_mode mode ATTRIBUTE_UNUSED,
1283 int opnum, int type,
1284 int ind_levels ATTRIBUTE_UNUSED)
1286 /* We must recognize output that we have already generated ourselves. */
1287 if (GET_CODE (x) == PLUS
1288 && GET_CODE (XEXP (x, 0)) == PLUS
1289 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1290 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1291 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1293 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1294 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1295 opnum, type);
1296 return x;
1299 /* We wish to handle large displacements off a base register by
1300 splitting the addend across an ldah and the mem insn. This
 1301    cuts the number of extra insns needed from 3 to 1.  */
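  /* For example, a displacement of 0x9000 does not fit in 16 signed bits;
     it is reloaded as high = 0x10000 (one ldah into the base register)
     with low = -0x7000 left in the mem's displacement field.  */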
1302 if (GET_CODE (x) == PLUS
1303 && GET_CODE (XEXP (x, 0)) == REG
1304 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1305 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1306 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1308 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1309 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1310 HOST_WIDE_INT high
1311 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1313 /* Check for 32-bit overflow. */
1314 if (high + low != val)
1315 return NULL_RTX;
1317 /* Reload the high part into a base reg; leave the low part
1318 in the mem directly. */
1319 x = gen_rtx_PLUS (GET_MODE (x),
1320 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1321 GEN_INT (high)),
1322 GEN_INT (low));
1324 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1325 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1326 opnum, type);
1327 return x;
1330 return NULL_RTX;
1333 /* Compute a (partial) cost for rtx X. Return true if the complete
1334 cost has been computed, and false if subexpressions should be
1335 scanned. In either case, *TOTAL contains the cost result. */
1337 static bool
1338 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1340 enum machine_mode mode = GET_MODE (x);
1341 bool float_mode_p = FLOAT_MODE_P (mode);
1342 const struct alpha_rtx_cost_data *cost_data;
1344 if (optimize_size)
1345 cost_data = &alpha_rtx_cost_size;
1346 else
1347 cost_data = &alpha_rtx_cost_data[alpha_tune];
1349 switch (code)
1351 case CONST_INT:
1352 /* If this is an 8-bit constant, return zero since it can be used
1353 nearly anywhere with no cost. If it is a valid operand for an
1354 ADD or AND, likewise return 0 if we know it will be used in that
1355 context. Otherwise, return 2 since it might be used there later.
1356 All other constants take at least two insns. */
1357 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1359 *total = 0;
1360 return true;
1362 /* FALLTHRU */
1364 case CONST_DOUBLE:
1365 if (x == CONST0_RTX (mode))
1366 *total = 0;
1367 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1368 || (outer_code == AND && and_operand (x, VOIDmode)))
1369 *total = 0;
1370 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1371 *total = 2;
1372 else
1373 *total = COSTS_N_INSNS (2);
1374 return true;
1376 case CONST:
1377 case SYMBOL_REF:
1378 case LABEL_REF:
1379 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1380 *total = COSTS_N_INSNS (outer_code != MEM);
1381 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1382 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1383 else if (tls_symbolic_operand_type (x))
1384 /* Estimate of cost for call_pal rduniq. */
1385 /* ??? How many insns do we emit here? More than one... */
1386 *total = COSTS_N_INSNS (15);
1387 else
1388 /* Otherwise we do a load from the GOT. */
1389 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1390 return true;
1392 case HIGH:
1393 /* This is effectively an add_operand. */
1394 *total = 2;
1395 return true;
1397 case PLUS:
1398 case MINUS:
1399 if (float_mode_p)
1400 *total = cost_data->fp_add;
1401 else if (GET_CODE (XEXP (x, 0)) == MULT
1402 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1404 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1405 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1406 return true;
1408 return false;
1410 case MULT:
1411 if (float_mode_p)
1412 *total = cost_data->fp_mult;
1413 else if (mode == DImode)
1414 *total = cost_data->int_mult_di;
1415 else
1416 *total = cost_data->int_mult_si;
1417 return false;
1419 case ASHIFT:
1420 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1421 && INTVAL (XEXP (x, 1)) <= 3)
1423 *total = COSTS_N_INSNS (1);
1424 return false;
1426 /* FALLTHRU */
1428 case ASHIFTRT:
1429 case LSHIFTRT:
1430 *total = cost_data->int_shift;
1431 return false;
1433 case IF_THEN_ELSE:
1434 if (float_mode_p)
1435 *total = cost_data->fp_add;
1436 else
1437 *total = cost_data->int_cmov;
1438 return false;
1440 case DIV:
1441 case UDIV:
1442 case MOD:
1443 case UMOD:
1444 if (!float_mode_p)
1445 *total = cost_data->int_div;
1446 else if (mode == SFmode)
1447 *total = cost_data->fp_div_sf;
1448 else
1449 *total = cost_data->fp_div_df;
1450 return false;
1452 case MEM:
1453 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1454 return true;
1456 case NEG:
1457 if (! float_mode_p)
1459 *total = COSTS_N_INSNS (1);
1460 return false;
1462 /* FALLTHRU */
1464 case ABS:
1465 if (! float_mode_p)
1467 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1468 return false;
1470 /* FALLTHRU */
1472 case FLOAT:
1473 case UNSIGNED_FLOAT:
1474 case FIX:
1475 case UNSIGNED_FIX:
1476 case FLOAT_EXTEND:
1477 case FLOAT_TRUNCATE:
1478 *total = cost_data->fp_add;
1479 return false;
1481 default:
1482 return false;
1486 /* REF is an alignable memory location. Place an aligned SImode
1487 reference into *PALIGNED_MEM and the number of bits to shift into
1488 *PBITNUM. SCRATCH is a free register for use in reloading out
1489 of range stack slots. */
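/* For instance, a QImode reference at byte offset 5 from a base register
   yields an aligned SImode reference at offset 4 with *PBITNUM = 8 on a
   little-endian target: the byte of interest sits 8 bits into that word.  */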
1491 void
1492 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1494 rtx base;
1495 HOST_WIDE_INT offset = 0;
1497 gcc_assert (GET_CODE (ref) == MEM);
1499 if (reload_in_progress
1500 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1502 base = find_replacement (&XEXP (ref, 0));
1504 gcc_assert (memory_address_p (GET_MODE (ref), base));
1506 else
1507 base = XEXP (ref, 0);
1509 if (GET_CODE (base) == PLUS)
1510 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1512 *paligned_mem
1513 = widen_memory_access (ref, SImode, (offset & ~3) - offset);
1515 if (WORDS_BIG_ENDIAN)
1516 *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
1517 + (offset & 3) * 8));
1518 else
1519 *pbitnum = GEN_INT ((offset & 3) * 8);
1522 /* Similar, but just get the address. Handle the two reload cases.
1523 Add EXTRA_OFFSET to the address we return. */
1526 get_unaligned_address (rtx ref, int extra_offset)
1528 rtx base;
1529 HOST_WIDE_INT offset = 0;
1531 gcc_assert (GET_CODE (ref) == MEM);
1533 if (reload_in_progress
1534 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1536 base = find_replacement (&XEXP (ref, 0));
1538 gcc_assert (memory_address_p (GET_MODE (ref), base));
1540 else
1541 base = XEXP (ref, 0);
1543 if (GET_CODE (base) == PLUS)
1544 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1546 return plus_constant (base, offset + extra_offset);
1549 /* On the Alpha, all (non-symbolic) constants except zero go into
1550 a floating-point register via memory. Note that we cannot
1551 return anything that is not a subset of CLASS, and that some
1552 symbolic constants cannot be dropped to memory. */
1554 enum reg_class
1555 alpha_preferred_reload_class(rtx x, enum reg_class class)
1557 /* Zero is present in any register class. */
1558 if (x == CONST0_RTX (GET_MODE (x)))
1559 return class;
1561 /* These sorts of constants we can easily drop to memory. */
1562 if (GET_CODE (x) == CONST_INT
1563 || GET_CODE (x) == CONST_DOUBLE
1564 || GET_CODE (x) == CONST_VECTOR)
1566 if (class == FLOAT_REGS)
1567 return NO_REGS;
1568 if (class == ALL_REGS)
1569 return GENERAL_REGS;
1570 return class;
1573 /* All other kinds of constants should not (and in the case of HIGH
1574 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1575 secondary reload. */
1576 if (CONSTANT_P (x))
1577 return (class == ALL_REGS ? GENERAL_REGS : class);
1579 return class;
1582 /* Loading and storing HImode or QImode values to and from memory
1583 usually requires a scratch register. The exceptions are loading
1584 QImode and HImode from an aligned address to a general register
1585 unless byte instructions are permitted.
1587 We also cannot load an unaligned address or a paradoxical SUBREG
1588 into an FP register.
1590 We also cannot do integral arithmetic into FP regs, as might result
1591 from register elimination into a DImode fp register. */
1593 enum reg_class
1594 secondary_reload_class (enum reg_class class, enum machine_mode mode,
1595 rtx x, int in)
1597 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1599 if (GET_CODE (x) == MEM
1600 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1601 || (GET_CODE (x) == SUBREG
1602 && (GET_CODE (SUBREG_REG (x)) == MEM
1603 || (GET_CODE (SUBREG_REG (x)) == REG
1604 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1606 if (!in || !aligned_memory_operand(x, mode))
1607 return GENERAL_REGS;
1611 if (class == FLOAT_REGS)
1613 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1614 return GENERAL_REGS;
1616 if (GET_CODE (x) == SUBREG
1617 && (GET_MODE_SIZE (GET_MODE (x))
1618 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1619 return GENERAL_REGS;
1621 if (in && INTEGRAL_MODE_P (mode)
1622 && ! (memory_operand (x, mode) || x == const0_rtx))
1623 return GENERAL_REGS;
1626 return NO_REGS;
1629 /* Subfunction of the following function. Update the flags of any MEM
1630 found in part of X. */
1632 static int
1633 alpha_set_memflags_1 (rtx *xp, void *data)
1635 rtx x = *xp, orig = (rtx) data;
1637 if (GET_CODE (x) != MEM)
1638 return 0;
1640 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1641 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1642 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1643 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1644 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1646 /* Sadly, we cannot use alias sets because the extra aliasing
1647 produced by the AND interferes. Given that two-byte quantities
1648 are the only thing we would be able to differentiate anyway,
1649 there does not seem to be any point in convoluting the early
1650 out of the alias check. */
1652 return -1;
1655 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1656 generated to perform a memory operation, look for any MEMs in either
1657 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1658 volatile flags from REF into each of the MEMs found. If REF is not
1659 a MEM, don't do anything. */
1661 void
1662 alpha_set_memflags (rtx insn, rtx ref)
1664 rtx *base_ptr;
1666 if (GET_CODE (ref) != MEM)
1667 return;
1669 /* This is only called from alpha.md, after having had something
1670 generated from one of the insn patterns. So if everything is
1671 zero, the pattern is already up-to-date. */
1672 if (!MEM_VOLATILE_P (ref)
1673 && !MEM_IN_STRUCT_P (ref)
1674 && !MEM_SCALAR_P (ref)
1675 && !MEM_NOTRAP_P (ref)
1676 && !MEM_READONLY_P (ref))
1677 return;
1679 if (INSN_P (insn))
1680 base_ptr = &PATTERN (insn);
1681 else
1682 base_ptr = &insn;
1683 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1686 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1687 int, bool);
1689 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1690 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1691 and return pc_rtx if successful. */
1693 static rtx
1694 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1695 HOST_WIDE_INT c, int n, bool no_output)
1697 HOST_WIDE_INT new;
1698 int i, bits;
1699 /* Use a pseudo if highly optimizing and still generating RTL. */
1700 rtx subtarget
1701 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1702 rtx temp, insn;
1704 /* If this is a sign-extended 32-bit constant, we can do this in at most
1705 three insns, so do it if we have enough insns left. We always have
1706 a sign-extended 32-bit constant when compiling on a narrow machine. */
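  /* E.g. c = 0x12345678 decomposes as high = 0x1234, low = 0x5678, so it
     is built with "ldah reg,0x1234($31)" followed by "lda reg,0x5678(reg)".  */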
1708 if (HOST_BITS_PER_WIDE_INT != 64
1709 || c >> 31 == -1 || c >> 31 == 0)
1711 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1712 HOST_WIDE_INT tmp1 = c - low;
1713 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1714 HOST_WIDE_INT extra = 0;
1716 /* If HIGH will be interpreted as negative but the constant is
 1717 	 positive, we must adjust it to do two ldah insns.  */
1719 if ((high & 0x8000) != 0 && c >= 0)
1721 extra = 0x4000;
1722 tmp1 -= 0x40000000;
1723 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1726 if (c == low || (low == 0 && extra == 0))
1728 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1729 but that meant that we can't handle INT_MIN on 32-bit machines
1730 (like NT/Alpha), because we recurse indefinitely through
1731 emit_move_insn to gen_movdi. So instead, since we know exactly
1732 what we want, create it explicitly. */
1734 if (no_output)
1735 return pc_rtx;
1736 if (target == NULL)
1737 target = gen_reg_rtx (mode);
1738 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1739 return target;
1741 else if (n >= 2 + (extra != 0))
1743 if (no_output)
1744 return pc_rtx;
1745 if (no_new_pseudos)
1747 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1748 temp = target;
1750 else
1751 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1752 subtarget, mode);
1754 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1755 This means that if we go through expand_binop, we'll try to
1756 generate extensions, etc, which will require new pseudos, which
1757 will fail during some split phases. The SImode add patterns
1758 still exist, but are not named. So build the insns by hand. */
1760 if (extra != 0)
1762 if (! subtarget)
1763 subtarget = gen_reg_rtx (mode);
1764 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1765 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1766 emit_insn (insn);
1767 temp = subtarget;
1770 if (target == NULL)
1771 target = gen_reg_rtx (mode);
1772 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1773 insn = gen_rtx_SET (VOIDmode, target, insn);
1774 emit_insn (insn);
1775 return target;
1779 /* If we couldn't do it that way, try some other methods. But if we have
1780 no instructions left, don't bother. Likewise, if this is SImode and
1781 we can't make pseudos, we can't do anything since the expand_binop
1782 and expand_unop calls will widen and try to make pseudos. */
1784 if (n == 1 || (mode == SImode && no_new_pseudos))
1785 return 0;
1787 /* Next, see if we can load a related constant and then shift and possibly
1788 negate it to get the constant we want. Try this once each increasing
1789 numbers of insns. */
1791 for (i = 1; i < n; i++)
 1793       /* First, see if, minus some low bits, we have an easy load of the
 1794 	 high bits.  */
1796 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1797 if (new != 0)
1799 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1800 if (temp)
1802 if (no_output)
1803 return temp;
1804 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1805 target, 0, OPTAB_WIDEN);
1809 /* Next try complementing. */
1810 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1811 if (temp)
1813 if (no_output)
1814 return temp;
1815 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1818 /* Next try to form a constant and do a left shift. We can do this
1819 if some low-order bits are zero; the exact_log2 call below tells
1820 us that information. The bits we are shifting out could be any
1821 value, but here we'll just try the 0- and sign-extended forms of
1822 the constant. To try to increase the chance of having the same
1823 constant in more than one insn, start at the highest number of
1824 bits to shift, but try all possibilities in case a ZAPNOT will
1825 be useful. */
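      /* As an illustration, c = 0x2340000 has exact_log2 (c & -c) == 18, so
	 we can load 0x8d in one insn and shift it left by 18.  */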
1827 bits = exact_log2 (c & -c);
1828 if (bits > 0)
1829 for (; bits > 0; bits--)
1831 new = c >> bits;
1832 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1833 if (!temp && c < 0)
1835 new = (unsigned HOST_WIDE_INT)c >> bits;
1836 temp = alpha_emit_set_const (subtarget, mode, new,
1837 i, no_output);
1839 if (temp)
1841 if (no_output)
1842 return temp;
1843 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1844 target, 0, OPTAB_WIDEN);
1848 /* Now try high-order zero bits. Here we try the shifted-in bits as
1849 all zero and all ones. Be careful to avoid shifting outside the
1850 mode and to avoid shifting outside the host wide int size. */
1851 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1852 confuse the recursive call and set all of the high 32 bits. */
1854 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1855 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1856 if (bits > 0)
1857 for (; bits > 0; bits--)
1859 new = c << bits;
1860 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1861 if (!temp)
1863 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1864 temp = alpha_emit_set_const (subtarget, mode, new,
1865 i, no_output);
1867 if (temp)
1869 if (no_output)
1870 return temp;
1871 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1872 target, 1, OPTAB_WIDEN);
1876 /* Now try high-order 1 bits. We get that with a sign-extension.
1877 But one bit isn't enough here. Be careful to avoid shifting outside
1878 the mode and to avoid shifting outside the host wide int size. */
1880 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1881 - floor_log2 (~ c) - 2);
1882 if (bits > 0)
1883 for (; bits > 0; bits--)
1885 new = c << bits;
1886 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1887 if (!temp)
1889 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1890 temp = alpha_emit_set_const (subtarget, mode, new,
1891 i, no_output);
1893 if (temp)
1895 if (no_output)
1896 return temp;
1897 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1898 target, 0, OPTAB_WIDEN);
1903 #if HOST_BITS_PER_WIDE_INT == 64
1904 /* Finally, see if can load a value into the target that is the same as the
1905 constant except that all bytes that are 0 are changed to be 0xff. If we
1906 can, then we can do a ZAPNOT to obtain the desired constant. */
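  /* E.g. for c = 0x0000ffff0000ffff every zero byte becomes 0xff, giving
     new = -1, which loads in one insn; the subsequent AND mask (c | ~new)
     is then c itself, a ZAP mask, so the AND becomes a single zapnot.  */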
1908 new = c;
1909 for (i = 0; i < 64; i += 8)
1910 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1911 new |= (HOST_WIDE_INT) 0xff << i;
1913 /* We are only called for SImode and DImode. If this is SImode, ensure that
1914 we are sign extended to a full word. */
1916 if (mode == SImode)
1917 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1919 if (new != c)
1921 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1922 if (temp)
1924 if (no_output)
1925 return temp;
1926 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1927 target, 0, OPTAB_WIDEN);
1930 #endif
1932 return 0;
1935 /* Try to output insns to set TARGET equal to the constant C if it can be
1936 done in less than N insns. Do all computations in MODE. Returns the place
1937 where the output has been placed if it can be done and the insns have been
1938 emitted. If it would take more than N insns, zero is returned and no
 1939    insns are emitted.  */
1941 static rtx
1942 alpha_emit_set_const (rtx target, enum machine_mode mode,
1943 HOST_WIDE_INT c, int n, bool no_output)
1945 enum machine_mode orig_mode = mode;
1946 rtx orig_target = target;
1947 rtx result = 0;
1948 int i;
1950 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1951 can't load this constant in one insn, do this in DImode. */
1952 if (no_new_pseudos && mode == SImode
1953 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1955 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1956 if (result)
1957 return result;
1959 target = no_output ? NULL : gen_lowpart (DImode, target);
1960 mode = DImode;
1962 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1964 target = no_output ? NULL : gen_lowpart (DImode, target);
1965 mode = DImode;
1968 /* Try 1 insn, then 2, then up to N. */
1969 for (i = 1; i <= n; i++)
1971 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1972 if (result)
1974 rtx insn, set;
1976 if (no_output)
1977 return result;
1979 insn = get_last_insn ();
1980 set = single_set (insn);
1981 if (! CONSTANT_P (SET_SRC (set)))
1982 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1983 break;
1987 /* Allow for the case where we changed the mode of TARGET. */
1988 if (result)
1990 if (result == target)
1991 result = orig_target;
1992 else if (mode != orig_mode)
1993 result = gen_lowpart (orig_mode, result);
1996 return result;
1999 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
 2000    fall back to a straightforward decomposition.  We do this to avoid
2001 exponential run times encountered when looking for longer sequences
2002 with alpha_emit_set_const. */
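/* The constant is rebuilt as (((d4 + d3) << 32) + d2) + d1 below, where
   d1/d3 are sign-extended 16-bit pieces (lda-shaped) and d2/d4 are the
   matching high parts with zero low 16 bits (ldah-shaped), so no more
   than five instructions are needed.  */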
2004 static rtx
2005 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2007 HOST_WIDE_INT d1, d2, d3, d4;
2009 /* Decompose the entire word */
2010 #if HOST_BITS_PER_WIDE_INT >= 64
2011 gcc_assert (c2 == -(c1 < 0));
2012 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2013 c1 -= d1;
2014 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2015 c1 = (c1 - d2) >> 32;
2016 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2017 c1 -= d3;
2018 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2019 gcc_assert (c1 == d4);
2020 #else
2021 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2022 c1 -= d1;
2023 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2024 gcc_assert (c1 == d2);
2025 c2 += (d2 < 0);
2026 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2027 c2 -= d3;
2028 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2029 gcc_assert (c2 == d4);
2030 #endif
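/* A worked example of the decomposition (illustrative values only):
   for c1 = 0x123456789 on a 64-bit host, d1 = 0x6789, d2 = 0x23450000,
   d3 = 0x1 and d4 = 0.  The code below then moves 1 into TARGET, shifts
   it left by 32, and adds d2 and d1 back in, rebuilding
   0x100000000 + 0x23450000 + 0x6789 = 0x123456789.  */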
2032 /* Construct the high word */
2033 if (d4)
2035 emit_move_insn (target, GEN_INT (d4));
2036 if (d3)
2037 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2039 else
2040 emit_move_insn (target, GEN_INT (d3));
2042 /* Shift it into place */
2043 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2045 /* Add in the low bits. */
2046 if (d2)
2047 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2048 if (d1)
2049 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2051 return target;
2054 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2055 the low 64 bits. */
2057 static void
2058 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2060 HOST_WIDE_INT i0, i1;
2062 if (GET_CODE (x) == CONST_VECTOR)
2063 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2066 if (GET_CODE (x) == CONST_INT)
2068 i0 = INTVAL (x);
2069 i1 = -(i0 < 0);
2071 else if (HOST_BITS_PER_WIDE_INT >= 64)
2073 i0 = CONST_DOUBLE_LOW (x);
2074 i1 = -(i0 < 0);
2076 else
2078 i0 = CONST_DOUBLE_LOW (x);
2079 i1 = CONST_DOUBLE_HIGH (x);
2082 *p0 = i0;
2083 *p1 = i1;
2086 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2087 are willing to load the value into a register via a move pattern.
2088 Normally this is all symbolic constants, integral constants that
2089 take three or fewer instructions, and floating-point zero. */
2091 bool
2092 alpha_legitimate_constant_p (rtx x)
2094 enum machine_mode mode = GET_MODE (x);
2095 HOST_WIDE_INT i0, i1;
2097 switch (GET_CODE (x))
2099 case CONST:
2100 case LABEL_REF:
2101 case SYMBOL_REF:
2102 case HIGH:
2103 return true;
2105 case CONST_DOUBLE:
2106 if (x == CONST0_RTX (mode))
2107 return true;
2108 if (FLOAT_MODE_P (mode))
2109 return false;
2110 goto do_integer;
2112 case CONST_VECTOR:
2113 if (x == CONST0_RTX (mode))
2114 return true;
2115 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2116 return false;
2117 if (GET_MODE_SIZE (mode) != 8)
2118 return false;
2119 goto do_integer;
2121 case CONST_INT:
2122 do_integer:
2123 if (TARGET_BUILD_CONSTANTS)
2124 return true;
2125 alpha_extract_integer (x, &i0, &i1);
2126 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2127 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2128 return false;
2130 default:
2131 return false;
2135 /* Operand 1 is known to be a constant, and should require more than one
2136 instruction to load. Emit that multi-part load. */
2138 bool
2139 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2141 HOST_WIDE_INT i0, i1;
2142 rtx temp = NULL_RTX;
2144 alpha_extract_integer (operands[1], &i0, &i1);
2146 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2147 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2149 if (!temp && TARGET_BUILD_CONSTANTS)
2150 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2152 if (temp)
2154 if (!rtx_equal_p (operands[0], temp))
2155 emit_move_insn (operands[0], temp);
2156 return true;
2159 return false;
2162 /* Expand a move instruction; return true if all work is done.
2163 We don't handle non-bwx subword loads here. */
2165 bool
2166 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2168 /* If the output is not a register, the input must be. */
2169 if (GET_CODE (operands[0]) == MEM
2170 && ! reg_or_0_operand (operands[1], mode))
2171 operands[1] = force_reg (mode, operands[1]);
2173 /* Allow legitimize_address to perform some simplifications. */
2174 if (mode == Pmode && symbolic_operand (operands[1], mode))
2176 rtx tmp;
2178 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2179 if (tmp)
2181 if (tmp == operands[0])
2182 return true;
2183 operands[1] = tmp;
2184 return false;
2188 /* Early out for non-constants and valid constants. */
2189 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2190 return false;
2192 /* Split large integers. */
2193 if (GET_CODE (operands[1]) == CONST_INT
2194 || GET_CODE (operands[1]) == CONST_DOUBLE
2195 || GET_CODE (operands[1]) == CONST_VECTOR)
2197 if (alpha_split_const_mov (mode, operands))
2198 return true;
2201 /* Otherwise we've nothing left but to drop the thing to memory. */
2202 operands[1] = force_const_mem (mode, operands[1]);
2203 if (reload_in_progress)
2205 emit_move_insn (operands[0], XEXP (operands[1], 0));
2206 operands[1] = copy_rtx (operands[1]);
2207 XEXP (operands[1], 0) = operands[0];
2209 else
2210 operands[1] = validize_mem (operands[1]);
2211 return false;
2214 /* Expand a non-bwx QImode or HImode move instruction;
2215 return true if all work is done. */
2217 bool
2218 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2220 /* If the output is not a register, the input must be. */
2221 if (GET_CODE (operands[0]) == MEM)
2222 operands[1] = force_reg (mode, operands[1]);
2224 /* Handle four memory cases, unaligned and aligned for either the input
2225 or the output. The only case where we can be called during reload is
2226 for aligned loads; all other cases require temporaries. */
2228 if (GET_CODE (operands[1]) == MEM
2229 || (GET_CODE (operands[1]) == SUBREG
2230 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2231 || (reload_in_progress && GET_CODE (operands[1]) == REG
2232 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2233 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2234 && GET_CODE (SUBREG_REG (operands[1])) == REG
2235 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2237 if (aligned_memory_operand (operands[1], mode))
2239 if (reload_in_progress)
2241 emit_insn ((mode == QImode
2242 ? gen_reload_inqi_help
2243 : gen_reload_inhi_help)
2244 (operands[0], operands[1],
2245 gen_rtx_REG (SImode, REGNO (operands[0]))));
2247 else
2249 rtx aligned_mem, bitnum;
2250 rtx scratch = gen_reg_rtx (SImode);
2251 rtx subtarget;
2252 bool copyout;
2254 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2256 subtarget = operands[0];
2257 if (GET_CODE (subtarget) == REG)
2258 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2259 else
2260 subtarget = gen_reg_rtx (DImode), copyout = true;
2262 emit_insn ((mode == QImode
2263 ? gen_aligned_loadqi
2264 : gen_aligned_loadhi)
2265 (subtarget, aligned_mem, bitnum, scratch));
2267 if (copyout)
2268 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2271 else
2273 /* Don't pass these as parameters since that makes the generated
2274 code depend on parameter evaluation order which will cause
2275 bootstrap failures. */
2277 rtx temp1, temp2, seq, subtarget;
2278 bool copyout;
2280 temp1 = gen_reg_rtx (DImode);
2281 temp2 = gen_reg_rtx (DImode);
2283 subtarget = operands[0];
2284 if (GET_CODE (subtarget) == REG)
2285 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2286 else
2287 subtarget = gen_reg_rtx (DImode), copyout = true;
2289 seq = ((mode == QImode
2290 ? gen_unaligned_loadqi
2291 : gen_unaligned_loadhi)
2292 (subtarget, get_unaligned_address (operands[1], 0),
2293 temp1, temp2));
2294 alpha_set_memflags (seq, operands[1]);
2295 emit_insn (seq);
2297 if (copyout)
2298 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2300 return true;
2303 if (GET_CODE (operands[0]) == MEM
2304 || (GET_CODE (operands[0]) == SUBREG
2305 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2306 || (reload_in_progress && GET_CODE (operands[0]) == REG
2307 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2308 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2309 && GET_CODE (SUBREG_REG (operands[0])) == REG
2310 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2312 if (aligned_memory_operand (operands[0], mode))
2314 rtx aligned_mem, bitnum;
2315 rtx temp1 = gen_reg_rtx (SImode);
2316 rtx temp2 = gen_reg_rtx (SImode);
2318 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2320 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2321 temp1, temp2));
2323 else
2325 rtx temp1 = gen_reg_rtx (DImode);
2326 rtx temp2 = gen_reg_rtx (DImode);
2327 rtx temp3 = gen_reg_rtx (DImode);
2328 rtx seq = ((mode == QImode
2329 ? gen_unaligned_storeqi
2330 : gen_unaligned_storehi)
2331 (get_unaligned_address (operands[0], 0),
2332 operands[1], temp1, temp2, temp3));
2334 alpha_set_memflags (seq, operands[0]);
2335 emit_insn (seq);
2337 return true;
2340 return false;
2343 /* Implement the movmisalign patterns. One of the operands is a memory
2344 that is not naturally aligned. Emit instructions to load it. */
2346 void
2347 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2349 /* Honor misaligned loads, for those we promised to do so. */
2350 if (MEM_P (operands[1]))
2352 rtx tmp;
2354 if (register_operand (operands[0], mode))
2355 tmp = operands[0];
2356 else
2357 tmp = gen_reg_rtx (mode);
2359 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2360 if (tmp != operands[0])
2361 emit_move_insn (operands[0], tmp);
2363 else if (MEM_P (operands[0]))
2365 if (!reg_or_0_operand (operands[1], mode))
2366 operands[1] = force_reg (mode, operands[1]);
2367 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2369 else
2370 gcc_unreachable ();
2373 /* Generate an unsigned DImode to FP conversion. This is the same code
2374 optabs would emit if we didn't have TFmode patterns.
2376 For SFmode, this is the only construction I've found that can pass
2377 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2378 intermediates will work, because you'll get intermediate rounding
2379 that ruins the end result. Some of this could be fixed by turning
2380 on round-to-positive-infinity, but that requires diddling the fpsr,
2381 which kills performance. I tried turning this around and converting
2382 to a negative number, so that I could turn on /m, but either I did
2383 it wrong or there's something else, because I wound up with the exact
2384 same single-bit error. There is a branch-less form of this same code:
2386 srl $16,1,$1
2387 and $16,1,$2
2388 cmplt $16,0,$3
2389 or $1,$2,$2
2390 cmovge $16,$16,$2
2391 itoft $3,$f10
2392 itoft $2,$f11
2393 cvtqs $f11,$f11
2394 adds $f11,$f11,$f0
2395 fcmoveq $f10,$f11,$f0
2397 I'm not using it because it's the same number of instructions as
2398 this branch-full form, and it has more serialized long latency
2399 instructions on the critical path.
2401 For DFmode, we can avoid rounding errors by breaking up the word
2402 into two pieces, converting them separately, and adding them back:
2404 LC0: .long 0,0x5f800000
2406 itoft $16,$f11
2407 lda $2,LC0
2408 cmplt $16,0,$1
2409 cpyse $f11,$f31,$f10
2410 cpyse $f31,$f11,$f11
2411 s4addq $1,$2,$1
2412 lds $f12,0($1)
2413 cvtqt $f10,$f10
2414 cvtqt $f11,$f11
2415 addt $f12,$f10,$f0
2416 addt $f0,$f11,$f0
2418 This doesn't seem to be a clear-cut win over the optabs form.
2419 It probably all depends on the distribution of numbers being
2420 converted -- in the optabs form, all but high-bit-set has a
2421 much lower minimum execution time. */
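/* At the C level the expansion below is, roughly (a sketch only):
     out = (in >= 0) ? (FP) in
                     : (FP) (((uint64_t) in >> 1) | (in & 1)) * 2;
   i.e. when the high bit is set, halve the value (folding the lost low
   bit back in so the rounding comes out right), convert, then double.  */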
2423 void
2424 alpha_emit_floatuns (rtx operands[2])
2426 rtx neglab, donelab, i0, i1, f0, in, out;
2427 enum machine_mode mode;
2429 out = operands[0];
2430 in = force_reg (DImode, operands[1]);
2431 mode = GET_MODE (out);
2432 neglab = gen_label_rtx ();
2433 donelab = gen_label_rtx ();
2434 i0 = gen_reg_rtx (DImode);
2435 i1 = gen_reg_rtx (DImode);
2436 f0 = gen_reg_rtx (mode);
2438 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2440 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2441 emit_jump_insn (gen_jump (donelab));
2442 emit_barrier ();
2444 emit_label (neglab);
2446 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2447 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2448 emit_insn (gen_iordi3 (i0, i0, i1));
2449 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2450 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2452 emit_label (donelab);
2455 /* Generate the comparison for a conditional branch. */
2458 alpha_emit_conditional_branch (enum rtx_code code)
2460 enum rtx_code cmp_code, branch_code;
2461 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2462 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2463 rtx tem;
2465 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2467 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2468 op1 = const0_rtx;
2469 alpha_compare.fp_p = 0;
2472 /* The general case: fold the comparison code to the types of compares
2473 that we have, choosing the branch as necessary. */
2474 switch (code)
2476 case EQ: case LE: case LT: case LEU: case LTU:
2477 case UNORDERED:
2478 /* We have these compares: */
2479 cmp_code = code, branch_code = NE;
2480 break;
2482 case NE:
2483 case ORDERED:
2484 /* These must be reversed. */
2485 cmp_code = reverse_condition (code), branch_code = EQ;
2486 break;
2488 case GE: case GT: case GEU: case GTU:
2489 /* For FP, we swap them, for INT, we reverse them. */
2490 if (alpha_compare.fp_p)
2492 cmp_code = swap_condition (code);
2493 branch_code = NE;
2494 tem = op0, op0 = op1, op1 = tem;
2496 else
2498 cmp_code = reverse_condition (code);
2499 branch_code = EQ;
2501 break;
2503 default:
2504 gcc_unreachable ();
2507 if (alpha_compare.fp_p)
2509 cmp_mode = DFmode;
2510 if (flag_unsafe_math_optimizations)
2512 /* When we are not as concerned about non-finite values, and we
2513 are comparing against zero, we can branch directly. */
2514 if (op1 == CONST0_RTX (DFmode))
2515 cmp_code = UNKNOWN, branch_code = code;
2516 else if (op0 == CONST0_RTX (DFmode))
2518 /* Undo the swap we probably did just above. */
2519 tem = op0, op0 = op1, op1 = tem;
2520 branch_code = swap_condition (cmp_code);
2521 cmp_code = UNKNOWN;
2524 else
2526 /* ??? We mark the branch mode to be CCmode to prevent the
2527 compare and branch from being combined, since the compare
2528 insn follows IEEE rules that the branch does not. */
2529 branch_mode = CCmode;
2532 else
2534 cmp_mode = DImode;
2536 /* The following optimizations are only for signed compares. */
2537 if (code != LEU && code != LTU && code != GEU && code != GTU)
2539 /* Whee. Compare and branch against 0 directly. */
2540 if (op1 == const0_rtx)
2541 cmp_code = UNKNOWN, branch_code = code;
2543 /* If the constant doesn't fit into an immediate, but can
2544 be generated by lda/ldah, we adjust the argument and
2545 compare against zero, so we can use beq/bne directly. */
2546 /* ??? Don't do this when comparing against symbols, otherwise
2547 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2548 be declared false out of hand (at least for non-weak). */
2549 else if (GET_CODE (op1) == CONST_INT
2550 && (code == EQ || code == NE)
2551 && !(symbolic_operand (op0, VOIDmode)
2552 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2554 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2556 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2557 && (CONST_OK_FOR_LETTER_P (n, 'K')
2558 || CONST_OK_FOR_LETTER_P (n, 'L')))
2560 cmp_code = PLUS, branch_code = code;
2561 op1 = GEN_INT (n);
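/* E.g. (illustrative): for op0 == 0x12340000, the constant does not fit
   in an 8-bit immediate, but n = -0x12340000 satisfies the 'L' (ldah)
   constraint, so we compute op0 + n with a single ldah-style add and
   branch on the result being zero.  */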
2566 if (!reg_or_0_operand (op0, DImode))
2567 op0 = force_reg (DImode, op0);
2568 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2569 op1 = force_reg (DImode, op1);
2572 /* Emit an initial compare instruction, if necessary. */
2573 tem = op0;
2574 if (cmp_code != UNKNOWN)
2576 tem = gen_reg_rtx (cmp_mode);
2577 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2580 /* Zero the operands. */
2581 memset (&alpha_compare, 0, sizeof (alpha_compare));
2583 /* Return the branch comparison. */
2584 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2587 /* Certain simplifications can be done to make invalid setcc operations
2588 valid. Return the final comparison, or NULL if we can't work. */
2591 alpha_emit_setcc (enum rtx_code code)
2593 enum rtx_code cmp_code;
2594 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2595 int fp_p = alpha_compare.fp_p;
2596 rtx tmp;
2598 /* Zero the operands. */
2599 memset (&alpha_compare, 0, sizeof (alpha_compare));
2601 if (fp_p && GET_MODE (op0) == TFmode)
2603 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2604 op1 = const0_rtx;
2605 fp_p = 0;
2608 if (fp_p && !TARGET_FIX)
2609 return NULL_RTX;
2611 /* The general case: fold the comparison code to the types of compares
2612 that we have, choosing the branch as necessary. */
2614 cmp_code = UNKNOWN;
2615 switch (code)
2617 case EQ: case LE: case LT: case LEU: case LTU:
2618 case UNORDERED:
2619 /* We have these compares. */
2620 if (fp_p)
2621 cmp_code = code, code = NE;
2622 break;
2624 case NE:
2625 if (!fp_p && op1 == const0_rtx)
2626 break;
2627 /* FALLTHRU */
2629 case ORDERED:
2630 cmp_code = reverse_condition (code);
2631 code = EQ;
2632 break;
2634 case GE: case GT: case GEU: case GTU:
2635 /* These normally need swapping, but for integer zero we have
2636 special patterns that recognize swapped operands. */
2637 if (!fp_p && op1 == const0_rtx)
2638 break;
2639 code = swap_condition (code);
2640 if (fp_p)
2641 cmp_code = code, code = NE;
2642 tmp = op0, op0 = op1, op1 = tmp;
2643 break;
2645 default:
2646 gcc_unreachable ();
2649 if (!fp_p)
2651 if (!register_operand (op0, DImode))
2652 op0 = force_reg (DImode, op0);
2653 if (!reg_or_8bit_operand (op1, DImode))
2654 op1 = force_reg (DImode, op1);
2657 /* Emit an initial compare instruction, if necessary. */
2658 if (cmp_code != UNKNOWN)
2660 enum machine_mode mode = fp_p ? DFmode : DImode;
2662 tmp = gen_reg_rtx (mode);
2663 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2664 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2666 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2667 op1 = const0_rtx;
2670 /* Return the setcc comparison. */
2671 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2675 /* Rewrite a comparison against zero CMP of the form
2676 (CODE (cc0) (const_int 0)) so it can be written validly in
2677 a conditional move (if_then_else CMP ...).
2678 If both of the operands that set cc0 are nonzero we must emit
2679 an insn to perform the compare (it can't be done within
2680 the conditional move). */
2683 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2685 enum rtx_code code = GET_CODE (cmp);
2686 enum rtx_code cmov_code = NE;
2687 rtx op0 = alpha_compare.op0;
2688 rtx op1 = alpha_compare.op1;
2689 int fp_p = alpha_compare.fp_p;
2690 enum machine_mode cmp_mode
2691 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2692 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2693 enum machine_mode cmov_mode = VOIDmode;
2694 int local_fast_math = flag_unsafe_math_optimizations;
2695 rtx tem;
2697 /* Zero the operands. */
2698 memset (&alpha_compare, 0, sizeof (alpha_compare));
2700 if (fp_p != FLOAT_MODE_P (mode))
2702 enum rtx_code cmp_code;
2704 if (! TARGET_FIX)
2705 return 0;
2707 /* If we have fp<->int register move instructions, do a cmov by
2708 performing the comparison in fp registers, and move the
2709 zero/nonzero value to integer registers, where we can then
2710 use a normal cmov, or vice-versa. */
2712 switch (code)
2714 case EQ: case LE: case LT: case LEU: case LTU:
2715 /* We have these compares. */
2716 cmp_code = code, code = NE;
2717 break;
2719 case NE:
2720 /* This must be reversed. */
2721 cmp_code = EQ, code = EQ;
2722 break;
2724 case GE: case GT: case GEU: case GTU:
2725 /* These normally need swapping, but for integer zero we have
2726 special patterns that recognize swapped operands. */
2727 if (!fp_p && op1 == const0_rtx)
2728 cmp_code = code, code = NE;
2729 else
2731 cmp_code = swap_condition (code);
2732 code = NE;
2733 tem = op0, op0 = op1, op1 = tem;
2735 break;
2737 default:
2738 gcc_unreachable ();
2741 tem = gen_reg_rtx (cmp_op_mode);
2742 emit_insn (gen_rtx_SET (VOIDmode, tem,
2743 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2744 op0, op1)));
2746 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2747 op0 = gen_lowpart (cmp_op_mode, tem);
2748 op1 = CONST0_RTX (cmp_op_mode);
2749 fp_p = !fp_p;
2750 local_fast_math = 1;
2753 /* We may be able to use a conditional move directly.
2754 This avoids emitting spurious compares. */
2755 if (signed_comparison_operator (cmp, VOIDmode)
2756 && (!fp_p || local_fast_math)
2757 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2758 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2760 /* We can't put the comparison inside the conditional move;
2761 emit a compare instruction and put that inside the
2762 conditional move. Make sure we emit only comparisons we have;
2763 swap or reverse as necessary. */
2765 if (no_new_pseudos)
2766 return NULL_RTX;
2768 switch (code)
2770 case EQ: case LE: case LT: case LEU: case LTU:
2771 /* We have these compares: */
2772 break;
2774 case NE:
2775 /* This must be reversed. */
2776 code = reverse_condition (code);
2777 cmov_code = EQ;
2778 break;
2780 case GE: case GT: case GEU: case GTU:
2781 /* These must be swapped. */
2782 if (op1 != CONST0_RTX (cmp_mode))
2784 code = swap_condition (code);
2785 tem = op0, op0 = op1, op1 = tem;
2787 break;
2789 default:
2790 gcc_unreachable ();
2793 if (!fp_p)
2795 if (!reg_or_0_operand (op0, DImode))
2796 op0 = force_reg (DImode, op0);
2797 if (!reg_or_8bit_operand (op1, DImode))
2798 op1 = force_reg (DImode, op1);
2801 /* ??? We mark the branch mode to be CCmode to prevent the compare
2802 and cmov from being combined, since the compare insn follows IEEE
2803 rules that the cmov does not. */
2804 if (fp_p && !local_fast_math)
2805 cmov_mode = CCmode;
2807 tem = gen_reg_rtx (cmp_op_mode);
2808 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2809 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2812 /* Simplify a conditional move of two constants into a setcc with
2813 arithmetic. This is done with a splitter since combine would
2814 just undo the work if done during code generation. It also catches
2815 cases we wouldn't have before cse. */
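/* For instance (illustrative): (cond ? 8 : 0) has f == 0 and
   diff == 8 == 2**3, so it becomes a setcc of COND against zero followed
   by a left shift of 3 -- no conditional move needed.  */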
2818 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2819 rtx t_rtx, rtx f_rtx)
2821 HOST_WIDE_INT t, f, diff;
2822 enum machine_mode mode;
2823 rtx target, subtarget, tmp;
2825 mode = GET_MODE (dest);
2826 t = INTVAL (t_rtx);
2827 f = INTVAL (f_rtx);
2828 diff = t - f;
2830 if (((code == NE || code == EQ) && diff < 0)
2831 || (code == GE || code == GT))
2833 code = reverse_condition (code);
2834 diff = t, t = f, f = diff;
2835 diff = t - f;
2838 subtarget = target = dest;
2839 if (mode != DImode)
2841 target = gen_lowpart (DImode, dest);
2842 if (! no_new_pseudos)
2843 subtarget = gen_reg_rtx (DImode);
2844 else
2845 subtarget = target;
2847 /* Below, we must be careful to use copy_rtx on target and subtarget
2848 in intermediate insns, as they may be a subreg rtx, which may not
2849 be shared. */
2851 if (f == 0 && exact_log2 (diff) > 0
2852 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2853 viable over a longer latency cmove. On EV5, the E0 slot is a
2854 scarce resource, and on EV4 shift has the same latency as a cmove. */
2855 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2857 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2858 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2860 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2861 GEN_INT (exact_log2 (t)));
2862 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2864 else if (f == 0 && t == -1)
2866 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2867 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2869 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2871 else if (diff == 1 || diff == 4 || diff == 8)
2873 rtx add_op;
2875 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2876 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2878 if (diff == 1)
2879 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2880 else
2882 add_op = GEN_INT (f);
2883 if (sext_add_operand (add_op, mode))
2885 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2886 GEN_INT (diff));
2887 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2888 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2890 else
2891 return 0;
2894 else
2895 return 0;
2897 return 1;
2900 /* Look up the X_floating library function name for the
2901 given operation. */
2903 struct xfloating_op GTY(())
2905 const enum rtx_code code;
2906 const char *const GTY((skip)) osf_func;
2907 const char *const GTY((skip)) vms_func;
2908 rtx libcall;
2911 static GTY(()) struct xfloating_op xfloating_ops[] =
2913 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2914 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2915 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2916 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2917 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2918 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2919 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2920 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2921 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2922 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2923 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2924 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2925 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2926 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2927 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2930 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2932 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2933 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2936 static rtx
2937 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2939 struct xfloating_op *ops = xfloating_ops;
2940 long n = ARRAY_SIZE (xfloating_ops);
2941 long i;
2943 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2945 /* How irritating. Nothing to key off for the main table. */
2946 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2948 ops = vax_cvt_ops;
2949 n = ARRAY_SIZE (vax_cvt_ops);
2952 for (i = 0; i < n; ++i, ++ops)
2953 if (ops->code == code)
2955 rtx func = ops->libcall;
2956 if (!func)
2958 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2959 ? ops->vms_func : ops->osf_func);
2960 ops->libcall = func;
2962 return func;
2965 gcc_unreachable ();
2968 /* Most X_floating operations take the rounding mode as an argument.
2969 Compute that here. */
2971 static int
2972 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2973 enum alpha_fp_rounding_mode round)
2975 int mode;
2977 switch (round)
2979 case ALPHA_FPRM_NORM:
2980 mode = 2;
2981 break;
2982 case ALPHA_FPRM_MINF:
2983 mode = 1;
2984 break;
2985 case ALPHA_FPRM_CHOP:
2986 mode = 0;
2987 break;
2988 case ALPHA_FPRM_DYN:
2989 mode = 4;
2990 break;
2991 default:
2992 gcc_unreachable ();
2994 /* XXX For reference, round to +inf is mode = 3. */
2997 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2998 mode |= 0x10000;
3000 return mode;
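/* So, illustratively, alpha_compute_xfloating_mode_arg (PLUS,
   ALPHA_FPRM_NORM) yields 2, and a FLOAT_TRUNCATE under ALPHA_FPTM_N
   additionally sets the 0x10000 bit.  */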
3003 /* Emit an X_floating library function call.
3005 Note that these functions do not follow normal calling conventions:
3006 TFmode arguments are passed in two integer registers (as opposed to
3007 indirect); TFmode return values appear in R16+R17.
3009 FUNC is the function to call.
3010 TARGET is where the output belongs.
3011 OPERANDS are the inputs.
3012 NOPERANDS is the count of inputs.
3013 EQUIV is the expression equivalent for the function.
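   So, for example (register assignment shown for illustration), a call
   such as _OtsAddX (a, b, round) places the TFmode A in $16/$17, the
   TFmode B in $18/$19, the DImode rounding-mode argument in $20, and
   returns the TFmode result in $16/$17.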
3016 static void
3017 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3018 int noperands, rtx equiv)
3020 rtx usage = NULL_RTX, tmp, reg;
3021 int regno = 16, i;
3023 start_sequence ();
3025 for (i = 0; i < noperands; ++i)
3027 switch (GET_MODE (operands[i]))
3029 case TFmode:
3030 reg = gen_rtx_REG (TFmode, regno);
3031 regno += 2;
3032 break;
3034 case DFmode:
3035 reg = gen_rtx_REG (DFmode, regno + 32);
3036 regno += 1;
3037 break;
3039 case VOIDmode:
3040 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3041 /* FALLTHRU */
3042 case DImode:
3043 reg = gen_rtx_REG (DImode, regno);
3044 regno += 1;
3045 break;
3047 default:
3048 gcc_unreachable ();
3051 emit_move_insn (reg, operands[i]);
3052 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3055 switch (GET_MODE (target))
3057 case TFmode:
3058 reg = gen_rtx_REG (TFmode, 16);
3059 break;
3060 case DFmode:
3061 reg = gen_rtx_REG (DFmode, 32);
3062 break;
3063 case DImode:
3064 reg = gen_rtx_REG (DImode, 0);
3065 break;
3066 default:
3067 gcc_unreachable ();
3070 tmp = gen_rtx_MEM (QImode, func);
3071 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3072 const0_rtx, const0_rtx));
3073 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3074 CONST_OR_PURE_CALL_P (tmp) = 1;
3076 tmp = get_insns ();
3077 end_sequence ();
3079 emit_libcall_block (tmp, target, reg, equiv);
3082 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3084 void
3085 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3087 rtx func;
3088 int mode;
3089 rtx out_operands[3];
3091 func = alpha_lookup_xfloating_lib_func (code);
3092 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3094 out_operands[0] = operands[1];
3095 out_operands[1] = operands[2];
3096 out_operands[2] = GEN_INT (mode);
3097 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3098 gen_rtx_fmt_ee (code, TFmode, operands[1],
3099 operands[2]));
3102 /* Emit an X_floating library function call for a comparison. */
3104 static rtx
3105 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3107 enum rtx_code cmp_code, res_code;
3108 rtx func, out, operands[2];
3110 /* X_floating library comparison functions return
3111 -1 unordered
3112 0 false
3113 1 true
3114 Convert the compare against the raw return value. */
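/* E.g. (illustrative): UNORDERED is implemented by calling the EQ
   routine and testing for a return value of -1 (result < 0); ORDERED
   likewise calls EQ and tests result >= 0.  */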
3116 cmp_code = *pcode;
3117 switch (cmp_code)
3119 case UNORDERED:
3120 cmp_code = EQ;
3121 res_code = LT;
3122 break;
3123 case ORDERED:
3124 cmp_code = EQ;
3125 res_code = GE;
3126 break;
3127 case NE:
3128 res_code = NE;
3129 break;
3130 case EQ:
3131 case LT:
3132 case GT:
3133 case LE:
3134 case GE:
3135 res_code = GT;
3136 break;
3137 default:
3138 gcc_unreachable ();
3140 *pcode = res_code;
3142 func = alpha_lookup_xfloating_lib_func (cmp_code);
3144 operands[0] = op0;
3145 operands[1] = op1;
3146 out = gen_reg_rtx (DImode);
3148 /* ??? Strange mode for equiv because what's actually returned
3149 is -1,0,1, not a proper boolean value. */
3150 alpha_emit_xfloating_libcall (func, out, operands, 2,
3151 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3153 return out;
3156 /* Emit an X_floating library function call for a conversion. */
3158 void
3159 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3161 int noperands = 1, mode;
3162 rtx out_operands[2];
3163 rtx func;
3164 enum rtx_code code = orig_code;
3166 if (code == UNSIGNED_FIX)
3167 code = FIX;
3169 func = alpha_lookup_xfloating_lib_func (code);
3171 out_operands[0] = operands[1];
3173 switch (code)
3175 case FIX:
3176 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3177 out_operands[1] = GEN_INT (mode);
3178 noperands = 2;
3179 break;
3180 case FLOAT_TRUNCATE:
3181 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3182 out_operands[1] = GEN_INT (mode);
3183 noperands = 2;
3184 break;
3185 default:
3186 break;
3189 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3190 gen_rtx_fmt_e (orig_code,
3191 GET_MODE (operands[0]),
3192 operands[1]));
3195 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3196 OP[0] into OP[0,1]. Naturally, output operand ordering is
3197 little-endian. */
3199 void
3200 alpha_split_tfmode_pair (rtx operands[4])
3202 switch (GET_CODE (operands[1]))
3204 case REG:
3205 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3206 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3207 break;
3209 case MEM:
3210 operands[3] = adjust_address (operands[1], DImode, 8);
3211 operands[2] = adjust_address (operands[1], DImode, 0);
3212 break;
3214 case CONST_DOUBLE:
3215 gcc_assert (operands[1] == CONST0_RTX (TFmode));
3216 operands[2] = operands[3] = const0_rtx;
3217 break;
3219 default:
3220 gcc_unreachable ();
3223 switch (GET_CODE (operands[0]))
3225 case REG:
3226 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3227 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3228 break;
3230 case MEM:
3231 operands[1] = adjust_address (operands[0], DImode, 8);
3232 operands[0] = adjust_address (operands[0], DImode, 0);
3233 break;
3235 default:
3236 gcc_unreachable ();
3240 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3241 op2 is a register containing the sign bit, operation is the
3242 logical operation to be performed. */
3244 void
3245 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3247 rtx high_bit = operands[2];
3248 rtx scratch;
3249 int move;
3251 alpha_split_tfmode_pair (operands);
3253 /* Detect three flavors of operand overlap. */
3254 move = 1;
3255 if (rtx_equal_p (operands[0], operands[2]))
3256 move = 0;
3257 else if (rtx_equal_p (operands[1], operands[2]))
3259 if (rtx_equal_p (operands[0], high_bit))
3260 move = 2;
3261 else
3262 move = -1;
3265 if (move < 0)
3266 emit_move_insn (operands[0], operands[2]);
3268 /* ??? If the destination overlaps both source tf and high_bit, then
3269 assume source tf is dead in its entirety and use the other half
3270 for a scratch register. Otherwise "scratch" is just the proper
3271 destination register. */
3272 scratch = operands[move < 2 ? 1 : 3];
3274 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3276 if (move > 0)
3278 emit_move_insn (operands[0], operands[2]);
3279 if (move > 1)
3280 emit_move_insn (operands[1], scratch);
3284 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3285 unaligned data:
3287 unsigned: signed:
3288 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3289 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3290 lda r3,X(r11) lda r3,X+2(r11)
3291 extwl r1,r3,r1 extql r1,r3,r1
3292 extwh r2,r3,r2 extqh r2,r3,r2
3293 or r1,r2,r1 or r1,r2,r1
3294 sra r1,48,r1
3296 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3297 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3298 lda r3,X(r11) lda r3,X(r11)
3299 extll r1,r3,r1 extll r1,r3,r1
3300 extlh r2,r3,r2 extlh r2,r3,r2
3301 or r1,r2,r1 addl r1,r2,r1
3303 quad: ldq_u r1,X(r11)
3304 ldq_u r2,X+7(r11)
3305 lda r3,X(r11)
3306 extql r1,r3,r1
3307 extqh r2,r3,r2
3308 or r1,r2,r1
3311 void
3312 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3313 HOST_WIDE_INT ofs, int sign)
3315 rtx meml, memh, addr, extl, exth, tmp, mema;
3316 enum machine_mode mode;
3318 if (TARGET_BWX && size == 2)
3320 meml = adjust_address (mem, QImode, ofs);
3321 memh = adjust_address (mem, QImode, ofs+1);
3322 if (BYTES_BIG_ENDIAN)
3323 tmp = meml, meml = memh, memh = tmp;
3324 extl = gen_reg_rtx (DImode);
3325 exth = gen_reg_rtx (DImode);
3326 emit_insn (gen_zero_extendqidi2 (extl, meml));
3327 emit_insn (gen_zero_extendqidi2 (exth, memh));
3328 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3329 NULL, 1, OPTAB_LIB_WIDEN);
3330 addr = expand_simple_binop (DImode, IOR, extl, exth,
3331 NULL, 1, OPTAB_LIB_WIDEN);
3333 if (sign && GET_MODE (tgt) != HImode)
3335 addr = gen_lowpart (HImode, addr);
3336 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3338 else
3340 if (GET_MODE (tgt) != DImode)
3341 addr = gen_lowpart (GET_MODE (tgt), addr);
3342 emit_move_insn (tgt, addr);
3344 return;
3347 meml = gen_reg_rtx (DImode);
3348 memh = gen_reg_rtx (DImode);
3349 addr = gen_reg_rtx (DImode);
3350 extl = gen_reg_rtx (DImode);
3351 exth = gen_reg_rtx (DImode);
3353 mema = XEXP (mem, 0);
3354 if (GET_CODE (mema) == LO_SUM)
3355 mema = force_reg (Pmode, mema);
3357 /* AND addresses cannot be in any alias set, since they may implicitly
3358 alias surrounding code. Ideally we'd have some alias set that
3359 covered all types except those with alignment 8 or higher. */
3361 tmp = change_address (mem, DImode,
3362 gen_rtx_AND (DImode,
3363 plus_constant (mema, ofs),
3364 GEN_INT (-8)));
3365 set_mem_alias_set (tmp, 0);
3366 emit_move_insn (meml, tmp);
3368 tmp = change_address (mem, DImode,
3369 gen_rtx_AND (DImode,
3370 plus_constant (mema, ofs + size - 1),
3371 GEN_INT (-8)));
3372 set_mem_alias_set (tmp, 0);
3373 emit_move_insn (memh, tmp);
3375 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3377 emit_move_insn (addr, plus_constant (mema, -1));
3379 emit_insn (gen_extqh_be (extl, meml, addr));
3380 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3382 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3383 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3384 addr, 1, OPTAB_WIDEN);
3386 else if (sign && size == 2)
3388 emit_move_insn (addr, plus_constant (mema, ofs+2));
3390 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3391 emit_insn (gen_extqh_le (exth, memh, addr));
3393 /* We must use tgt here for the target. Alpha-vms port fails if we use
3394 addr for the target, because addr is marked as a pointer and combine
3395 knows that pointers are always sign-extended 32 bit values. */
3396 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3397 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3398 addr, 1, OPTAB_WIDEN);
3400 else
3402 if (WORDS_BIG_ENDIAN)
3404 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3405 switch ((int) size)
3407 case 2:
3408 emit_insn (gen_extwh_be (extl, meml, addr));
3409 mode = HImode;
3410 break;
3412 case 4:
3413 emit_insn (gen_extlh_be (extl, meml, addr));
3414 mode = SImode;
3415 break;
3417 case 8:
3418 emit_insn (gen_extqh_be (extl, meml, addr));
3419 mode = DImode;
3420 break;
3422 default:
3423 gcc_unreachable ();
3425 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3427 else
3429 emit_move_insn (addr, plus_constant (mema, ofs));
3430 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3431 switch ((int) size)
3433 case 2:
3434 emit_insn (gen_extwh_le (exth, memh, addr));
3435 mode = HImode;
3436 break;
3438 case 4:
3439 emit_insn (gen_extlh_le (exth, memh, addr));
3440 mode = SImode;
3441 break;
3443 case 8:
3444 emit_insn (gen_extqh_le (exth, memh, addr));
3445 mode = DImode;
3446 break;
3448 default:
3449 gcc_unreachable ();
3453 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3454 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3455 sign, OPTAB_WIDEN);
3458 if (addr != tgt)
3459 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3462 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3464 void
3465 alpha_expand_unaligned_store (rtx dst, rtx src,
3466 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3468 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3470 if (TARGET_BWX && size == 2)
3472 if (src != const0_rtx)
3474 dstl = gen_lowpart (QImode, src);
3475 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3476 NULL, 1, OPTAB_LIB_WIDEN);
3477 dsth = gen_lowpart (QImode, dsth);
3479 else
3480 dstl = dsth = const0_rtx;
3482 meml = adjust_address (dst, QImode, ofs);
3483 memh = adjust_address (dst, QImode, ofs+1);
3484 if (BYTES_BIG_ENDIAN)
3485 addr = meml, meml = memh, memh = addr;
3487 emit_move_insn (meml, dstl);
3488 emit_move_insn (memh, dsth);
3489 return;
3492 dstl = gen_reg_rtx (DImode);
3493 dsth = gen_reg_rtx (DImode);
3494 insl = gen_reg_rtx (DImode);
3495 insh = gen_reg_rtx (DImode);
3497 dsta = XEXP (dst, 0);
3498 if (GET_CODE (dsta) == LO_SUM)
3499 dsta = force_reg (Pmode, dsta);
3501 /* AND addresses cannot be in any alias set, since they may implicitly
3502 alias surrounding code. Ideally we'd have some alias set that
3503 covered all types except those with alignment 8 or higher. */
3505 meml = change_address (dst, DImode,
3506 gen_rtx_AND (DImode,
3507 plus_constant (dsta, ofs),
3508 GEN_INT (-8)));
3509 set_mem_alias_set (meml, 0);
3511 memh = change_address (dst, DImode,
3512 gen_rtx_AND (DImode,
3513 plus_constant (dsta, ofs + size - 1),
3514 GEN_INT (-8)));
3515 set_mem_alias_set (memh, 0);
3517 emit_move_insn (dsth, memh);
3518 emit_move_insn (dstl, meml);
3519 if (WORDS_BIG_ENDIAN)
3521 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3523 if (src != const0_rtx)
3525 switch ((int) size)
3527 case 2:
3528 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3529 break;
3530 case 4:
3531 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3532 break;
3533 case 8:
3534 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3535 break;
3537 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3538 GEN_INT (size*8), addr));
3541 switch ((int) size)
3543 case 2:
3544 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3545 break;
3546 case 4:
3548 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3549 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3550 break;
3552 case 8:
3553 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3554 break;
3557 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3559 else
3561 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3563 if (src != CONST0_RTX (GET_MODE (src)))
3565 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3566 GEN_INT (size*8), addr));
3568 switch ((int) size)
3570 case 2:
3571 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3572 break;
3573 case 4:
3574 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3575 break;
3576 case 8:
3577 emit_insn (gen_insql_le (insl, src, addr));
3578 break;
3582 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3584 switch ((int) size)
3586 case 2:
3587 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3588 break;
3589 case 4:
3591 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3592 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3593 break;
3595 case 8:
3596 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3597 break;
3601 if (src != CONST0_RTX (GET_MODE (src)))
3603 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3604 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3607 if (WORDS_BIG_ENDIAN)
3609 emit_move_insn (meml, dstl);
3610 emit_move_insn (memh, dsth);
3612 else
3614 /* Must store high before low for degenerate case of aligned. */
3615 emit_move_insn (memh, dsth);
3616 emit_move_insn (meml, dstl);
3620 /* The block move code tries to maximize speed by separating loads and
3621 stores at the expense of register pressure: we load all of the data
3622 before we store it back out. There are two secondary effects worth
3623 mentioning, that this speeds copying to/from aligned and unaligned
3624 buffers, and that it makes the code significantly easier to write. */
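/* So a 24-byte copy from a 64-bit-aligned source, for instance, first
   loads three DImode registers and only then stores them out with
   whatever (possibly unaligned) stores the destination permits.  */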
3626 #define MAX_MOVE_WORDS 8
3628 /* Load an integral number of consecutive unaligned quadwords. */
3630 static void
3631 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3632 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3634 rtx const im8 = GEN_INT (-8);
3635 rtx const i64 = GEN_INT (64);
3636 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3637 rtx sreg, areg, tmp, smema;
3638 HOST_WIDE_INT i;
3640 smema = XEXP (smem, 0);
3641 if (GET_CODE (smema) == LO_SUM)
3642 smema = force_reg (Pmode, smema);
3644 /* Generate all the tmp registers we need. */
3645 for (i = 0; i < words; ++i)
3647 data_regs[i] = out_regs[i];
3648 ext_tmps[i] = gen_reg_rtx (DImode);
3650 data_regs[words] = gen_reg_rtx (DImode);
3652 if (ofs != 0)
3653 smem = adjust_address (smem, GET_MODE (smem), ofs);
3655 /* Load up all of the source data. */
3656 for (i = 0; i < words; ++i)
3658 tmp = change_address (smem, DImode,
3659 gen_rtx_AND (DImode,
3660 plus_constant (smema, 8*i),
3661 im8));
3662 set_mem_alias_set (tmp, 0);
3663 emit_move_insn (data_regs[i], tmp);
3666 tmp = change_address (smem, DImode,
3667 gen_rtx_AND (DImode,
3668 plus_constant (smema, 8*words - 1),
3669 im8));
3670 set_mem_alias_set (tmp, 0);
3671 emit_move_insn (data_regs[words], tmp);
3673 /* Extract the half-word fragments. Unfortunately DEC decided to make
3674 extxh with offset zero a noop instead of zeroing the register, so
3675 we must take care of that edge condition ourselves with cmov. */
3677 sreg = copy_addr_to_reg (smema);
3678 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3679 1, OPTAB_WIDEN);
3680 if (WORDS_BIG_ENDIAN)
3681 emit_move_insn (sreg, plus_constant (sreg, 7));
3682 for (i = 0; i < words; ++i)
3684 if (WORDS_BIG_ENDIAN)
3686 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3687 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3689 else
3691 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3692 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3694 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3695 gen_rtx_IF_THEN_ELSE (DImode,
3696 gen_rtx_EQ (DImode, areg,
3697 const0_rtx),
3698 const0_rtx, ext_tmps[i])));
3701 /* Merge the half-words into whole words. */
3702 for (i = 0; i < words; ++i)
3704 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3705 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3709 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3710 may be NULL to store zeros. */
3712 static void
3713 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3714 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3716 rtx const im8 = GEN_INT (-8);
3717 rtx const i64 = GEN_INT (64);
3718 rtx ins_tmps[MAX_MOVE_WORDS];
3719 rtx st_tmp_1, st_tmp_2, dreg;
3720 rtx st_addr_1, st_addr_2, dmema;
3721 HOST_WIDE_INT i;
3723 dmema = XEXP (dmem, 0);
3724 if (GET_CODE (dmema) == LO_SUM)
3725 dmema = force_reg (Pmode, dmema);
3727 /* Generate all the tmp registers we need. */
3728 if (data_regs != NULL)
3729 for (i = 0; i < words; ++i)
3730 ins_tmps[i] = gen_reg_rtx(DImode);
3731 st_tmp_1 = gen_reg_rtx(DImode);
3732 st_tmp_2 = gen_reg_rtx(DImode);
3734 if (ofs != 0)
3735 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3737 st_addr_2 = change_address (dmem, DImode,
3738 gen_rtx_AND (DImode,
3739 plus_constant (dmema, words*8 - 1),
3740 im8));
3741 set_mem_alias_set (st_addr_2, 0);
3743 st_addr_1 = change_address (dmem, DImode,
3744 gen_rtx_AND (DImode, dmema, im8));
3745 set_mem_alias_set (st_addr_1, 0);
3747 /* Load up the destination end bits. */
3748 emit_move_insn (st_tmp_2, st_addr_2);
3749 emit_move_insn (st_tmp_1, st_addr_1);
3751 /* Shift the input data into place. */
3752 dreg = copy_addr_to_reg (dmema);
3753 if (WORDS_BIG_ENDIAN)
3754 emit_move_insn (dreg, plus_constant (dreg, 7));
3755 if (data_regs != NULL)
3757 for (i = words-1; i >= 0; --i)
3759 if (WORDS_BIG_ENDIAN)
3761 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3762 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3764 else
3766 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3767 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3770 for (i = words-1; i > 0; --i)
3772 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3773 ins_tmps[i-1], ins_tmps[i-1], 1,
3774 OPTAB_WIDEN);
3778 /* Split and merge the ends with the destination data. */
3779 if (WORDS_BIG_ENDIAN)
3781 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3782 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3784 else
3786 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3787 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3790 if (data_regs != NULL)
3792 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3793 st_tmp_2, 1, OPTAB_WIDEN);
3794 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3795 st_tmp_1, 1, OPTAB_WIDEN);
3798 /* Store it all. */
3799 if (WORDS_BIG_ENDIAN)
3800 emit_move_insn (st_addr_1, st_tmp_1);
3801 else
3802 emit_move_insn (st_addr_2, st_tmp_2);
3803 for (i = words-1; i > 0; --i)
3805 rtx tmp = change_address (dmem, DImode,
3806 gen_rtx_AND (DImode,
3807 plus_constant(dmema,
3808 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3809 im8));
3810 set_mem_alias_set (tmp, 0);
3811 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3813 if (WORDS_BIG_ENDIAN)
3814 emit_move_insn (st_addr_2, st_tmp_2);
3815 else
3816 emit_move_insn (st_addr_1, st_tmp_1);
3820 /* Expand string/block move operations.
3822 operands[0] is the pointer to the destination.
3823 operands[1] is the pointer to the source.
3824 operands[2] is the number of bytes to move.
3825 operands[3] is the alignment. */
3828 alpha_expand_block_move (rtx operands[])
3830 rtx bytes_rtx = operands[2];
3831 rtx align_rtx = operands[3];
3832 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3833 HOST_WIDE_INT bytes = orig_bytes;
3834 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3835 HOST_WIDE_INT dst_align = src_align;
3836 rtx orig_src = operands[1];
3837 rtx orig_dst = operands[0];
3838 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3839 rtx tmp;
3840 unsigned int i, words, ofs, nregs = 0;
3842 if (orig_bytes <= 0)
3843 return 1;
3844 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3845 return 0;
3847 /* Look for additional alignment information from recorded register info. */
3849 tmp = XEXP (orig_src, 0);
3850 if (GET_CODE (tmp) == REG)
3851 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3852 else if (GET_CODE (tmp) == PLUS
3853 && GET_CODE (XEXP (tmp, 0)) == REG
3854 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3856 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3857 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3859 if (a > src_align)
3861 if (a >= 64 && c % 8 == 0)
3862 src_align = 64;
3863 else if (a >= 32 && c % 4 == 0)
3864 src_align = 32;
3865 else if (a >= 16 && c % 2 == 0)
3866 src_align = 16;
3870 tmp = XEXP (orig_dst, 0);
3871 if (GET_CODE (tmp) == REG)
3872 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3873 else if (GET_CODE (tmp) == PLUS
3874 && GET_CODE (XEXP (tmp, 0)) == REG
3875 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3877 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3878 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3880 if (a > dst_align)
3882 if (a >= 64 && c % 8 == 0)
3883 dst_align = 64;
3884 else if (a >= 32 && c % 4 == 0)
3885 dst_align = 32;
3886 else if (a >= 16 && c % 2 == 0)
3887 dst_align = 16;
3891 ofs = 0;
3892 if (src_align >= 64 && bytes >= 8)
3894 words = bytes / 8;
3896 for (i = 0; i < words; ++i)
3897 data_regs[nregs + i] = gen_reg_rtx (DImode);
3899 for (i = 0; i < words; ++i)
3900 emit_move_insn (data_regs[nregs + i],
3901 adjust_address (orig_src, DImode, ofs + i * 8));
3903 nregs += words;
3904 bytes -= words * 8;
3905 ofs += words * 8;
3908 if (src_align >= 32 && bytes >= 4)
3910 words = bytes / 4;
3912 for (i = 0; i < words; ++i)
3913 data_regs[nregs + i] = gen_reg_rtx (SImode);
3915 for (i = 0; i < words; ++i)
3916 emit_move_insn (data_regs[nregs + i],
3917 adjust_address (orig_src, SImode, ofs + i * 4));
3919 nregs += words;
3920 bytes -= words * 4;
3921 ofs += words * 4;
3924 if (bytes >= 8)
3926 words = bytes / 8;
3928 for (i = 0; i < words+1; ++i)
3929 data_regs[nregs + i] = gen_reg_rtx (DImode);
3931 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3932 words, ofs);
3934 nregs += words;
3935 bytes -= words * 8;
3936 ofs += words * 8;
3939 if (! TARGET_BWX && bytes >= 4)
3941 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3942 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3943 bytes -= 4;
3944 ofs += 4;
3947 if (bytes >= 2)
3949 if (src_align >= 16)
3951 do {
3952 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3953 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3954 bytes -= 2;
3955 ofs += 2;
3956 } while (bytes >= 2);
3958 else if (! TARGET_BWX)
3960 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3961 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3962 bytes -= 2;
3963 ofs += 2;
3967 while (bytes > 0)
3969 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3970 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3971 bytes -= 1;
3972 ofs += 1;
3975 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3977 /* Now save it back out again. */
3979 i = 0, ofs = 0;
3981 /* Write out the data in whatever chunks reading the source allowed. */
3982 if (dst_align >= 64)
3984 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3986 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3987 data_regs[i]);
3988 ofs += 8;
3989 i++;
3993 if (dst_align >= 32)
3995 /* If the source has remaining DImode regs, write them out in
3996 two pieces. */
3997 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3999 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4000 NULL_RTX, 1, OPTAB_WIDEN);
4002 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4003 gen_lowpart (SImode, data_regs[i]));
4004 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4005 gen_lowpart (SImode, tmp));
4006 ofs += 8;
4007 i++;
4010 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4012 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4013 data_regs[i]);
4014 ofs += 4;
4015 i++;
4019 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4021 /* Write out a remaining block of words using unaligned methods. */
4023 for (words = 1; i + words < nregs; words++)
4024 if (GET_MODE (data_regs[i + words]) != DImode)
4025 break;
4027 if (words == 1)
4028 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4029 else
4030 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4031 words, ofs);
4033 i += words;
4034 ofs += words * 8;
4037 /* Due to the above, this won't be aligned. */
4038 /* ??? If we have more than one of these, consider constructing full
4039 words in registers and using alpha_expand_unaligned_store_words. */
4040 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4042 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4043 ofs += 4;
4044 i++;
4047 if (dst_align >= 16)
4048 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4050 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4051 i++;
4052 ofs += 2;
4054 else
4055 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4057 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4058 i++;
4059 ofs += 2;
4062 /* The remainder must be byte copies. */
4063 while (i < nregs)
4065 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4066 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4067 i++;
4068 ofs += 1;
4071 return 1;
4075 alpha_expand_block_clear (rtx operands[])
4077 rtx bytes_rtx = operands[1];
4078 rtx align_rtx = operands[3];
4079 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4080 HOST_WIDE_INT bytes = orig_bytes;
4081 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4082 HOST_WIDE_INT alignofs = 0;
4083 rtx orig_dst = operands[0];
4084 rtx tmp;
4085 int i, words, ofs = 0;
4087 if (orig_bytes <= 0)
4088 return 1;
4089 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4090 return 0;
4092 /* Look for stricter alignment. */
4093 tmp = XEXP (orig_dst, 0);
4094 if (GET_CODE (tmp) == REG)
4095 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4096 else if (GET_CODE (tmp) == PLUS
4097 && GET_CODE (XEXP (tmp, 0)) == REG
4098 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4100 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4101 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4103 if (a > align)
4105 if (a >= 64)
4106 align = a, alignofs = 8 - c % 8;
4107 else if (a >= 32)
4108 align = a, alignofs = 4 - c % 4;
4109 else if (a >= 16)
4110 align = a, alignofs = 2 - c % 2;
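/* E.g. (illustrative): a destination of the form reg+5 with the register
   known to be 64-bit aligned gives align = 64 and alignofs = 3, i.e. three
   bytes of unaligned prefix to clear before reaching an 8-byte boundary.  */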
4114 /* Handle an unaligned prefix first. */
4116 if (alignofs > 0)
4118 #if HOST_BITS_PER_WIDE_INT >= 64
4119 /* Given that alignofs is bounded by align, the only time BWX could
4120 generate three stores is for a 7 byte fill. Prefer two individual
4121 stores over a load/mask/store sequence. */
4122 if ((!TARGET_BWX || alignofs == 7)
4123 && align >= 32
4124 && !(alignofs == 4 && bytes >= 4))
4126 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4127 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4128 rtx mem, tmp;
4129 HOST_WIDE_INT mask;
4131 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4132 set_mem_alias_set (mem, 0);
4134 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4135 if (bytes < alignofs)
4137 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4138 ofs += bytes;
4139 bytes = 0;
4141 else
4143 bytes -= alignofs;
4144 ofs += alignofs;
4146 alignofs = 0;
4148 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4149 NULL_RTX, 1, OPTAB_WIDEN);
4151 emit_move_insn (mem, tmp);
4153 #endif
4155 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4157 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4158 bytes -= 1;
4159 ofs += 1;
4160 alignofs -= 1;
4162 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4164 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4165 bytes -= 2;
4166 ofs += 2;
4167 alignofs -= 2;
4169 if (alignofs == 4 && bytes >= 4)
4171 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4172 bytes -= 4;
4173 ofs += 4;
4174 alignofs = 0;
4177 /* If we've not used the extra lead alignment information by now,
4178 we won't be able to. Downgrade align to match what's left over. */
4179 if (alignofs > 0)
4181 alignofs = alignofs & -alignofs;
4182 align = MIN (align, alignofs * BITS_PER_UNIT);
4186 /* Handle a block of contiguous long-words. */
4188 if (align >= 64 && bytes >= 8)
4190 words = bytes / 8;
4192 for (i = 0; i < words; ++i)
4193 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4194 const0_rtx);
4196 bytes -= words * 8;
4197 ofs += words * 8;
4200 /* If the block is large and appropriately aligned, emit a single
4201 store followed by a sequence of stq_u insns. */
4203 if (align >= 32 && bytes > 16)
4205 rtx orig_dsta;
4207 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4208 bytes -= 4;
4209 ofs += 4;
4211 orig_dsta = XEXP (orig_dst, 0);
4212 if (GET_CODE (orig_dsta) == LO_SUM)
4213 orig_dsta = force_reg (Pmode, orig_dsta);
4215 words = bytes / 8;
4216 for (i = 0; i < words; ++i)
4218 rtx mem
4219 = change_address (orig_dst, DImode,
4220 gen_rtx_AND (DImode,
4221 plus_constant (orig_dsta, ofs + i*8),
4222 GEN_INT (-8)));
4223 set_mem_alias_set (mem, 0);
4224 emit_move_insn (mem, const0_rtx);
4227 /* Depending on the alignment, the first stq_u may have overlapped
4228 with the initial stl, which means that the last stq_u didn't
4229 write as much as it would appear. Leave those questionable bytes
4230 unaccounted for. */
4231 bytes -= words * 8 - 4;
4232 ofs += words * 8 - 4;
4235 /* Handle a smaller block of aligned words. */
4237 if ((align >= 64 && bytes == 4)
4238 || (align == 32 && bytes >= 4))
4240 words = bytes / 4;
4242 for (i = 0; i < words; ++i)
4243 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4244 const0_rtx);
4246 bytes -= words * 4;
4247 ofs += words * 4;
4250 /* An unaligned block uses stq_u stores for as many as possible. */
4252 if (bytes >= 8)
4254 words = bytes / 8;
4256 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4258 bytes -= words * 8;
4259 ofs += words * 8;
4262 /* Next clean up any trailing pieces. */
4264 #if HOST_BITS_PER_WIDE_INT >= 64
4265 /* Count the number of bits in BYTES for which aligned stores could
4266 be emitted. */
4267 words = 0;
4268 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4269 if (bytes & i)
4270 words += 1;
4272 /* If we have appropriate alignment (and it wouldn't take too many
4273 instructions otherwise), mask out the bytes we need. */
4274 if (TARGET_BWX ? words > 2 : bytes > 0)
4276 if (align >= 64)
4278 rtx mem, tmp;
4279 HOST_WIDE_INT mask;
4281 mem = adjust_address (orig_dst, DImode, ofs);
4282 set_mem_alias_set (mem, 0);
4284 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4286 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4287 NULL_RTX, 1, OPTAB_WIDEN);
4289 emit_move_insn (mem, tmp);
4290 return 1;
4292 else if (align >= 32 && bytes < 4)
4294 rtx mem, tmp;
4295 HOST_WIDE_INT mask;
4297 mem = adjust_address (orig_dst, SImode, ofs);
4298 set_mem_alias_set (mem, 0);
4300 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4302 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4303 NULL_RTX, 1, OPTAB_WIDEN);
4305 emit_move_insn (mem, tmp);
4306 return 1;
4309 #endif
4311 if (!TARGET_BWX && bytes >= 4)
4313 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4314 bytes -= 4;
4315 ofs += 4;
4318 if (bytes >= 2)
4320 if (align >= 16)
4322 do {
4323 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4324 const0_rtx);
4325 bytes -= 2;
4326 ofs += 2;
4327 } while (bytes >= 2);
4329 else if (! TARGET_BWX)
4331 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4332 bytes -= 2;
4333 ofs += 2;
4337 while (bytes > 0)
4339 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4340 bytes -= 1;
4341 ofs += 1;
4344 return 1;
4347 /* Returns a mask so that zap(x, value) == x & mask. */
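/* For example (illustrative), zap (x, 0x01) clears byte 0 of x, so the
   mask returned for VALUE == 0x01 is 0xffffffffffffff00.  */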
4350 alpha_expand_zap_mask (HOST_WIDE_INT value)
4352 rtx result;
4353 int i;
4355 if (HOST_BITS_PER_WIDE_INT >= 64)
4357 HOST_WIDE_INT mask = 0;
4359 for (i = 7; i >= 0; --i)
4361 mask <<= 8;
4362 if (!((value >> i) & 1))
4363 mask |= 0xff;
4366 result = gen_int_mode (mask, DImode);
4368 else
4370 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4372 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4374 for (i = 7; i >= 4; --i)
4376 mask_hi <<= 8;
4377 if (!((value >> i) & 1))
4378 mask_hi |= 0xff;
4381 for (i = 3; i >= 0; --i)
4383 mask_lo <<= 8;
4384 if (!((value >> i) & 1))
4385 mask_lo |= 0xff;
4388 result = immed_double_const (mask_lo, mask_hi, DImode);
4391 return result;
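/* Descriptive comment (added; derived from the body below): emit the vector
   builtin GEN on OP1 and OP2, storing the result in OP0.  The operands are
   first converted to MODE; a const0_rtx operand is canonicalized to
   CONST0_RTX for that vector mode.  */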
4394 void
4395 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4396 enum machine_mode mode,
4397 rtx op0, rtx op1, rtx op2)
4399 op0 = gen_lowpart (mode, op0);
4401 if (op1 == const0_rtx)
4402 op1 = CONST0_RTX (mode);
4403 else
4404 op1 = gen_lowpart (mode, op1);
4406 if (op2 == const0_rtx)
4407 op2 = CONST0_RTX (mode);
4408 else
4409 op2 = gen_lowpart (mode, op2);
4411 emit_insn ((*gen) (op0, op1, op2));
4414 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4415 COND is true. Mark the jump as unlikely to be taken. */
4417 static void
4418 emit_unlikely_jump (rtx cond, rtx label)
4420 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4421 rtx x;
4423 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4424 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4425 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4428 /* A subroutine of the atomic operation splitters. Emit a load-locked
4429 instruction in MODE. */
4431 static void
4432 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4434 rtx (*fn) (rtx, rtx) = NULL;
4435 if (mode == SImode)
4436 fn = gen_load_locked_si;
4437 else if (mode == DImode)
4438 fn = gen_load_locked_di;
4439 emit_insn (fn (reg, mem));
4442 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4443 instruction in MODE. */
4445 static void
4446 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4448 rtx (*fn) (rtx, rtx, rtx) = NULL;
4449 if (mode == SImode)
4450 fn = gen_store_conditional_si;
4451 else if (mode == DImode)
4452 fn = gen_store_conditional_di;
4453 emit_insn (fn (res, mem, val));
4456 /* A subroutine of the atomic operation splitters. Emit an insxl
4457 instruction in MODE. */
4459 static rtx
4460 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4462 rtx ret = gen_reg_rtx (DImode);
4463 rtx (*fn) (rtx, rtx, rtx);
4465 if (WORDS_BIG_ENDIAN)
4467 if (mode == QImode)
4468 fn = gen_insbl_be;
4469 else
4470 fn = gen_inswl_be;
4472 else
4474 if (mode == QImode)
4475 fn = gen_insbl_le;
4476 else
4477 fn = gen_inswl_le;
4479 emit_insn (fn (ret, op1, op2));
4481 return ret;
4484 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4485 to perform. MEM is the memory on which to operate. VAL is the second
4486 operand of the binary operator. BEFORE and AFTER are optional locations to
4487 return the value of MEM either before or after the operation. SCRATCH is
4488 a scratch register. */
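/* Illustrative sketch (added, not from the original source): for DImode the
   splitter below emits roughly the following retry loop (SImode uses
   ldl_l/stl_c instead):

	mb
    1:	ldq_l	before,0(mem)
	<code>	before,val,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb

   with the operation result additionally copied into AFTER when that
   location is requested.  */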
4490 void
4491 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4492 rtx before, rtx after, rtx scratch)
4494 enum machine_mode mode = GET_MODE (mem);
4495 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4497 emit_insn (gen_memory_barrier ());
4499 label = gen_label_rtx ();
4500 emit_label (label);
4501 label = gen_rtx_LABEL_REF (DImode, label);
4503 if (before == NULL)
4504 before = scratch;
4505 emit_load_locked (mode, before, mem);
4507 if (code == NOT)
4508 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4509 else
4510 x = gen_rtx_fmt_ee (code, mode, before, val);
4511 if (after)
4512 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4513 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4515 emit_store_conditional (mode, cond, mem, scratch);
4517 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4518 emit_unlikely_jump (x, label);
4520 emit_insn (gen_memory_barrier ());
4523 /* Expand a compare and swap operation. */
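/* Illustrative sketch (added, not from the original source): for DImode the
   split below emits roughly

	mb
    1:	ldq_l	retval,0(mem)
	cmpeq	retval,oldval,cond
	beq	cond,2f
	mov	newval,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb
    2:
   */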
4525 void
4526 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4527 rtx scratch)
4529 enum machine_mode mode = GET_MODE (mem);
4530 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4532 emit_insn (gen_memory_barrier ());
4534 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4535 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4536 emit_label (XEXP (label1, 0));
4538 emit_load_locked (mode, retval, mem);
4540 x = gen_lowpart (DImode, retval);
4541 if (oldval == const0_rtx)
4542 x = gen_rtx_NE (DImode, x, const0_rtx);
4543 else
4545 x = gen_rtx_EQ (DImode, x, oldval);
4546 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4547 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4549 emit_unlikely_jump (x, label2);
4551 emit_move_insn (scratch, newval);
4552 emit_store_conditional (mode, cond, mem, scratch);
4554 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4555 emit_unlikely_jump (x, label1);
4557 emit_insn (gen_memory_barrier ());
4558 emit_label (XEXP (label2, 0));
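/* Descriptive comment (added; derived from the body below): expand a compare
   and swap on a QImode or HImode MEM.  The address is aligned down to the
   enclosing quadword, OLDVAL is widened to DImode, NEWVAL is positioned at
   the proper byte offset with insbl/inswl, and the rest is deferred to the
   sync_compare_and_swap<mode>_1 pattern, which is split by
   alpha_split_compare_and_swap_12 below.  */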
4561 void
4562 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4564 enum machine_mode mode = GET_MODE (mem);
4565 rtx addr, align, wdst;
4566 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4568 addr = force_reg (DImode, XEXP (mem, 0));
4569 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4570 NULL_RTX, 1, OPTAB_DIRECT);
4572 oldval = convert_modes (DImode, mode, oldval, 1);
4573 newval = emit_insxl (mode, newval, addr);
4575 wdst = gen_reg_rtx (DImode);
4576 if (mode == QImode)
4577 fn5 = gen_sync_compare_and_swapqi_1;
4578 else
4579 fn5 = gen_sync_compare_and_swaphi_1;
4580 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4582 emit_move_insn (dst, gen_lowpart (mode, wdst));
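/* Descriptive comment (added; derived from the body below): split the
   sync_compare_and_swap<mode>_1 pattern above into a load-locked/
   store-conditional loop on the aligned DImode word.  The old subword is
   extracted with extxl and compared against OLDVAL; on a match the slot is
   cleared with mskxl, NEWVAL is merged in, and the store is retried until
   the conditional store succeeds.  */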
4585 void
4586 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4587 rtx oldval, rtx newval, rtx align,
4588 rtx scratch, rtx cond)
4590 rtx label1, label2, mem, width, mask, x;
4592 mem = gen_rtx_MEM (DImode, align);
4593 MEM_VOLATILE_P (mem) = 1;
4595 emit_insn (gen_memory_barrier ());
4596 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4597 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4598 emit_label (XEXP (label1, 0));
4600 emit_load_locked (DImode, scratch, mem);
4602 width = GEN_INT (GET_MODE_BITSIZE (mode));
4603 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4604 if (WORDS_BIG_ENDIAN)
4605 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4606 else
4607 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4609 if (oldval == const0_rtx)
4610 x = gen_rtx_NE (DImode, dest, const0_rtx);
4611 else
4613 x = gen_rtx_EQ (DImode, dest, oldval);
4614 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4615 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4617 emit_unlikely_jump (x, label2);
4619 if (WORDS_BIG_ENDIAN)
4620 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4621 else
4622 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4623 emit_insn (gen_iordi3 (scratch, scratch, newval));
4625 emit_store_conditional (DImode, scratch, mem, scratch);
4627 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4628 emit_unlikely_jump (x, label1);
4630 emit_insn (gen_memory_barrier ());
4631 emit_label (XEXP (label2, 0));
4634 /* Expand an atomic exchange operation. */
4636 void
4637 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4639 enum machine_mode mode = GET_MODE (mem);
4640 rtx label, x, cond = gen_lowpart (DImode, scratch);
4642 emit_insn (gen_memory_barrier ());
4644 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4645 emit_label (XEXP (label, 0));
4647 emit_load_locked (mode, retval, mem);
4648 emit_move_insn (scratch, val);
4649 emit_store_conditional (mode, cond, mem, scratch);
4651 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4652 emit_unlikely_jump (x, label);
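/* Descriptive comment (added; derived from the body below): expand
   __sync_lock_test_and_set for a QImode or HImode MEM, operating on the
   enclosing aligned quadword via the sync_lock_test_and_set<mode>_1
   pattern.  */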
4655 void
4656 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4658 enum machine_mode mode = GET_MODE (mem);
4659 rtx addr, align, wdst;
4660 rtx (*fn4) (rtx, rtx, rtx, rtx);
4662 /* Force the address into a register. */
4663 addr = force_reg (DImode, XEXP (mem, 0));
4665 /* Align it to a multiple of 8. */
4666 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4667 NULL_RTX, 1, OPTAB_DIRECT);
4669 /* Insert val into the correct byte location within the word. */
4670 val = emit_insxl (mode, val, addr);
4672 wdst = gen_reg_rtx (DImode);
4673 if (mode == QImode)
4674 fn4 = gen_sync_lock_test_and_setqi_1;
4675 else
4676 fn4 = gen_sync_lock_test_and_sethi_1;
4677 emit_insn (fn4 (wdst, addr, val, align));
4679 emit_move_insn (dst, gen_lowpart (mode, wdst));
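/* Descriptive comment (added; derived from the body below): split the
   sync_lock_test_and_set<mode>_1 pattern above.  Load-lock the aligned
   word, extract the old subword into DEST with extxl, clear the slot with
   mskxl, insert VAL, and retry the store-conditional until it succeeds.  */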
4682 void
4683 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4684 rtx val, rtx align, rtx scratch)
4686 rtx label, mem, width, mask, x;
4688 mem = gen_rtx_MEM (DImode, align);
4689 MEM_VOLATILE_P (mem) = 1;
4691 emit_insn (gen_memory_barrier ());
4692 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4693 emit_label (XEXP (label, 0));
4695 emit_load_locked (DImode, scratch, mem);
4697 width = GEN_INT (GET_MODE_BITSIZE (mode));
4698 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4699 if (WORDS_BIG_ENDIAN)
4701 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4702 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4704 else
4706 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4707 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4709 emit_insn (gen_iordi3 (scratch, scratch, val));
4711 emit_store_conditional (DImode, scratch, mem, scratch);
4713 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4714 emit_unlikely_jump (x, label);
4717 /* Adjust the cost of a scheduling dependency. Return the new cost of
4718 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4720 static int
4721 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4723 enum attr_type insn_type, dep_insn_type;
4725 /* If the dependence is an anti-dependence, there is no cost. For an
4726 output dependence, there is sometimes a cost, but it doesn't seem
4727 worth handling those few cases. */
4728 if (REG_NOTE_KIND (link) != 0)
4729 return cost;
4731 /* If we can't recognize the insns, we can't really do anything. */
4732 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4733 return cost;
4735 insn_type = get_attr_type (insn);
4736 dep_insn_type = get_attr_type (dep_insn);
4738 /* Bring in the user-defined memory latency. */
4739 if (dep_insn_type == TYPE_ILD
4740 || dep_insn_type == TYPE_FLD
4741 || dep_insn_type == TYPE_LDSYM)
4742 cost += alpha_memory_latency-1;
4744 /* Everything else handled in DFA bypasses now. */
4746 return cost;
4749 /* The number of instructions that can be issued per cycle. */
4751 static int
4752 alpha_issue_rate (void)
4754 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4757 /* How many alternative schedules to try. This should be as wide as the
4758 scheduling freedom in the DFA, but no wider. Making this value too
4759 large results in extra work for the scheduler.
4761 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4762 alternative schedules. For EV5, we can choose between E0/E1 and
4763 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4765 static int
4766 alpha_multipass_dfa_lookahead (void)
4768 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4771 /* Machine-specific function data. */
4773 struct machine_function GTY(())
4775 /* For unicosmk. */
4776 /* List of call information words for calls from this function. */
4777 struct rtx_def *first_ciw;
4778 struct rtx_def *last_ciw;
4779 int ciw_count;
4781 /* List of deferred case vectors. */
4782 struct rtx_def *addr_list;
4784 /* For OSF. */
4785 const char *some_ld_name;
4787 /* For TARGET_LD_BUGGY_LDGP. */
4788 struct rtx_def *gp_save_rtx;
4791 /* How to allocate a 'struct machine_function'. */
4793 static struct machine_function *
4794 alpha_init_machine_status (void)
4796 return ((struct machine_function *)
4797 ggc_alloc_cleared (sizeof (struct machine_function)));
4800 /* Functions to save and restore alpha_return_addr_rtx. */
4802 /* Start the ball rolling with RETURN_ADDR_RTX. */
4805 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4807 if (count != 0)
4808 return const0_rtx;
4810 return get_hard_reg_initial_val (Pmode, REG_RA);
4813 /* Return or create a memory slot containing the gp value for the current
4814 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4817 alpha_gp_save_rtx (void)
4819 rtx seq, m = cfun->machine->gp_save_rtx;
4821 if (m == NULL)
4823 start_sequence ();
4825 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4826 m = validize_mem (m);
4827 emit_move_insn (m, pic_offset_table_rtx);
4829 seq = get_insns ();
4830 end_sequence ();
4831 emit_insn_after (seq, entry_of_function ());
4833 cfun->machine->gp_save_rtx = m;
4836 return m;
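/* Descriptive comment (added; derived from the body below): return nonzero
   if the return address register is ever written in the current
   function.  */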
4839 static int
4840 alpha_ra_ever_killed (void)
4842 rtx top;
4844 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4845 return regs_ever_live[REG_RA];
4847 push_topmost_sequence ();
4848 top = get_insns ();
4849 pop_topmost_sequence ();
4851 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4855 /* Return the trap mode suffix applicable to the current
4856 instruction, or NULL. */
4858 static const char *
4859 get_trap_mode_suffix (void)
4861 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4863 switch (s)
4865 case TRAP_SUFFIX_NONE:
4866 return NULL;
4868 case TRAP_SUFFIX_SU:
4869 if (alpha_fptm >= ALPHA_FPTM_SU)
4870 return "su";
4871 return NULL;
4873 case TRAP_SUFFIX_SUI:
4874 if (alpha_fptm >= ALPHA_FPTM_SUI)
4875 return "sui";
4876 return NULL;
4878 case TRAP_SUFFIX_V_SV:
4879 switch (alpha_fptm)
4881 case ALPHA_FPTM_N:
4882 return NULL;
4883 case ALPHA_FPTM_U:
4884 return "v";
4885 case ALPHA_FPTM_SU:
4886 case ALPHA_FPTM_SUI:
4887 return "sv";
4888 default:
4889 gcc_unreachable ();
4892 case TRAP_SUFFIX_V_SV_SVI:
4893 switch (alpha_fptm)
4895 case ALPHA_FPTM_N:
4896 return NULL;
4897 case ALPHA_FPTM_U:
4898 return "v";
4899 case ALPHA_FPTM_SU:
4900 return "sv";
4901 case ALPHA_FPTM_SUI:
4902 return "svi";
4903 default:
4904 gcc_unreachable ();
4906 break;
4908 case TRAP_SUFFIX_U_SU_SUI:
4909 switch (alpha_fptm)
4911 case ALPHA_FPTM_N:
4912 return NULL;
4913 case ALPHA_FPTM_U:
4914 return "u";
4915 case ALPHA_FPTM_SU:
4916 return "su";
4917 case ALPHA_FPTM_SUI:
4918 return "sui";
4919 default:
4920 gcc_unreachable ();
4922 break;
4924 default:
4925 gcc_unreachable ();
4927 gcc_unreachable ();
4930 /* Return the rounding mode suffix applicable to the current
4931 instruction, or NULL. */
4933 static const char *
4934 get_round_mode_suffix (void)
4936 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4938 switch (s)
4940 case ROUND_SUFFIX_NONE:
4941 return NULL;
4942 case ROUND_SUFFIX_NORMAL:
4943 switch (alpha_fprm)
4945 case ALPHA_FPRM_NORM:
4946 return NULL;
4947 case ALPHA_FPRM_MINF:
4948 return "m";
4949 case ALPHA_FPRM_CHOP:
4950 return "c";
4951 case ALPHA_FPRM_DYN:
4952 return "d";
4953 default:
4954 gcc_unreachable ();
4956 break;
4958 case ROUND_SUFFIX_C:
4959 return "c";
4961 default:
4962 gcc_unreachable ();
4964 gcc_unreachable ();
4967 /* Locate some local-dynamic symbol still in use by this function
4968 so that we can print its name in some movdi_er_tlsldm pattern. */
4970 static int
4971 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4973 rtx x = *px;
4975 if (GET_CODE (x) == SYMBOL_REF
4976 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4978 cfun->machine->some_ld_name = XSTR (x, 0);
4979 return 1;
4982 return 0;
4985 static const char *
4986 get_some_local_dynamic_name (void)
4988 rtx insn;
4990 if (cfun->machine->some_ld_name)
4991 return cfun->machine->some_ld_name;
4993 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4994 if (INSN_P (insn)
4995 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4996 return cfun->machine->some_ld_name;
4998 gcc_unreachable ();
5001 /* Print an operand. Recognize special options, documented below. */
5003 void
5004 print_operand (FILE *file, rtx x, int code)
5006 int i;
5008 switch (code)
5010 case '~':
5011 /* Print the assembler name of the current function. */
5012 assemble_name (file, alpha_fnname);
5013 break;
5015 case '&':
5016 assemble_name (file, get_some_local_dynamic_name ());
5017 break;
5019 case '/':
5021 const char *trap = get_trap_mode_suffix ();
5022 const char *round = get_round_mode_suffix ();
5024 if (trap || round)
5025 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5026 (trap ? trap : ""), (round ? round : ""));
5027 break;
5030 case ',':
5031 /* Generates single precision instruction suffix. */
5032 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5033 break;
5035 case '-':
5036 /* Generates double precision instruction suffix. */
5037 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5038 break;
5040 case '+':
5041 /* Generates a nop after a noreturn call at the very end of the
5042 function. */
5043 if (next_real_insn (current_output_insn) == 0)
5044 fprintf (file, "\n\tnop");
5045 break;
5047 case '#':
5048 if (alpha_this_literal_sequence_number == 0)
5049 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5050 fprintf (file, "%d", alpha_this_literal_sequence_number);
5051 break;
5053 case '*':
5054 if (alpha_this_gpdisp_sequence_number == 0)
5055 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5056 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5057 break;
5059 case 'H':
5060 if (GET_CODE (x) == HIGH)
5061 output_addr_const (file, XEXP (x, 0));
5062 else
5063 output_operand_lossage ("invalid %%H value");
5064 break;
5066 case 'J':
5068 const char *lituse;
5070 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5072 x = XVECEXP (x, 0, 0);
5073 lituse = "lituse_tlsgd";
5075 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5077 x = XVECEXP (x, 0, 0);
5078 lituse = "lituse_tlsldm";
5080 else if (GET_CODE (x) == CONST_INT)
5081 lituse = "lituse_jsr";
5082 else
5084 output_operand_lossage ("invalid %%J value");
5085 break;
5088 if (x != const0_rtx)
5089 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5091 break;
5093 case 'j':
5095 const char *lituse;
5097 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5098 lituse = "lituse_jsrdirect";
5099 #else
5100 lituse = "lituse_jsr";
5101 #endif
5103 gcc_assert (INTVAL (x) != 0);
5104 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5106 break;
5107 case 'r':
5108 /* If this operand is the constant zero, write it as "$31". */
5109 if (GET_CODE (x) == REG)
5110 fprintf (file, "%s", reg_names[REGNO (x)]);
5111 else if (x == CONST0_RTX (GET_MODE (x)))
5112 fprintf (file, "$31");
5113 else
5114 output_operand_lossage ("invalid %%r value");
5115 break;
5117 case 'R':
5118 /* Similar, but for floating-point. */
5119 if (GET_CODE (x) == REG)
5120 fprintf (file, "%s", reg_names[REGNO (x)]);
5121 else if (x == CONST0_RTX (GET_MODE (x)))
5122 fprintf (file, "$f31");
5123 else
5124 output_operand_lossage ("invalid %%R value");
5125 break;
5127 case 'N':
5128 /* Write the 1's complement of a constant. */
5129 if (GET_CODE (x) != CONST_INT)
5130 output_operand_lossage ("invalid %%N value");
5132 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5133 break;
5135 case 'P':
5136 /* Write 1 << C, for a constant C. */
5137 if (GET_CODE (x) != CONST_INT)
5138 output_operand_lossage ("invalid %%P value");
5140 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5141 break;
5143 case 'h':
5144 /* Write the high-order 16 bits of a constant, sign-extended. */
5145 if (GET_CODE (x) != CONST_INT)
5146 output_operand_lossage ("invalid %%h value");
5148 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5149 break;
5151 case 'L':
5152 /* Write the low-order 16 bits of a constant, sign-extended. */
5153 if (GET_CODE (x) != CONST_INT)
5154 output_operand_lossage ("invalid %%L value");
5156 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5157 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5158 break;
5160 case 'm':
5161 /* Write mask for ZAP insn. */
5162 if (GET_CODE (x) == CONST_DOUBLE)
5164 HOST_WIDE_INT mask = 0;
5165 HOST_WIDE_INT value;
5167 value = CONST_DOUBLE_LOW (x);
5168 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5169 i++, value >>= 8)
5170 if (value & 0xff)
5171 mask |= (1 << i);
5173 value = CONST_DOUBLE_HIGH (x);
5174 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5175 i++, value >>= 8)
5176 if (value & 0xff)
5177 mask |= (1 << (i + sizeof (int)));
5179 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5182 else if (GET_CODE (x) == CONST_INT)
5184 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5186 for (i = 0; i < 8; i++, value >>= 8)
5187 if (value & 0xff)
5188 mask |= (1 << i);
5190 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5192 else
5193 output_operand_lossage ("invalid %%m value");
5194 break;
5196 case 'M':
5197 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5198 if (GET_CODE (x) != CONST_INT
5199 || (INTVAL (x) != 8 && INTVAL (x) != 16
5200 && INTVAL (x) != 32 && INTVAL (x) != 64))
5201 output_operand_lossage ("invalid %%M value");
5203 fprintf (file, "%s",
5204 (INTVAL (x) == 8 ? "b"
5205 : INTVAL (x) == 16 ? "w"
5206 : INTVAL (x) == 32 ? "l"
5207 : "q"));
5208 break;
5210 case 'U':
5211 /* Similar, except do it from the mask. */
5212 if (GET_CODE (x) == CONST_INT)
5214 HOST_WIDE_INT value = INTVAL (x);
5216 if (value == 0xff)
5218 fputc ('b', file);
5219 break;
5221 if (value == 0xffff)
5223 fputc ('w', file);
5224 break;
5226 if (value == 0xffffffff)
5228 fputc ('l', file);
5229 break;
5231 if (value == -1)
5233 fputc ('q', file);
5234 break;
5237 else if (HOST_BITS_PER_WIDE_INT == 32
5238 && GET_CODE (x) == CONST_DOUBLE
5239 && CONST_DOUBLE_LOW (x) == 0xffffffff
5240 && CONST_DOUBLE_HIGH (x) == 0)
5242 fputc ('l', file);
5243 break;
5245 output_operand_lossage ("invalid %%U value");
5246 break;
5248 case 's':
5249 /* Write the constant value divided by 8 for little-endian mode or
5250 (56 - value) / 8 for big-endian mode. */
5252 if (GET_CODE (x) != CONST_INT
5253 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5254 ? 56
5255 : 64)
5256 || (INTVAL (x) & 7) != 0)
5257 output_operand_lossage ("invalid %%s value");
5259 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5260 WORDS_BIG_ENDIAN
5261 ? (56 - INTVAL (x)) / 8
5262 : INTVAL (x) / 8);
5263 break;
5265 case 'S':
5266 /* Same, except compute (64 - c) / 8 */
5268 if (GET_CODE (x) != CONST_INT
5269 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5270 && (INTVAL (x) & 7) != 8)
5271 output_operand_lossage ("invalid %%s value");
5273 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5274 break;
5276 case 't':
5278 /* On Unicos/Mk systems: use a DEX expression if the symbol
5279 clashes with a register name. */
5280 int dex = unicosmk_need_dex (x);
5281 if (dex)
5282 fprintf (file, "DEX(%d)", dex);
5283 else
5284 output_addr_const (file, x);
5286 break;
5288 case 'C': case 'D': case 'c': case 'd':
5289 /* Write out comparison name. */
5291 enum rtx_code c = GET_CODE (x);
5293 if (!COMPARISON_P (x))
5294 output_operand_lossage ("invalid %%C value");
5296 else if (code == 'D')
5297 c = reverse_condition (c);
5298 else if (code == 'c')
5299 c = swap_condition (c);
5300 else if (code == 'd')
5301 c = swap_condition (reverse_condition (c));
5303 if (c == LEU)
5304 fprintf (file, "ule");
5305 else if (c == LTU)
5306 fprintf (file, "ult");
5307 else if (c == UNORDERED)
5308 fprintf (file, "un");
5309 else
5310 fprintf (file, "%s", GET_RTX_NAME (c));
5312 break;
5314 case 'E':
5315 /* Write the divide or modulus operator. */
5316 switch (GET_CODE (x))
5318 case DIV:
5319 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5320 break;
5321 case UDIV:
5322 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5323 break;
5324 case MOD:
5325 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5326 break;
5327 case UMOD:
5328 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5329 break;
5330 default:
5331 output_operand_lossage ("invalid %%E value");
5332 break;
5334 break;
5336 case 'A':
5337 /* Write "_u" for unaligned access. */
5338 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5339 fprintf (file, "_u");
5340 break;
5342 case 0:
5343 if (GET_CODE (x) == REG)
5344 fprintf (file, "%s", reg_names[REGNO (x)]);
5345 else if (GET_CODE (x) == MEM)
5346 output_address (XEXP (x, 0));
5347 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5349 switch (XINT (XEXP (x, 0), 1))
5351 case UNSPEC_DTPREL:
5352 case UNSPEC_TPREL:
5353 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5354 break;
5355 default:
5356 output_operand_lossage ("unknown relocation unspec");
5357 break;
5360 else
5361 output_addr_const (file, x);
5362 break;
5364 default:
5365 output_operand_lossage ("invalid %%xn code");
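/* Descriptive comment (added; derived from the body below): print an operand
   address.  Handles REG, REG+offset and LO_SUM addresses (emitting the
   appropriate !gprel/!dtprel/!tprel relocation annotation), plus direct
   symbolic addresses on VMS.  */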
5369 void
5370 print_operand_address (FILE *file, rtx addr)
5372 int basereg = 31;
5373 HOST_WIDE_INT offset = 0;
5375 if (GET_CODE (addr) == AND)
5376 addr = XEXP (addr, 0);
5378 if (GET_CODE (addr) == PLUS
5379 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5381 offset = INTVAL (XEXP (addr, 1));
5382 addr = XEXP (addr, 0);
5385 if (GET_CODE (addr) == LO_SUM)
5387 const char *reloc16, *reloclo;
5388 rtx op1 = XEXP (addr, 1);
5390 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5392 op1 = XEXP (op1, 0);
5393 switch (XINT (op1, 1))
5395 case UNSPEC_DTPREL:
5396 reloc16 = NULL;
5397 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5398 break;
5399 case UNSPEC_TPREL:
5400 reloc16 = NULL;
5401 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5402 break;
5403 default:
5404 output_operand_lossage ("unknown relocation unspec");
5405 return;
5408 output_addr_const (file, XVECEXP (op1, 0, 0));
5410 else
5412 reloc16 = "gprel";
5413 reloclo = "gprellow";
5414 output_addr_const (file, op1);
5417 if (offset)
5418 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5420 addr = XEXP (addr, 0);
5421 switch (GET_CODE (addr))
5423 case REG:
5424 basereg = REGNO (addr);
5425 break;
5427 case SUBREG:
5428 basereg = subreg_regno (addr);
5429 break;
5431 default:
5432 gcc_unreachable ();
5435 fprintf (file, "($%d)\t\t!%s", basereg,
5436 (basereg == 29 ? reloc16 : reloclo));
5437 return;
5440 switch (GET_CODE (addr))
5442 case REG:
5443 basereg = REGNO (addr);
5444 break;
5446 case SUBREG:
5447 basereg = subreg_regno (addr);
5448 break;
5450 case CONST_INT:
5451 offset = INTVAL (addr);
5452 break;
5454 #if TARGET_ABI_OPEN_VMS
5455 case SYMBOL_REF:
5456 fprintf (file, "%s", XSTR (addr, 0));
5457 return;
5459 case CONST:
5460 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5461 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5462 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5463 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5464 INTVAL (XEXP (XEXP (addr, 0), 1)));
5465 return;
5467 #endif
5468 default:
5469 gcc_unreachable ();
5472 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5475 /* Emit RTL insns to initialize the variable parts of a trampoline at
5476 TRAMP. FNADDR is an RTX for the address of the function's pure
5477 code. CXT is an RTX for the static chain value for the function.
5479 The three offset parameters are for the individual template's
5480 layout. A JMPOFS < 0 indicates that the trampoline does not
5481 contain instructions at all.
5483 We assume here that a function will be called many more times than
5484 its address is taken (e.g., it might be passed to qsort), so we
5485 take the trouble to initialize the "hint" field in the JMP insn.
5486 Note that the hint field is PC (new) + 4 * bits 13:0. */
5488 void
5489 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5490 int fnofs, int cxtofs, int jmpofs)
5492 rtx temp, temp1, addr;
5493 /* VMS really uses DImode pointers in memory at this point. */
5494 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5496 #ifdef POINTERS_EXTEND_UNSIGNED
5497 fnaddr = convert_memory_address (mode, fnaddr);
5498 cxt = convert_memory_address (mode, cxt);
5499 #endif
5501 /* Store function address and CXT. */
5502 addr = memory_address (mode, plus_constant (tramp, fnofs));
5503 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5504 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5505 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5507 /* This has been disabled since the hint only has a 32k range, and in
5508 no existing OS is the stack within 32k of the text segment. */
5509 if (0 && jmpofs >= 0)
5511 /* Compute hint value. */
5512 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5513 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5514 OPTAB_WIDEN);
5515 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5516 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5517 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5518 GEN_INT (0x3fff), 0);
5520 /* Merge in the hint. */
5521 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5522 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5523 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5524 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5525 OPTAB_WIDEN);
5526 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5529 #ifdef ENABLE_EXECUTE_STACK
5530 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5531 0, VOIDmode, 1, tramp, Pmode);
5532 #endif
5534 if (jmpofs >= 0)
5535 emit_insn (gen_imb ());
5538 /* Determine where to put an argument to a function.
5539 Value is zero to push the argument on the stack,
5540 or a hard register in which to store the argument.
5542 MODE is the argument's machine mode.
5543 TYPE is the data type of the argument (as a tree).
5544 This is null for libcalls where that information may
5545 not be available.
5546 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5547 the preceding args and about the function being called.
5548 NAMED is nonzero if this argument is a named parameter
5549 (otherwise it is an extra parameter matching an ellipsis).
5551 On Alpha the first 6 words of args are normally in registers
5552 and the rest are pushed. */
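/* For example (illustrative), on OSF/1 the third named DImode argument
   (CUM == 2) is passed in $18, or in $f18 if it is a float and FP
   registers are in use.  */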
5555 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5556 int named ATTRIBUTE_UNUSED)
5558 int basereg;
5559 int num_args;
5561 /* Don't get confused and pass small structures in FP registers. */
5562 if (type && AGGREGATE_TYPE_P (type))
5563 basereg = 16;
5564 else
5566 #ifdef ENABLE_CHECKING
5567 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5568 values here. */
5569 gcc_assert (!COMPLEX_MODE_P (mode));
5570 #endif
5572 /* Set up defaults for FP operands passed in FP registers, and
5573 integral operands passed in integer registers. */
5574 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5575 basereg = 32 + 16;
5576 else
5577 basereg = 16;
5580 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5581 the three platforms, so we can't avoid conditional compilation. */
5582 #if TARGET_ABI_OPEN_VMS
5584 if (mode == VOIDmode)
5585 return alpha_arg_info_reg_val (cum);
5587 num_args = cum.num_args;
5588 if (num_args >= 6
5589 || targetm.calls.must_pass_in_stack (mode, type))
5590 return NULL_RTX;
5592 #elif TARGET_ABI_UNICOSMK
5594 int size;
5596 /* If this is the last argument, generate the call info word (CIW). */
5597 /* ??? We don't include the caller's line number in the CIW because
5598 I don't know how to determine it if debug info is turned off. */
5599 if (mode == VOIDmode)
5601 int i;
5602 HOST_WIDE_INT lo;
5603 HOST_WIDE_INT hi;
5604 rtx ciw;
5606 lo = 0;
5608 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5609 if (cum.reg_args_type[i])
5610 lo |= (1 << (7 - i));
5612 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5613 lo |= 7;
5614 else
5615 lo |= cum.num_reg_words;
5617 #if HOST_BITS_PER_WIDE_INT == 32
5618 hi = (cum.num_args << 20) | cum.num_arg_words;
5619 #else
5620 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5621 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5622 hi = 0;
5623 #endif
5624 ciw = immed_double_const (lo, hi, DImode);
5626 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5627 UNSPEC_UMK_LOAD_CIW);
5630 size = ALPHA_ARG_SIZE (mode, type, named);
5631 num_args = cum.num_reg_words;
5632 if (cum.force_stack
5633 || cum.num_reg_words + size > 6
5634 || targetm.calls.must_pass_in_stack (mode, type))
5635 return NULL_RTX;
5636 else if (type && TYPE_MODE (type) == BLKmode)
5638 rtx reg1, reg2;
5640 reg1 = gen_rtx_REG (DImode, num_args + 16);
5641 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5643 /* The argument fits in two registers. Note that we still need to
5644 reserve a register for empty structures. */
5645 if (size == 0)
5646 return NULL_RTX;
5647 else if (size == 1)
5648 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5649 else
5651 reg2 = gen_rtx_REG (DImode, num_args + 17);
5652 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5653 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5657 #elif TARGET_ABI_OSF
5659 if (cum >= 6)
5660 return NULL_RTX;
5661 num_args = cum;
5663 /* VOID is passed as a special flag for "last argument". */
5664 if (type == void_type_node)
5665 basereg = 16;
5666 else if (targetm.calls.must_pass_in_stack (mode, type))
5667 return NULL_RTX;
5669 #else
5670 #error Unhandled ABI
5671 #endif
5673 return gen_rtx_REG (mode, num_args + basereg);
5676 static int
5677 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5678 enum machine_mode mode ATTRIBUTE_UNUSED,
5679 tree type ATTRIBUTE_UNUSED,
5680 bool named ATTRIBUTE_UNUSED)
5682 int words = 0;
5684 #if TARGET_ABI_OPEN_VMS
5685 if (cum->num_args < 6
5686 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5687 words = 6 - cum->num_args;
5688 #elif TARGET_ABI_UNICOSMK
5689 /* Never any split arguments. */
5690 #elif TARGET_ABI_OSF
5691 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5692 words = 6 - *cum;
5693 #else
5694 #error Unhandled ABI
5695 #endif
5697 return words * UNITS_PER_WORD;
5701 /* Return true if TYPE must be returned in memory, instead of in registers. */
5703 static bool
5704 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5706 enum machine_mode mode = VOIDmode;
5707 int size;
5709 if (type)
5711 mode = TYPE_MODE (type);
5713 /* All aggregates are returned in memory. */
5714 if (AGGREGATE_TYPE_P (type))
5715 return true;
5718 size = GET_MODE_SIZE (mode);
5719 switch (GET_MODE_CLASS (mode))
5721 case MODE_VECTOR_FLOAT:
5722 /* Pass all float vectors in memory, like an aggregate. */
5723 return true;
5725 case MODE_COMPLEX_FLOAT:
5726 /* We judge complex floats on the size of their element,
5727 not the size of the whole type. */
5728 size = GET_MODE_UNIT_SIZE (mode);
5729 break;
5731 case MODE_INT:
5732 case MODE_FLOAT:
5733 case MODE_COMPLEX_INT:
5734 case MODE_VECTOR_INT:
5735 break;
5737 default:
5738 /* ??? We get called on all sorts of random stuff from
5739 aggregate_value_p. We must return something, but it's not
5740 clear what's safe to return. Pretend it's a struct I
5741 guess. */
5742 return true;
5745 /* Otherwise types must fit in one register. */
5746 return size > UNITS_PER_WORD;
5749 /* Return true if TYPE should be passed by invisible reference. */
5751 static bool
5752 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5753 enum machine_mode mode,
5754 tree type ATTRIBUTE_UNUSED,
5755 bool named ATTRIBUTE_UNUSED)
5757 return mode == TFmode || mode == TCmode;
5760 /* Define how to find the value returned by a function. VALTYPE is the
5761 data type of the value (as a tree). If the precise function being
5762 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5763 MODE is set instead of VALTYPE for libcalls.
5765 On Alpha the value is found in $0 for integer functions and
5766 $f0 for floating-point functions. */
5769 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5770 enum machine_mode mode)
5772 unsigned int regnum, dummy;
5773 enum mode_class class;
5775 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5777 if (valtype)
5778 mode = TYPE_MODE (valtype);
5780 class = GET_MODE_CLASS (mode);
5781 switch (class)
5783 case MODE_INT:
5784 PROMOTE_MODE (mode, dummy, valtype);
5785 /* FALLTHRU */
5787 case MODE_COMPLEX_INT:
5788 case MODE_VECTOR_INT:
5789 regnum = 0;
5790 break;
5792 case MODE_FLOAT:
5793 regnum = 32;
5794 break;
5796 case MODE_COMPLEX_FLOAT:
5798 enum machine_mode cmode = GET_MODE_INNER (mode);
5800 return gen_rtx_PARALLEL
5801 (VOIDmode,
5802 gen_rtvec (2,
5803 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5804 const0_rtx),
5805 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5806 GEN_INT (GET_MODE_SIZE (cmode)))));
5809 default:
5810 gcc_unreachable ();
5813 return gen_rtx_REG (mode, regnum);
5816 /* TCmode complex values are passed by invisible reference. We
5817 should not split these values. */
5819 static bool
5820 alpha_split_complex_arg (tree type)
5822 return TYPE_MODE (type) != TCmode;
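/* Descriptive comment (added; derived from the body below): build the
   va_list type used on OSF.  The record laid out here corresponds roughly
   to

	struct {
	  void *__base;		   start of the saved register area
	  int __offset;		   byte offset of the next argument
	  int (unnamed);	   dummy field to silence alignment warnings
	};

   VMS and Unicos/Mk simply use a plain pointer type.  */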
5825 static tree
5826 alpha_build_builtin_va_list (void)
5828 tree base, ofs, space, record, type_decl;
5830 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5831 return ptr_type_node;
5833 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5834 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5835 TREE_CHAIN (record) = type_decl;
5836 TYPE_NAME (record) = type_decl;
5838 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5840 /* Dummy field to prevent alignment warnings. */
5841 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5842 DECL_FIELD_CONTEXT (space) = record;
5843 DECL_ARTIFICIAL (space) = 1;
5844 DECL_IGNORED_P (space) = 1;
5846 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5847 integer_type_node);
5848 DECL_FIELD_CONTEXT (ofs) = record;
5849 TREE_CHAIN (ofs) = space;
5851 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5852 ptr_type_node);
5853 DECL_FIELD_CONTEXT (base) = record;
5854 TREE_CHAIN (base) = ofs;
5856 TYPE_FIELDS (record) = base;
5857 layout_type (record);
5859 va_list_gpr_counter_field = ofs;
5860 return record;
5863 #if TARGET_ABI_OSF
5864 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5865 and constant additions. */
5867 static tree
5868 va_list_skip_additions (tree lhs)
5870 tree rhs, stmt;
5872 if (TREE_CODE (lhs) != SSA_NAME)
5873 return lhs;
5875 for (;;)
5877 stmt = SSA_NAME_DEF_STMT (lhs);
5879 if (TREE_CODE (stmt) == PHI_NODE)
5880 return stmt;
5882 if (TREE_CODE (stmt) != MODIFY_EXPR
5883 || TREE_OPERAND (stmt, 0) != lhs)
5884 return lhs;
5886 rhs = TREE_OPERAND (stmt, 1);
5887 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5888 rhs = TREE_OPERAND (rhs, 0);
5890 if ((TREE_CODE (rhs) != NOP_EXPR
5891 && TREE_CODE (rhs) != CONVERT_EXPR
5892 && (TREE_CODE (rhs) != PLUS_EXPR
5893 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5894 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5895 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5896 return rhs;
5898 lhs = TREE_OPERAND (rhs, 0);
5902 /* Check if LHS = RHS statement is
5903 LHS = *(ap.__base + ap.__offset + cst)
5905 LHS = *(ap.__base
5906 + ((ap.__offset + cst <= 47)
5907 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5908 If the former, indicate that GPR registers are needed,
5909 if the latter, indicate that FPR registers are needed.
5910 On alpha, cfun->va_list_gpr_size is used as size of the needed
5911 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5912 GPR registers are needed and bit 1 set if FPR registers are needed.
5913 Return true if va_list references should not be scanned for the current
5914 statement. */
5916 static bool
5917 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5919 tree base, offset, arg1, arg2;
5920 int offset_arg = 1;
5922 if (TREE_CODE (rhs) != INDIRECT_REF
5923 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5924 return false;
5926 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5927 if (lhs == NULL_TREE
5928 || TREE_CODE (lhs) != PLUS_EXPR)
5929 return false;
5931 base = TREE_OPERAND (lhs, 0);
5932 if (TREE_CODE (base) == SSA_NAME)
5933 base = va_list_skip_additions (base);
5935 if (TREE_CODE (base) != COMPONENT_REF
5936 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5938 base = TREE_OPERAND (lhs, 0);
5939 if (TREE_CODE (base) == SSA_NAME)
5940 base = va_list_skip_additions (base);
5942 if (TREE_CODE (base) != COMPONENT_REF
5943 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5944 return false;
5946 offset_arg = 0;
5949 base = get_base_address (base);
5950 if (TREE_CODE (base) != VAR_DECL
5951 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5952 return false;
5954 offset = TREE_OPERAND (lhs, offset_arg);
5955 if (TREE_CODE (offset) == SSA_NAME)
5956 offset = va_list_skip_additions (offset);
5958 if (TREE_CODE (offset) == PHI_NODE)
5960 HOST_WIDE_INT sub;
5962 if (PHI_NUM_ARGS (offset) != 2)
5963 goto escapes;
5965 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5966 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5967 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5969 tree tem = arg1;
5970 arg1 = arg2;
5971 arg2 = tem;
5973 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5974 goto escapes;
5976 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5977 goto escapes;
5979 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5980 if (TREE_CODE (arg2) == MINUS_EXPR)
5981 sub = -sub;
5982 if (sub < -48 || sub > -32)
5983 goto escapes;
5985 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5986 if (arg1 != arg2)
5987 goto escapes;
5989 if (TREE_CODE (arg1) == SSA_NAME)
5990 arg1 = va_list_skip_additions (arg1);
5992 if (TREE_CODE (arg1) != COMPONENT_REF
5993 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5994 || get_base_address (arg1) != base)
5995 goto escapes;
5997 /* Need floating point regs. */
5998 cfun->va_list_fpr_size |= 2;
6000 else if (TREE_CODE (offset) != COMPONENT_REF
6001 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6002 || get_base_address (offset) != base)
6003 goto escapes;
6004 else
6005 /* Need general regs. */
6006 cfun->va_list_fpr_size |= 1;
6007 return false;
6009 escapes:
6010 si->va_list_escapes = true;
6011 return false;
6013 #endif
6015 /* Perform any actions needed for a function that is receiving a
6016 variable number of arguments. */
6018 static void
6019 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6020 tree type, int *pretend_size, int no_rtl)
6022 CUMULATIVE_ARGS cum = *pcum;
6024 /* Skip the current argument. */
6025 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6027 #if TARGET_ABI_UNICOSMK
6028 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6029 arguments on the stack. Unfortunately, it doesn't always store the first
6030 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6031 with stdargs as we always have at least one named argument there. */
6032 if (cum.num_reg_words < 6)
6034 if (!no_rtl)
6036 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6037 emit_insn (gen_arg_home_umk ());
6039 *pretend_size = 0;
6041 #elif TARGET_ABI_OPEN_VMS
6042 /* For VMS, we allocate space for all 6 arg registers plus a count.
6044 However, if NO registers need to be saved, don't allocate any space.
6045 This is not only because we won't need the space, but because AP
6046 includes the current_pretend_args_size and we don't want to mess up
6047 any ap-relative addresses already made. */
6048 if (cum.num_args < 6)
6050 if (!no_rtl)
6052 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6053 emit_insn (gen_arg_home ());
6055 *pretend_size = 7 * UNITS_PER_WORD;
6057 #else
6058 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6059 only push those that are remaining. However, if NO registers need to
6060 be saved, don't allocate any space. This is not only because we won't
6061 need the space, but because AP includes the current_pretend_args_size
6062 and we don't want to mess up any ap-relative addresses already made.
6064 If we are not to use the floating-point registers, save the integer
6065 registers where we would put the floating-point registers. This is
6066 not the most efficient way to implement varargs with just one register
6067 class, but it isn't worth doing anything more efficient in this rare
6068 case. */
6069 if (cum >= 6)
6070 return;
6072 if (!no_rtl)
6074 int count, set = get_varargs_alias_set ();
6075 rtx tmp;
6077 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6078 if (count > 6 - cum)
6079 count = 6 - cum;
6081 /* Detect whether integer registers or floating-point registers
6082 are needed by the detected va_arg statements. See above for
6083 how these values are computed. Note that the "escape" value
6084 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6085 these bits set. */
6086 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6088 if (cfun->va_list_fpr_size & 1)
6090 tmp = gen_rtx_MEM (BLKmode,
6091 plus_constant (virtual_incoming_args_rtx,
6092 (cum + 6) * UNITS_PER_WORD));
6093 MEM_NOTRAP_P (tmp) = 1;
6094 set_mem_alias_set (tmp, set);
6095 move_block_from_reg (16 + cum, tmp, count);
6098 if (cfun->va_list_fpr_size & 2)
6100 tmp = gen_rtx_MEM (BLKmode,
6101 plus_constant (virtual_incoming_args_rtx,
6102 cum * UNITS_PER_WORD));
6103 MEM_NOTRAP_P (tmp) = 1;
6104 set_mem_alias_set (tmp, set);
6105 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6108 *pretend_size = 12 * UNITS_PER_WORD;
6109 #endif
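/* Descriptive comment (added; derived from the body below): implement
   va_start, initializing the va_list object VALIST for the current
   function.  On OSF this fills in the __base and __offset fields of the
   record; VMS and Unicos/Mk use a plain pointer.  */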
6112 void
6113 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6115 HOST_WIDE_INT offset;
6116 tree t, offset_field, base_field;
6118 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6119 return;
6121 if (TARGET_ABI_UNICOSMK)
6122 std_expand_builtin_va_start (valist, nextarg);
6124 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6125 up by 48, storing fp arg registers in the first 48 bytes, and the
6126 integer arg registers in the next 48 bytes. This is only done,
6127 however, if any integer registers need to be stored.
6129 If no integer registers need be stored, then we must subtract 48
6130 in order to account for the integer arg registers which are counted
6131 in argsize above, but which are not actually stored on the stack.
6132 Must further be careful here about structures straddling the last
6133 integer argument register; that futzes with pretend_args_size,
6134 which changes the meaning of AP. */
6136 if (NUM_ARGS < 6)
6137 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6138 else
6139 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6141 if (TARGET_ABI_OPEN_VMS)
6143 nextarg = plus_constant (nextarg, offset);
6144 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6145 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
6146 make_tree (ptr_type_node, nextarg));
6147 TREE_SIDE_EFFECTS (t) = 1;
6149 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6151 else
6153 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6154 offset_field = TREE_CHAIN (base_field);
6156 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6157 valist, base_field, NULL_TREE);
6158 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6159 valist, offset_field, NULL_TREE);
6161 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6162 t = build (PLUS_EXPR, ptr_type_node, t,
6163 build_int_cst (NULL_TREE, offset));
6164 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6165 TREE_SIDE_EFFECTS (t) = 1;
6166 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6168 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6169 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6170 TREE_SIDE_EFFECTS (t) = 1;
6171 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
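/* Descriptive comment (added; derived from the body below): a subroutine of
   alpha_gimplify_va_arg.  Gimplify the fetch of one argument of type TYPE
   located at BASE + OFFSET, appending any setup statements to PRE_P and
   returning the loaded value.  Complex types are fetched as two parts;
   floating-point arguments still inside the register save area are read
   48 bytes lower, from the FP register block.  */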
6175 static tree
6176 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6178 tree type_size, ptr_type, addend, t, addr, internal_post;
6180 /* If the type could not be passed in registers, skip the block
6181 reserved for the registers. */
6182 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6184 t = build_int_cst (TREE_TYPE (offset), 6*8);
6185 t = build (MODIFY_EXPR, TREE_TYPE (offset), offset,
6186 build (MAX_EXPR, TREE_TYPE (offset), offset, t));
6187 gimplify_and_add (t, pre_p);
6190 addend = offset;
6191 ptr_type = build_pointer_type (type);
6193 if (TREE_CODE (type) == COMPLEX_TYPE)
6195 tree real_part, imag_part, real_temp;
6197 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6198 offset, pre_p);
6200 /* Copy the value into a new temporary, lest the formal temporary
6201 be reused out from under us. */
6202 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6204 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6205 offset, pre_p);
6207 return build (COMPLEX_EXPR, type, real_temp, imag_part);
6209 else if (TREE_CODE (type) == REAL_TYPE)
6211 tree fpaddend, cond, fourtyeight;
6213 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6214 fpaddend = fold (build (MINUS_EXPR, TREE_TYPE (addend),
6215 addend, fourtyeight));
6216 cond = fold (build (LT_EXPR, boolean_type_node, addend, fourtyeight));
6217 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
6218 fpaddend, addend));
6221 /* Build the final address and force that value into a temporary. */
6222 addr = build (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6223 fold_convert (ptr_type, addend));
6224 internal_post = NULL;
6225 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6226 append_to_statement_list (internal_post, pre_p);
6228 /* Update the offset field. */
6229 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6230 if (type_size == NULL || TREE_OVERFLOW (type_size))
6231 t = size_zero_node;
6232 else
6234 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6235 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6236 t = size_binop (MULT_EXPR, t, size_int (8));
6238 t = fold_convert (TREE_TYPE (offset), t);
6239 t = build (MODIFY_EXPR, void_type_node, offset,
6240 build (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6241 gimplify_and_add (t, pre_p);
6243 return build_va_arg_indirect_ref (addr);
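/* Descriptive comment (added; derived from the body below): gimplify a
   VA_ARG_EXPR for the OSF va_list.  Pull the __base and __offset fields
   into temporaries, fetch the value via alpha_gimplify_va_arg_1, and store
   the updated offset back; arguments passed by invisible reference are
   fetched through the extra pointer.  VMS and Unicos/Mk defer to the
   standard implementation.  */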
6246 static tree
6247 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6249 tree offset_field, base_field, offset, base, t, r;
6250 bool indirect;
6252 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6253 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6255 base_field = TYPE_FIELDS (va_list_type_node);
6256 offset_field = TREE_CHAIN (base_field);
6257 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6258 valist, base_field, NULL_TREE);
6259 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6260 valist, offset_field, NULL_TREE);
6262 /* Pull the fields of the structure out into temporaries. Since we never
6263 modify the base field, we can use a formal temporary. Sign-extend the
6264 offset field so that it's the proper width for pointer arithmetic. */
6265 base = get_formal_tmp_var (base_field, pre_p);
6267 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6268 offset = get_initialized_tmp_var (t, pre_p, NULL);
6270 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6271 if (indirect)
6272 type = build_pointer_type (type);
6274 /* Find the value. Note that this will be a stable indirection, or
6275 a composite of stable indirections in the case of complex. */
6276 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6278 /* Stuff the offset temporary back into its field. */
6279 t = build (MODIFY_EXPR, void_type_node, offset_field,
6280 fold_convert (TREE_TYPE (offset_field), offset));
6281 gimplify_and_add (t, pre_p);
6283 if (indirect)
6284 r = build_va_arg_indirect_ref (r);
6286 return r;
6289 /* Builtins. */
6291 enum alpha_builtin
6293 ALPHA_BUILTIN_CMPBGE,
6294 ALPHA_BUILTIN_EXTBL,
6295 ALPHA_BUILTIN_EXTWL,
6296 ALPHA_BUILTIN_EXTLL,
6297 ALPHA_BUILTIN_EXTQL,
6298 ALPHA_BUILTIN_EXTWH,
6299 ALPHA_BUILTIN_EXTLH,
6300 ALPHA_BUILTIN_EXTQH,
6301 ALPHA_BUILTIN_INSBL,
6302 ALPHA_BUILTIN_INSWL,
6303 ALPHA_BUILTIN_INSLL,
6304 ALPHA_BUILTIN_INSQL,
6305 ALPHA_BUILTIN_INSWH,
6306 ALPHA_BUILTIN_INSLH,
6307 ALPHA_BUILTIN_INSQH,
6308 ALPHA_BUILTIN_MSKBL,
6309 ALPHA_BUILTIN_MSKWL,
6310 ALPHA_BUILTIN_MSKLL,
6311 ALPHA_BUILTIN_MSKQL,
6312 ALPHA_BUILTIN_MSKWH,
6313 ALPHA_BUILTIN_MSKLH,
6314 ALPHA_BUILTIN_MSKQH,
6315 ALPHA_BUILTIN_UMULH,
6316 ALPHA_BUILTIN_ZAP,
6317 ALPHA_BUILTIN_ZAPNOT,
6318 ALPHA_BUILTIN_AMASK,
6319 ALPHA_BUILTIN_IMPLVER,
6320 ALPHA_BUILTIN_RPCC,
6321 ALPHA_BUILTIN_THREAD_POINTER,
6322 ALPHA_BUILTIN_SET_THREAD_POINTER,
6324 /* TARGET_MAX */
6325 ALPHA_BUILTIN_MINUB8,
6326 ALPHA_BUILTIN_MINSB8,
6327 ALPHA_BUILTIN_MINUW4,
6328 ALPHA_BUILTIN_MINSW4,
6329 ALPHA_BUILTIN_MAXUB8,
6330 ALPHA_BUILTIN_MAXSB8,
6331 ALPHA_BUILTIN_MAXUW4,
6332 ALPHA_BUILTIN_MAXSW4,
6333 ALPHA_BUILTIN_PERR,
6334 ALPHA_BUILTIN_PKLB,
6335 ALPHA_BUILTIN_PKWB,
6336 ALPHA_BUILTIN_UNPKBL,
6337 ALPHA_BUILTIN_UNPKBW,
6339 /* TARGET_CIX */
6340 ALPHA_BUILTIN_CTTZ,
6341 ALPHA_BUILTIN_CTLZ,
6342 ALPHA_BUILTIN_CTPOP,
6344 ALPHA_BUILTIN_max
6347 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6348 CODE_FOR_builtin_cmpbge,
6349 CODE_FOR_builtin_extbl,
6350 CODE_FOR_builtin_extwl,
6351 CODE_FOR_builtin_extll,
6352 CODE_FOR_builtin_extql,
6353 CODE_FOR_builtin_extwh,
6354 CODE_FOR_builtin_extlh,
6355 CODE_FOR_builtin_extqh,
6356 CODE_FOR_builtin_insbl,
6357 CODE_FOR_builtin_inswl,
6358 CODE_FOR_builtin_insll,
6359 CODE_FOR_builtin_insql,
6360 CODE_FOR_builtin_inswh,
6361 CODE_FOR_builtin_inslh,
6362 CODE_FOR_builtin_insqh,
6363 CODE_FOR_builtin_mskbl,
6364 CODE_FOR_builtin_mskwl,
6365 CODE_FOR_builtin_mskll,
6366 CODE_FOR_builtin_mskql,
6367 CODE_FOR_builtin_mskwh,
6368 CODE_FOR_builtin_msklh,
6369 CODE_FOR_builtin_mskqh,
6370 CODE_FOR_umuldi3_highpart,
6371 CODE_FOR_builtin_zap,
6372 CODE_FOR_builtin_zapnot,
6373 CODE_FOR_builtin_amask,
6374 CODE_FOR_builtin_implver,
6375 CODE_FOR_builtin_rpcc,
6376 CODE_FOR_load_tp,
6377 CODE_FOR_set_tp,
6379 /* TARGET_MAX */
6380 CODE_FOR_builtin_minub8,
6381 CODE_FOR_builtin_minsb8,
6382 CODE_FOR_builtin_minuw4,
6383 CODE_FOR_builtin_minsw4,
6384 CODE_FOR_builtin_maxub8,
6385 CODE_FOR_builtin_maxsb8,
6386 CODE_FOR_builtin_maxuw4,
6387 CODE_FOR_builtin_maxsw4,
6388 CODE_FOR_builtin_perr,
6389 CODE_FOR_builtin_pklb,
6390 CODE_FOR_builtin_pkwb,
6391 CODE_FOR_builtin_unpkbl,
6392 CODE_FOR_builtin_unpkbw,
6394 /* TARGET_CIX */
6395 CODE_FOR_ctzdi2,
6396 CODE_FOR_clzdi2,
6397 CODE_FOR_popcountdi2
6400 struct alpha_builtin_def
6402 const char *name;
6403 enum alpha_builtin code;
6404 unsigned int target_mask;
6405 bool is_const;
6408 static struct alpha_builtin_def const zero_arg_builtins[] = {
6409 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6410 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6413 static struct alpha_builtin_def const one_arg_builtins[] = {
6414 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6415 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6416 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6417 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6418 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6419 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6420 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6421 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6424 static struct alpha_builtin_def const two_arg_builtins[] = {
6425 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6426 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6427 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6428 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6429 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6430 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6431 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6432 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6433 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6434 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6435 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6436 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6437 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6438 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6439 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6440 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6441 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6442 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6443 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6444 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6445 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6446 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6447 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6448 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6449 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6450 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6451 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6452 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6453 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6454 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6455 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6456 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6457 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6458 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
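/* A user-level usage sketch (not part of this file): given the
   declarations above, a call such as
     long r = __builtin_alpha_zapnot (x, 0x0f);
   keeps the low four bytes of x and clears the rest, and is a candidate
   for the constant folding routines defined further below.  */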
6461 static GTY(()) tree alpha_v8qi_u;
6462 static GTY(()) tree alpha_v8qi_s;
6463 static GTY(()) tree alpha_v4hi_u;
6464 static GTY(()) tree alpha_v4hi_s;
6466 static void
6467 alpha_init_builtins (void)
6469 const struct alpha_builtin_def *p;
6470 tree ftype, attrs[2];
6471 size_t i;
6473 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6474 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6476 ftype = build_function_type (long_integer_type_node, void_list_node);
6478 p = zero_arg_builtins;
6479 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6480 if ((target_flags & p->target_mask) == p->target_mask)
6481 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6482 NULL, attrs[p->is_const]);
6484 ftype = build_function_type_list (long_integer_type_node,
6485 long_integer_type_node, NULL_TREE);
6487 p = one_arg_builtins;
6488 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6489 if ((target_flags & p->target_mask) == p->target_mask)
6490 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6491 NULL, attrs[p->is_const]);
6493 ftype = build_function_type_list (long_integer_type_node,
6494 long_integer_type_node,
6495 long_integer_type_node, NULL_TREE);
6497 p = two_arg_builtins;
6498 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6499 if ((target_flags & p->target_mask) == p->target_mask)
6500 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6501 NULL, attrs[p->is_const]);
6503 ftype = build_function_type (ptr_type_node, void_list_node);
6504 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6505 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6506 NULL, attrs[0]);
6508 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6509 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6510 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6511 NULL, attrs[0]);
6513 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6514 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6515 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6516 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6519 /* Expand an expression EXP that calls a built-in function,
6520 with result going to TARGET if that's convenient
6521 (and in mode MODE if that's convenient).
6522 SUBTARGET may be used as the target for computing one of EXP's operands.
6523 IGNORE is nonzero if the value is to be ignored. */
6525 static rtx
6526 alpha_expand_builtin (tree exp, rtx target,
6527 rtx subtarget ATTRIBUTE_UNUSED,
6528 enum machine_mode mode ATTRIBUTE_UNUSED,
6529 int ignore ATTRIBUTE_UNUSED)
6531 #define MAX_ARGS 2
6533 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6534 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6535 tree arglist = TREE_OPERAND (exp, 1);
6536 enum insn_code icode;
6537 rtx op[MAX_ARGS], pat;
6538 int arity;
6539 bool nonvoid;
6541 if (fcode >= ALPHA_BUILTIN_max)
6542 internal_error ("bad builtin fcode");
6543 icode = code_for_builtin[fcode];
6544 if (icode == 0)
6545 internal_error ("bad builtin fcode");
6547 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6549 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6550 arglist;
6551 arglist = TREE_CHAIN (arglist), arity++)
6553 const struct insn_operand_data *insn_op;
6555 tree arg = TREE_VALUE (arglist);
6556 if (arg == error_mark_node)
6557 return NULL_RTX;
6558 if (arity > MAX_ARGS)
6559 return NULL_RTX;
6561 insn_op = &insn_data[icode].operand[arity + nonvoid];
6563 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6565 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6566 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6569 if (nonvoid)
6571 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6572 if (!target
6573 || GET_MODE (target) != tmode
6574 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6575 target = gen_reg_rtx (tmode);
6578 switch (arity)
6580 case 0:
6581 pat = GEN_FCN (icode) (target);
6582 break;
6583 case 1:
6584 if (nonvoid)
6585 pat = GEN_FCN (icode) (target, op[0]);
6586 else
6587 pat = GEN_FCN (icode) (op[0]);
6588 break;
6589 case 2:
6590 pat = GEN_FCN (icode) (target, op[0], op[1]);
6591 break;
6592 default:
6593 gcc_unreachable ();
6595 if (!pat)
6596 return NULL_RTX;
6597 emit_insn (pat);
6599 if (nonvoid)
6600 return target;
6601 else
6602 return const0_rtx;
6606 /* Several bits below assume HWI >= 64 bits. This should be enforced
6607 by config.gcc. */
6608 #if HOST_BITS_PER_WIDE_INT < 64
6609 # error "HOST_WIDE_INT too small"
6610 #endif
6612 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6613 with an 8 bit output vector. OPINT contains the integer operands; bit N
6614 of OP_CONST is set if OPINT[N] is valid. */
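/* A worked example with illustrative (assumed) constants: for
   opint[0] = 0x0011223344556677 and opint[1] = 0x1111111111111111, the
   unsigned byte-wise comparison finds bytes 0..6 of the first operand
   (0x77 down to 0x11) >= 0x11 while byte 7 (0x00) is not, so the folded
   result is 0x7f.  */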
6616 static tree
6617 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6619 if (op_const == 3)
6621 int i, val;
6622 for (i = 0, val = 0; i < 8; ++i)
6624 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6625 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6626 if (c0 >= c1)
6627 val |= 1 << i;
6629 return build_int_cst (long_integer_type_node, val);
6631 else if (op_const == 2 && opint[1] == 0)
6632 return build_int_cst (long_integer_type_node, 0xff);
6633 return NULL;
6636 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6637 specialized form of an AND operation. Other byte manipulation instructions
6638 are defined in terms of this instruction, so this is also used as a
6639 subroutine for other builtins.
6641 OP contains the tree operands; OPINT contains the extracted integer values.
6642 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6643 OPINT may be considered. */
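/* For instance, with an (assumed) constant mask of 0x0f the loop below
   expands it to the byte mask 0x00000000ffffffff; with both operands
   constant the fold is a plain AND with that value, and with only the
   mask constant it becomes a BIT_AND_EXPR against that constant.  */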
6645 static tree
6646 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6647 long op_const)
6649 if (op_const & 2)
6651 unsigned HOST_WIDE_INT mask = 0;
6652 int i;
6654 for (i = 0; i < 8; ++i)
6655 if ((opint[1] >> i) & 1)
6656 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6658 if (op_const & 1)
6659 return build_int_cst (long_integer_type_node, opint[0] & mask);
6661 if (op)
6662 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6663 build_int_cst (long_integer_type_node, mask)));
6665 else if ((op_const & 1) && opint[0] == 0)
6666 return build_int_cst (long_integer_type_node, 0);
6667 return NULL;
6670 /* Fold the builtins for the EXT family of instructions. */
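/* A quick illustration on little-endian Alpha with assumed constant
   operands: extwl with a byte offset of 2 shifts the value right by 16
   bits and keeps the low two bytes, so __builtin_alpha_extwl
   (0x0123456789abcdef, 2) folds to 0x89ab.  */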
6672 static tree
6673 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6674 long op_const, unsigned HOST_WIDE_INT bytemask,
6675 bool is_high)
6677 long zap_const = 2;
6678 tree *zap_op = NULL;
6680 if (op_const & 2)
6682 unsigned HOST_WIDE_INT loc;
6684 loc = opint[1] & 7;
6685 if (BYTES_BIG_ENDIAN)
6686 loc ^= 7;
6687 loc *= 8;
6689 if (loc != 0)
6691 if (op_const & 1)
6693 unsigned HOST_WIDE_INT temp = opint[0];
6694 if (is_high)
6695 temp <<= loc;
6696 else
6697 temp >>= loc;
6698 opint[0] = temp;
6699 zap_const = 3;
6702 else
6703 zap_op = op;
6706 opint[1] = bytemask;
6707 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6710 /* Fold the builtins for the INS family of instructions. */
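/* Similarly, on little-endian Alpha with assumed constant operands:
   inswl with a byte offset of 3 shifts the value left by 24 bits and
   keeps bytes 3 and 4, so __builtin_alpha_inswl (0xabcd, 3) folds to
   0x000000abcd000000.  */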
6712 static tree
6713 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6714 long op_const, unsigned HOST_WIDE_INT bytemask,
6715 bool is_high)
6717 if ((op_const & 1) && opint[0] == 0)
6718 return build_int_cst (long_integer_type_node, 0);
6720 if (op_const & 2)
6722 unsigned HOST_WIDE_INT temp, loc, byteloc;
6723 tree *zap_op = NULL;
6725 loc = opint[1] & 7;
6726 if (BYTES_BIG_ENDIAN)
6727 loc ^= 7;
6728 bytemask <<= loc;
6730 temp = opint[0];
6731 if (is_high)
6733 byteloc = (64 - (loc * 8)) & 0x3f;
6734 if (byteloc == 0)
6735 zap_op = op;
6736 else
6737 temp >>= byteloc;
6738 bytemask >>= 8;
6740 else
6742 byteloc = loc * 8;
6743 if (byteloc == 0)
6744 zap_op = op;
6745 else
6746 temp <<= byteloc;
6749 opint[0] = temp;
6750 opint[1] = bytemask;
6751 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6754 return NULL;
6757 static tree
6758 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6759 long op_const, unsigned HOST_WIDE_INT bytemask,
6760 bool is_high)
6762 if (op_const & 2)
6764 unsigned HOST_WIDE_INT loc;
6766 loc = opint[1] & 7;
6767 if (BYTES_BIG_ENDIAN)
6768 loc ^= 7;
6769 bytemask <<= loc;
6771 if (is_high)
6772 bytemask >>= 8;
6774 opint[1] = bytemask ^ 0xff;
6777 return alpha_fold_builtin_zapnot (op, opint, op_const);
6780 static tree
6781 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6783 switch (op_const)
6785 case 3:
6787 unsigned HOST_WIDE_INT l;
6788 HOST_WIDE_INT h;
6790 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6792 #if HOST_BITS_PER_WIDE_INT > 64
6793 # error fixme
6794 #endif
6796 return build_int_cst (long_integer_type_node, h);
6799 case 1:
6800 opint[1] = opint[0];
6801 /* FALLTHRU */
6802 case 2:
6803 /* Note that (X*1) >> 64 == 0. */
6804 if (opint[1] == 0 || opint[1] == 1)
6805 return build_int_cst (long_integer_type_node, 0);
6806 break;
6808 return NULL;
6811 static tree
6812 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6814 tree op0 = fold_convert (vtype, op[0]);
6815 tree op1 = fold_convert (vtype, op[1]);
6816 tree val = fold (build2 (code, vtype, op0, op1));
6817 return fold_convert (long_integer_type_node, val);
6820 static tree
6821 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6823 unsigned HOST_WIDE_INT temp = 0;
6824 int i;
6826 if (op_const != 3)
6827 return NULL;
6829 for (i = 0; i < 8; ++i)
6831 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6832 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6833 if (a >= b)
6834 temp += a - b;
6835 else
6836 temp += b - a;
6839 return build_int_cst (long_integer_type_node, temp);
6842 static tree
6843 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6845 unsigned HOST_WIDE_INT temp;
6847 if (op_const == 0)
6848 return NULL;
6850 temp = opint[0] & 0xff;
6851 temp |= (opint[0] >> 24) & 0xff00;
6853 return build_int_cst (long_integer_type_node, temp);
6856 static tree
6857 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6859 unsigned HOST_WIDE_INT temp;
6861 if (op_const == 0)
6862 return NULL;
6864 temp = opint[0] & 0xff;
6865 temp |= (opint[0] >> 8) & 0xff00;
6866 temp |= (opint[0] >> 16) & 0xff0000;
6867 temp |= (opint[0] >> 24) & 0xff000000;
6869 return build_int_cst (long_integer_type_node, temp);
6872 static tree
6873 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6875 unsigned HOST_WIDE_INT temp;
6877 if (op_const == 0)
6878 return NULL;
6880 temp = opint[0] & 0xff;
6881 temp |= (opint[0] & 0xff00) << 24;
6883 return build_int_cst (long_integer_type_node, temp);
6886 static tree
6887 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6889 unsigned HOST_WIDE_INT temp;
6891 if (op_const == 0)
6892 return NULL;
6894 temp = opint[0] & 0xff;
6895 temp |= (opint[0] & 0x0000ff00) << 8;
6896 temp |= (opint[0] & 0x00ff0000) << 16;
6897 temp |= (opint[0] & 0xff000000) << 24;
6899 return build_int_cst (long_integer_type_node, temp);
6902 static tree
6903 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6905 unsigned HOST_WIDE_INT temp;
6907 if (op_const == 0)
6908 return NULL;
6910 if (opint[0] == 0)
6911 temp = 64;
6912 else
6913 temp = exact_log2 (opint[0] & -opint[0]);
6915 return build_int_cst (long_integer_type_node, temp);
6918 static tree
6919 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6921 unsigned HOST_WIDE_INT temp;
6923 if (op_const == 0)
6924 return NULL;
6926 if (opint[0] == 0)
6927 temp = 64;
6928 else
6929 temp = 64 - floor_log2 (opint[0]) - 1;
6931 return build_int_cst (long_integer_type_node, temp);
6934 static tree
6935 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6937 unsigned HOST_WIDE_INT temp, op;
6939 if (op_const == 0)
6940 return NULL;
6942 op = opint[0];
6943 temp = 0;
6944 while (op)
6945 temp++, op &= op - 1;
6947 return build_int_cst (long_integer_type_node, temp);
6950 /* Fold one of our builtin functions. */
6952 static tree
6953 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6955 tree op[MAX_ARGS], t;
6956 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6957 long op_const = 0, arity = 0;
6959 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6961 tree arg = TREE_VALUE (t);
6962 if (arg == error_mark_node)
6963 return NULL;
6964 if (arity >= MAX_ARGS)
6965 return NULL;
6967 op[arity] = arg;
6968 opint[arity] = 0;
6969 if (TREE_CODE (arg) == INTEGER_CST)
6971 op_const |= 1L << arity;
6972 opint[arity] = int_cst_value (arg);
6976 switch (DECL_FUNCTION_CODE (fndecl))
6978 case ALPHA_BUILTIN_CMPBGE:
6979 return alpha_fold_builtin_cmpbge (opint, op_const);
6981 case ALPHA_BUILTIN_EXTBL:
6982 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6983 case ALPHA_BUILTIN_EXTWL:
6984 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6985 case ALPHA_BUILTIN_EXTLL:
6986 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6987 case ALPHA_BUILTIN_EXTQL:
6988 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6989 case ALPHA_BUILTIN_EXTWH:
6990 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6991 case ALPHA_BUILTIN_EXTLH:
6992 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6993 case ALPHA_BUILTIN_EXTQH:
6994 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6996 case ALPHA_BUILTIN_INSBL:
6997 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6998 case ALPHA_BUILTIN_INSWL:
6999 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7000 case ALPHA_BUILTIN_INSLL:
7001 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7002 case ALPHA_BUILTIN_INSQL:
7003 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7004 case ALPHA_BUILTIN_INSWH:
7005 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7006 case ALPHA_BUILTIN_INSLH:
7007 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7008 case ALPHA_BUILTIN_INSQH:
7009 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7011 case ALPHA_BUILTIN_MSKBL:
7012 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7013 case ALPHA_BUILTIN_MSKWL:
7014 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7015 case ALPHA_BUILTIN_MSKLL:
7016 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7017 case ALPHA_BUILTIN_MSKQL:
7018 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7019 case ALPHA_BUILTIN_MSKWH:
7020 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7021 case ALPHA_BUILTIN_MSKLH:
7022 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7023 case ALPHA_BUILTIN_MSKQH:
7024 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7026 case ALPHA_BUILTIN_UMULH:
7027 return alpha_fold_builtin_umulh (opint, op_const);
7029 case ALPHA_BUILTIN_ZAP:
7030 opint[1] ^= 0xff;
7031 /* FALLTHRU */
7032 case ALPHA_BUILTIN_ZAPNOT:
7033 return alpha_fold_builtin_zapnot (op, opint, op_const);
7035 case ALPHA_BUILTIN_MINUB8:
7036 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7037 case ALPHA_BUILTIN_MINSB8:
7038 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7039 case ALPHA_BUILTIN_MINUW4:
7040 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7041 case ALPHA_BUILTIN_MINSW4:
7042 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7043 case ALPHA_BUILTIN_MAXUB8:
7044 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7045 case ALPHA_BUILTIN_MAXSB8:
7046 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7047 case ALPHA_BUILTIN_MAXUW4:
7048 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7049 case ALPHA_BUILTIN_MAXSW4:
7050 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7052 case ALPHA_BUILTIN_PERR:
7053 return alpha_fold_builtin_perr (opint, op_const);
7054 case ALPHA_BUILTIN_PKLB:
7055 return alpha_fold_builtin_pklb (opint, op_const);
7056 case ALPHA_BUILTIN_PKWB:
7057 return alpha_fold_builtin_pkwb (opint, op_const);
7058 case ALPHA_BUILTIN_UNPKBL:
7059 return alpha_fold_builtin_unpkbl (opint, op_const);
7060 case ALPHA_BUILTIN_UNPKBW:
7061 return alpha_fold_builtin_unpkbw (opint, op_const);
7063 case ALPHA_BUILTIN_CTTZ:
7064 return alpha_fold_builtin_cttz (opint, op_const);
7065 case ALPHA_BUILTIN_CTLZ:
7066 return alpha_fold_builtin_ctlz (opint, op_const);
7067 case ALPHA_BUILTIN_CTPOP:
7068 return alpha_fold_builtin_ctpop (opint, op_const);
7070 case ALPHA_BUILTIN_AMASK:
7071 case ALPHA_BUILTIN_IMPLVER:
7072 case ALPHA_BUILTIN_RPCC:
7073 case ALPHA_BUILTIN_THREAD_POINTER:
7074 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7075 /* None of these are foldable at compile-time. */
7076 default:
7077 return NULL;
7081 /* This page contains routines that are used to determine what the function
7082 prologue and epilogue code will do and write them out. */
7084 /* Compute the size of the save area in the stack. */
7086 /* These variables are used for communication between the following functions.
7087 They indicate various things about the current function being compiled
7088 that are used to tell what kind of prologue, epilogue and procedure
7089 descriptor to generate. */
7091 /* The kind of procedure (null, register, or stack frame) we need. */
7092 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7093 static enum alpha_procedure_types alpha_procedure_type;
7095 /* Register number (either FP or SP) that is used to unwind the frame. */
7096 static int vms_unwind_regno;
7098 /* Register number used to save FP. We need not have one for RA since
7099 we don't modify it for register procedures. This is only defined
7100 for register frame procedures. */
7101 static int vms_save_fp_regno;
7103 /* Register number used to reference objects off our PV. */
7104 static int vms_base_regno;
7106 /* Compute register masks for saved registers. */
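/* An illustrative reading of the masks: bit I of *imaskP marks integer
   register $I as needing a save, while bit I of *fmaskP refers to hard
   register I + 32, i.e. floating register $fI.  A function that must
   save $9 and $f2 would thus end up with bit 9 set in the integer mask
   and bit 2 set in the FP mask, plus the RA bit added at the end.  */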
7108 static void
7109 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7111 unsigned long imask = 0;
7112 unsigned long fmask = 0;
7113 unsigned int i;
7115 /* When outputting a thunk, we don't have valid register life info,
7116 but assemble_start_function wants to output .frame and .mask
7117 directives. */
7118 if (current_function_is_thunk)
7120 *imaskP = 0;
7121 *fmaskP = 0;
7122 return;
7125 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7126 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7128 /* One for every register we have to save. */
7129 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7130 if (! fixed_regs[i] && ! call_used_regs[i]
7131 && regs_ever_live[i] && i != REG_RA
7132 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7134 if (i < 32)
7135 imask |= (1UL << i);
7136 else
7137 fmask |= (1UL << (i - 32));
7140 /* We need to restore these for the handler. */
7141 if (current_function_calls_eh_return)
7143 for (i = 0; ; ++i)
7145 unsigned regno = EH_RETURN_DATA_REGNO (i);
7146 if (regno == INVALID_REGNUM)
7147 break;
7148 imask |= 1UL << regno;
7152 /* If any register spilled, then spill the return address also. */
7153 /* ??? This is required by the Digital stack unwind specification
7154 and isn't needed if we're doing Dwarf2 unwinding. */
7155 if (imask || fmask || alpha_ra_ever_killed ())
7156 imask |= (1UL << REG_RA);
7158 *imaskP = imask;
7159 *fmaskP = fmask;
7163 alpha_sa_size (void)
7165 unsigned long mask[2];
7166 int sa_size = 0;
7167 int i, j;
7169 alpha_sa_mask (&mask[0], &mask[1]);
7171 if (TARGET_ABI_UNICOSMK)
7173 if (mask[0] || mask[1])
7174 sa_size = 14;
7176 else
7178 for (j = 0; j < 2; ++j)
7179 for (i = 0; i < 32; ++i)
7180 if ((mask[j] >> i) & 1)
7181 sa_size++;
7184 if (TARGET_ABI_UNICOSMK)
7186 /* We might not need to generate a frame if we don't make any calls
7187 (including calls to __T3E_MISMATCH if this is a vararg function),
7188 don't have any local variables which require stack slots, don't
7189 use alloca and have not determined that we need a frame for other
7190 reasons. */
7192 alpha_procedure_type
7193 = (sa_size || get_frame_size() != 0
7194 || current_function_outgoing_args_size
7195 || current_function_stdarg || current_function_calls_alloca
7196 || frame_pointer_needed)
7197 ? PT_STACK : PT_REGISTER;
7199 /* Always reserve space for saving callee-saved registers if we
7200 need a frame as required by the calling convention. */
7201 if (alpha_procedure_type == PT_STACK)
7202 sa_size = 14;
7204 else if (TARGET_ABI_OPEN_VMS)
7206 /* Start by assuming we can use a register procedure if we don't
7207 make any calls (REG_RA not used) or need to save any
7208 registers and a stack procedure if we do. */
7209 if ((mask[0] >> REG_RA) & 1)
7210 alpha_procedure_type = PT_STACK;
7211 else if (get_frame_size() != 0)
7212 alpha_procedure_type = PT_REGISTER;
7213 else
7214 alpha_procedure_type = PT_NULL;
7216 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7217 made the final decision on stack procedure vs register procedure. */
7218 if (alpha_procedure_type == PT_STACK)
7219 sa_size -= 2;
7221 /* Decide whether to refer to objects off our PV via FP or PV.
7222 If we need FP for something else or if we receive a nonlocal
7223 goto (which expects PV to contain the value), we must use PV.
7224 Otherwise, start by assuming we can use FP. */
7226 vms_base_regno
7227 = (frame_pointer_needed
7228 || current_function_has_nonlocal_label
7229 || alpha_procedure_type == PT_STACK
7230 || current_function_outgoing_args_size)
7231 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7233 /* If we want to copy PV into FP, we need to find some register
7234 in which to save FP. */
7236 vms_save_fp_regno = -1;
7237 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7238 for (i = 0; i < 32; i++)
7239 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7240 vms_save_fp_regno = i;
7242 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7243 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7244 else if (alpha_procedure_type == PT_NULL)
7245 vms_base_regno = REG_PV;
7247 /* Stack unwinding should be done via FP unless we use it for PV. */
7248 vms_unwind_regno = (vms_base_regno == REG_PV
7249 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7251 /* If this is a stack procedure, allow space for saving FP and RA. */
7252 if (alpha_procedure_type == PT_STACK)
7253 sa_size += 2;
7255 else
7257 /* Our size must be even (multiple of 16 bytes). */
7258 if (sa_size & 1)
7259 sa_size++;
7262 return sa_size * 8;
7265 /* Define the offset between two registers, one to be eliminated,
7266 and the other its replacement, at the start of a routine. */
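/* A small worked example with assumed sizes: for a 16-byte save area,
   32 bytes of outgoing arguments, a 64-byte local frame and no pretend
   args, eliminating the soft frame pointer yields an offset of
   16 + 32 = 48, and eliminating the arg pointer adds the rounded local
   frame for a total of 48 + 64 = 112.  */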
7268 HOST_WIDE_INT
7269 alpha_initial_elimination_offset (unsigned int from,
7270 unsigned int to ATTRIBUTE_UNUSED)
7272 HOST_WIDE_INT ret;
7274 ret = alpha_sa_size ();
7275 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7277 switch (from)
7279 case FRAME_POINTER_REGNUM:
7280 break;
7282 case ARG_POINTER_REGNUM:
7283 ret += (ALPHA_ROUND (get_frame_size ()
7284 + current_function_pretend_args_size)
7285 - current_function_pretend_args_size);
7286 break;
7288 default:
7289 gcc_unreachable ();
7292 return ret;
7296 alpha_pv_save_size (void)
7298 alpha_sa_size ();
7299 return alpha_procedure_type == PT_STACK ? 8 : 0;
7303 alpha_using_fp (void)
7305 alpha_sa_size ();
7306 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7309 #if TARGET_ABI_OPEN_VMS
7311 const struct attribute_spec vms_attribute_table[] =
7313 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7314 { "overlaid", 0, 0, true, false, false, NULL },
7315 { "global", 0, 0, true, false, false, NULL },
7316 { "initialize", 0, 0, true, false, false, NULL },
7317 { NULL, 0, 0, false, false, false, NULL }
7320 #endif
7322 static int
7323 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7325 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7329 alpha_find_lo_sum_using_gp (rtx insn)
7331 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7334 static int
7335 alpha_does_function_need_gp (void)
7337 rtx insn;
7339 /* The GP being variable is an OSF abi thing. */
7340 if (! TARGET_ABI_OSF)
7341 return 0;
7343 /* We need the gp to load the address of __mcount. */
7344 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7345 return 1;
7347 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7348 if (current_function_is_thunk)
7349 return 1;
7351 /* The nonlocal receiver pattern assumes that the gp is valid for
7352 the nested function. Reasonable because it's almost always set
7353 correctly already. For the cases where that's wrong, make sure
7354 the nested function loads its gp on entry. */
7355 if (current_function_has_nonlocal_goto)
7356 return 1;
7358 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7359 Even if we are a static function, we still need to do this in case
7360 our address is taken and passed to something like qsort. */
7362 push_topmost_sequence ();
7363 insn = get_insns ();
7364 pop_topmost_sequence ();
7366 for (; insn; insn = NEXT_INSN (insn))
7367 if (INSN_P (insn)
7368 && GET_CODE (PATTERN (insn)) != USE
7369 && GET_CODE (PATTERN (insn)) != CLOBBER
7370 && get_attr_usegp (insn))
7371 return 1;
7373 return 0;
7377 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7378 sequences. */
7380 static rtx
7381 set_frame_related_p (void)
7383 rtx seq = get_insns ();
7384 rtx insn;
7386 end_sequence ();
7388 if (!seq)
7389 return NULL_RTX;
7391 if (INSN_P (seq))
7393 insn = seq;
7394 while (insn != NULL_RTX)
7396 RTX_FRAME_RELATED_P (insn) = 1;
7397 insn = NEXT_INSN (insn);
7399 seq = emit_insn (seq);
7401 else
7403 seq = emit_insn (seq);
7404 RTX_FRAME_RELATED_P (seq) = 1;
7406 return seq;
7409 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
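/* Usage sketch: wrapping an emitter call as FRP (emit_move_insn (x, y))
   collects whatever insns the call generates into a sequence and re-emits
   them with RTX_FRAME_RELATED_P set, which is how the prologue code below
   tags frame-related moves for the unwinder.  */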
7411 /* Generates a store with the proper unwind info attached. VALUE is
7412 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7413 contains SP+FRAME_BIAS, and that is the unwind info that should be
7414 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7415 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
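/* For example, the prologue below calls
   emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset) to store $26 at
   sa_reg + reg_offset while describing the slot to the unwinder as
   sp + sa_bias + reg_offset.  */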
7417 static void
7418 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7419 HOST_WIDE_INT base_ofs, rtx frame_reg)
7421 rtx addr, mem, insn;
7423 addr = plus_constant (base_reg, base_ofs);
7424 mem = gen_rtx_MEM (DImode, addr);
7425 set_mem_alias_set (mem, alpha_sr_alias_set);
7427 insn = emit_move_insn (mem, value);
7428 RTX_FRAME_RELATED_P (insn) = 1;
7430 if (frame_bias || value != frame_reg)
7432 if (frame_bias)
7434 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7435 mem = gen_rtx_MEM (DImode, addr);
7438 REG_NOTES (insn)
7439 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7440 gen_rtx_SET (VOIDmode, mem, frame_reg),
7441 REG_NOTES (insn));
7445 static void
7446 emit_frame_store (unsigned int regno, rtx base_reg,
7447 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7449 rtx reg = gen_rtx_REG (DImode, regno);
7450 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7453 /* Write function prologue. */
7455 /* On vms we have two kinds of functions:
7457 - stack frame (PROC_STACK)
7458 these are 'normal' functions with local vars and which are
7459 calling other functions
7460 - register frame (PROC_REGISTER)
7461 keeps all data in registers, needs no stack
7463 We must pass this to the assembler so it can generate the
7464 proper pdsc (procedure descriptor)
7465 This is done with the '.pdesc' command.
7467 On non-VMS targets, we don't really differentiate between the two, as we can
7468 simply allocate stack without saving registers. */
7470 void
7471 alpha_expand_prologue (void)
7473 /* Registers to save. */
7474 unsigned long imask = 0;
7475 unsigned long fmask = 0;
7476 /* Stack space needed for pushing registers clobbered by us. */
7477 HOST_WIDE_INT sa_size;
7478 /* Complete stack size needed. */
7479 HOST_WIDE_INT frame_size;
7480 /* Offset from base reg to register save area. */
7481 HOST_WIDE_INT reg_offset;
7482 rtx sa_reg;
7483 int i;
7485 sa_size = alpha_sa_size ();
7487 frame_size = get_frame_size ();
7488 if (TARGET_ABI_OPEN_VMS)
7489 frame_size = ALPHA_ROUND (sa_size
7490 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7491 + frame_size
7492 + current_function_pretend_args_size);
7493 else if (TARGET_ABI_UNICOSMK)
7494 /* We have to allocate space for the DSIB if we generate a frame. */
7495 frame_size = ALPHA_ROUND (sa_size
7496 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7497 + ALPHA_ROUND (frame_size
7498 + current_function_outgoing_args_size);
7499 else
7500 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7501 + sa_size
7502 + ALPHA_ROUND (frame_size
7503 + current_function_pretend_args_size));
7505 if (TARGET_ABI_OPEN_VMS)
7506 reg_offset = 8;
7507 else
7508 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7510 alpha_sa_mask (&imask, &fmask);
7512 /* Emit an insn to reload GP, if needed. */
7513 if (TARGET_ABI_OSF)
7515 alpha_function_needs_gp = alpha_does_function_need_gp ();
7516 if (alpha_function_needs_gp)
7517 emit_insn (gen_prologue_ldgp ());
7520 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7521 the call to mcount ourselves, rather than having the linker do it
7522 magically in response to -pg. Since _mcount has special linkage,
7523 don't represent the call as a call. */
7524 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7525 emit_insn (gen_prologue_mcount ());
7527 if (TARGET_ABI_UNICOSMK)
7528 unicosmk_gen_dsib (&imask);
7530 /* Adjust the stack by the frame size. If the frame size is > 4096
7531 bytes, we need to be sure we probe somewhere in the first and last
7532 4096 bytes (we can probably get away without the latter test) and
7533 every 8192 bytes in between. If the frame size is > 32768, we
7534 do this in a loop. Otherwise, we generate the explicit probe
7535 instructions.
7537 Note that we are only allowed to adjust sp once in the prologue. */
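/* As a concrete illustration on the non-UNICOSMK path, an assumed frame
   size of 20000 bytes takes the first branch below: probes are emitted at
   sp-4096 and sp-12288 by the do/while, followed by the single sp
   adjustment of -20000.  Only frames larger than 32768 bytes fall through
   to the probe loop that uses $22 and $23.  */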
7539 if (frame_size <= 32768)
7541 if (frame_size > 4096)
7543 int probed = 4096;
7546 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7547 ? -probed + 64
7548 : -probed)));
7549 while ((probed += 8192) < frame_size);
7551 /* We only have to do this probe if we aren't saving registers. */
7552 if (sa_size == 0 && probed + 4096 < frame_size)
7553 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7556 if (frame_size != 0)
7557 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7558 GEN_INT (TARGET_ABI_UNICOSMK
7559 ? -frame_size + 64
7560 : -frame_size))));
7562 else
7564 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7565 number of 8192 byte blocks to probe. We then probe each block
7566 in the loop and then set SP to the proper location. If the
7567 amount remaining is > 4096, we have to do one more probe if we
7568 are not saving any registers. */
7570 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7571 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7572 rtx ptr = gen_rtx_REG (DImode, 22);
7573 rtx count = gen_rtx_REG (DImode, 23);
7574 rtx seq;
7576 emit_move_insn (count, GEN_INT (blocks));
7577 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7578 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7580 /* Because of the difficulty in emitting a new basic block this
7581 late in the compilation, generate the loop as a single insn. */
7582 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7584 if (leftover > 4096 && sa_size == 0)
7586 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7587 MEM_VOLATILE_P (last) = 1;
7588 emit_move_insn (last, const0_rtx);
7591 if (TARGET_ABI_WINDOWS_NT)
7593 /* For NT stack unwind (done by 'reverse execution'), it's
7594 not OK to take the result of a loop, even though the value
7595 is already in ptr, so we reload it via a single operation
7596 and subtract it from sp.
7598 Yes, that's correct -- we have to reload the whole constant
7599 into a temporary via ldah+lda then subtract from sp. */
7601 HOST_WIDE_INT lo, hi;
7602 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7603 hi = frame_size - lo;
7605 emit_move_insn (ptr, GEN_INT (hi));
7606 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7607 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7608 ptr));
7610 else
7612 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7613 GEN_INT (-leftover)));
7616 /* This alternative is special, because the DWARF code cannot
7617 possibly intuit through the loop above. So we invent this
7618 note for it to look at instead. */
7619 RTX_FRAME_RELATED_P (seq) = 1;
7620 REG_NOTES (seq)
7621 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7622 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7623 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7624 GEN_INT (TARGET_ABI_UNICOSMK
7625 ? -frame_size + 64
7626 : -frame_size))),
7627 REG_NOTES (seq));
7630 if (!TARGET_ABI_UNICOSMK)
7632 HOST_WIDE_INT sa_bias = 0;
7634 /* Cope with very large offsets to the register save area. */
7635 sa_reg = stack_pointer_rtx;
7636 if (reg_offset + sa_size > 0x8000)
7638 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7639 rtx sa_bias_rtx;
7641 if (low + sa_size <= 0x8000)
7642 sa_bias = reg_offset - low, reg_offset = low;
7643 else
7644 sa_bias = reg_offset, reg_offset = 0;
7646 sa_reg = gen_rtx_REG (DImode, 24);
7647 sa_bias_rtx = GEN_INT (sa_bias);
7649 if (add_operand (sa_bias_rtx, DImode))
7650 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7651 else
7653 emit_move_insn (sa_reg, sa_bias_rtx);
7654 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7658 /* Save regs in stack order. Beginning with VMS PV. */
7659 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7660 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7662 /* Save register RA next. */
7663 if (imask & (1UL << REG_RA))
7665 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7666 imask &= ~(1UL << REG_RA);
7667 reg_offset += 8;
7670 /* Now save any other registers required to be saved. */
7671 for (i = 0; i < 31; i++)
7672 if (imask & (1UL << i))
7674 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7675 reg_offset += 8;
7678 for (i = 0; i < 31; i++)
7679 if (fmask & (1UL << i))
7681 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7682 reg_offset += 8;
7685 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7687 /* The standard frame on the T3E includes space for saving registers.
7688 We just have to use it. We don't have to save the return address and
7689 the old frame pointer here - they are saved in the DSIB. */
7691 reg_offset = -56;
7692 for (i = 9; i < 15; i++)
7693 if (imask & (1UL << i))
7695 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7696 reg_offset -= 8;
7698 for (i = 2; i < 10; i++)
7699 if (fmask & (1UL << i))
7701 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7702 reg_offset -= 8;
7706 if (TARGET_ABI_OPEN_VMS)
7708 if (alpha_procedure_type == PT_REGISTER)
7709 /* Register frame procedures save the fp.
7710 ?? Ought to have a dwarf2 save for this. */
7711 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7712 hard_frame_pointer_rtx);
7714 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7715 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7716 gen_rtx_REG (DImode, REG_PV)));
7718 if (alpha_procedure_type != PT_NULL
7719 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7720 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7722 /* If we have to allocate space for outgoing args, do it now. */
7723 if (current_function_outgoing_args_size != 0)
7725 rtx seq
7726 = emit_move_insn (stack_pointer_rtx,
7727 plus_constant
7728 (hard_frame_pointer_rtx,
7729 - (ALPHA_ROUND
7730 (current_function_outgoing_args_size))));
7732 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7733 if ! frame_pointer_needed. Setting the bit will change the CFA
7734 computation rule to use sp again, which would be wrong if we had
7735 frame_pointer_needed, as this means sp might move unpredictably
7736 later on.
7738 Also, note that
7739 frame_pointer_needed
7740 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7741 and
7742 current_function_outgoing_args_size != 0
7743 => alpha_procedure_type != PT_NULL,
7745 so when we are not setting the bit here, we are guaranteed to
7746 have emitted an FRP frame pointer update just before. */
7747 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7750 else if (!TARGET_ABI_UNICOSMK)
7752 /* If we need a frame pointer, set it from the stack pointer. */
7753 if (frame_pointer_needed)
7755 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7756 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7757 else
7758 /* This must always be the last instruction in the
7759 prologue, thus we emit a special move + clobber. */
7760 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7761 stack_pointer_rtx, sa_reg)));
7765 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7766 the prologue, for exception handling reasons, we cannot do this for
7767 any insn that might fault. We could prevent this for mems with a
7768 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7769 have to prevent all such scheduling with a blockage.
7771 Linux, on the other hand, never bothered to implement OSF/1's
7772 exception handling, and so doesn't care about such things. Anyone
7773 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7775 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7776 emit_insn (gen_blockage ());
7779 /* Count the number of .file directives, so that .loc is up to date. */
7780 int num_source_filenames = 0;
7782 /* Output the textual info surrounding the prologue. */
7784 void
7785 alpha_start_function (FILE *file, const char *fnname,
7786 tree decl ATTRIBUTE_UNUSED)
7788 unsigned long imask = 0;
7789 unsigned long fmask = 0;
7790 /* Stack space needed for pushing registers clobbered by us. */
7791 HOST_WIDE_INT sa_size;
7792 /* Complete stack size needed. */
7793 unsigned HOST_WIDE_INT frame_size;
7794 /* Offset from base reg to register save area. */
7795 HOST_WIDE_INT reg_offset;
7796 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7797 int i;
7799 /* Don't emit an extern directive for functions defined in the same file. */
7800 if (TARGET_ABI_UNICOSMK)
7802 tree name_tree;
7803 name_tree = get_identifier (fnname);
7804 TREE_ASM_WRITTEN (name_tree) = 1;
7807 alpha_fnname = fnname;
7808 sa_size = alpha_sa_size ();
7810 frame_size = get_frame_size ();
7811 if (TARGET_ABI_OPEN_VMS)
7812 frame_size = ALPHA_ROUND (sa_size
7813 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7814 + frame_size
7815 + current_function_pretend_args_size);
7816 else if (TARGET_ABI_UNICOSMK)
7817 frame_size = ALPHA_ROUND (sa_size
7818 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7819 + ALPHA_ROUND (frame_size
7820 + current_function_outgoing_args_size);
7821 else
7822 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7823 + sa_size
7824 + ALPHA_ROUND (frame_size
7825 + current_function_pretend_args_size));
7827 if (TARGET_ABI_OPEN_VMS)
7828 reg_offset = 8;
7829 else
7830 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7832 alpha_sa_mask (&imask, &fmask);
7834 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7835 We have to do that before the .ent directive as we cannot switch
7836 files within procedures with native ecoff because line numbers are
7837 linked to procedure descriptors.
7838 Outputting the lineno helps debugging of one line functions as they
7839 would otherwise get no line number at all. Please note that we would
7840 like to put out last_linenum from final.c, but it is not accessible. */
7842 if (write_symbols == SDB_DEBUG)
7844 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7845 ASM_OUTPUT_SOURCE_FILENAME (file,
7846 DECL_SOURCE_FILE (current_function_decl));
7847 #endif
7848 #ifdef SDB_OUTPUT_SOURCE_LINE
7849 if (debug_info_level != DINFO_LEVEL_TERSE)
7850 SDB_OUTPUT_SOURCE_LINE (file,
7851 DECL_SOURCE_LINE (current_function_decl));
7852 #endif
7855 /* Issue function start and label. */
7856 if (TARGET_ABI_OPEN_VMS
7857 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7859 fputs ("\t.ent ", file);
7860 assemble_name (file, fnname);
7861 putc ('\n', file);
7863 /* If the function needs GP, we'll write the "..ng" label there.
7864 Otherwise, do it here. */
7865 if (TARGET_ABI_OSF
7866 && ! alpha_function_needs_gp
7867 && ! current_function_is_thunk)
7869 putc ('$', file);
7870 assemble_name (file, fnname);
7871 fputs ("..ng:\n", file);
7875 strcpy (entry_label, fnname);
7876 if (TARGET_ABI_OPEN_VMS)
7877 strcat (entry_label, "..en");
7879 /* For public functions, the label must be globalized by appending an
7880 additional colon. */
7881 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7882 strcat (entry_label, ":");
7884 ASM_OUTPUT_LABEL (file, entry_label);
7885 inside_function = TRUE;
7887 if (TARGET_ABI_OPEN_VMS)
7888 fprintf (file, "\t.base $%d\n", vms_base_regno);
7890 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7891 && !flag_inhibit_size_directive)
7893 /* Set flags in procedure descriptor to request IEEE-conformant
7894 math-library routines. The value we set it to is PDSC_EXC_IEEE
7895 (/usr/include/pdsc.h). */
7896 fputs ("\t.eflag 48\n", file);
7899 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7900 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7901 alpha_arg_offset = -frame_size + 48;
7903 /* Describe our frame. If the frame size is larger than an integer,
7904 print it as zero to avoid an assembler error. We won't be
7905 properly describing such a frame, but that's the best we can do. */
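/* For instance, a typical small OSF function with a 96-byte frame, no
   frame pointer and no pretend args would get a directive along the lines
   of "\t.frame $30,96,$26,0" (unwind register, frame size, return-address
   register, pretend-args size) from the code below.  */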
7906 if (TARGET_ABI_UNICOSMK)
7908 else if (TARGET_ABI_OPEN_VMS)
7909 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7910 HOST_WIDE_INT_PRINT_DEC "\n",
7911 vms_unwind_regno,
7912 frame_size >= (1UL << 31) ? 0 : frame_size,
7913 reg_offset);
7914 else if (!flag_inhibit_size_directive)
7915 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7916 (frame_pointer_needed
7917 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7918 frame_size >= (1UL << 31) ? 0 : frame_size,
7919 current_function_pretend_args_size);
7921 /* Describe which registers were spilled. */
7922 if (TARGET_ABI_UNICOSMK)
7924 else if (TARGET_ABI_OPEN_VMS)
7926 if (imask)
7927 /* ??? Does VMS care if mask contains ra? The old code didn't
7928 set it, so I don't here. */
7929 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7930 if (fmask)
7931 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7932 if (alpha_procedure_type == PT_REGISTER)
7933 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7935 else if (!flag_inhibit_size_directive)
7937 if (imask)
7939 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7940 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7942 for (i = 0; i < 32; ++i)
7943 if (imask & (1UL << i))
7944 reg_offset += 8;
7947 if (fmask)
7948 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7949 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7952 #if TARGET_ABI_OPEN_VMS
7953 /* Ifdef'ed because link_section is only available then. */
7954 readonly_data_section ();
7955 fprintf (file, "\t.align 3\n");
7956 assemble_name (file, fnname); fputs ("..na:\n", file);
7957 fputs ("\t.ascii \"", file);
7958 assemble_name (file, fnname);
7959 fputs ("\\0\"\n", file);
7960 alpha_need_linkage (fnname, 1);
7961 text_section ();
7962 #endif
7965 /* Emit the .prologue note at the scheduled end of the prologue. */
7967 static void
7968 alpha_output_function_end_prologue (FILE *file)
7970 if (TARGET_ABI_UNICOSMK)
7972 else if (TARGET_ABI_OPEN_VMS)
7973 fputs ("\t.prologue\n", file);
7974 else if (TARGET_ABI_WINDOWS_NT)
7975 fputs ("\t.prologue 0\n", file);
7976 else if (!flag_inhibit_size_directive)
7977 fprintf (file, "\t.prologue %d\n",
7978 alpha_function_needs_gp || current_function_is_thunk);
7981 /* Write function epilogue. */
7983 /* ??? At some point we will want to support full unwind, and so will
7984 need to mark the epilogue as well. At the moment, we just confuse
7985 dwarf2out. */
7986 #undef FRP
7987 #define FRP(exp) exp
7989 void
7990 alpha_expand_epilogue (void)
7992 /* Registers to save. */
7993 unsigned long imask = 0;
7994 unsigned long fmask = 0;
7995 /* Stack space needed for pushing registers clobbered by us. */
7996 HOST_WIDE_INT sa_size;
7997 /* Complete stack size needed. */
7998 HOST_WIDE_INT frame_size;
7999 /* Offset from base reg to register save area. */
8000 HOST_WIDE_INT reg_offset;
8001 int fp_is_frame_pointer, fp_offset;
8002 rtx sa_reg, sa_reg_exp = NULL;
8003 rtx sp_adj1, sp_adj2, mem;
8004 rtx eh_ofs;
8005 int i;
8007 sa_size = alpha_sa_size ();
8009 frame_size = get_frame_size ();
8010 if (TARGET_ABI_OPEN_VMS)
8011 frame_size = ALPHA_ROUND (sa_size
8012 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8013 + frame_size
8014 + current_function_pretend_args_size);
8015 else if (TARGET_ABI_UNICOSMK)
8016 frame_size = ALPHA_ROUND (sa_size
8017 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8018 + ALPHA_ROUND (frame_size
8019 + current_function_outgoing_args_size);
8020 else
8021 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8022 + sa_size
8023 + ALPHA_ROUND (frame_size
8024 + current_function_pretend_args_size));
8026 if (TARGET_ABI_OPEN_VMS)
8028 if (alpha_procedure_type == PT_STACK)
8029 reg_offset = 8;
8030 else
8031 reg_offset = 0;
8033 else
8034 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8036 alpha_sa_mask (&imask, &fmask);
8038 fp_is_frame_pointer
8039 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8040 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8041 fp_offset = 0;
8042 sa_reg = stack_pointer_rtx;
8044 if (current_function_calls_eh_return)
8045 eh_ofs = EH_RETURN_STACKADJ_RTX;
8046 else
8047 eh_ofs = NULL_RTX;
8049 if (!TARGET_ABI_UNICOSMK && sa_size)
8051 /* If we have a frame pointer, restore SP from it. */
8052 if ((TARGET_ABI_OPEN_VMS
8053 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8054 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8055 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8057 /* Cope with very large offsets to the register save area. */
8058 if (reg_offset + sa_size > 0x8000)
8060 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8061 HOST_WIDE_INT bias;
8063 if (low + sa_size <= 0x8000)
8064 bias = reg_offset - low, reg_offset = low;
8065 else
8066 bias = reg_offset, reg_offset = 0;
8068 sa_reg = gen_rtx_REG (DImode, 22);
8069 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8071 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8074 /* Restore registers in order, excepting a true frame pointer. */
8076 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8077 if (! eh_ofs)
8078 set_mem_alias_set (mem, alpha_sr_alias_set);
8079 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8081 reg_offset += 8;
8082 imask &= ~(1UL << REG_RA);
8084 for (i = 0; i < 31; ++i)
8085 if (imask & (1UL << i))
8087 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8088 fp_offset = reg_offset;
8089 else
8091 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8092 set_mem_alias_set (mem, alpha_sr_alias_set);
8093 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8095 reg_offset += 8;
8098 for (i = 0; i < 31; ++i)
8099 if (fmask & (1UL << i))
8101 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8102 set_mem_alias_set (mem, alpha_sr_alias_set);
8103 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8104 reg_offset += 8;
8107 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8109 /* Restore callee-saved general-purpose registers. */
8111 reg_offset = -56;
8113 for (i = 9; i < 15; i++)
8114 if (imask & (1UL << i))
8116 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8117 reg_offset));
8118 set_mem_alias_set (mem, alpha_sr_alias_set);
8119 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8120 reg_offset -= 8;
8123 for (i = 2; i < 10; i++)
8124 if (fmask & (1UL << i))
8126 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8127 reg_offset));
8128 set_mem_alias_set (mem, alpha_sr_alias_set);
8129 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8130 reg_offset -= 8;
8133 /* Restore the return address from the DSIB. */
8135 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8136 set_mem_alias_set (mem, alpha_sr_alias_set);
8137 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8140 if (frame_size || eh_ofs)
8142 sp_adj1 = stack_pointer_rtx;
8144 if (eh_ofs)
8146 sp_adj1 = gen_rtx_REG (DImode, 23);
8147 emit_move_insn (sp_adj1,
8148 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8151 /* If the stack size is large, begin computation into a temporary
8152 register so as not to interfere with a potential fp restore,
8153 which must be consecutive with an SP restore. */
8154 if (frame_size < 32768
8155 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8156 sp_adj2 = GEN_INT (frame_size);
8157 else if (TARGET_ABI_UNICOSMK)
8159 sp_adj1 = gen_rtx_REG (DImode, 23);
8160 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8161 sp_adj2 = const0_rtx;
8163 else if (frame_size < 0x40007fffL)
8165 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8167 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8168 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8169 sp_adj1 = sa_reg;
8170 else
8172 sp_adj1 = gen_rtx_REG (DImode, 23);
8173 FRP (emit_move_insn (sp_adj1, sp_adj2));
8175 sp_adj2 = GEN_INT (low);
8177 else
8179 rtx tmp = gen_rtx_REG (DImode, 23);
8180 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8181 3, false));
8182 if (!sp_adj2)
8184 /* We can't drop new things to memory this late, afaik,
8185 so build it up by pieces. */
8186 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8187 -(frame_size < 0)));
8188 gcc_assert (sp_adj2);
8192 /* From now on, things must be in order. So emit blockages. */
8194 /* Restore the frame pointer. */
8195 if (TARGET_ABI_UNICOSMK)
8197 emit_insn (gen_blockage ());
8198 mem = gen_rtx_MEM (DImode,
8199 plus_constant (hard_frame_pointer_rtx, -16));
8200 set_mem_alias_set (mem, alpha_sr_alias_set);
8201 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8203 else if (fp_is_frame_pointer)
8205 emit_insn (gen_blockage ());
8206 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8207 set_mem_alias_set (mem, alpha_sr_alias_set);
8208 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8210 else if (TARGET_ABI_OPEN_VMS)
8212 emit_insn (gen_blockage ());
8213 FRP (emit_move_insn (hard_frame_pointer_rtx,
8214 gen_rtx_REG (DImode, vms_save_fp_regno)));
8217 /* Restore the stack pointer. */
8218 emit_insn (gen_blockage ());
8219 if (sp_adj2 == const0_rtx)
8220 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8221 else
8222 FRP (emit_move_insn (stack_pointer_rtx,
8223 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8225 else
8227 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8229 emit_insn (gen_blockage ());
8230 FRP (emit_move_insn (hard_frame_pointer_rtx,
8231 gen_rtx_REG (DImode, vms_save_fp_regno)));
8233 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8235 /* Decrement the frame pointer if the function does not have a
8236 frame. */
8238 emit_insn (gen_blockage ());
8239 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8240 hard_frame_pointer_rtx, constm1_rtx)));
8245 /* Output the rest of the textual info surrounding the epilogue. */
8247 void
8248 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8250 #if TARGET_ABI_OPEN_VMS
8251 alpha_write_linkage (file, fnname, decl);
8252 #endif
8254 /* End the function. */
8255 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8257 fputs ("\t.end ", file);
8258 assemble_name (file, fnname);
8259 putc ('\n', file);
8261 inside_function = FALSE;
8263 /* Output jump tables and the static subroutine information block. */
8264 if (TARGET_ABI_UNICOSMK)
8266 unicosmk_output_ssib (file, fnname);
8267 unicosmk_output_deferred_case_vectors (file);
8271 #if TARGET_ABI_OSF
8272 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8274 In order to avoid the hordes of differences between generated code
8275 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8276 lots of code loading up large constants, generate rtl and emit it
8277 instead of going straight to text.
8279 Not sure why this idea hasn't been explored before... */
8281 static void
8282 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8283 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8284 tree function)
8286 HOST_WIDE_INT hi, lo;
8287 rtx this, insn, funexp;
8289 reset_block_changes ();
8291 /* We always require a valid GP. */
8292 emit_insn (gen_prologue_ldgp ());
8293 emit_note (NOTE_INSN_PROLOGUE_END);
8295 /* Find the "this" pointer. If the function returns a structure,
8296 the structure return pointer is in $16. */
8297 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8298 this = gen_rtx_REG (Pmode, 17);
8299 else
8300 this = gen_rtx_REG (Pmode, 16);
8302 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8303 entire constant for the add. */
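/* Illustrative note (added comment, not part of the original source):
   for delta = 0x12345 the split below gives lo = 0x2345 and
   hi = 0x10000, so hi + lo == delta and the two adddi3 insns become an
   ldah/lda pair.  For delta = 0x18000, lo sign-extends to -0x8000 and
   hi becomes 0x20000, which still sums to delta.  Only when the value
   does not fit this 32-bit form do we fall back to
   alpha_emit_set_long_const.  */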
8304 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8305 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8306 if (hi + lo == delta)
8308 if (hi)
8309 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8310 if (lo)
8311 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8313 else
8315 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8316 delta, -(delta < 0));
8317 emit_insn (gen_adddi3 (this, this, tmp));
8320 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8321 if (vcall_offset)
8323 rtx tmp, tmp2;
8325 tmp = gen_rtx_REG (Pmode, 0);
8326 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8328 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8329 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8330 if (hi + lo == vcall_offset)
8332 if (hi)
8333 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8335 else
8337 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8338 vcall_offset, -(vcall_offset < 0));
8339 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8340 lo = 0;
8342 if (lo)
8343 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8344 else
8345 tmp2 = tmp;
8346 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8348 emit_insn (gen_adddi3 (this, this, tmp));
8351 /* Generate a tail call to the target function. */
8352 if (! TREE_USED (function))
8354 assemble_external (function);
8355 TREE_USED (function) = 1;
8357 funexp = XEXP (DECL_RTL (function), 0);
8358 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8359 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8360 SIBLING_CALL_P (insn) = 1;
8362 /* Run just enough of rest_of_compilation to get the insns emitted.
8363 There's not really enough bulk here to make other passes such as
8364 instruction scheduling worth while. Note that use_thunk calls
8365 assemble_start_function and assemble_end_function. */
8366 insn = get_insns ();
8367 insn_locators_initialize ();
8368 shorten_branches (insn);
8369 final_start_function (insn, file, 1);
8370 final (insn, file, 1);
8371 final_end_function ();
8373 #endif /* TARGET_ABI_OSF */
8375 /* Debugging support. */
8377 #include "gstab.h"
8379 /* Count the number of sdb related labels generated (to find block
8380 start and end boundaries). */
8382 int sdb_label_count = 0;
8384 /* Name of the file containing the current function. */
8386 static const char *current_function_file = "";
8388 /* Offsets to alpha virtual arg/local debugging pointers. */
8390 long alpha_arg_offset;
8391 long alpha_auto_offset;
8393 /* Emit a new filename to a stream. */
8395 void
8396 alpha_output_filename (FILE *stream, const char *name)
8398 static int first_time = TRUE;
8400 if (first_time)
8402 first_time = FALSE;
8403 ++num_source_filenames;
8404 current_function_file = name;
8405 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8406 output_quoted_string (stream, name);
8407 fprintf (stream, "\n");
8408 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8409 fprintf (stream, "\t#@stabs\n");
8412 else if (write_symbols == DBX_DEBUG)
8413 /* dbxout.c will emit an appropriate .stabs directive. */
8414 return;
8416 else if (name != current_function_file
8417 && strcmp (name, current_function_file) != 0)
8419 if (inside_function && ! TARGET_GAS)
8420 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8421 else
8423 ++num_source_filenames;
8424 current_function_file = name;
8425 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8428 output_quoted_string (stream, name);
8429 fprintf (stream, "\n");
8433 /* Structure to show the current status of registers and memory. */
8435 struct shadow_summary
8437 struct {
8438 unsigned int i : 31; /* Mask of int regs */
8439 unsigned int fp : 31; /* Mask of fp regs */
8440 unsigned int mem : 1; /* mem == imem | fpmem */
8441 } used, defd;
8444 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8445 to the summary structure. SET is nonzero if the insn is setting the
8446 object, otherwise zero. */
8448 static void
8449 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8451 const char *format_ptr;
8452 int i, j;
8454 if (x == 0)
8455 return;
8457 switch (GET_CODE (x))
8459 /* ??? Note that this case would be incorrect if the Alpha had a
8460 ZERO_EXTRACT in SET_DEST. */
8461 case SET:
8462 summarize_insn (SET_SRC (x), sum, 0);
8463 summarize_insn (SET_DEST (x), sum, 1);
8464 break;
8466 case CLOBBER:
8467 summarize_insn (XEXP (x, 0), sum, 1);
8468 break;
8470 case USE:
8471 summarize_insn (XEXP (x, 0), sum, 0);
8472 break;
8474 case ASM_OPERANDS:
8475 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8476 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8477 break;
8479 case PARALLEL:
8480 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8481 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8482 break;
8484 case SUBREG:
8485 summarize_insn (SUBREG_REG (x), sum, 0);
8486 break;
8488 case REG:
8490 int regno = REGNO (x);
8491 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8493 if (regno == 31 || regno == 63)
8494 break;
8496 if (set)
8498 if (regno < 32)
8499 sum->defd.i |= mask;
8500 else
8501 sum->defd.fp |= mask;
8503 else
8505 if (regno < 32)
8506 sum->used.i |= mask;
8507 else
8508 sum->used.fp |= mask;
8511 break;
8513 case MEM:
8514 if (set)
8515 sum->defd.mem = 1;
8516 else
8517 sum->used.mem = 1;
8519 /* Find the regs used in memory address computation: */
8520 summarize_insn (XEXP (x, 0), sum, 0);
8521 break;
8523 case CONST_INT: case CONST_DOUBLE:
8524 case SYMBOL_REF: case LABEL_REF: case CONST:
8525 case SCRATCH: case ASM_INPUT:
8526 break;
8528 /* Handle common unary and binary ops for efficiency. */
8529 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8530 case MOD: case UDIV: case UMOD: case AND: case IOR:
8531 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8532 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8533 case NE: case EQ: case GE: case GT: case LE:
8534 case LT: case GEU: case GTU: case LEU: case LTU:
8535 summarize_insn (XEXP (x, 0), sum, 0);
8536 summarize_insn (XEXP (x, 1), sum, 0);
8537 break;
8539 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8540 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8541 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8542 case SQRT: case FFS:
8543 summarize_insn (XEXP (x, 0), sum, 0);
8544 break;
8546 default:
8547 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8548 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8549 switch (format_ptr[i])
8551 case 'e':
8552 summarize_insn (XEXP (x, i), sum, 0);
8553 break;
8555 case 'E':
8556 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8557 summarize_insn (XVECEXP (x, i, j), sum, 0);
8558 break;
8560 case 'i':
8561 break;
8563 default:
8564 gcc_unreachable ();
8569 /* Ensure a sufficient number of `trapb' insns are in the code when
8570 the user requests code with a trap precision of functions or
8571 instructions.
8573 In naive mode, when the user requests a trap-precision of
8574 "instruction", a trapb is needed after every instruction that may
8575 generate a trap. This ensures that the code is resumption safe but
8576 it is also slow.
8578 When optimizations are turned on, we delay issuing a trapb as long
8579 as possible. In this context, a trap shadow is the sequence of
8580 instructions that starts with a (potentially) trap generating
8581 instruction and extends to the next trapb or call_pal instruction
8582 (but GCC never generates call_pal by itself). We can delay (and
8583 therefore sometimes omit) a trapb subject to the following
8584 conditions:
8586 (a) On entry to the trap shadow, if any Alpha register or memory
8587 location contains a value that is used as an operand value by some
8588 instruction in the trap shadow (live on entry), then no instruction
8589 in the trap shadow may modify the register or memory location.
8591 (b) Within the trap shadow, the computation of the base register
8592 for a memory load or store instruction may not involve using the
8593 result of an instruction that might generate an UNPREDICTABLE
8594 result.
8596 (c) Within the trap shadow, no register may be used more than once
8597 as a destination register. (This is to make life easier for the
8598 trap-handler.)
8600 (d) The trap shadow may not include any branch instructions. */
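/* A minimal sketch of how the conditions above map onto the bitmask
   bookkeeping below (added for illustration; the loop itself is the
   authoritative logic): condition (c) is detected as
       sum.defd.i & shadow.defd.i   (or the .fp equivalent),
   i.e. the insn defines a register already defined inside the shadow,
   while condition (a) is detected as
       sum.defd.i & shadow.used.i   (likewise for .fp and .mem),
   i.e. the insn defines something that was used, and hence live, on
   entry to the shadow.  Either test forces a trapb and resets the
   shadow state.  */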
8602 static void
8603 alpha_handle_trap_shadows (void)
8605 struct shadow_summary shadow;
8606 int trap_pending, exception_nesting;
8607 rtx i, n;
8609 trap_pending = 0;
8610 exception_nesting = 0;
8611 shadow.used.i = 0;
8612 shadow.used.fp = 0;
8613 shadow.used.mem = 0;
8614 shadow.defd = shadow.used;
8616 for (i = get_insns (); i ; i = NEXT_INSN (i))
8618 if (GET_CODE (i) == NOTE)
8620 switch (NOTE_LINE_NUMBER (i))
8622 case NOTE_INSN_EH_REGION_BEG:
8623 exception_nesting++;
8624 if (trap_pending)
8625 goto close_shadow;
8626 break;
8628 case NOTE_INSN_EH_REGION_END:
8629 exception_nesting--;
8630 if (trap_pending)
8631 goto close_shadow;
8632 break;
8634 case NOTE_INSN_EPILOGUE_BEG:
8635 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8636 goto close_shadow;
8637 break;
8640 else if (trap_pending)
8642 if (alpha_tp == ALPHA_TP_FUNC)
8644 if (GET_CODE (i) == JUMP_INSN
8645 && GET_CODE (PATTERN (i)) == RETURN)
8646 goto close_shadow;
8648 else if (alpha_tp == ALPHA_TP_INSN)
8650 if (optimize > 0)
8652 struct shadow_summary sum;
8654 sum.used.i = 0;
8655 sum.used.fp = 0;
8656 sum.used.mem = 0;
8657 sum.defd = sum.used;
8659 switch (GET_CODE (i))
8661 case INSN:
8662 /* Annoyingly, get_attr_trap will die on these. */
8663 if (GET_CODE (PATTERN (i)) == USE
8664 || GET_CODE (PATTERN (i)) == CLOBBER)
8665 break;
8667 summarize_insn (PATTERN (i), &sum, 0);
8669 if ((sum.defd.i & shadow.defd.i)
8670 || (sum.defd.fp & shadow.defd.fp))
8672 /* (c) would be violated */
8673 goto close_shadow;
8676 /* Combine shadow with summary of current insn: */
8677 shadow.used.i |= sum.used.i;
8678 shadow.used.fp |= sum.used.fp;
8679 shadow.used.mem |= sum.used.mem;
8680 shadow.defd.i |= sum.defd.i;
8681 shadow.defd.fp |= sum.defd.fp;
8682 shadow.defd.mem |= sum.defd.mem;
8684 if ((sum.defd.i & shadow.used.i)
8685 || (sum.defd.fp & shadow.used.fp)
8686 || (sum.defd.mem & shadow.used.mem))
8688 /* (a) would be violated (also takes care of (b)) */
8689 gcc_assert (get_attr_trap (i) != TRAP_YES
8690 || (!(sum.defd.i & sum.used.i)
8691 && !(sum.defd.fp & sum.used.fp)));
8693 goto close_shadow;
8695 break;
8697 case JUMP_INSN:
8698 case CALL_INSN:
8699 case CODE_LABEL:
8700 goto close_shadow;
8702 default:
8703 gcc_unreachable ();
8706 else
8708 close_shadow:
8709 n = emit_insn_before (gen_trapb (), i);
8710 PUT_MODE (n, TImode);
8711 PUT_MODE (i, TImode);
8712 trap_pending = 0;
8713 shadow.used.i = 0;
8714 shadow.used.fp = 0;
8715 shadow.used.mem = 0;
8716 shadow.defd = shadow.used;
8721 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8722 && GET_CODE (i) == INSN
8723 && GET_CODE (PATTERN (i)) != USE
8724 && GET_CODE (PATTERN (i)) != CLOBBER
8725 && get_attr_trap (i) == TRAP_YES)
8727 if (optimize && !trap_pending)
8728 summarize_insn (PATTERN (i), &shadow, 0);
8729 trap_pending = 1;
8734 /* Alpha can only issue instruction groups simultaneously if they are
8735 suitably aligned. This is very processor-specific. */
8736 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8737 that are marked "fake". These instructions do not exist on that target,
8738 but it is possible to see these insns with deranged combinations of
8739 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8740 choose a result at random. */
8742 enum alphaev4_pipe {
8743 EV4_STOP = 0,
8744 EV4_IB0 = 1,
8745 EV4_IB1 = 2,
8746 EV4_IBX = 4
8749 enum alphaev5_pipe {
8750 EV5_STOP = 0,
8751 EV5_NONE = 1,
8752 EV5_E01 = 2,
8753 EV5_E0 = 4,
8754 EV5_E1 = 8,
8755 EV5_FAM = 16,
8756 EV5_FA = 32,
8757 EV5_FM = 64
8760 static enum alphaev4_pipe
8761 alphaev4_insn_pipe (rtx insn)
8763 if (recog_memoized (insn) < 0)
8764 return EV4_STOP;
8765 if (get_attr_length (insn) != 4)
8766 return EV4_STOP;
8768 switch (get_attr_type (insn))
8770 case TYPE_ILD:
8771 case TYPE_LDSYM:
8772 case TYPE_FLD:
8773 case TYPE_LD_L:
8774 return EV4_IBX;
8776 case TYPE_IADD:
8777 case TYPE_ILOG:
8778 case TYPE_ICMOV:
8779 case TYPE_ICMP:
8780 case TYPE_FST:
8781 case TYPE_SHIFT:
8782 case TYPE_IMUL:
8783 case TYPE_FBR:
8784 case TYPE_MVI: /* fake */
8785 return EV4_IB0;
8787 case TYPE_IST:
8788 case TYPE_MISC:
8789 case TYPE_IBR:
8790 case TYPE_JSR:
8791 case TYPE_CALLPAL:
8792 case TYPE_FCPYS:
8793 case TYPE_FCMOV:
8794 case TYPE_FADD:
8795 case TYPE_FDIV:
8796 case TYPE_FMUL:
8797 case TYPE_ST_C:
8798 case TYPE_MB:
8799 case TYPE_FSQRT: /* fake */
8800 case TYPE_FTOI: /* fake */
8801 case TYPE_ITOF: /* fake */
8802 return EV4_IB1;
8804 default:
8805 gcc_unreachable ();
8809 static enum alphaev5_pipe
8810 alphaev5_insn_pipe (rtx insn)
8812 if (recog_memoized (insn) < 0)
8813 return EV5_STOP;
8814 if (get_attr_length (insn) != 4)
8815 return EV5_STOP;
8817 switch (get_attr_type (insn))
8819 case TYPE_ILD:
8820 case TYPE_FLD:
8821 case TYPE_LDSYM:
8822 case TYPE_IADD:
8823 case TYPE_ILOG:
8824 case TYPE_ICMOV:
8825 case TYPE_ICMP:
8826 return EV5_E01;
8828 case TYPE_IST:
8829 case TYPE_FST:
8830 case TYPE_SHIFT:
8831 case TYPE_IMUL:
8832 case TYPE_MISC:
8833 case TYPE_MVI:
8834 case TYPE_LD_L:
8835 case TYPE_ST_C:
8836 case TYPE_MB:
8837 case TYPE_FTOI: /* fake */
8838 case TYPE_ITOF: /* fake */
8839 return EV5_E0;
8841 case TYPE_IBR:
8842 case TYPE_JSR:
8843 case TYPE_CALLPAL:
8844 return EV5_E1;
8846 case TYPE_FCPYS:
8847 return EV5_FAM;
8849 case TYPE_FBR:
8850 case TYPE_FCMOV:
8851 case TYPE_FADD:
8852 case TYPE_FDIV:
8853 case TYPE_FSQRT: /* fake */
8854 return EV5_FA;
8856 case TYPE_FMUL:
8857 return EV5_FM;
8859 default:
8860 gcc_unreachable ();
8864 /* IN_USE is a mask of the slots currently filled within the insn group.
8865 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8866 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8868 LEN is, of course, the length of the group in bytes. */
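/* Worked example (added comment, not from the original source): if a
   group starts with a load, alphaev4_insn_pipe returns EV4_IBX and the
   code below records EV4_IB0 | EV4_IBX.  A later EV4_IB0 insn can then
   still be slotted, because the hardware may swap the IBX insn into
   IB1; only a third insn competing for the same slots ends the
   group.  */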
8870 static rtx
8871 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8873 int len, in_use;
8875 len = in_use = 0;
8877 if (! INSN_P (insn)
8878 || GET_CODE (PATTERN (insn)) == CLOBBER
8879 || GET_CODE (PATTERN (insn)) == USE)
8880 goto next_and_done;
8882 while (1)
8884 enum alphaev4_pipe pipe;
8886 pipe = alphaev4_insn_pipe (insn);
8887 switch (pipe)
8889 case EV4_STOP:
8890 /* Force complex instructions to start new groups. */
8891 if (in_use)
8892 goto done;
8894 /* If this is a completely unrecognized insn, it's an asm.
8895 We don't know how long it is, so record length as -1 to
8896 signal a needed realignment. */
8897 if (recog_memoized (insn) < 0)
8898 len = -1;
8899 else
8900 len = get_attr_length (insn);
8901 goto next_and_done;
8903 case EV4_IBX:
8904 if (in_use & EV4_IB0)
8906 if (in_use & EV4_IB1)
8907 goto done;
8908 in_use |= EV4_IB1;
8910 else
8911 in_use |= EV4_IB0 | EV4_IBX;
8912 break;
8914 case EV4_IB0:
8915 if (in_use & EV4_IB0)
8917 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8918 goto done;
8919 in_use |= EV4_IB1;
8921 in_use |= EV4_IB0;
8922 break;
8924 case EV4_IB1:
8925 if (in_use & EV4_IB1)
8926 goto done;
8927 in_use |= EV4_IB1;
8928 break;
8930 default:
8931 gcc_unreachable ();
8933 len += 4;
8935 /* Haifa doesn't do well scheduling branches. */
8936 if (GET_CODE (insn) == JUMP_INSN)
8937 goto next_and_done;
8939 next:
8940 insn = next_nonnote_insn (insn);
8942 if (!insn || ! INSN_P (insn))
8943 goto done;
8945 /* Let Haifa tell us where it thinks insn group boundaries are. */
8946 if (GET_MODE (insn) == TImode)
8947 goto done;
8949 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8950 goto next;
8953 next_and_done:
8954 insn = next_nonnote_insn (insn);
8956 done:
8957 *plen = len;
8958 *pin_use = in_use;
8959 return insn;
8962 /* IN_USE is a mask of the slots currently filled within the insn group.
8963 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8964 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8966 LEN is, of course, the length of the group in bytes. */
8968 static rtx
8969 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8971 int len, in_use;
8973 len = in_use = 0;
8975 if (! INSN_P (insn)
8976 || GET_CODE (PATTERN (insn)) == CLOBBER
8977 || GET_CODE (PATTERN (insn)) == USE)
8978 goto next_and_done;
8980 while (1)
8982 enum alphaev5_pipe pipe;
8984 pipe = alphaev5_insn_pipe (insn);
8985 switch (pipe)
8987 case EV5_STOP:
8988 /* Force complex instructions to start new groups. */
8989 if (in_use)
8990 goto done;
8992 /* If this is a completely unrecognized insn, it's an asm.
8993 We don't know how long it is, so record length as -1 to
8994 signal a needed realignment. */
8995 if (recog_memoized (insn) < 0)
8996 len = -1;
8997 else
8998 len = get_attr_length (insn);
8999 goto next_and_done;
9001 /* ??? Most of the cases below should never happen, and we would
9002 like to assert as much, since hitting them would indicate an error
9003 either in Haifa or in the scheduling description. Unfortunately, Haifa never
9004 schedules the last instruction of the BB, so we don't have
9005 an accurate TI bit to go off. */
9006 case EV5_E01:
9007 if (in_use & EV5_E0)
9009 if (in_use & EV5_E1)
9010 goto done;
9011 in_use |= EV5_E1;
9013 else
9014 in_use |= EV5_E0 | EV5_E01;
9015 break;
9017 case EV5_E0:
9018 if (in_use & EV5_E0)
9020 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9021 goto done;
9022 in_use |= EV5_E1;
9024 in_use |= EV5_E0;
9025 break;
9027 case EV5_E1:
9028 if (in_use & EV5_E1)
9029 goto done;
9030 in_use |= EV5_E1;
9031 break;
9033 case EV5_FAM:
9034 if (in_use & EV5_FA)
9036 if (in_use & EV5_FM)
9037 goto done;
9038 in_use |= EV5_FM;
9040 else
9041 in_use |= EV5_FA | EV5_FAM;
9042 break;
9044 case EV5_FA:
9045 if (in_use & EV5_FA)
9046 goto done;
9047 in_use |= EV5_FA;
9048 break;
9050 case EV5_FM:
9051 if (in_use & EV5_FM)
9052 goto done;
9053 in_use |= EV5_FM;
9054 break;
9056 case EV5_NONE:
9057 break;
9059 default:
9060 gcc_unreachable ();
9062 len += 4;
9064 /* Haifa doesn't do well scheduling branches. */
9065 /* ??? If this is predicted not-taken, slotting continues, except
9066 that no more IBR, FBR, or JSR insns may be slotted. */
9067 if (GET_CODE (insn) == JUMP_INSN)
9068 goto next_and_done;
9070 next:
9071 insn = next_nonnote_insn (insn);
9073 if (!insn || ! INSN_P (insn))
9074 goto done;
9076 /* Let Haifa tell us where it thinks insn group boundaries are. */
9077 if (GET_MODE (insn) == TImode)
9078 goto done;
9080 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9081 goto next;
9084 next_and_done:
9085 insn = next_nonnote_insn (insn);
9087 done:
9088 *plen = len;
9089 *pin_use = in_use;
9090 return insn;
9093 static rtx
9094 alphaev4_next_nop (int *pin_use)
9096 int in_use = *pin_use;
9097 rtx nop;
9099 if (!(in_use & EV4_IB0))
9101 in_use |= EV4_IB0;
9102 nop = gen_nop ();
9104 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9106 in_use |= EV4_IB1;
9107 nop = gen_nop ();
9109 else if (TARGET_FP && !(in_use & EV4_IB1))
9111 in_use |= EV4_IB1;
9112 nop = gen_fnop ();
9114 else
9115 nop = gen_unop ();
9117 *pin_use = in_use;
9118 return nop;
9121 static rtx
9122 alphaev5_next_nop (int *pin_use)
9124 int in_use = *pin_use;
9125 rtx nop;
9127 if (!(in_use & EV5_E1))
9129 in_use |= EV5_E1;
9130 nop = gen_nop ();
9132 else if (TARGET_FP && !(in_use & EV5_FA))
9134 in_use |= EV5_FA;
9135 nop = gen_fnop ();
9137 else if (TARGET_FP && !(in_use & EV5_FM))
9139 in_use |= EV5_FM;
9140 nop = gen_fnop ();
9142 else
9143 nop = gen_unop ();
9145 *pin_use = in_use;
9146 return nop;
9149 /* The instruction group alignment main loop. */
9151 static void
9152 alpha_align_insns (unsigned int max_align,
9153 rtx (*next_group) (rtx, int *, int *),
9154 rtx (*next_nop) (int *))
9156 /* ALIGN is the known alignment for the insn group. */
9157 unsigned int align;
9158 /* OFS is the offset of the current insn in the insn group. */
9159 int ofs;
9160 int prev_in_use, in_use, len, ldgp;
9161 rtx i, next;
9163 /* Let shorten branches care for assigning alignments to code labels. */
9164 shorten_branches (get_insns ());
9166 if (align_functions < 4)
9167 align = 4;
9168 else if ((unsigned int) align_functions < max_align)
9169 align = align_functions;
9170 else
9171 align = max_align;
9173 ofs = prev_in_use = 0;
9174 i = get_insns ();
9175 if (GET_CODE (i) == NOTE)
9176 i = next_nonnote_insn (i);
9178 ldgp = alpha_function_needs_gp ? 8 : 0;
9180 while (i)
9182 next = (*next_group) (i, &in_use, &len);
9184 /* When we see a label, resync alignment etc. */
9185 if (GET_CODE (i) == CODE_LABEL)
9187 unsigned int new_align = 1 << label_to_alignment (i);
9189 if (new_align >= align)
9191 align = new_align < max_align ? new_align : max_align;
9192 ofs = 0;
9195 else if (ofs & (new_align-1))
9196 ofs = (ofs | (new_align-1)) + 1;
9197 gcc_assert (!len);
9200 /* Handle complex instructions specially. */
9201 else if (in_use == 0)
9203 /* Asms will have length < 0. This is a signal that we have
9204 lost alignment knowledge. Assume, however, that the asm
9205 will not mis-align instructions. */
9206 if (len < 0)
9208 ofs = 0;
9209 align = 4;
9210 len = 0;
9214 /* If the known alignment is smaller than the recognized insn group,
9215 realign the output. */
9216 else if ((int) align < len)
9218 unsigned int new_log_align = len > 8 ? 4 : 3;
9219 rtx prev, where;
9221 where = prev = prev_nonnote_insn (i);
9222 if (!where || GET_CODE (where) != CODE_LABEL)
9223 where = i;
9225 /* Can't realign between a call and its gp reload. */
9226 if (! (TARGET_EXPLICIT_RELOCS
9227 && prev && GET_CODE (prev) == CALL_INSN))
9229 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9230 align = 1 << new_log_align;
9231 ofs = 0;
9235 /* We may not insert padding inside the initial ldgp sequence. */
9236 else if (ldgp > 0)
9237 ldgp -= len;
9239 /* If the group won't fit in the same INT16 as the previous,
9240 we need to add padding to keep the group together. Rather
9241 than simply leaving the insn filling to the assembler, we
9242 can make use of the knowledge of what sorts of instructions
9243 were issued in the previous group to make sure that all of
9244 the added nops are really free. */
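/* Worked example (illustrative only, not in the original source):
   with a 16-byte known alignment, a current offset of 12 and an
   8-byte group, the group would straddle the boundary, so
   nop_count = (16 - 12) / 4 = 1 and a single nop chosen by next_nop
   is emitted ahead of the group, after which ofs is reset to 0.  */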
9245 else if (ofs + len > (int) align)
9247 int nop_count = (align - ofs) / 4;
9248 rtx where;
9250 /* Insert nops before labels, branches, and calls to truly merge
9251 the execution of the nops with the previous instruction group. */
9252 where = prev_nonnote_insn (i);
9253 if (where)
9255 if (GET_CODE (where) == CODE_LABEL)
9257 rtx where2 = prev_nonnote_insn (where);
9258 if (where2 && GET_CODE (where2) == JUMP_INSN)
9259 where = where2;
9261 else if (GET_CODE (where) == INSN)
9262 where = i;
9264 else
9265 where = i;
9268 emit_insn_before ((*next_nop)(&prev_in_use), where);
9269 while (--nop_count);
9270 ofs = 0;
9273 ofs = (ofs + len) & (align - 1);
9274 prev_in_use = in_use;
9275 i = next;
9279 /* Machine dependent reorg pass. */
9281 static void
9282 alpha_reorg (void)
9284 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9285 alpha_handle_trap_shadows ();
9287 /* Due to the number of extra trapb insns, don't bother fixing up
9288 alignment when trap precision is instruction. Moreover, we can
9289 only do our job when sched2 is run. */
9290 if (optimize && !optimize_size
9291 && alpha_tp != ALPHA_TP_INSN
9292 && flag_schedule_insns_after_reload)
9294 if (alpha_tune == PROCESSOR_EV4)
9295 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9296 else if (alpha_tune == PROCESSOR_EV5)
9297 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9301 #if !TARGET_ABI_UNICOSMK
9303 #ifdef HAVE_STAMP_H
9304 #include <stamp.h>
9305 #endif
9307 static void
9308 alpha_file_start (void)
9310 #ifdef OBJECT_FORMAT_ELF
9311 /* If emitting dwarf2 debug information, we cannot generate a .file
9312 directive to start the file, as it will conflict with dwarf2out
9313 file numbers. So it's only useful when emitting mdebug output. */
9314 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9315 #endif
9317 default_file_start ();
9318 #ifdef MS_STAMP
9319 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9320 #endif
9322 fputs ("\t.set noreorder\n", asm_out_file);
9323 fputs ("\t.set volatile\n", asm_out_file);
9324 if (!TARGET_ABI_OPEN_VMS)
9325 fputs ("\t.set noat\n", asm_out_file);
9326 if (TARGET_EXPLICIT_RELOCS)
9327 fputs ("\t.set nomacro\n", asm_out_file);
9328 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9330 const char *arch;
9332 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9333 arch = "ev6";
9334 else if (TARGET_MAX)
9335 arch = "pca56";
9336 else if (TARGET_BWX)
9337 arch = "ev56";
9338 else if (alpha_cpu == PROCESSOR_EV5)
9339 arch = "ev5";
9340 else
9341 arch = "ev4";
9343 fprintf (asm_out_file, "\t.arch %s\n", arch);
9346 #endif
9348 #ifdef OBJECT_FORMAT_ELF
9350 /* Switch to the section to which we should output X. The only thing
9351 special we do here is to honor small data. */
9353 static void
9354 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9355 unsigned HOST_WIDE_INT align)
9357 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9358 /* ??? Consider using mergeable sdata sections. */
9359 sdata_section ();
9360 else
9361 default_elf_select_rtx_section (mode, x, align);
9364 #endif /* OBJECT_FORMAT_ELF */
9366 /* Structure to collect function names for final output in link section. */
9367 /* Note that items marked with GTY can't be ifdef'ed out. */
9369 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9370 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9372 struct alpha_links GTY(())
9374 int num;
9375 rtx linkage;
9376 enum links_kind lkind;
9377 enum reloc_kind rkind;
9380 struct alpha_funcs GTY(())
9382 int num;
9383 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9384 links;
9387 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9388 splay_tree alpha_links_tree;
9389 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9390 splay_tree alpha_funcs_tree;
9392 static GTY(()) int alpha_funcs_num;
9394 #if TARGET_ABI_OPEN_VMS
9396 /* Return the VMS argument type corresponding to MODE. */
9398 enum avms_arg_type
9399 alpha_arg_type (enum machine_mode mode)
9401 switch (mode)
9403 case SFmode:
9404 return TARGET_FLOAT_VAX ? FF : FS;
9405 case DFmode:
9406 return TARGET_FLOAT_VAX ? FD : FT;
9407 default:
9408 return I64;
9412 /* Return an rtx for an integer representing the VMS Argument Information
9413 register value. */
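/* Sketch of the encoding built below (added comment, not original):
   the bits below bit 8 hold the argument count, and each of the first
   six arguments gets a 3-bit type code at bit position i * 3 + 8, so
   argument 0 occupies bits 8-10, argument 1 bits 11-13, and so on.  */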
9416 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9418 unsigned HOST_WIDE_INT regval = cum.num_args;
9419 int i;
9421 for (i = 0; i < 6; i++)
9422 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9424 return GEN_INT (regval);
9427 /* Make (or fake) .linkage entry for function call.
9429 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9431 Return a SYMBOL_REF rtx for the linkage. */
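/* Illustrative note (added; not part of the original source): for a
   function "foo" the SYMBOL_REF constructed below names the linkage
   symbol "$foo..lk", while alpha_use_linkage later builds the
   per-function entries of the form "$<num>..foo..lk".  */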
9434 alpha_need_linkage (const char *name, int is_local)
9436 splay_tree_node node;
9437 struct alpha_links *al;
9439 if (name[0] == '*')
9440 name++;
9442 if (is_local)
9444 struct alpha_funcs *cfaf;
9446 if (!alpha_funcs_tree)
9447 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9448 splay_tree_compare_pointers);
9450 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9452 cfaf->links = 0;
9453 cfaf->num = ++alpha_funcs_num;
9455 splay_tree_insert (alpha_funcs_tree,
9456 (splay_tree_key) current_function_decl,
9457 (splay_tree_value) cfaf);
9460 if (alpha_links_tree)
9462 /* Is this name already defined? */
9464 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9465 if (node)
9467 al = (struct alpha_links *) node->value;
9468 if (is_local)
9470 /* Defined here but external assumed. */
9471 if (al->lkind == KIND_EXTERN)
9472 al->lkind = KIND_LOCAL;
9474 else
9476 /* Used here but unused assumed. */
9477 if (al->lkind == KIND_UNUSED)
9478 al->lkind = KIND_LOCAL;
9480 return al->linkage;
9483 else
9484 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9486 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9487 name = ggc_strdup (name);
9489 /* Assume external if no definition. */
9490 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9492 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9493 get_identifier (name);
9495 /* Construct a SYMBOL_REF for us to call. */
9497 size_t name_len = strlen (name);
9498 char *linksym = alloca (name_len + 6);
9499 linksym[0] = '$';
9500 memcpy (linksym + 1, name, name_len);
9501 memcpy (linksym + 1 + name_len, "..lk", 5);
9502 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9503 ggc_alloc_string (linksym, name_len + 5));
9506 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9507 (splay_tree_value) al);
9509 return al->linkage;
9513 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9515 splay_tree_node cfunnode;
9516 struct alpha_funcs *cfaf;
9517 struct alpha_links *al;
9518 const char *name = XSTR (linkage, 0);
9520 cfaf = (struct alpha_funcs *) 0;
9521 al = (struct alpha_links *) 0;
9523 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9524 cfaf = (struct alpha_funcs *) cfunnode->value;
9526 if (cfaf->links)
9528 splay_tree_node lnode;
9530 /* Is this name already defined? */
9532 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9533 if (lnode)
9534 al = (struct alpha_links *) lnode->value;
9536 else
9537 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9539 if (!al)
9541 size_t name_len;
9542 size_t buflen;
9543 char buf [512];
9544 char *linksym;
9545 splay_tree_node node = 0;
9546 struct alpha_links *anl;
9548 if (name[0] == '*')
9549 name++;
9551 name_len = strlen (name);
9553 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9554 al->num = cfaf->num;
9556 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9557 if (node)
9559 anl = (struct alpha_links *) node->value;
9560 al->lkind = anl->lkind;
9563 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9564 buflen = strlen (buf);
9565 linksym = alloca (buflen + 1);
9566 memcpy (linksym, buf, buflen + 1);
9568 al->linkage = gen_rtx_SYMBOL_REF
9569 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9571 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9572 (splay_tree_value) al);
9575 if (rflag)
9576 al->rkind = KIND_CODEADDR;
9577 else
9578 al->rkind = KIND_LINKAGE;
9580 if (lflag)
9581 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9582 else
9583 return al->linkage;
9586 static int
9587 alpha_write_one_linkage (splay_tree_node node, void *data)
9589 const char *const name = (const char *) node->key;
9590 struct alpha_links *link = (struct alpha_links *) node->value;
9591 FILE *stream = (FILE *) data;
9593 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9594 if (link->rkind == KIND_CODEADDR)
9596 if (link->lkind == KIND_LOCAL)
9598 /* Local and used */
9599 fprintf (stream, "\t.quad %s..en\n", name);
9601 else
9603 /* External and used, request code address. */
9604 fprintf (stream, "\t.code_address %s\n", name);
9607 else
9609 if (link->lkind == KIND_LOCAL)
9611 /* Local and used, build linkage pair. */
9612 fprintf (stream, "\t.quad %s..en\n", name);
9613 fprintf (stream, "\t.quad %s\n", name);
9615 else
9617 /* External and used, request linkage pair. */
9618 fprintf (stream, "\t.linkage %s\n", name);
9622 return 0;
9625 static void
9626 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9628 splay_tree_node node;
9629 struct alpha_funcs *func;
9631 link_section ();
9632 fprintf (stream, "\t.align 3\n");
9633 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9634 func = (struct alpha_funcs *) node->value;
9636 fputs ("\t.name ", stream);
9637 assemble_name (stream, funname);
9638 fputs ("..na\n", stream);
9639 ASM_OUTPUT_LABEL (stream, funname);
9640 fprintf (stream, "\t.pdesc ");
9641 assemble_name (stream, funname);
9642 fprintf (stream, "..en,%s\n",
9643 alpha_procedure_type == PT_STACK ? "stack"
9644 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9646 if (func->links)
9648 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9649 /* splay_tree_delete (func->links); */
9653 /* Given a decl, a section name, and whether the decl initializer
9654 has relocs, choose attributes for the section. */
9656 #define SECTION_VMS_OVERLAY SECTION_FORGET
9657 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9658 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9660 static unsigned int
9661 vms_section_type_flags (tree decl, const char *name, int reloc)
9663 unsigned int flags = default_section_type_flags (decl, name, reloc);
9665 if (decl && DECL_ATTRIBUTES (decl)
9666 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9667 flags |= SECTION_VMS_OVERLAY;
9668 if (decl && DECL_ATTRIBUTES (decl)
9669 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9670 flags |= SECTION_VMS_GLOBAL;
9671 if (decl && DECL_ATTRIBUTES (decl)
9672 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9673 flags |= SECTION_VMS_INITIALIZE;
9675 return flags;
9678 /* Switch to an arbitrary section NAME with attributes as specified
9679 by FLAGS. ALIGN specifies any known alignment requirements for
9680 the section; 0 if the default should be used. */
9682 static void
9683 vms_asm_named_section (const char *name, unsigned int flags,
9684 tree decl ATTRIBUTE_UNUSED)
9686 fputc ('\n', asm_out_file);
9687 fprintf (asm_out_file, ".section\t%s", name);
9689 if (flags & SECTION_VMS_OVERLAY)
9690 fprintf (asm_out_file, ",OVR");
9691 if (flags & SECTION_VMS_GLOBAL)
9692 fprintf (asm_out_file, ",GBL");
9693 if (flags & SECTION_VMS_INITIALIZE)
9694 fprintf (asm_out_file, ",NOMOD");
9695 if (flags & SECTION_DEBUG)
9696 fprintf (asm_out_file, ",NOWRT");
9698 fputc ('\n', asm_out_file);
9701 /* Record an element in the table of global constructors. SYMBOL is
9702 a SYMBOL_REF of the function to be called; PRIORITY is a number
9703 between 0 and MAX_INIT_PRIORITY.
9705 Differs from default_ctors_section_asm_out_constructor in that the
9706 width of the .ctors entry is always 64 bits, rather than the 32 bits
9707 used by a normal pointer. */
9709 static void
9710 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9712 ctors_section ();
9713 assemble_align (BITS_PER_WORD);
9714 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9717 static void
9718 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9720 dtors_section ();
9721 assemble_align (BITS_PER_WORD);
9722 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9724 #else
9727 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9728 int is_local ATTRIBUTE_UNUSED)
9730 return NULL_RTX;
9734 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9735 tree cfundecl ATTRIBUTE_UNUSED,
9736 int lflag ATTRIBUTE_UNUSED,
9737 int rflag ATTRIBUTE_UNUSED)
9739 return NULL_RTX;
9742 #endif /* TARGET_ABI_OPEN_VMS */
9744 #if TARGET_ABI_UNICOSMK
9746 /* This evaluates to true if we do not know how to pass TYPE solely in
9747 registers. This is the case for all arguments that do not fit in two
9748 registers. */
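/* Illustrative example (added; assuming ALPHA_ARG_SIZE counts 8-byte
   argument words, as the two-register limit suggests): a 24-byte
   structure needs three argument words, so ALPHA_ARG_SIZE returns
   3 > 2 and the structure is passed on the stack, while a 16-byte
   structure still travels in registers.  */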
9750 static bool
9751 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9753 if (type == NULL)
9754 return false;
9756 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9757 return true;
9758 if (TREE_ADDRESSABLE (type))
9759 return true;
9761 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9764 /* Define the offset between two registers, one to be eliminated, and the
9765 other its replacement, at the start of a routine. */
9768 unicosmk_initial_elimination_offset (int from, int to)
9770 int fixed_size;
9772 fixed_size = alpha_sa_size();
9773 if (fixed_size != 0)
9774 fixed_size += 48;
9776 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9777 return -fixed_size;
9778 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9779 return 0;
9780 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9781 return (ALPHA_ROUND (current_function_outgoing_args_size)
9782 + ALPHA_ROUND (get_frame_size()));
9783 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9784 return (ALPHA_ROUND (fixed_size)
9785 + ALPHA_ROUND (get_frame_size()
9786 + current_function_outgoing_args_size));
9787 else
9788 gcc_unreachable ();
9791 /* Output the module name for .ident and .end directives. We have to strip
9792 directories and make sure that the module name starts with a letter
9793 or '$'. */
9795 static void
9796 unicosmk_output_module_name (FILE *file)
9798 const char *name = lbasename (main_input_filename);
9799 unsigned len = strlen (name);
9800 char *clean_name = alloca (len + 2);
9801 char *ptr = clean_name;
9803 /* CAM only accepts module names that start with a letter or '$'. We
9804 prefix the module name with a '$' if necessary. */
9806 if (!ISALPHA (*name))
9807 *ptr++ = '$';
9808 memcpy (ptr, name, len + 1);
9809 clean_symbol_name (clean_name);
9810 fputs (clean_name, file);
9813 /* Output the definition of a common variable. */
9815 void
9816 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9818 tree name_tree;
9819 printf ("T3E__: common %s\n", name);
9821 common_section ();
9822 fputs("\t.endp\n\n\t.psect ", file);
9823 assemble_name(file, name);
9824 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9825 fprintf(file, "\t.byte\t0:%d\n", size);
9827 /* Mark the symbol as defined in this module. */
9828 name_tree = get_identifier (name);
9829 TREE_ASM_WRITTEN (name_tree) = 1;
9832 #define SECTION_PUBLIC SECTION_MACH_DEP
9833 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9834 static int current_section_align;
9836 static unsigned int
9837 unicosmk_section_type_flags (tree decl, const char *name,
9838 int reloc ATTRIBUTE_UNUSED)
9840 unsigned int flags = default_section_type_flags (decl, name, reloc);
9842 if (!decl)
9843 return flags;
9845 if (TREE_CODE (decl) == FUNCTION_DECL)
9847 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9848 if (align_functions_log > current_section_align)
9849 current_section_align = align_functions_log;
9851 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9852 flags |= SECTION_MAIN;
9854 else
9855 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9857 if (TREE_PUBLIC (decl))
9858 flags |= SECTION_PUBLIC;
9860 return flags;
9863 /* Generate a section name for decl and associate it with the
9864 declaration. */
9866 static void
9867 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9869 const char *name;
9870 int len;
9872 gcc_assert (decl);
9874 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9875 name = default_strip_name_encoding (name);
9876 len = strlen (name);
9878 if (TREE_CODE (decl) == FUNCTION_DECL)
9880 char *string;
9882 /* It is essential that we prefix the section name here because
9883 otherwise the section names generated for constructors and
9884 destructors confuse collect2. */
9886 string = alloca (len + 6);
9887 sprintf (string, "code@%s", name);
9888 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9890 else if (TREE_PUBLIC (decl))
9891 DECL_SECTION_NAME (decl) = build_string (len, name);
9892 else
9894 char *string;
9896 string = alloca (len + 6);
9897 sprintf (string, "data@%s", name);
9898 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9902 /* Switch to an arbitrary section NAME with attributes as specified
9903 by FLAGS. ALIGN specifies any known alignment requirements for
9904 the section; 0 if the default should be used. */
9906 static void
9907 unicosmk_asm_named_section (const char *name, unsigned int flags,
9908 tree decl ATTRIBUTE_UNUSED)
9910 const char *kind;
9912 /* Close the previous section. */
9914 fputs ("\t.endp\n\n", asm_out_file);
9916 /* Find out what kind of section we are opening. */
9918 if (flags & SECTION_MAIN)
9919 fputs ("\t.start\tmain\n", asm_out_file);
9921 if (flags & SECTION_CODE)
9922 kind = "code";
9923 else if (flags & SECTION_PUBLIC)
9924 kind = "common";
9925 else
9926 kind = "data";
9928 if (current_section_align != 0)
9929 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9930 current_section_align, kind);
9931 else
9932 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9935 static void
9936 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9938 if (DECL_P (decl)
9939 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9940 unicosmk_unique_section (decl, 0);
9943 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9944 in code sections because .align fills unused space with zeroes. */
9946 void
9947 unicosmk_output_align (FILE *file, int align)
9949 if (inside_function)
9950 fprintf (file, "\tgcc@code@align\t%d\n", align);
9951 else
9952 fprintf (file, "\t.align\t%d\n", align);
9955 /* Add a case vector to the current function's list of deferred case
9956 vectors. Case vectors have to be put into a separate section because CAM
9957 does not allow data definitions in code sections. */
9959 void
9960 unicosmk_defer_case_vector (rtx lab, rtx vec)
9962 struct machine_function *machine = cfun->machine;
9964 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9965 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9966 machine->addr_list);
9969 /* Output a case vector. */
9971 static void
9972 unicosmk_output_addr_vec (FILE *file, rtx vec)
9974 rtx lab = XEXP (vec, 0);
9975 rtx body = XEXP (vec, 1);
9976 int vlen = XVECLEN (body, 0);
9977 int idx;
9979 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
9981 for (idx = 0; idx < vlen; idx++)
9983 ASM_OUTPUT_ADDR_VEC_ELT
9984 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9988 /* Output current function's deferred case vectors. */
9990 static void
9991 unicosmk_output_deferred_case_vectors (FILE *file)
9993 struct machine_function *machine = cfun->machine;
9994 rtx t;
9996 if (machine->addr_list == NULL_RTX)
9997 return;
9999 data_section ();
10000 for (t = machine->addr_list; t; t = XEXP (t, 1))
10001 unicosmk_output_addr_vec (file, XEXP (t, 0));
10004 /* Generate the name of the SSIB section for the current function. */
10006 #define SSIB_PREFIX "__SSIB_"
10007 #define SSIB_PREFIX_LEN 7
10009 static const char *
10010 unicosmk_ssib_name (void)
10012 /* This is ok since CAM won't be able to deal with names longer than that
10013 anyway. */
10015 static char name[256];
10017 rtx x;
10018 const char *fnname;
10019 int len;
10021 x = DECL_RTL (cfun->decl);
10022 gcc_assert (GET_CODE (x) == MEM);
10023 x = XEXP (x, 0);
10024 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10025 fnname = XSTR (x, 0);
10027 len = strlen (fnname);
10028 if (len + SSIB_PREFIX_LEN > 255)
10029 len = 255 - SSIB_PREFIX_LEN;
10031 strcpy (name, SSIB_PREFIX);
10032 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10033 name[len + SSIB_PREFIX_LEN] = 0;
10035 return name;
10038 /* Set up the dynamic subprogram information block (DSIB) and update the
10039 frame pointer register ($15) for subroutines which have a frame. If the
10040 subroutine doesn't have a frame, simply increment $15. */
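/* Layout sketch derived from the code below (comment added for
   clarity; offsets are relative to the stack pointer after the
   64-byte allocation): the return address is stored at sp+56, the old
   frame pointer at sp+48, the SSIB pointer at sp+32 and the CIW index
   at sp+24, after which $15 is set to sp+64, i.e. the incoming stack
   pointer value.  */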
10042 static void
10043 unicosmk_gen_dsib (unsigned long *imaskP)
10045 if (alpha_procedure_type == PT_STACK)
10047 const char *ssib_name;
10048 rtx mem;
10050 /* Allocate 64 bytes for the DSIB. */
10052 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10053 GEN_INT (-64))));
10054 emit_insn (gen_blockage ());
10056 /* Save the return address. */
10058 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10059 set_mem_alias_set (mem, alpha_sr_alias_set);
10060 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10061 (*imaskP) &= ~(1UL << REG_RA);
10063 /* Save the old frame pointer. */
10065 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10066 set_mem_alias_set (mem, alpha_sr_alias_set);
10067 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10068 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10070 emit_insn (gen_blockage ());
10072 /* Store the SSIB pointer. */
10074 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10075 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10076 set_mem_alias_set (mem, alpha_sr_alias_set);
10078 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10079 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10080 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10082 /* Save the CIW index. */
10084 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10085 set_mem_alias_set (mem, alpha_sr_alias_set);
10086 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10088 emit_insn (gen_blockage ());
10090 /* Set the new frame pointer. */
10092 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10093 stack_pointer_rtx, GEN_INT (64))));
10096 else
10098 /* Increment the frame pointer register to indicate that we do not
10099 have a frame. */
10101 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10102 hard_frame_pointer_rtx, const1_rtx)));
10106 /* Output the static subroutine information block for the current
10107 function. */
10109 static void
10110 unicosmk_output_ssib (FILE *file, const char *fnname)
10112 int len;
10113 int i;
10114 rtx x;
10115 rtx ciw;
10116 struct machine_function *machine = cfun->machine;
10118 ssib_section ();
10119 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10120 unicosmk_ssib_name ());
10122 /* Some required stuff and the function name length. */
10124 len = strlen (fnname);
10125 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10127 /* Saved registers
10128 ??? We don't do that yet. */
10130 fputs ("\t.quad\t0\n", file);
10132 /* Function address. */
10134 fputs ("\t.quad\t", file);
10135 assemble_name (file, fnname);
10136 putc ('\n', file);
10138 fputs ("\t.quad\t0\n", file);
10139 fputs ("\t.quad\t0\n", file);
10141 /* Function name.
10142 ??? We do it the same way Cray CC does it but this could be
10143 simplified. */
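/* Worked example (added for illustration): a five-character name
   emits five .byte directives followed by ".bits 24 : 0" to pad the
   name out to the next quadword, while a name whose length is already
   a multiple of eight instead gets a trailing zero quadword.  */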
10145 for( i = 0; i < len; i++ )
10146 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10147 if( (len % 8) == 0 )
10148 fputs ("\t.quad\t0\n", file);
10149 else
10150 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10152 /* All call information words used in the function. */
10154 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10156 ciw = XEXP (x, 0);
10157 #if HOST_BITS_PER_WIDE_INT == 32
10158 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10159 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10160 #else
10161 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10162 #endif
10166 /* Add a call information word (CIW) to the list of the current function's
10167 CIWs and return its index.
10169 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10172 unicosmk_add_call_info_word (rtx x)
10174 rtx node;
10175 struct machine_function *machine = cfun->machine;
10177 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10178 if (machine->first_ciw == NULL_RTX)
10179 machine->first_ciw = node;
10180 else
10181 XEXP (machine->last_ciw, 1) = node;
10183 machine->last_ciw = node;
10184 ++machine->ciw_count;
10186 return GEN_INT (machine->ciw_count
10187 + strlen (current_function_name ())/8 + 5);
10190 static char unicosmk_section_buf[100];
10192 char *
10193 unicosmk_text_section (void)
10195 static int count = 0;
10196 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
10197 count++);
10198 return unicosmk_section_buf;
10201 char *
10202 unicosmk_data_section (void)
10204 static int count = 1;
10205 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
10206 count++);
10207 return unicosmk_section_buf;
10210 /* The Cray assembler doesn't accept extern declarations for symbols which
10211 are defined in the same file. We have to keep track of all global
10212 symbols which are referenced and/or defined in a source file and output
10213 extern declarations for those which are referenced but not defined at
10214 the end of file. */
10216 /* List of identifiers for which an extern declaration might have to be
10217 emitted. */
10218 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10220 struct unicosmk_extern_list
10222 struct unicosmk_extern_list *next;
10223 const char *name;
10226 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10228 /* Output extern declarations which are required for every asm file. */
10230 static void
10231 unicosmk_output_default_externs (FILE *file)
10233 static const char *const externs[] =
10234 { "__T3E_MISMATCH" };
10236 int i;
10237 int n;
10239 n = ARRAY_SIZE (externs);
10241 for (i = 0; i < n; i++)
10242 fprintf (file, "\t.extern\t%s\n", externs[i]);
10245 /* Output extern declarations for global symbols which have been
10246 referenced but not defined. */
10248 static void
10249 unicosmk_output_externs (FILE *file)
10251 struct unicosmk_extern_list *p;
10252 const char *real_name;
10253 int len;
10254 tree name_tree;
10256 len = strlen (user_label_prefix);
10257 for (p = unicosmk_extern_head; p != 0; p = p->next)
10259 /* We have to strip the encoding and possibly remove user_label_prefix
10260 from the identifier in order to handle -fleading-underscore and
10261 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10262 real_name = default_strip_name_encoding (p->name);
10263 if (len && p->name[0] == '*'
10264 && !memcmp (real_name, user_label_prefix, len))
10265 real_name += len;
10267 name_tree = get_identifier (real_name);
10268 if (! TREE_ASM_WRITTEN (name_tree))
10270 TREE_ASM_WRITTEN (name_tree) = 1;
10271 fputs ("\t.extern\t", file);
10272 assemble_name (file, p->name);
10273 putc ('\n', file);
10278 /* Record an extern. */
10280 void
10281 unicosmk_add_extern (const char *name)
10283 struct unicosmk_extern_list *p;
10285 p = (struct unicosmk_extern_list *)
10286 xmalloc (sizeof (struct unicosmk_extern_list));
10287 p->next = unicosmk_extern_head;
10288 p->name = name;
10289 unicosmk_extern_head = p;
10292 /* The Cray assembler generates incorrect code if identifiers which
10293 conflict with register names are used as instruction operands. We have
10294 to replace such identifiers with DEX expressions. */
10296 /* Structure to collect identifiers which have been replaced by DEX
10297 expressions. */
10298 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10300 struct unicosmk_dex {
10301 struct unicosmk_dex *next;
10302 const char *name;
10305 /* List of identifiers which have been replaced by DEX expressions. The DEX
10306 number is determined by the position in the list. */
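/* Illustrative note (added; not in the original source): new names
   are pushed onto the front of the list, so the first identifier ever
   recorded keeps DEX number 1.  E.g. after registering "r1" and then
   "f2", unicosmk_need_dex returns 1 for "r1" and 2 for "f2".  */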
10308 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10310 /* The number of elements in the DEX list. */
10312 static int unicosmk_dex_count = 0;
10314 /* Check if NAME must be replaced by a DEX expression. */
10316 static int
10317 unicosmk_special_name (const char *name)
10319 if (name[0] == '*')
10320 ++name;
10322 if (name[0] == '$')
10323 ++name;
10325 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10326 return 0;
10328 switch (name[1])
10330 case '1': case '2':
10331 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10333 case '3':
10334 return (name[2] == '\0'
10335 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10337 default:
10338 return (ISDIGIT (name[1]) && name[2] == '\0');
10342 /* Return the DEX number if X must be replaced by a DEX expression and 0
10343 otherwise. */
10345 static int
10346 unicosmk_need_dex (rtx x)
10348 struct unicosmk_dex *dex;
10349 const char *name;
10350 int i;
10352 if (GET_CODE (x) != SYMBOL_REF)
10353 return 0;
10355 name = XSTR (x,0);
10356 if (! unicosmk_special_name (name))
10357 return 0;
10359 i = unicosmk_dex_count;
10360 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10362 if (! strcmp (name, dex->name))
10363 return i;
10364 --i;
10367 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10368 dex->name = name;
10369 dex->next = unicosmk_dex_list;
10370 unicosmk_dex_list = dex;
10372 ++unicosmk_dex_count;
10373 return unicosmk_dex_count;
10376 /* Output the DEX definitions for this file. */
10378 static void
10379 unicosmk_output_dex (FILE *file)
10381 struct unicosmk_dex *dex;
10382 int i;
10384 if (unicosmk_dex_list == NULL)
10385 return;
10387 fprintf (file, "\t.dexstart\n");
10389 i = unicosmk_dex_count;
10390 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10392 fprintf (file, "\tDEX (%d) = ", i);
10393 assemble_name (file, dex->name);
10394 putc ('\n', file);
10395 --i;
10398 fprintf (file, "\t.dexend\n");
10401 /* Output text to appear at the beginning of an assembler file. */
10403 static void
10404 unicosmk_file_start (void)
10406 int i;
10408 fputs ("\t.ident\t", asm_out_file);
10409 unicosmk_output_module_name (asm_out_file);
10410 fputs ("\n\n", asm_out_file);
10412 /* The Unicos/Mk assembler uses different register names. Instead of trying
10413 to support them, we simply use micro definitions. */
10415 /* CAM has different register names: rN for the integer register N and fN
10416 for the floating-point register N. Instead of trying to use these in
10417 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10418 register. */
10420 for (i = 0; i < 32; ++i)
10421 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10423 for (i = 0; i < 32; ++i)
10424 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
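
  /* The two loops above emit one micro definition per register, e.g.
     "$7 <- r7" and "$f7 <- f7", so that the $-prefixed names used
     throughout the Alpha port are mapped onto the CAM register names.  */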

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes, which does not
     work in code sections.  We define the macro 'gcc@code@align' which uses
     nops instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
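
  /* For reference, the fputs calls above write the following macro
     definition into the assembler file ("bis r31,r31,r31" is the canonical
     Alpha nop, so padding is done in 4-byte instruction units):

	.macro gcc@code@align n
	gcc@n@bytes = 1 << n
	gcc@here = . % gcc@n@bytes
	.if ne, gcc@here, 0
	.repeat (gcc@n@bytes - gcc@here) / 4
	bis r31,r31,r31
	.endr
	.endif
	.endm gcc@code@align

     (whitespace slightly normalized here).  */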

  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section.  We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that.  I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}

/* Output text to appear at the end of an assembler file.  This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */

  unicosmk_output_externs (asm_out_file);

  /* Output DEX definitions used for functions whose names conflict with
     register names.  */

  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}

#else
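
/* Stub versions of the Unicos/Mk routines above for the other ABIs.  They
   let code elsewhere in this file call these functions unconditionally;
   here they simply do nothing or return a neutral value.  */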

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{
}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{
}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
		      const char * fnname ATTRIBUTE_UNUSED)
{
}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
	 for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}
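
/* Net effect of the overrides above, as hinted by the comments in the code:
   on Unicos/Mk the generic expanders can no longer fall back to
   __divsi3/__udivsi3 for SImode division, and DImode division goes through
   the system routines $sldiv/$uldiv; on VMS all integer division and
   remainder operations call the OTS$ run-time library entry points instead
   of the libgcc routines.  */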

/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
# undef TARGET_ASM_FUNCTION_RODATA_SECTION
# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif
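
/* With the unaligned ops above, emitting an unaligned 16-bit datum on a
   non-ELF system produces, for example:

	.align 0
	.word	<value>

   where the leading ".align 0" turns off the assembler's automatic
   alignment, as described in the comment above.  */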

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"