* Mainline merge as of 2006-02-16 (@111136).
[official-gcc.git] / gcc / config / alpha / alpha.c
blob 909035a269400562850a20b08d51ea55ea1524fc
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
59 /* Specify which cpu to schedule for. */
60 enum processor_type alpha_tune;
62 /* Which cpu we're generating code for. */
63 enum processor_type alpha_cpu;
65 static const char * const alpha_cpu_name[] =
67 "ev4", "ev5", "ev6"
70 /* Specify how accurate floating-point traps need to be. */
72 enum alpha_trap_precision alpha_tp;
74 /* Specify the floating-point rounding mode. */
76 enum alpha_fp_rounding_mode alpha_fprm;
78 /* Specify which things cause traps. */
80 enum alpha_fp_trap_mode alpha_fptm;
82 /* Save information from a "cmpxx" operation until the branch or scc is
83 emitted. */
85 struct alpha_compare alpha_compare;
87 /* Nonzero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
90 static int inside_function = FALSE;
92 /* The number of cycles of latency we should assume on memory reads. */
94 int alpha_memory_latency = 3;
96 /* Whether the function needs the GP. */
98 static int alpha_function_needs_gp;
100 /* The alias set for prologue/epilogue register save/restore. */
102 static GTY(()) int alpha_sr_alias_set;
104 /* The assembler name of the current function. */
106 static const char *alpha_fnname;
108 /* The next explicit relocation sequence number. */
109 extern GTY(()) int alpha_next_sequence_number;
110 int alpha_next_sequence_number = 1;
112 /* The literal and gpdisp sequence numbers for this insn, as printed
113 by %# and %* respectively. */
114 extern GTY(()) int alpha_this_literal_sequence_number;
115 extern GTY(()) int alpha_this_gpdisp_sequence_number;
116 int alpha_this_literal_sequence_number;
117 int alpha_this_gpdisp_sequence_number;
119 /* Costs of various operations on the different architectures. */
121 struct alpha_rtx_cost_data
123 unsigned char fp_add;
124 unsigned char fp_mult;
125 unsigned char fp_div_sf;
126 unsigned char fp_div_df;
127 unsigned char int_mult_si;
128 unsigned char int_mult_di;
129 unsigned char int_shift;
130 unsigned char int_cmov;
131 unsigned short int_div;
134 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
136 { /* EV4 */
137 COSTS_N_INSNS (6), /* fp_add */
138 COSTS_N_INSNS (6), /* fp_mult */
139 COSTS_N_INSNS (34), /* fp_div_sf */
140 COSTS_N_INSNS (63), /* fp_div_df */
141 COSTS_N_INSNS (23), /* int_mult_si */
142 COSTS_N_INSNS (23), /* int_mult_di */
143 COSTS_N_INSNS (2), /* int_shift */
144 COSTS_N_INSNS (2), /* int_cmov */
145 COSTS_N_INSNS (97), /* int_div */
147 { /* EV5 */
148 COSTS_N_INSNS (4), /* fp_add */
149 COSTS_N_INSNS (4), /* fp_mult */
150 COSTS_N_INSNS (15), /* fp_div_sf */
151 COSTS_N_INSNS (22), /* fp_div_df */
152 COSTS_N_INSNS (8), /* int_mult_si */
153 COSTS_N_INSNS (12), /* int_mult_di */
154 COSTS_N_INSNS (1) + 1, /* int_shift */
155 COSTS_N_INSNS (1), /* int_cmov */
156 COSTS_N_INSNS (83), /* int_div */
158 { /* EV6 */
159 COSTS_N_INSNS (4), /* fp_add */
160 COSTS_N_INSNS (4), /* fp_mult */
161 COSTS_N_INSNS (12), /* fp_div_sf */
162 COSTS_N_INSNS (15), /* fp_div_df */
163 COSTS_N_INSNS (7), /* int_mult_si */
164 COSTS_N_INSNS (7), /* int_mult_di */
165 COSTS_N_INSNS (1), /* int_shift */
166 COSTS_N_INSNS (2), /* int_cmov */
167 COSTS_N_INSNS (86), /* int_div */
171 /* Similar but tuned for code size instead of execution latency. The
172 extra +N is fractional cost tuning based on latency. It's used to
173 encourage use of cheaper insns like shift, but only if there's just
174 one of them. */
176 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
178 COSTS_N_INSNS (1), /* fp_add */
179 COSTS_N_INSNS (1), /* fp_mult */
180 COSTS_N_INSNS (1), /* fp_div_sf */
181 COSTS_N_INSNS (1) + 1, /* fp_div_df */
182 COSTS_N_INSNS (1) + 1, /* int_mult_si */
183 COSTS_N_INSNS (1) + 2, /* int_mult_di */
184 COSTS_N_INSNS (1), /* int_shift */
185 COSTS_N_INSNS (1), /* int_cmov */
186 COSTS_N_INSNS (6), /* int_div */
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
192 #else
193 #define NUM_ARGS current_function_args_info
194 #endif
196 #define REG_PV 27
197 #define REG_RA 26
199 /* Declarations of static functions. */
200 static struct machine_function *alpha_init_machine_status (void);
201 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
203 #if TARGET_ABI_OPEN_VMS
204 static void alpha_write_linkage (FILE *, const char *, tree);
205 #endif
207 static void unicosmk_output_deferred_case_vectors (FILE *);
208 static void unicosmk_gen_dsib (unsigned long *);
209 static void unicosmk_output_ssib (FILE *, const char *);
210 static int unicosmk_need_dex (rtx);
212 /* Implement TARGET_HANDLE_OPTION. */
214 static bool
215 alpha_handle_option (size_t code, const char *arg, int value)
217 switch (code)
219 case OPT_mfp_regs:
220 if (value == 0)
221 target_flags |= MASK_SOFT_FP;
222 break;
224 case OPT_mieee:
225 case OPT_mieee_with_inexact:
226 target_flags |= MASK_IEEE_CONFORMANT;
227 break;
229 case OPT_mtls_size_:
230 if (value != 16 && value != 32 && value != 64)
231 error ("bad value %qs for -mtls-size switch", arg);
232 break;
235 return true;
238 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
239 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
241 static const char *
242 alpha_mangle_fundamental_type (tree type)
244 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
245 && TARGET_LONG_DOUBLE_128)
246 return "g";
248 /* For all other types, use normal C++ mangling. */
249 return NULL;
251 #endif
253 /* Parse target option strings. */
255 void
256 override_options (void)
258 static const struct cpu_table {
259 const char *const name;
260 const enum processor_type processor;
261 const int flags;
262 } cpu_table[] = {
263 { "ev4", PROCESSOR_EV4, 0 },
264 { "ev45", PROCESSOR_EV4, 0 },
265 { "21064", PROCESSOR_EV4, 0 },
266 { "ev5", PROCESSOR_EV5, 0 },
267 { "21164", PROCESSOR_EV5, 0 },
268 { "ev56", PROCESSOR_EV5, MASK_BWX },
269 { "21164a", PROCESSOR_EV5, MASK_BWX },
270 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
271 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
274 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
276 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { 0, 0, 0 }
280 int i;
282 /* Unicos/Mk doesn't have shared libraries. */
283 if (TARGET_ABI_UNICOSMK && flag_pic)
285 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
286 (flag_pic > 1) ? "PIC" : "pic");
287 flag_pic = 0;
290 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
291 floating-point instructions. Make that the default for this target. */
292 if (TARGET_ABI_UNICOSMK)
293 alpha_fprm = ALPHA_FPRM_DYN;
294 else
295 alpha_fprm = ALPHA_FPRM_NORM;
297 alpha_tp = ALPHA_TP_PROG;
298 alpha_fptm = ALPHA_FPTM_N;
300 /* We cannot use su and sui qualifiers for conversion instructions on
301 Unicos/Mk. I'm not sure if this is due to assembler or hardware
302 limitations. Right now, we issue a warning if -mieee is specified
303 and then ignore it; eventually, we should either get it right or
304 disable the option altogether. */
306 if (TARGET_IEEE)
308 if (TARGET_ABI_UNICOSMK)
309 warning (0, "-mieee not supported on Unicos/Mk");
310 else
312 alpha_tp = ALPHA_TP_INSN;
313 alpha_fptm = ALPHA_FPTM_SU;
317 if (TARGET_IEEE_WITH_INEXACT)
319 if (TARGET_ABI_UNICOSMK)
320 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
321 else
323 alpha_tp = ALPHA_TP_INSN;
324 alpha_fptm = ALPHA_FPTM_SUI;
328 if (alpha_tp_string)
330 if (! strcmp (alpha_tp_string, "p"))
331 alpha_tp = ALPHA_TP_PROG;
332 else if (! strcmp (alpha_tp_string, "f"))
333 alpha_tp = ALPHA_TP_FUNC;
334 else if (! strcmp (alpha_tp_string, "i"))
335 alpha_tp = ALPHA_TP_INSN;
336 else
337 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
340 if (alpha_fprm_string)
342 if (! strcmp (alpha_fprm_string, "n"))
343 alpha_fprm = ALPHA_FPRM_NORM;
344 else if (! strcmp (alpha_fprm_string, "m"))
345 alpha_fprm = ALPHA_FPRM_MINF;
346 else if (! strcmp (alpha_fprm_string, "c"))
347 alpha_fprm = ALPHA_FPRM_CHOP;
348 else if (! strcmp (alpha_fprm_string,"d"))
349 alpha_fprm = ALPHA_FPRM_DYN;
350 else
351 error ("bad value %qs for -mfp-rounding-mode switch",
352 alpha_fprm_string);
355 if (alpha_fptm_string)
357 if (strcmp (alpha_fptm_string, "n") == 0)
358 alpha_fptm = ALPHA_FPTM_N;
359 else if (strcmp (alpha_fptm_string, "u") == 0)
360 alpha_fptm = ALPHA_FPTM_U;
361 else if (strcmp (alpha_fptm_string, "su") == 0)
362 alpha_fptm = ALPHA_FPTM_SU;
363 else if (strcmp (alpha_fptm_string, "sui") == 0)
364 alpha_fptm = ALPHA_FPTM_SUI;
365 else
366 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
369 if (alpha_cpu_string)
371 for (i = 0; cpu_table [i].name; i++)
372 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
374 alpha_tune = alpha_cpu = cpu_table [i].processor;
375 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
376 target_flags |= cpu_table [i].flags;
377 break;
379 if (! cpu_table [i].name)
380 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
383 if (alpha_tune_string)
385 for (i = 0; cpu_table [i].name; i++)
386 if (! strcmp (alpha_tune_string, cpu_table [i].name))
388 alpha_tune = cpu_table [i].processor;
389 break;
391 if (! cpu_table [i].name)
392 error ("bad value %qs for -mcpu switch", alpha_tune_string);
395 /* Do some sanity checks on the above options. */
397 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
399 warning (0, "trap mode not supported on Unicos/Mk");
400 alpha_fptm = ALPHA_FPTM_N;
403 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
404 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
406 warning (0, "fp software completion requires -mtrap-precision=i");
407 alpha_tp = ALPHA_TP_INSN;
410 if (alpha_cpu == PROCESSOR_EV6)
412 /* Except for EV6 pass 1 (not released), we always have precise
413 arithmetic traps. Which means we can do software completion
414 without minding trap shadows. */
415 alpha_tp = ALPHA_TP_PROG;
418 if (TARGET_FLOAT_VAX)
420 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
422 warning (0, "rounding mode not supported for VAX floats");
423 alpha_fprm = ALPHA_FPRM_NORM;
425 if (alpha_fptm == ALPHA_FPTM_SUI)
427 warning (0, "trap mode not supported for VAX floats");
428 alpha_fptm = ALPHA_FPTM_SU;
430 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
431 warning (0, "128-bit long double not supported for VAX floats");
432 target_flags &= ~MASK_LONG_DOUBLE_128;
436 char *end;
437 int lat;
439 if (!alpha_mlat_string)
440 alpha_mlat_string = "L1";
442 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
443 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
445 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
446 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
447 && alpha_mlat_string[2] == '\0')
449 static int const cache_latency[][4] =
451 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
452 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
453 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
456 lat = alpha_mlat_string[1] - '0';
457 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
459 warning (0, "L%d cache latency unknown for %s",
460 lat, alpha_cpu_name[alpha_tune]);
461 lat = 3;
463 else
464 lat = cache_latency[alpha_tune][lat-1];
466 else if (! strcmp (alpha_mlat_string, "main"))
468 /* Most current memories have about 370ns latency. This is
469 a reasonable guess for a fast cpu. */
470 lat = 150;
472 else
474 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
475 lat = 3;
478 alpha_memory_latency = lat;
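   /* For example, when tuning for ev5 the table above gives 12 cycles for
      -mmemory-latency=L2 and 38 for L3; L3 is unknown on ev4 and falls back
      to 3, and -mmemory-latency=main assumes 150 cycles.  */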
481 /* Default the definition of "small data" to 8 bytes. */
482 if (!g_switch_set)
483 g_switch_value = 8;
485 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
486 if (flag_pic == 1)
487 target_flags |= MASK_SMALL_DATA;
488 else if (flag_pic == 2)
489 target_flags &= ~MASK_SMALL_DATA;
491 /* Align labels and loops for optimal branching. */
492 /* ??? Kludge these by not doing anything if we don't optimize and also if
493 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
494 if (optimize > 0 && write_symbols != SDB_DEBUG)
496 if (align_loops <= 0)
497 align_loops = 16;
498 if (align_jumps <= 0)
499 align_jumps = 16;
501 if (align_functions <= 0)
502 align_functions = 16;
504 /* Acquire a unique set number for our register saves and restores. */
505 alpha_sr_alias_set = new_alias_set ();
507 /* Register variables and functions with the garbage collector. */
509 /* Set up function hooks. */
510 init_machine_status = alpha_init_machine_status;
512 /* Tell the compiler when we're using VAX floating point. */
513 if (TARGET_FLOAT_VAX)
515 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
516 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
517 REAL_MODE_FORMAT (TFmode) = NULL;
521 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
524 zap_mask (HOST_WIDE_INT value)
526 int i;
528 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
529 i++, value >>= 8)
530 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
531 return 0;
533 return 1;
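/* For example, 0x00ff00ffff0000ff is a valid zap mask (every byte is 0x00
   or 0xff), while 0x1234 is not, since its low byte is 0x34.  */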
536 /* Return true if OP is valid for a particular TLS relocation.
537 We are already guaranteed that OP is a CONST. */
540 tls_symbolic_operand_1 (rtx op, int size, int unspec)
542 op = XEXP (op, 0);
544 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
545 return 0;
546 op = XVECEXP (op, 0, 0);
548 if (GET_CODE (op) != SYMBOL_REF)
549 return 0;
551 switch (SYMBOL_REF_TLS_MODEL (op))
553 case TLS_MODEL_LOCAL_DYNAMIC:
554 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
555 case TLS_MODEL_INITIAL_EXEC:
556 return unspec == UNSPEC_TPREL && size == 64;
557 case TLS_MODEL_LOCAL_EXEC:
558 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
559 default:
560 gcc_unreachable ();
564 /* Used by aligned_memory_operand and unaligned_memory_operand to
565 resolve what reload is going to do with OP if it's a register. */
568 resolve_reload_operand (rtx op)
570 if (reload_in_progress)
572 rtx tmp = op;
573 if (GET_CODE (tmp) == SUBREG)
574 tmp = SUBREG_REG (tmp);
575 if (GET_CODE (tmp) == REG
576 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
578 op = reg_equiv_memory_loc[REGNO (tmp)];
579 if (op == 0)
580 return 0;
583 return op;
586 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
587 the range defined for C in [I-P]. */
589 bool
590 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
592 switch (c)
594 case 'I':
595 /* An unsigned 8 bit constant. */
596 return (unsigned HOST_WIDE_INT) value < 0x100;
597 case 'J':
598 /* The constant zero. */
599 return value == 0;
600 case 'K':
601 /* A signed 16 bit constant. */
602 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
603 case 'L':
604 /* A shifted signed 16 bit constant appropriate for LDAH. */
605 return ((value & 0xffff) == 0
606 && ((value) >> 31 == -1 || value >> 31 == 0));
607 case 'M':
608 /* A constant that can be AND'ed with using a ZAP insn. */
609 return zap_mask (value);
610 case 'N':
611 /* A complemented unsigned 8 bit constant. */
612 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
613 case 'O':
614 /* A negated unsigned 8 bit constant. */
615 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
616 case 'P':
617 /* The constant 1, 2 or 3. */
618 return value == 1 || value == 2 || value == 3;
620 default:
621 return false;
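/* For example, 255 satisfies 'I', -32768 satisfies 'K', 0x7fff0000
   satisfies 'L' (a shifted 16-bit constant suitable for LDAH), and
   0xffffffffffff0000 satisfies 'M' (every byte is 0x00 or 0xff).  */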
625 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
626 matches for C in [GH]. */
628 bool
629 alpha_const_double_ok_for_letter_p (rtx value, int c)
631 switch (c)
633 case 'G':
634 /* The floating point zero constant. */
635 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
636 && value == CONST0_RTX (GET_MODE (value)));
638 case 'H':
639 /* A valid operand of a ZAP insn. */
640 return (GET_MODE (value) == VOIDmode
641 && zap_mask (CONST_DOUBLE_LOW (value))
642 && zap_mask (CONST_DOUBLE_HIGH (value)));
644 default:
645 return false;
649 /* Implements EXTRA_CONSTRAINT.  Return true if VALUE
650 matches for C.  */
652 bool
653 alpha_extra_constraint (rtx value, int c)
655 switch (c)
657 case 'Q':
658 return normal_memory_operand (value, VOIDmode);
659 case 'R':
660 return direct_call_operand (value, Pmode);
661 case 'S':
662 return (GET_CODE (value) == CONST_INT
663 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
664 case 'T':
665 return GET_CODE (value) == HIGH;
666 case 'U':
667 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
668 case 'W':
669 return (GET_CODE (value) == CONST_VECTOR
670 && value == CONST0_RTX (GET_MODE (value)));
671 default:
672 return false;
676 /* The scalar modes supported differ from the default check-what-c-supports
677 version in that sometimes TFmode is available even when long double
678 indicates only DFmode. On unicosmk, we have the situation that HImode
679 doesn't map to any C type, but of course we still support that. */
681 static bool
682 alpha_scalar_mode_supported_p (enum machine_mode mode)
684 switch (mode)
686 case QImode:
687 case HImode:
688 case SImode:
689 case DImode:
690 case TImode: /* via optabs.c */
691 return true;
693 case SFmode:
694 case DFmode:
695 return true;
697 case TFmode:
698 return TARGET_HAS_XFLOATING_LIBS;
700 default:
701 return false;
705 /* Alpha implements a couple of integer vector mode operations when
706 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
707 which allows the vectorizer to operate on e.g. move instructions,
708 or when expand_vector_operations can do something useful. */
710 static bool
711 alpha_vector_mode_supported_p (enum machine_mode mode)
713 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
716 /* Return 1 if this function can directly return via $26. */
719 direct_return (void)
721 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
722 && reload_completed
723 && alpha_sa_size () == 0
724 && get_frame_size () == 0
725 && current_function_outgoing_args_size == 0
726 && current_function_pretend_args_size == 0);
729 /* Return the ADDR_VEC associated with a tablejump insn. */
732 alpha_tablejump_addr_vec (rtx insn)
734 rtx tmp;
736 tmp = JUMP_LABEL (insn);
737 if (!tmp)
738 return NULL_RTX;
739 tmp = NEXT_INSN (tmp);
740 if (!tmp)
741 return NULL_RTX;
742 if (GET_CODE (tmp) == JUMP_INSN
743 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
744 return PATTERN (tmp);
745 return NULL_RTX;
748 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
751 alpha_tablejump_best_label (rtx insn)
753 rtx jump_table = alpha_tablejump_addr_vec (insn);
754 rtx best_label = NULL_RTX;
756 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
757 there for edge frequency counts from profile data. */
759 if (jump_table)
761 int n_labels = XVECLEN (jump_table, 1);
762 int best_count = -1;
763 int i, j;
765 for (i = 0; i < n_labels; i++)
767 int count = 1;
769 for (j = i + 1; j < n_labels; j++)
770 if (XEXP (XVECEXP (jump_table, 1, i), 0)
771 == XEXP (XVECEXP (jump_table, 1, j), 0))
772 count++;
774 if (count > best_count)
775 best_count = count, best_label = XVECEXP (jump_table, 1, i);
779 return best_label ? best_label : const0_rtx;
782 /* Return the TLS model to use for SYMBOL. */
784 static enum tls_model
785 tls_symbolic_operand_type (rtx symbol)
787 enum tls_model model;
789 if (GET_CODE (symbol) != SYMBOL_REF)
790 return 0;
791 model = SYMBOL_REF_TLS_MODEL (symbol);
793 /* Local-exec with a 64-bit size is the same code as initial-exec. */
794 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
795 model = TLS_MODEL_INITIAL_EXEC;
797 return model;
800 /* Return true if the function DECL will share the same GP as any
801 function in the current unit of translation. */
803 static bool
804 decl_has_samegp (tree decl)
806 /* Functions that are not local can be overridden, and thus may
807 not share the same gp. */
808 if (!(*targetm.binds_local_p) (decl))
809 return false;
811 /* If -msmall-data is in effect, assume that there is only one GP
812 for the module, and so any local symbol has this property. We
813 need explicit relocations to be able to enforce this for symbols
814 not defined in this unit of translation, however. */
815 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
816 return true;
818 /* Functions that are not external are defined in this UoT. */
819 /* ??? Irritatingly, static functions not yet emitted are still
820 marked "external". Apply this to non-static functions only. */
821 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
824 /* Return true if EXP should be placed in the small data section. */
826 static bool
827 alpha_in_small_data_p (tree exp)
829 /* We want to merge strings, so we never consider them small data. */
830 if (TREE_CODE (exp) == STRING_CST)
831 return false;
833 /* Functions are never in the small data area. Duh. */
834 if (TREE_CODE (exp) == FUNCTION_DECL)
835 return false;
837 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
839 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
840 if (strcmp (section, ".sdata") == 0
841 || strcmp (section, ".sbss") == 0)
842 return true;
844 else
846 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
848 /* If this is an incomplete type with size 0, then we can't put it
849 in sdata because it might be too big when completed. */
850 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
851 return true;
854 return false;
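/* For example, with the default 8-byte small-data threshold an ordinary
   4-byte "int" variable is placed in small data, a 100-byte array is not,
   and a variable given an explicit ".sdata" section always is.  */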
857 #if TARGET_ABI_OPEN_VMS
858 static bool
859 alpha_linkage_symbol_p (const char *symname)
861 int symlen = strlen (symname);
863 if (symlen > 4)
864 return strcmp (&symname [symlen - 4], "..lk") == 0;
866 return false;
869 #define LINKAGE_SYMBOL_REF_P(X) \
870 ((GET_CODE (X) == SYMBOL_REF \
871 && alpha_linkage_symbol_p (XSTR (X, 0))) \
872 || (GET_CODE (X) == CONST \
873 && GET_CODE (XEXP (X, 0)) == PLUS \
874 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
875 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
876 #endif
878 /* legitimate_address_p recognizes an RTL expression that is a valid
879 memory address for an instruction. The MODE argument is the
880 machine mode for the MEM expression that wants to use this address.
882 For Alpha, we have either a constant address or the sum of a
883 register and a constant address, or just a register. For DImode,
884 any of those forms can be surrounded with an AND that clears the
885 low-order three bits; this is an "unaligned" access. */
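/* For example, (reg), (plus (reg) (const_int 4096)), and the DImode form
   (and (plus (reg) (const_int 5)) (const_int -8)) used by ldq_u are all
   accepted, while a reg-plus-reg address is not.  */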
887 bool
888 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
890 /* If this is an ldq_u type address, discard the outer AND. */
891 if (mode == DImode
892 && GET_CODE (x) == AND
893 && GET_CODE (XEXP (x, 1)) == CONST_INT
894 && INTVAL (XEXP (x, 1)) == -8)
895 x = XEXP (x, 0);
897 /* Discard non-paradoxical subregs. */
898 if (GET_CODE (x) == SUBREG
899 && (GET_MODE_SIZE (GET_MODE (x))
900 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
901 x = SUBREG_REG (x);
903 /* Unadorned general registers are valid. */
904 if (REG_P (x)
905 && (strict
906 ? STRICT_REG_OK_FOR_BASE_P (x)
907 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
908 return true;
910 /* Constant addresses (i.e. +/- 32k) are valid. */
911 if (CONSTANT_ADDRESS_P (x))
912 return true;
914 #if TARGET_ABI_OPEN_VMS
915 if (LINKAGE_SYMBOL_REF_P (x))
916 return true;
917 #endif
919 /* Register plus a small constant offset is valid. */
920 if (GET_CODE (x) == PLUS)
922 rtx ofs = XEXP (x, 1);
923 x = XEXP (x, 0);
925 /* Discard non-paradoxical subregs. */
926 if (GET_CODE (x) == SUBREG
927 && (GET_MODE_SIZE (GET_MODE (x))
928 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
929 x = SUBREG_REG (x);
931 if (REG_P (x))
933 if (! strict
934 && NONSTRICT_REG_OK_FP_BASE_P (x)
935 && GET_CODE (ofs) == CONST_INT)
936 return true;
937 if ((strict
938 ? STRICT_REG_OK_FOR_BASE_P (x)
939 : NONSTRICT_REG_OK_FOR_BASE_P (x))
940 && CONSTANT_ADDRESS_P (ofs))
941 return true;
945 /* If we're managing explicit relocations, LO_SUM is valid, as
946 are small data symbols. */
947 else if (TARGET_EXPLICIT_RELOCS)
949 if (small_symbolic_operand (x, Pmode))
950 return true;
952 if (GET_CODE (x) == LO_SUM)
954 rtx ofs = XEXP (x, 1);
955 x = XEXP (x, 0);
957 /* Discard non-paradoxical subregs. */
958 if (GET_CODE (x) == SUBREG
959 && (GET_MODE_SIZE (GET_MODE (x))
960 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
961 x = SUBREG_REG (x);
963 /* Must have a valid base register. */
964 if (! (REG_P (x)
965 && (strict
966 ? STRICT_REG_OK_FOR_BASE_P (x)
967 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
968 return false;
970 /* The symbol must be local. */
971 if (local_symbolic_operand (ofs, Pmode)
972 || dtp32_symbolic_operand (ofs, Pmode)
973 || tp32_symbolic_operand (ofs, Pmode))
974 return true;
978 return false;
981 /* Build the SYMBOL_REF for __tls_get_addr. */
983 static GTY(()) rtx tls_get_addr_libfunc;
985 static rtx
986 get_tls_get_addr (void)
988 if (!tls_get_addr_libfunc)
989 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
990 return tls_get_addr_libfunc;
993 /* Try machine-dependent ways of modifying an illegitimate address
994 to be legitimate. If we find one, return the new, valid address. */
997 alpha_legitimize_address (rtx x, rtx scratch,
998 enum machine_mode mode ATTRIBUTE_UNUSED)
1000 HOST_WIDE_INT addend;
1002 /* If the address is (plus reg const_int) and the CONST_INT is not a
1003 valid offset, compute the high part of the constant and add it to
1004 the register. Then our address is (plus temp low-part-const). */
1005 if (GET_CODE (x) == PLUS
1006 && GET_CODE (XEXP (x, 0)) == REG
1007 && GET_CODE (XEXP (x, 1)) == CONST_INT
1008 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1010 addend = INTVAL (XEXP (x, 1));
1011 x = XEXP (x, 0);
1012 goto split_addend;
1015 /* If the address is (const (plus FOO const_int)), find the low-order
1016 part of the CONST_INT. Then load FOO plus any high-order part of the
1017 CONST_INT into a register. Our address is (plus reg low-part-const).
1018 This is done to reduce the number of GOT entries. */
1019 if (!no_new_pseudos
1020 && GET_CODE (x) == CONST
1021 && GET_CODE (XEXP (x, 0)) == PLUS
1022 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1024 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1025 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1026 goto split_addend;
1029 /* If we have a (plus reg const), emit the load as in (2), then add
1030 the two registers, and finally generate (plus reg low-part-const) as
1031 our address. */
1032 if (!no_new_pseudos
1033 && GET_CODE (x) == PLUS
1034 && GET_CODE (XEXP (x, 0)) == REG
1035 && GET_CODE (XEXP (x, 1)) == CONST
1036 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1037 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1039 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1040 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1041 XEXP (XEXP (XEXP (x, 1), 0), 0),
1042 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1043 goto split_addend;
1046 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1047 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1049 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1051 switch (tls_symbolic_operand_type (x))
1053 case TLS_MODEL_NONE:
1054 break;
1056 case TLS_MODEL_GLOBAL_DYNAMIC:
1057 start_sequence ();
1059 r0 = gen_rtx_REG (Pmode, 0);
1060 r16 = gen_rtx_REG (Pmode, 16);
1061 tga = get_tls_get_addr ();
1062 dest = gen_reg_rtx (Pmode);
1063 seq = GEN_INT (alpha_next_sequence_number++);
1065 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1066 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1067 insn = emit_call_insn (insn);
1068 CONST_OR_PURE_CALL_P (insn) = 1;
1069 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1071 insn = get_insns ();
1072 end_sequence ();
1074 emit_libcall_block (insn, dest, r0, x);
1075 return dest;
1077 case TLS_MODEL_LOCAL_DYNAMIC:
1078 start_sequence ();
1080 r0 = gen_rtx_REG (Pmode, 0);
1081 r16 = gen_rtx_REG (Pmode, 16);
1082 tga = get_tls_get_addr ();
1083 scratch = gen_reg_rtx (Pmode);
1084 seq = GEN_INT (alpha_next_sequence_number++);
1086 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1087 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1088 insn = emit_call_insn (insn);
1089 CONST_OR_PURE_CALL_P (insn) = 1;
1090 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1092 insn = get_insns ();
1093 end_sequence ();
1095 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1096 UNSPEC_TLSLDM_CALL);
1097 emit_libcall_block (insn, scratch, r0, eqv);
1099 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1100 eqv = gen_rtx_CONST (Pmode, eqv);
1102 if (alpha_tls_size == 64)
1104 dest = gen_reg_rtx (Pmode);
1105 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1106 emit_insn (gen_adddi3 (dest, dest, scratch));
1107 return dest;
1109 if (alpha_tls_size == 32)
1111 insn = gen_rtx_HIGH (Pmode, eqv);
1112 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1113 scratch = gen_reg_rtx (Pmode);
1114 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1116 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1118 case TLS_MODEL_INITIAL_EXEC:
1119 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1120 eqv = gen_rtx_CONST (Pmode, eqv);
1121 tp = gen_reg_rtx (Pmode);
1122 scratch = gen_reg_rtx (Pmode);
1123 dest = gen_reg_rtx (Pmode);
1125 emit_insn (gen_load_tp (tp));
1126 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1127 emit_insn (gen_adddi3 (dest, tp, scratch));
1128 return dest;
1130 case TLS_MODEL_LOCAL_EXEC:
1131 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1132 eqv = gen_rtx_CONST (Pmode, eqv);
1133 tp = gen_reg_rtx (Pmode);
1135 emit_insn (gen_load_tp (tp));
1136 if (alpha_tls_size == 32)
1138 insn = gen_rtx_HIGH (Pmode, eqv);
1139 insn = gen_rtx_PLUS (Pmode, tp, insn);
1140 tp = gen_reg_rtx (Pmode);
1141 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1143 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1145 default:
1146 gcc_unreachable ();
1149 if (local_symbolic_operand (x, Pmode))
1151 if (small_symbolic_operand (x, Pmode))
1152 return x;
1153 else
1155 if (!no_new_pseudos)
1156 scratch = gen_reg_rtx (Pmode);
1157 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1158 gen_rtx_HIGH (Pmode, x)));
1159 return gen_rtx_LO_SUM (Pmode, scratch, x);
1164 return NULL;
1166 split_addend:
1168 HOST_WIDE_INT low, high;
1170 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1171 addend -= low;
1172 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1173 addend -= high;
1175 if (addend)
1176 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1177 (no_new_pseudos ? scratch : NULL_RTX),
1178 1, OPTAB_LIB_WIDEN);
1179 if (high)
1180 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1181 (no_new_pseudos ? scratch : NULL_RTX),
1182 1, OPTAB_LIB_WIDEN);
1184 return plus_constant (x, low);
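  /* As a worked example of the split above: for (plus (reg) (const_int
     0x18000)), low = -0x8000 and high = 0x20000, so we add 0x20000 to the
     register (typically a single ldah) and fold the remaining -0x8000 into
     the final displacement.  */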
1188 /* Primarily this is required for TLS symbols, but given that our move
1189 patterns *ought* to be able to handle any symbol at any time, we
1190 should never be spilling symbolic operands to the constant pool, ever. */
1192 static bool
1193 alpha_cannot_force_const_mem (rtx x)
1195 enum rtx_code code = GET_CODE (x);
1196 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1199 /* We do not allow indirect calls to be optimized into sibling calls, nor
1200 can we allow a call to a function with a different GP to be optimized
1201 into a sibcall. */
1203 static bool
1204 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1206 /* Can't do indirect tail calls, since we don't know if the target
1207 uses the same GP. */
1208 if (!decl)
1209 return false;
1211 /* Otherwise, we can make a tail call if the target function shares
1212 the same GP. */
1213 return decl_has_samegp (decl);
1217 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1219 rtx x = *px;
1221 /* Don't re-split. */
1222 if (GET_CODE (x) == LO_SUM)
1223 return -1;
1225 return small_symbolic_operand (x, Pmode) != 0;
1228 static int
1229 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1231 rtx x = *px;
1233 /* Don't re-split. */
1234 if (GET_CODE (x) == LO_SUM)
1235 return -1;
1237 if (small_symbolic_operand (x, Pmode))
1239 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1240 *px = x;
1241 return -1;
1244 return 0;
1248 split_small_symbolic_operand (rtx x)
1250 x = copy_insn (x);
1251 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1252 return x;
1255 /* Indicate that INSN cannot be duplicated. This is true for any insn
1256 that we've marked with gpdisp relocs, since those have to stay in
1257 1-1 correspondence with one another.
1259 Technically we could copy them if we could set up a mapping from one
1260 sequence number to another, across the set of insns to be duplicated.
1261 This seems overly complicated and error-prone since interblock motion
1262 from sched-ebb could move one of the pair of insns to a different block.
1264 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1265 then they'll be in a different block from their ldgp. Which could lead
1266 the bb reorder code to think that it would be ok to copy just the block
1267 containing the call and branch to the block containing the ldgp. */
1269 static bool
1270 alpha_cannot_copy_insn_p (rtx insn)
1272 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1273 return false;
1274 if (recog_memoized (insn) >= 0)
1275 return get_attr_cannot_copy (insn);
1276 else
1277 return false;
1281 /* Try a machine-dependent way of reloading an illegitimate address
1282 operand. If we find one, push the reload and return the new rtx. */
1285 alpha_legitimize_reload_address (rtx x,
1286 enum machine_mode mode ATTRIBUTE_UNUSED,
1287 int opnum, int type,
1288 int ind_levels ATTRIBUTE_UNUSED)
1290 /* We must recognize output that we have already generated ourselves. */
1291 if (GET_CODE (x) == PLUS
1292 && GET_CODE (XEXP (x, 0)) == PLUS
1293 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1294 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1295 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1297 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1298 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1299 opnum, type);
1300 return x;
1303 /* We wish to handle large displacements off a base register by
1304 splitting the addend across an ldah and the mem insn. This
1305 cuts the number of extra insns needed from 3 to 1.  */
1306 if (GET_CODE (x) == PLUS
1307 && GET_CODE (XEXP (x, 0)) == REG
1308 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1309 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1310 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1312 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1313 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1314 HOST_WIDE_INT high
1315 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1317 /* Check for 32-bit overflow. */
1318 if (high + low != val)
1319 return NULL_RTX;
1321 /* Reload the high part into a base reg; leave the low part
1322 in the mem directly. */
1323 x = gen_rtx_PLUS (GET_MODE (x),
1324 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1325 GEN_INT (high)),
1326 GEN_INT (low));
1328 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1329 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1330 opnum, type);
1331 return x;
1334 return NULL_RTX;
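/* For example, (plus (reg) (const_int 0x9000)) becomes
   (plus (plus (reg) (const_int 0x10000)) (const_int -0x7000)): the 0x10000
   part is reloaded into a base register via ldah and -0x7000 stays as the
   memory displacement.  */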
1337 /* Compute a (partial) cost for rtx X. Return true if the complete
1338 cost has been computed, and false if subexpressions should be
1339 scanned. In either case, *TOTAL contains the cost result. */
1341 static bool
1342 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1344 enum machine_mode mode = GET_MODE (x);
1345 bool float_mode_p = FLOAT_MODE_P (mode);
1346 const struct alpha_rtx_cost_data *cost_data;
1348 if (optimize_size)
1349 cost_data = &alpha_rtx_cost_size;
1350 else
1351 cost_data = &alpha_rtx_cost_data[alpha_tune];
1353 switch (code)
1355 case CONST_INT:
1356 /* If this is an 8-bit constant, return zero since it can be used
1357 nearly anywhere with no cost. If it is a valid operand for an
1358 ADD or AND, likewise return 0 if we know it will be used in that
1359 context. Otherwise, return 2 since it might be used there later.
1360 All other constants take at least two insns. */
1361 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1363 *total = 0;
1364 return true;
1366 /* FALLTHRU */
1368 case CONST_DOUBLE:
1369 if (x == CONST0_RTX (mode))
1370 *total = 0;
1371 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1372 || (outer_code == AND && and_operand (x, VOIDmode)))
1373 *total = 0;
1374 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1375 *total = 2;
1376 else
1377 *total = COSTS_N_INSNS (2);
1378 return true;
1380 case CONST:
1381 case SYMBOL_REF:
1382 case LABEL_REF:
1383 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1384 *total = COSTS_N_INSNS (outer_code != MEM);
1385 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1386 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1387 else if (tls_symbolic_operand_type (x))
1388 /* Estimate of cost for call_pal rduniq. */
1389 /* ??? How many insns do we emit here? More than one... */
1390 *total = COSTS_N_INSNS (15);
1391 else
1392 /* Otherwise we do a load from the GOT. */
1393 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1394 return true;
1396 case HIGH:
1397 /* This is effectively an add_operand. */
1398 *total = 2;
1399 return true;
1401 case PLUS:
1402 case MINUS:
1403 if (float_mode_p)
1404 *total = cost_data->fp_add;
1405 else if (GET_CODE (XEXP (x, 0)) == MULT
1406 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1408 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1409 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1410 return true;
1412 return false;
1414 case MULT:
1415 if (float_mode_p)
1416 *total = cost_data->fp_mult;
1417 else if (mode == DImode)
1418 *total = cost_data->int_mult_di;
1419 else
1420 *total = cost_data->int_mult_si;
1421 return false;
1423 case ASHIFT:
1424 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1425 && INTVAL (XEXP (x, 1)) <= 3)
1427 *total = COSTS_N_INSNS (1);
1428 return false;
1430 /* FALLTHRU */
1432 case ASHIFTRT:
1433 case LSHIFTRT:
1434 *total = cost_data->int_shift;
1435 return false;
1437 case IF_THEN_ELSE:
1438 if (float_mode_p)
1439 *total = cost_data->fp_add;
1440 else
1441 *total = cost_data->int_cmov;
1442 return false;
1444 case DIV:
1445 case UDIV:
1446 case MOD:
1447 case UMOD:
1448 if (!float_mode_p)
1449 *total = cost_data->int_div;
1450 else if (mode == SFmode)
1451 *total = cost_data->fp_div_sf;
1452 else
1453 *total = cost_data->fp_div_df;
1454 return false;
1456 case MEM:
1457 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1458 return true;
1460 case NEG:
1461 if (! float_mode_p)
1463 *total = COSTS_N_INSNS (1);
1464 return false;
1466 /* FALLTHRU */
1468 case ABS:
1469 if (! float_mode_p)
1471 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1472 return false;
1474 /* FALLTHRU */
1476 case FLOAT:
1477 case UNSIGNED_FLOAT:
1478 case FIX:
1479 case UNSIGNED_FIX:
1480 case FLOAT_TRUNCATE:
1481 *total = cost_data->fp_add;
1482 return false;
1484 case FLOAT_EXTEND:
1485 if (GET_CODE (XEXP (x, 0)) == MEM)
1486 *total = 0;
1487 else
1488 *total = cost_data->fp_add;
1489 return false;
1491 default:
1492 return false;
1496 /* REF is an alignable memory location. Place an aligned SImode
1497 reference into *PALIGNED_MEM and the number of bits to shift into
1498 *PBITNUM. SCRATCH is a free register for use in reloading out
1499 of range stack slots. */
1501 void
1502 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1504 rtx base;
1505 HOST_WIDE_INT disp, offset;
1507 gcc_assert (GET_CODE (ref) == MEM);
1509 if (reload_in_progress
1510 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1512 base = find_replacement (&XEXP (ref, 0));
1513 gcc_assert (memory_address_p (GET_MODE (ref), base));
1515 else
1516 base = XEXP (ref, 0);
1518 if (GET_CODE (base) == PLUS)
1519 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1520 else
1521 disp = 0;
1523 /* Find the byte offset within an aligned word. If the memory itself is
1524 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1525 will have examined the base register and determined it is aligned, and
1526 thus displacements from it are naturally alignable. */
1527 if (MEM_ALIGN (ref) >= 32)
1528 offset = 0;
1529 else
1530 offset = disp & 3;
1532 /* Access the entire aligned word. */
1533 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1535 /* Convert the byte offset within the word to a bit offset. */
1536 if (WORDS_BIG_ENDIAN)
1537 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1538 else
1539 offset *= 8;
1540 *pbitnum = GEN_INT (offset);
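/* For example, a QImode reference at (plus (reg) (const_int 6)) with no
   better alignment info gives offset 2: *PALIGNED_MEM is the SImode word
   at (plus (reg) (const_int 4)) and *PBITNUM is 16.  */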
1543 /* Similar, but just get the address. Handle the two reload cases.
1544 Add EXTRA_OFFSET to the address we return. */
1547 get_unaligned_address (rtx ref, int extra_offset)
1549 rtx base;
1550 HOST_WIDE_INT offset = 0;
1552 gcc_assert (GET_CODE (ref) == MEM);
1554 if (reload_in_progress
1555 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1557 base = find_replacement (&XEXP (ref, 0));
1559 gcc_assert (memory_address_p (GET_MODE (ref), base));
1561 else
1562 base = XEXP (ref, 0);
1564 if (GET_CODE (base) == PLUS)
1565 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1567 return plus_constant (base, offset + extra_offset);
1570 /* On the Alpha, all (non-symbolic) constants except zero go into
1571 a floating-point register via memory. Note that we cannot
1572 return anything that is not a subset of CLASS, and that some
1573 symbolic constants cannot be dropped to memory. */
1575 enum reg_class
1576 alpha_preferred_reload_class(rtx x, enum reg_class class)
1578 /* Zero is present in any register class. */
1579 if (x == CONST0_RTX (GET_MODE (x)))
1580 return class;
1582 /* These sorts of constants we can easily drop to memory. */
1583 if (GET_CODE (x) == CONST_INT
1584 || GET_CODE (x) == CONST_DOUBLE
1585 || GET_CODE (x) == CONST_VECTOR)
1587 if (class == FLOAT_REGS)
1588 return NO_REGS;
1589 if (class == ALL_REGS)
1590 return GENERAL_REGS;
1591 return class;
1594 /* All other kinds of constants should not (and in the case of HIGH
1595 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1596 secondary reload. */
1597 if (CONSTANT_P (x))
1598 return (class == ALL_REGS ? GENERAL_REGS : class);
1600 return class;
1603 /* Loading and storing HImode or QImode values to and from memory
1604 usually requires a scratch register. The exceptions are loading
1605 QImode and HImode from an aligned address to a general register
1606 unless byte instructions are permitted.
1608 We also cannot load an unaligned address or a paradoxical SUBREG
1609 into an FP register.
1611 We also cannot do integral arithmetic into FP regs, as might result
1612 from register elimination into a DImode fp register. */
1614 enum reg_class
1615 alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
1616 rtx x, int in)
1618 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1620 if (GET_CODE (x) == MEM
1621 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1622 || (GET_CODE (x) == SUBREG
1623 && (GET_CODE (SUBREG_REG (x)) == MEM
1624 || (GET_CODE (SUBREG_REG (x)) == REG
1625 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1627 if (!in || !aligned_memory_operand(x, mode))
1628 return GENERAL_REGS;
1632 if (class == FLOAT_REGS)
1634 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1635 return GENERAL_REGS;
1637 if (GET_CODE (x) == SUBREG
1638 && (GET_MODE_SIZE (GET_MODE (x))
1639 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1640 return GENERAL_REGS;
1642 if (in && INTEGRAL_MODE_P (mode)
1643 && ! (memory_operand (x, mode) || x == const0_rtx))
1644 return GENERAL_REGS;
1647 return NO_REGS;
1650 /* Subfunction of the following function. Update the flags of any MEM
1651 found in part of X. */
1653 static int
1654 alpha_set_memflags_1 (rtx *xp, void *data)
1656 rtx x = *xp, orig = (rtx) data;
1658 if (GET_CODE (x) != MEM)
1659 return 0;
1661 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1662 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1663 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1664 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1665 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1667 /* Sadly, we cannot use alias sets because the extra aliasing
1668 produced by the AND interferes. Given that two-byte quantities
1669 are the only thing we would be able to differentiate anyway,
1670 there does not seem to be any point in convoluting the early
1671 out of the alias check. */
1673 return -1;
1676 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1677 generated to perform a memory operation, look for any MEMs in either
1678 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1679 volatile flags from REF into each of the MEMs found. If REF is not
1680 a MEM, don't do anything. */
1682 void
1683 alpha_set_memflags (rtx insn, rtx ref)
1685 rtx *base_ptr;
1687 if (GET_CODE (ref) != MEM)
1688 return;
1690 /* This is only called from alpha.md, after having had something
1691 generated from one of the insn patterns. So if everything is
1692 zero, the pattern is already up-to-date. */
1693 if (!MEM_VOLATILE_P (ref)
1694 && !MEM_IN_STRUCT_P (ref)
1695 && !MEM_SCALAR_P (ref)
1696 && !MEM_NOTRAP_P (ref)
1697 && !MEM_READONLY_P (ref))
1698 return;
1700 if (INSN_P (insn))
1701 base_ptr = &PATTERN (insn);
1702 else
1703 base_ptr = &insn;
1704 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1707 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1708 int, bool);
1710 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1711 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1712 and return pc_rtx if successful. */
1714 static rtx
1715 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1716 HOST_WIDE_INT c, int n, bool no_output)
1718 HOST_WIDE_INT new;
1719 int i, bits;
1720 /* Use a pseudo if highly optimizing and still generating RTL. */
1721 rtx subtarget
1722 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1723 rtx temp, insn;
1725 /* If this is a sign-extended 32-bit constant, we can do this in at most
1726 three insns, so do it if we have enough insns left. We always have
1727 a sign-extended 32-bit constant when compiling on a narrow machine. */
1729 if (HOST_BITS_PER_WIDE_INT != 64
1730 || c >> 31 == -1 || c >> 31 == 0)
1732 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1733 HOST_WIDE_INT tmp1 = c - low;
1734 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1735 HOST_WIDE_INT extra = 0;
1737 /* If HIGH will be interpreted as negative but the constant is
1738 positive, we must adjust it to do two ldah insns.  */
1740 if ((high & 0x8000) != 0 && c >= 0)
1742 extra = 0x4000;
1743 tmp1 -= 0x40000000;
1744 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
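          /* For example, c = 0x7fff8000 gives low = -0x8000 and an initial
             high of -0x8000; after the adjustment extra = 0x4000 and
             high = 0x4000, so the constant is built with two ldah's of
             0x4000 followed by an lda of -0x8000.  */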
1747 if (c == low || (low == 0 && extra == 0))
1749 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1750 but that meant that we can't handle INT_MIN on 32-bit machines
1751 (like NT/Alpha), because we recurse indefinitely through
1752 emit_move_insn to gen_movdi. So instead, since we know exactly
1753 what we want, create it explicitly. */
1755 if (no_output)
1756 return pc_rtx;
1757 if (target == NULL)
1758 target = gen_reg_rtx (mode);
1759 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1760 return target;
1762 else if (n >= 2 + (extra != 0))
1764 if (no_output)
1765 return pc_rtx;
1766 if (no_new_pseudos)
1768 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1769 temp = target;
1771 else
1772 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1773 subtarget, mode);
1775 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1776 This means that if we go through expand_binop, we'll try to
1777 generate extensions, etc, which will require new pseudos, which
1778 will fail during some split phases. The SImode add patterns
1779 still exist, but are not named. So build the insns by hand. */
1781 if (extra != 0)
1783 if (! subtarget)
1784 subtarget = gen_reg_rtx (mode);
1785 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1786 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1787 emit_insn (insn);
1788 temp = subtarget;
1791 if (target == NULL)
1792 target = gen_reg_rtx (mode);
1793 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1794 insn = gen_rtx_SET (VOIDmode, target, insn);
1795 emit_insn (insn);
1796 return target;
1800 /* If we couldn't do it that way, try some other methods. But if we have
1801 no instructions left, don't bother. Likewise, if this is SImode and
1802 we can't make pseudos, we can't do anything since the expand_binop
1803 and expand_unop calls will widen and try to make pseudos. */
1805 if (n == 1 || (mode == SImode && no_new_pseudos))
1806 return 0;
1808 /* Next, see if we can load a related constant and then shift and possibly
1809 negate it to get the constant we want. Try this once each increasing
1810 numbers of insns. */
1812 for (i = 1; i < n; i++)
1814 /* First, see if, minus some low bits, we have an easy load of the
1815 high bits.  */
1817 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1818 if (new != 0)
1820 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1821 if (temp)
1823 if (no_output)
1824 return temp;
1825 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1826 target, 0, OPTAB_WIDEN);
1830 /* Next try complementing. */
1831 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1832 if (temp)
1834 if (no_output)
1835 return temp;
1836 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1839 /* Next try to form a constant and do a left shift. We can do this
1840 if some low-order bits are zero; the exact_log2 call below tells
1841 us that information. The bits we are shifting out could be any
1842 value, but here we'll just try the 0- and sign-extended forms of
1843 the constant. To try to increase the chance of having the same
1844 constant in more than one insn, start at the highest number of
1845 bits to shift, but try all possibilities in case a ZAPNOT will
1846 be useful. */
1848 bits = exact_log2 (c & -c);
1849 if (bits > 0)
1850 for (; bits > 0; bits--)
1852 new = c >> bits;
1853 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1854 if (!temp && c < 0)
1856 new = (unsigned HOST_WIDE_INT)c >> bits;
1857 temp = alpha_emit_set_const (subtarget, mode, new,
1858 i, no_output);
1860 if (temp)
1862 if (no_output)
1863 return temp;
1864 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1865 target, 0, OPTAB_WIDEN);
1869 /* Now try high-order zero bits. Here we try the shifted-in bits as
1870 all zero and all ones. Be careful to avoid shifting outside the
1871 mode and to avoid shifting outside the host wide int size. */
1872 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1873 confuse the recursive call and set all of the high 32 bits. */
1875 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1876 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1877 if (bits > 0)
1878 for (; bits > 0; bits--)
1880 new = c << bits;
1881 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1882 if (!temp)
1884 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1885 temp = alpha_emit_set_const (subtarget, mode, new,
1886 i, no_output);
1888 if (temp)
1890 if (no_output)
1891 return temp;
1892 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1893 target, 1, OPTAB_WIDEN);
1897 /* Now try high-order 1 bits. We get that with a sign-extension.
1898 But one bit isn't enough here. Be careful to avoid shifting outside
1899 the mode and to avoid shifting outside the host wide int size. */
1901 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1902 - floor_log2 (~ c) - 2);
1903 if (bits > 0)
1904 for (; bits > 0; bits--)
1906 new = c << bits;
1907 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1908 if (!temp)
1910 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1911 temp = alpha_emit_set_const (subtarget, mode, new,
1912 i, no_output);
1914 if (temp)
1916 if (no_output)
1917 return temp;
1918 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1919 target, 0, OPTAB_WIDEN);
1924 #if HOST_BITS_PER_WIDE_INT == 64
1925 /* Finally, see if we can load a value into the target that is the same as the
1926 constant except that all bytes that are 0 are changed to be 0xff. If we
1927 can, then we can do a ZAPNOT to obtain the desired constant. */
1929 new = c;
1930 for (i = 0; i < 64; i += 8)
1931 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1932 new |= (HOST_WIDE_INT) 0xff << i;
1934 /* We are only called for SImode and DImode. If this is SImode, ensure that
1935 we are sign extended to a full word. */
1937 if (mode == SImode)
1938 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1940 if (new != c)
1942 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1943 if (temp)
1945 if (no_output)
1946 return temp;
1947 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1948 target, 0, OPTAB_WIDEN);
1951 #endif
1953 return 0;
1956 /* Try to output insns to set TARGET equal to the constant C if it can be
1957 done in at most N insns.  Do all computations in MODE.  Returns the place
1958 where the output has been placed if it can be done and the insns have been
1959 emitted.  If it would take more than N insns, zero is returned and no
1960 insns are emitted.  */
1962 static rtx
1963 alpha_emit_set_const (rtx target, enum machine_mode mode,
1964 HOST_WIDE_INT c, int n, bool no_output)
1966 enum machine_mode orig_mode = mode;
1967 rtx orig_target = target;
1968 rtx result = 0;
1969 int i;
1971 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1972 can't load this constant in one insn, do this in DImode.  */
1973 if (no_new_pseudos && mode == SImode
1974 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1976 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1977 if (result)
1978 return result;
1980 target = no_output ? NULL : gen_lowpart (DImode, target);
1981 mode = DImode;
1983 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1985 target = no_output ? NULL : gen_lowpart (DImode, target);
1986 mode = DImode;
1989 /* Try 1 insn, then 2, then up to N. */
1990 for (i = 1; i <= n; i++)
1992 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1993 if (result)
1995 rtx insn, set;
1997 if (no_output)
1998 return result;
2000 insn = get_last_insn ();
2001 set = single_set (insn);
2002 if (! CONSTANT_P (SET_SRC (set)))
2003 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2004 break;
2008 /* Allow for the case where we changed the mode of TARGET. */
2009 if (result)
2011 if (result == target)
2012 result = orig_target;
2013 else if (mode != orig_mode)
2014 result = gen_lowpart (orig_mode, result);
2017 return result;
2020 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2021 fall back to a straightforward decomposition. We do this to avoid
2022 exponential run times encountered when looking for longer sequences
2023 with alpha_emit_set_const. */
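/* As an illustration (value invented for exposition, 64-bit host
   assumed): c1 = 0x123456789abcdef0 decomposes below into
   d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679 and d4 = 0x12340000,
   and the emitted sequence rebuilds it as
   ((d4 + d3) << 32) + d2 + d1.  */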
2025 static rtx
2026 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2028 HOST_WIDE_INT d1, d2, d3, d4;
2030 /* Decompose the entire word */
2031 #if HOST_BITS_PER_WIDE_INT >= 64
2032 gcc_assert (c2 == -(c1 < 0));
2033 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2034 c1 -= d1;
2035 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2036 c1 = (c1 - d2) >> 32;
2037 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2038 c1 -= d3;
2039 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2040 gcc_assert (c1 == d4);
2041 #else
2042 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2043 c1 -= d1;
2044 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2045 gcc_assert (c1 == d2);
2046 c2 += (d2 < 0);
2047 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2048 c2 -= d3;
2049 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2050 gcc_assert (c2 == d4);
2051 #endif
2053 /* Construct the high word */
2054 if (d4)
2056 emit_move_insn (target, GEN_INT (d4));
2057 if (d3)
2058 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2060 else
2061 emit_move_insn (target, GEN_INT (d3));
2063 /* Shift it into place */
2064 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2066 /* Add in the low bits. */
2067 if (d2)
2068 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2069 if (d1)
2070 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2072 return target;
2075 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2076 the low 64 bits. */
2078 static void
2079 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2081 HOST_WIDE_INT i0, i1;
2083 if (GET_CODE (x) == CONST_VECTOR)
2084 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2087 if (GET_CODE (x) == CONST_INT)
2089 i0 = INTVAL (x);
2090 i1 = -(i0 < 0);
2092 else if (HOST_BITS_PER_WIDE_INT >= 64)
2094 i0 = CONST_DOUBLE_LOW (x);
2095 i1 = -(i0 < 0);
2097 else
2099 i0 = CONST_DOUBLE_LOW (x);
2100 i1 = CONST_DOUBLE_HIGH (x);
2103 *p0 = i0;
2104 *p1 = i1;
2107 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2108 are willing to load the value into a register via a move pattern.
2109 Normally this is all symbolic constants, integral constants that
2110 take three or fewer instructions, and floating-point zero. */
2112 bool
2113 alpha_legitimate_constant_p (rtx x)
2115 enum machine_mode mode = GET_MODE (x);
2116 HOST_WIDE_INT i0, i1;
2118 switch (GET_CODE (x))
2120 case CONST:
2121 case LABEL_REF:
2122 case SYMBOL_REF:
2123 case HIGH:
2124 return true;
2126 case CONST_DOUBLE:
2127 if (x == CONST0_RTX (mode))
2128 return true;
2129 if (FLOAT_MODE_P (mode))
2130 return false;
2131 goto do_integer;
2133 case CONST_VECTOR:
2134 if (x == CONST0_RTX (mode))
2135 return true;
2136 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2137 return false;
2138 if (GET_MODE_SIZE (mode) != 8)
2139 return false;
2140 goto do_integer;
2142 case CONST_INT:
2143 do_integer:
2144 if (TARGET_BUILD_CONSTANTS)
2145 return true;
2146 alpha_extract_integer (x, &i0, &i1);
2147 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2148 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2149 return false;
2151 default:
2152 return false;
2156 /* Operand 1 is known to be a constant, and should require more than one
2157 instruction to load. Emit that multi-part load. */
2159 bool
2160 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2162 HOST_WIDE_INT i0, i1;
2163 rtx temp = NULL_RTX;
2165 alpha_extract_integer (operands[1], &i0, &i1);
2167 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2168 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2170 if (!temp && TARGET_BUILD_CONSTANTS)
2171 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2173 if (temp)
2175 if (!rtx_equal_p (operands[0], temp))
2176 emit_move_insn (operands[0], temp);
2177 return true;
2180 return false;
2183 /* Expand a move instruction; return true if all work is done.
2184 We don't handle non-bwx subword loads here. */
2186 bool
2187 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2189 /* If the output is not a register, the input must be. */
2190 if (GET_CODE (operands[0]) == MEM
2191 && ! reg_or_0_operand (operands[1], mode))
2192 operands[1] = force_reg (mode, operands[1]);
2194 /* Allow legitimize_address to perform some simplifications. */
2195 if (mode == Pmode && symbolic_operand (operands[1], mode))
2197 rtx tmp;
2199 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2200 if (tmp)
2202 if (tmp == operands[0])
2203 return true;
2204 operands[1] = tmp;
2205 return false;
2209 /* Early out for non-constants and valid constants. */
2210 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2211 return false;
2213 /* Split large integers. */
2214 if (GET_CODE (operands[1]) == CONST_INT
2215 || GET_CODE (operands[1]) == CONST_DOUBLE
2216 || GET_CODE (operands[1]) == CONST_VECTOR)
2218 if (alpha_split_const_mov (mode, operands))
2219 return true;
2222 /* Otherwise we've nothing left but to drop the thing to memory. */
2223 operands[1] = force_const_mem (mode, operands[1]);
2224 if (reload_in_progress)
2226 emit_move_insn (operands[0], XEXP (operands[1], 0));
2227 operands[1] = copy_rtx (operands[1]);
2228 XEXP (operands[1], 0) = operands[0];
2230 else
2231 operands[1] = validize_mem (operands[1]);
2232 return false;
2235 /* Expand a non-bwx QImode or HImode move instruction;
2236 return true if all work is done. */
2238 bool
2239 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2241 /* If the output is not a register, the input must be. */
2242 if (GET_CODE (operands[0]) == MEM)
2243 operands[1] = force_reg (mode, operands[1]);
2245 /* Handle four memory cases, unaligned and aligned for either the input
2246 or the output. The only case where we can be called during reload is
2247 for aligned loads; all other cases require temporaries. */
2249 if (GET_CODE (operands[1]) == MEM
2250 || (GET_CODE (operands[1]) == SUBREG
2251 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2252 || (reload_in_progress && GET_CODE (operands[1]) == REG
2253 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2254 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2255 && GET_CODE (SUBREG_REG (operands[1])) == REG
2256 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2258 if (aligned_memory_operand (operands[1], mode))
2260 if (reload_in_progress)
2262 emit_insn ((mode == QImode
2263 ? gen_reload_inqi_help
2264 : gen_reload_inhi_help)
2265 (operands[0], operands[1],
2266 gen_rtx_REG (SImode, REGNO (operands[0]))));
2268 else
2270 rtx aligned_mem, bitnum;
2271 rtx scratch = gen_reg_rtx (SImode);
2272 rtx subtarget;
2273 bool copyout;
2275 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2277 subtarget = operands[0];
2278 if (GET_CODE (subtarget) == REG)
2279 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2280 else
2281 subtarget = gen_reg_rtx (DImode), copyout = true;
2283 emit_insn ((mode == QImode
2284 ? gen_aligned_loadqi
2285 : gen_aligned_loadhi)
2286 (subtarget, aligned_mem, bitnum, scratch));
2288 if (copyout)
2289 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2292 else
2294 /* Don't pass these as parameters since that makes the generated
2295 code depend on parameter evaluation order which will cause
2296 bootstrap failures. */
2298 rtx temp1, temp2, seq, subtarget;
2299 bool copyout;
2301 temp1 = gen_reg_rtx (DImode);
2302 temp2 = gen_reg_rtx (DImode);
2304 subtarget = operands[0];
2305 if (GET_CODE (subtarget) == REG)
2306 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2307 else
2308 subtarget = gen_reg_rtx (DImode), copyout = true;
2310 seq = ((mode == QImode
2311 ? gen_unaligned_loadqi
2312 : gen_unaligned_loadhi)
2313 (subtarget, get_unaligned_address (operands[1], 0),
2314 temp1, temp2));
2315 alpha_set_memflags (seq, operands[1]);
2316 emit_insn (seq);
2318 if (copyout)
2319 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2321 return true;
2324 if (GET_CODE (operands[0]) == MEM
2325 || (GET_CODE (operands[0]) == SUBREG
2326 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2327 || (reload_in_progress && GET_CODE (operands[0]) == REG
2328 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2329 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2330 && GET_CODE (SUBREG_REG (operands[0])) == REG
2331 && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
2333 if (aligned_memory_operand (operands[0], mode))
2335 rtx aligned_mem, bitnum;
2336 rtx temp1 = gen_reg_rtx (SImode);
2337 rtx temp2 = gen_reg_rtx (SImode);
2339 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2341 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2342 temp1, temp2));
2344 else
2346 rtx temp1 = gen_reg_rtx (DImode);
2347 rtx temp2 = gen_reg_rtx (DImode);
2348 rtx temp3 = gen_reg_rtx (DImode);
2349 rtx seq = ((mode == QImode
2350 ? gen_unaligned_storeqi
2351 : gen_unaligned_storehi)
2352 (get_unaligned_address (operands[0], 0),
2353 operands[1], temp1, temp2, temp3));
2355 alpha_set_memflags (seq, operands[0]);
2356 emit_insn (seq);
2358 return true;
2361 return false;
2364 /* Implement the movmisalign patterns. One of the operands is a memory
2365 that is not naturally aligned. Emit instructions to load it. */
2367 void
2368 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2370 /* Honor misaligned loads where we promised to do so. */
2371 if (MEM_P (operands[1]))
2373 rtx tmp;
2375 if (register_operand (operands[0], mode))
2376 tmp = operands[0];
2377 else
2378 tmp = gen_reg_rtx (mode);
2380 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2381 if (tmp != operands[0])
2382 emit_move_insn (operands[0], tmp);
2384 else if (MEM_P (operands[0]))
2386 if (!reg_or_0_operand (operands[1], mode))
2387 operands[1] = force_reg (mode, operands[1]);
2388 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2390 else
2391 gcc_unreachable ();
2394 /* Generate an unsigned DImode to FP conversion. This is the same code
2395 optabs would emit if we didn't have TFmode patterns.
2397 For SFmode, this is the only construction I've found that can pass
2398 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2399 intermediates will work, because you'll get intermediate rounding
2400 that ruins the end result. Some of this could be fixed by turning
2401 on round-to-positive-infinity, but that requires diddling the fpsr,
2402 which kills performance. I tried turning this around and converting
2403 to a negative number, so that I could turn on /m, but either I did
2404 it wrong or there's something else, because I wound up with the exact
2405 same single-bit error. There is a branch-less form of this same code:
2407 srl $16,1,$1
2408 and $16,1,$2
2409 cmplt $16,0,$3
2410 or $1,$2,$2
2411 cmovge $16,$16,$2
2412 itoft $3,$f10
2413 itoft $2,$f11
2414 cvtqs $f11,$f11
2415 adds $f11,$f11,$f0
2416 fcmoveq $f10,$f11,$f0
2418 I'm not using it because it's the same number of instructions as
2419 this branch-full form, and it has more serialized long latency
2420 instructions on the critical path.
2422 For DFmode, we can avoid rounding errors by breaking up the word
2423 into two pieces, converting them separately, and adding them back:
2425 LC0: .long 0,0x5f800000
2427 itoft $16,$f11
2428 lda $2,LC0
2429 cmplt $16,0,$1
2430 cpyse $f11,$f31,$f10
2431 cpyse $f31,$f11,$f11
2432 s4addq $1,$2,$1
2433 lds $f12,0($1)
2434 cvtqt $f10,$f10
2435 cvtqt $f11,$f11
2436 addt $f12,$f10,$f0
2437 addt $f0,$f11,$f0
2439 This doesn't seem to be a clear-cut win over the optabs form.
2440 It probably all depends on the distribution of numbers being
2441 converted -- in the optabs form, all but high-bit-set has a
2442 much lower minimum execution time. */
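/* As a rough C-level sketch of the branching form emitted below
   (exposition only, e.g. for DFmode):

       double floatuns (unsigned long x)
       {
         if ((long) x >= 0)
           return (double) x;
         return 2.0 * (double) (long) ((x >> 1) | (x & 1));
       }

   i.e. halve with a sticky low bit, convert signed, and double the
   result, which avoids the double rounding a plain shift would
   introduce.  */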
2444 void
2445 alpha_emit_floatuns (rtx operands[2])
2447 rtx neglab, donelab, i0, i1, f0, in, out;
2448 enum machine_mode mode;
2450 out = operands[0];
2451 in = force_reg (DImode, operands[1]);
2452 mode = GET_MODE (out);
2453 neglab = gen_label_rtx ();
2454 donelab = gen_label_rtx ();
2455 i0 = gen_reg_rtx (DImode);
2456 i1 = gen_reg_rtx (DImode);
2457 f0 = gen_reg_rtx (mode);
2459 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2461 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2462 emit_jump_insn (gen_jump (donelab));
2463 emit_barrier ();
2465 emit_label (neglab);
2467 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2468 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2469 emit_insn (gen_iordi3 (i0, i0, i1));
2470 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2471 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2473 emit_label (donelab);
2476 /* Generate the comparison for a conditional branch. */
2479 alpha_emit_conditional_branch (enum rtx_code code)
2481 enum rtx_code cmp_code, branch_code;
2482 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2483 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2484 rtx tem;
2486 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2488 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2489 op1 = const0_rtx;
2490 alpha_compare.fp_p = 0;
2493 /* The general case: fold the comparison code to the types of compares
2494 that we have, choosing the branch as necessary. */
2495 switch (code)
2497 case EQ: case LE: case LT: case LEU: case LTU:
2498 case UNORDERED:
2499 /* We have these compares: */
2500 cmp_code = code, branch_code = NE;
2501 break;
2503 case NE:
2504 case ORDERED:
2505 /* These must be reversed. */
2506 cmp_code = reverse_condition (code), branch_code = EQ;
2507 break;
2509 case GE: case GT: case GEU: case GTU:
2510 /* For FP, we swap them, for INT, we reverse them. */
2511 if (alpha_compare.fp_p)
2513 cmp_code = swap_condition (code);
2514 branch_code = NE;
2515 tem = op0, op0 = op1, op1 = tem;
2517 else
2519 cmp_code = reverse_condition (code);
2520 branch_code = EQ;
2522 break;
2524 default:
2525 gcc_unreachable ();
2528 if (alpha_compare.fp_p)
2530 cmp_mode = DFmode;
2531 if (flag_unsafe_math_optimizations)
2533 /* When we are not as concerned about non-finite values, and we
2534 are comparing against zero, we can branch directly. */
2535 if (op1 == CONST0_RTX (DFmode))
2536 cmp_code = UNKNOWN, branch_code = code;
2537 else if (op0 == CONST0_RTX (DFmode))
2539 /* Undo the swap we probably did just above. */
2540 tem = op0, op0 = op1, op1 = tem;
2541 branch_code = swap_condition (cmp_code);
2542 cmp_code = UNKNOWN;
2545 else
2547 /* ??? We mark the branch mode to be CCmode to prevent the
2548 compare and branch from being combined, since the compare
2549 insn follows IEEE rules that the branch does not. */
2550 branch_mode = CCmode;
2553 else
2555 cmp_mode = DImode;
2557 /* The following optimizations are only for signed compares. */
2558 if (code != LEU && code != LTU && code != GEU && code != GTU)
2560 /* Whee. Compare and branch against 0 directly. */
2561 if (op1 == const0_rtx)
2562 cmp_code = UNKNOWN, branch_code = code;
2564 /* If the constant doesn't fit into an immediate, but can
2565 be generated by lda/ldah, we adjust the argument and
2566 compare against zero, so we can use beq/bne directly. */
2567 /* ??? Don't do this when comparing against symbols, otherwise
2568 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2569 be declared false out of hand (at least for non-weak). */
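/* Illustrative case (value invented for exposition): x == 0x1234
   cannot use an 8-bit compare immediate, but -0x1234 fits in a
   16-bit lda offset, so we emit t = x + (-0x1234) and branch on
   beq t instead.  */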
2570 else if (GET_CODE (op1) == CONST_INT
2571 && (code == EQ || code == NE)
2572 && !(symbolic_operand (op0, VOIDmode)
2573 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2575 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2577 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2578 && (CONST_OK_FOR_LETTER_P (n, 'K')
2579 || CONST_OK_FOR_LETTER_P (n, 'L')))
2581 cmp_code = PLUS, branch_code = code;
2582 op1 = GEN_INT (n);
2587 if (!reg_or_0_operand (op0, DImode))
2588 op0 = force_reg (DImode, op0);
2589 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2590 op1 = force_reg (DImode, op1);
2593 /* Emit an initial compare instruction, if necessary. */
2594 tem = op0;
2595 if (cmp_code != UNKNOWN)
2597 tem = gen_reg_rtx (cmp_mode);
2598 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2601 /* Zero the operands. */
2602 memset (&alpha_compare, 0, sizeof (alpha_compare));
2604 /* Return the branch comparison. */
2605 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2608 /* Certain simplifications can be done to make invalid setcc operations
2609 valid. Return the final comparison, or NULL if we can't work. */
2612 alpha_emit_setcc (enum rtx_code code)
2614 enum rtx_code cmp_code;
2615 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2616 int fp_p = alpha_compare.fp_p;
2617 rtx tmp;
2619 /* Zero the operands. */
2620 memset (&alpha_compare, 0, sizeof (alpha_compare));
2622 if (fp_p && GET_MODE (op0) == TFmode)
2624 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2625 op1 = const0_rtx;
2626 fp_p = 0;
2629 if (fp_p && !TARGET_FIX)
2630 return NULL_RTX;
2632 /* The general case: fold the comparison code to the types of compares
2633 that we have, choosing the branch as necessary. */
2635 cmp_code = UNKNOWN;
2636 switch (code)
2638 case EQ: case LE: case LT: case LEU: case LTU:
2639 case UNORDERED:
2640 /* We have these compares. */
2641 if (fp_p)
2642 cmp_code = code, code = NE;
2643 break;
2645 case NE:
2646 if (!fp_p && op1 == const0_rtx)
2647 break;
2648 /* FALLTHRU */
2650 case ORDERED:
2651 cmp_code = reverse_condition (code);
2652 code = EQ;
2653 break;
2655 case GE: case GT: case GEU: case GTU:
2656 /* These normally need swapping, but for integer zero we have
2657 special patterns that recognize swapped operands. */
2658 if (!fp_p && op1 == const0_rtx)
2659 break;
2660 code = swap_condition (code);
2661 if (fp_p)
2662 cmp_code = code, code = NE;
2663 tmp = op0, op0 = op1, op1 = tmp;
2664 break;
2666 default:
2667 gcc_unreachable ();
2670 if (!fp_p)
2672 if (!register_operand (op0, DImode))
2673 op0 = force_reg (DImode, op0);
2674 if (!reg_or_8bit_operand (op1, DImode))
2675 op1 = force_reg (DImode, op1);
2678 /* Emit an initial compare instruction, if necessary. */
2679 if (cmp_code != UNKNOWN)
2681 enum machine_mode mode = fp_p ? DFmode : DImode;
2683 tmp = gen_reg_rtx (mode);
2684 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2685 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2687 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2688 op1 = const0_rtx;
2691 /* Return the setcc comparison. */
2692 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2696 /* Rewrite a comparison against zero CMP of the form
2697 (CODE (cc0) (const_int 0)) so it can be written validly in
2698 a conditional move (if_then_else CMP ...).
2699 If both of the operands that set cc0 are nonzero we must emit
2700 an insn to perform the compare (it can't be done within
2701 the conditional move). */
2704 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2706 enum rtx_code code = GET_CODE (cmp);
2707 enum rtx_code cmov_code = NE;
2708 rtx op0 = alpha_compare.op0;
2709 rtx op1 = alpha_compare.op1;
2710 int fp_p = alpha_compare.fp_p;
2711 enum machine_mode cmp_mode
2712 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2713 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2714 enum machine_mode cmov_mode = VOIDmode;
2715 int local_fast_math = flag_unsafe_math_optimizations;
2716 rtx tem;
2718 /* Zero the operands. */
2719 memset (&alpha_compare, 0, sizeof (alpha_compare));
2721 if (fp_p != FLOAT_MODE_P (mode))
2723 enum rtx_code cmp_code;
2725 if (! TARGET_FIX)
2726 return 0;
2728 /* If we have fp<->int register move instructions, do a cmov by
2729 performing the comparison in fp registers, and move the
2730 zero/nonzero value to integer registers, where we can then
2731 use a normal cmov, or vice-versa. */
2733 switch (code)
2735 case EQ: case LE: case LT: case LEU: case LTU:
2736 /* We have these compares. */
2737 cmp_code = code, code = NE;
2738 break;
2740 case NE:
2741 /* This must be reversed. */
2742 cmp_code = EQ, code = EQ;
2743 break;
2745 case GE: case GT: case GEU: case GTU:
2746 /* These normally need swapping, but for integer zero we have
2747 special patterns that recognize swapped operands. */
2748 if (!fp_p && op1 == const0_rtx)
2749 cmp_code = code, code = NE;
2750 else
2752 cmp_code = swap_condition (code);
2753 code = NE;
2754 tem = op0, op0 = op1, op1 = tem;
2756 break;
2758 default:
2759 gcc_unreachable ();
2762 tem = gen_reg_rtx (cmp_op_mode);
2763 emit_insn (gen_rtx_SET (VOIDmode, tem,
2764 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2765 op0, op1)));
2767 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2768 op0 = gen_lowpart (cmp_op_mode, tem);
2769 op1 = CONST0_RTX (cmp_op_mode);
2770 fp_p = !fp_p;
2771 local_fast_math = 1;
2774 /* We may be able to use a conditional move directly.
2775 This avoids emitting spurious compares. */
2776 if (signed_comparison_operator (cmp, VOIDmode)
2777 && (!fp_p || local_fast_math)
2778 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2779 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2781 /* We can't put the comparison inside the conditional move;
2782 emit a compare instruction and put that inside the
2783 conditional move. Make sure we emit only comparisons we have;
2784 swap or reverse as necessary. */
2786 if (no_new_pseudos)
2787 return NULL_RTX;
2789 switch (code)
2791 case EQ: case LE: case LT: case LEU: case LTU:
2792 /* We have these compares: */
2793 break;
2795 case NE:
2796 /* This must be reversed. */
2797 code = reverse_condition (code);
2798 cmov_code = EQ;
2799 break;
2801 case GE: case GT: case GEU: case GTU:
2802 /* These must be swapped. */
2803 if (op1 != CONST0_RTX (cmp_mode))
2805 code = swap_condition (code);
2806 tem = op0, op0 = op1, op1 = tem;
2808 break;
2810 default:
2811 gcc_unreachable ();
2814 if (!fp_p)
2816 if (!reg_or_0_operand (op0, DImode))
2817 op0 = force_reg (DImode, op0);
2818 if (!reg_or_8bit_operand (op1, DImode))
2819 op1 = force_reg (DImode, op1);
2822 /* ??? We mark the branch mode to be CCmode to prevent the compare
2823 and cmov from being combined, since the compare insn follows IEEE
2824 rules that the cmov does not. */
2825 if (fp_p && !local_fast_math)
2826 cmov_mode = CCmode;
2828 tem = gen_reg_rtx (cmp_op_mode);
2829 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2830 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2833 /* Simplify a conditional move of two constants into a setcc with
2834 arithmetic. This is done with a splitter since combine would
2835 just undo the work if done during code generation. It also catches
2836 cases we wouldn't have before cse. */
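/* Illustrative cases (values invented for exposition), with COND
   being the result of the setcc: "x ? 8 : 0" becomes COND << 3,
   "x ? -1 : 0" becomes -COND, and "x ? 5 : 1" becomes
   COND * 4 + 1, which the sext-add patterns can handle.  */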
2839 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2840 rtx t_rtx, rtx f_rtx)
2842 HOST_WIDE_INT t, f, diff;
2843 enum machine_mode mode;
2844 rtx target, subtarget, tmp;
2846 mode = GET_MODE (dest);
2847 t = INTVAL (t_rtx);
2848 f = INTVAL (f_rtx);
2849 diff = t - f;
2851 if (((code == NE || code == EQ) && diff < 0)
2852 || (code == GE || code == GT))
2854 code = reverse_condition (code);
2855 diff = t, t = f, f = diff;
2856 diff = t - f;
2859 subtarget = target = dest;
2860 if (mode != DImode)
2862 target = gen_lowpart (DImode, dest);
2863 if (! no_new_pseudos)
2864 subtarget = gen_reg_rtx (DImode);
2865 else
2866 subtarget = target;
2868 /* Below, we must be careful to use copy_rtx on target and subtarget
2869 in intermediate insns, as they may be a subreg rtx, which may not
2870 be shared. */
2872 if (f == 0 && exact_log2 (diff) > 0
2873 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2874 viable over a longer latency cmove. On EV5, the E0 slot is a
2875 scarce resource, and on EV4 shift has the same latency as a cmove. */
2876 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2878 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2879 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2881 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2882 GEN_INT (exact_log2 (t)));
2883 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2885 else if (f == 0 && t == -1)
2887 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2888 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2890 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2892 else if (diff == 1 || diff == 4 || diff == 8)
2894 rtx add_op;
2896 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2897 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2899 if (diff == 1)
2900 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2901 else
2903 add_op = GEN_INT (f);
2904 if (sext_add_operand (add_op, mode))
2906 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2907 GEN_INT (diff));
2908 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2909 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2911 else
2912 return 0;
2915 else
2916 return 0;
2918 return 1;
2921 /* Look up the function X_floating library function name for the
2922 given operation. */
2924 struct xfloating_op GTY(())
2926 const enum rtx_code code;
2927 const char *const GTY((skip)) osf_func;
2928 const char *const GTY((skip)) vms_func;
2929 rtx libcall;
2932 static GTY(()) struct xfloating_op xfloating_ops[] =
2934 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2935 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2936 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2937 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2938 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2939 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2940 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2941 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2942 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2943 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2944 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2945 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2946 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2947 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2948 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2951 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2953 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2954 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2957 static rtx
2958 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2960 struct xfloating_op *ops = xfloating_ops;
2961 long n = ARRAY_SIZE (xfloating_ops);
2962 long i;
2964 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2966 /* How irritating. Nothing to key off of for the main table. */
2967 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2969 ops = vax_cvt_ops;
2970 n = ARRAY_SIZE (vax_cvt_ops);
2973 for (i = 0; i < n; ++i, ++ops)
2974 if (ops->code == code)
2976 rtx func = ops->libcall;
2977 if (!func)
2979 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2980 ? ops->vms_func : ops->osf_func);
2981 ops->libcall = func;
2983 return func;
2986 gcc_unreachable ();
2989 /* Most X_floating operations take the rounding mode as an argument.
2990 Compute that here. */
2992 static int
2993 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2994 enum alpha_fp_rounding_mode round)
2996 int mode;
2998 switch (round)
3000 case ALPHA_FPRM_NORM:
3001 mode = 2;
3002 break;
3003 case ALPHA_FPRM_MINF:
3004 mode = 1;
3005 break;
3006 case ALPHA_FPRM_CHOP:
3007 mode = 0;
3008 break;
3009 case ALPHA_FPRM_DYN:
3010 mode = 4;
3011 break;
3012 default:
3013 gcc_unreachable ();
3015 /* XXX For reference, round to +inf is mode = 3. */
3018 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3019 mode |= 0x10000;
3021 return mode;
3024 /* Emit an X_floating library function call.
3026 Note that these functions do not follow normal calling conventions:
3027 TFmode arguments are passed in two integer registers (as opposed to
3028 indirect); TFmode return values appear in R16+R17.
3030 FUNC is the function to call.
3031 TARGET is where the output belongs.
3032 OPERANDS are the inputs.
3033 NOPERANDS is the count of inputs.
3034 EQUIV is the expression equivalent for the function.
3037 static void
3038 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3039 int noperands, rtx equiv)
3041 rtx usage = NULL_RTX, tmp, reg;
3042 int regno = 16, i;
3044 start_sequence ();
3046 for (i = 0; i < noperands; ++i)
3048 switch (GET_MODE (operands[i]))
3050 case TFmode:
3051 reg = gen_rtx_REG (TFmode, regno);
3052 regno += 2;
3053 break;
3055 case DFmode:
3056 reg = gen_rtx_REG (DFmode, regno + 32);
3057 regno += 1;
3058 break;
3060 case VOIDmode:
3061 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3062 /* FALLTHRU */
3063 case DImode:
3064 reg = gen_rtx_REG (DImode, regno);
3065 regno += 1;
3066 break;
3068 default:
3069 gcc_unreachable ();
3072 emit_move_insn (reg, operands[i]);
3073 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3076 switch (GET_MODE (target))
3078 case TFmode:
3079 reg = gen_rtx_REG (TFmode, 16);
3080 break;
3081 case DFmode:
3082 reg = gen_rtx_REG (DFmode, 32);
3083 break;
3084 case DImode:
3085 reg = gen_rtx_REG (DImode, 0);
3086 break;
3087 default:
3088 gcc_unreachable ();
3091 tmp = gen_rtx_MEM (QImode, func);
3092 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3093 const0_rtx, const0_rtx));
3094 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3095 CONST_OR_PURE_CALL_P (tmp) = 1;
3097 tmp = get_insns ();
3098 end_sequence ();
3100 emit_libcall_block (tmp, target, reg, equiv);
3103 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3105 void
3106 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3108 rtx func;
3109 int mode;
3110 rtx out_operands[3];
3112 func = alpha_lookup_xfloating_lib_func (code);
3113 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3115 out_operands[0] = operands[1];
3116 out_operands[1] = operands[2];
3117 out_operands[2] = GEN_INT (mode);
3118 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3119 gen_rtx_fmt_ee (code, TFmode, operands[1],
3120 operands[2]));
3123 /* Emit an X_floating library function call for a comparison. */
3125 static rtx
3126 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3128 enum rtx_code cmp_code, res_code;
3129 rtx func, out, operands[2];
3131 /* X_floating library comparison functions return
3132 -1 unordered
3133 0 false
3134 1 true
3135 Convert the compare against the raw return value. */
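/* For example, a <= b is routed to _OtsLeqX / OTS$LEQ_X and the
   result is then tested with GT: the call returns 1 only when
   a <= b holds, so "result > 0" is the desired answer, and an
   unordered pair returns -1, which correctly fails the test.  */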
3137 cmp_code = *pcode;
3138 switch (cmp_code)
3140 case UNORDERED:
3141 cmp_code = EQ;
3142 res_code = LT;
3143 break;
3144 case ORDERED:
3145 cmp_code = EQ;
3146 res_code = GE;
3147 break;
3148 case NE:
3149 res_code = NE;
3150 break;
3151 case EQ:
3152 case LT:
3153 case GT:
3154 case LE:
3155 case GE:
3156 res_code = GT;
3157 break;
3158 default:
3159 gcc_unreachable ();
3161 *pcode = res_code;
3163 func = alpha_lookup_xfloating_lib_func (cmp_code);
3165 operands[0] = op0;
3166 operands[1] = op1;
3167 out = gen_reg_rtx (DImode);
3169 /* ??? Strange mode for equiv because what's actually returned
3170 is -1,0,1, not a proper boolean value. */
3171 alpha_emit_xfloating_libcall (func, out, operands, 2,
3172 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3174 return out;
3177 /* Emit an X_floating library function call for a conversion. */
3179 void
3180 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3182 int noperands = 1, mode;
3183 rtx out_operands[2];
3184 rtx func;
3185 enum rtx_code code = orig_code;
3187 if (code == UNSIGNED_FIX)
3188 code = FIX;
3190 func = alpha_lookup_xfloating_lib_func (code);
3192 out_operands[0] = operands[1];
3194 switch (code)
3196 case FIX:
3197 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3198 out_operands[1] = GEN_INT (mode);
3199 noperands = 2;
3200 break;
3201 case FLOAT_TRUNCATE:
3202 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3203 out_operands[1] = GEN_INT (mode);
3204 noperands = 2;
3205 break;
3206 default:
3207 break;
3210 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3211 gen_rtx_fmt_e (orig_code,
3212 GET_MODE (operands[0]),
3213 operands[1]));
3216 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3217 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3218 guarantee that the sequence
3219 set (OP[0] OP[2])
3220 set (OP[1] OP[3])
3221 is valid. Naturally, output operand ordering is little-endian.
3222 This is used by *movtf_internal and *movti_internal. */
3224 void
3225 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3226 bool fixup_overlap)
3228 switch (GET_CODE (operands[1]))
3230 case REG:
3231 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3232 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3233 break;
3235 case MEM:
3236 operands[3] = adjust_address (operands[1], DImode, 8);
3237 operands[2] = adjust_address (operands[1], DImode, 0);
3238 break;
3240 case CONST_INT:
3241 case CONST_DOUBLE:
3242 gcc_assert (operands[1] == CONST0_RTX (mode));
3243 operands[2] = operands[3] = const0_rtx;
3244 break;
3246 default:
3247 gcc_unreachable ();
3250 switch (GET_CODE (operands[0]))
3252 case REG:
3253 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3254 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3255 break;
3257 case MEM:
3258 operands[1] = adjust_address (operands[0], DImode, 8);
3259 operands[0] = adjust_address (operands[0], DImode, 0);
3260 break;
3262 default:
3263 gcc_unreachable ();
3266 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3268 rtx tmp;
3269 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3270 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3274 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3275 op2 is a register containing the sign bit, operation is the
3276 logical operation to be performed. */
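/* The callers in alpha.md presumably pass an XOR for negtf2 and an
   and-with-complement for abstf2; either way only the high DImode
   half of the value, which holds the TFmode sign bit, is actually
   modified below, while the low half is copied unchanged.  */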
3278 void
3279 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3281 rtx high_bit = operands[2];
3282 rtx scratch;
3283 int move;
3285 alpha_split_tmode_pair (operands, TFmode, false);
3287 /* Detect three flavors of operand overlap. */
3288 move = 1;
3289 if (rtx_equal_p (operands[0], operands[2]))
3290 move = 0;
3291 else if (rtx_equal_p (operands[1], operands[2]))
3293 if (rtx_equal_p (operands[0], high_bit))
3294 move = 2;
3295 else
3296 move = -1;
3299 if (move < 0)
3300 emit_move_insn (operands[0], operands[2]);
3302 /* ??? If the destination overlaps both source tf and high_bit, then
3303 assume source tf is dead in its entirety and use the other half
3304 for a scratch register. Otherwise "scratch" is just the proper
3305 destination register. */
3306 scratch = operands[move < 2 ? 1 : 3];
3308 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3310 if (move > 0)
3312 emit_move_insn (operands[0], operands[2]);
3313 if (move > 1)
3314 emit_move_insn (operands[1], scratch);
3318 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3319 unaligned data:
3321 unsigned: signed:
3322 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3323 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3324 lda r3,X(r11) lda r3,X+2(r11)
3325 extwl r1,r3,r1 extql r1,r3,r1
3326 extwh r2,r3,r2 extqh r2,r3,r2
3327 or r1,r2,r1 or r1,r2,r1
3328 sra r1,48,r1
3330 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3331 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3332 lda r3,X(r11) lda r3,X(r11)
3333 extll r1,r3,r1 extll r1,r3,r1
3334 extlh r2,r3,r2 extlh r2,r3,r2
3335 or r1,r2,r1 addl r1,r2,r1
3337 quad: ldq_u r1,X(r11)
3338 ldq_u r2,X+7(r11)
3339 lda r3,X(r11)
3340 extql r1,r3,r1
3341 extqh r2,r3,r2
3342 or r1,r2,r1
3345 void
3346 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3347 HOST_WIDE_INT ofs, int sign)
3349 rtx meml, memh, addr, extl, exth, tmp, mema;
3350 enum machine_mode mode;
3352 if (TARGET_BWX && size == 2)
3354 meml = adjust_address (mem, QImode, ofs);
3355 memh = adjust_address (mem, QImode, ofs+1);
3356 if (BYTES_BIG_ENDIAN)
3357 tmp = meml, meml = memh, memh = tmp;
3358 extl = gen_reg_rtx (DImode);
3359 exth = gen_reg_rtx (DImode);
3360 emit_insn (gen_zero_extendqidi2 (extl, meml));
3361 emit_insn (gen_zero_extendqidi2 (exth, memh));
3362 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3363 NULL, 1, OPTAB_LIB_WIDEN);
3364 addr = expand_simple_binop (DImode, IOR, extl, exth,
3365 NULL, 1, OPTAB_LIB_WIDEN);
3367 if (sign && GET_MODE (tgt) != HImode)
3369 addr = gen_lowpart (HImode, addr);
3370 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3372 else
3374 if (GET_MODE (tgt) != DImode)
3375 addr = gen_lowpart (GET_MODE (tgt), addr);
3376 emit_move_insn (tgt, addr);
3378 return;
3381 meml = gen_reg_rtx (DImode);
3382 memh = gen_reg_rtx (DImode);
3383 addr = gen_reg_rtx (DImode);
3384 extl = gen_reg_rtx (DImode);
3385 exth = gen_reg_rtx (DImode);
3387 mema = XEXP (mem, 0);
3388 if (GET_CODE (mema) == LO_SUM)
3389 mema = force_reg (Pmode, mema);
3391 /* AND addresses cannot be in any alias set, since they may implicitly
3392 alias surrounding code. Ideally we'd have some alias set that
3393 covered all types except those with alignment 8 or higher. */
3395 tmp = change_address (mem, DImode,
3396 gen_rtx_AND (DImode,
3397 plus_constant (mema, ofs),
3398 GEN_INT (-8)));
3399 set_mem_alias_set (tmp, 0);
3400 emit_move_insn (meml, tmp);
3402 tmp = change_address (mem, DImode,
3403 gen_rtx_AND (DImode,
3404 plus_constant (mema, ofs + size - 1),
3405 GEN_INT (-8)));
3406 set_mem_alias_set (tmp, 0);
3407 emit_move_insn (memh, tmp);
3409 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3411 emit_move_insn (addr, plus_constant (mema, -1));
3413 emit_insn (gen_extqh_be (extl, meml, addr));
3414 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3416 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3417 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3418 addr, 1, OPTAB_WIDEN);
3420 else if (sign && size == 2)
3422 emit_move_insn (addr, plus_constant (mema, ofs+2));
3424 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3425 emit_insn (gen_extqh_le (exth, memh, addr));
3427 /* We must use tgt here for the target. Alpha-vms port fails if we use
3428 addr for the target, because addr is marked as a pointer and combine
3429 knows that pointers are always sign-extended 32 bit values. */
3430 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3431 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3432 addr, 1, OPTAB_WIDEN);
3434 else
3436 if (WORDS_BIG_ENDIAN)
3438 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3439 switch ((int) size)
3441 case 2:
3442 emit_insn (gen_extwh_be (extl, meml, addr));
3443 mode = HImode;
3444 break;
3446 case 4:
3447 emit_insn (gen_extlh_be (extl, meml, addr));
3448 mode = SImode;
3449 break;
3451 case 8:
3452 emit_insn (gen_extqh_be (extl, meml, addr));
3453 mode = DImode;
3454 break;
3456 default:
3457 gcc_unreachable ();
3459 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3461 else
3463 emit_move_insn (addr, plus_constant (mema, ofs));
3464 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3465 switch ((int) size)
3467 case 2:
3468 emit_insn (gen_extwh_le (exth, memh, addr));
3469 mode = HImode;
3470 break;
3472 case 4:
3473 emit_insn (gen_extlh_le (exth, memh, addr));
3474 mode = SImode;
3475 break;
3477 case 8:
3478 emit_insn (gen_extqh_le (exth, memh, addr));
3479 mode = DImode;
3480 break;
3482 default:
3483 gcc_unreachable ();
3487 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3488 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3489 sign, OPTAB_WIDEN);
3492 if (addr != tgt)
3493 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3496 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3498 void
3499 alpha_expand_unaligned_store (rtx dst, rtx src,
3500 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3502 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3504 if (TARGET_BWX && size == 2)
3506 if (src != const0_rtx)
3508 dstl = gen_lowpart (QImode, src);
3509 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3510 NULL, 1, OPTAB_LIB_WIDEN);
3511 dsth = gen_lowpart (QImode, dsth);
3513 else
3514 dstl = dsth = const0_rtx;
3516 meml = adjust_address (dst, QImode, ofs);
3517 memh = adjust_address (dst, QImode, ofs+1);
3518 if (BYTES_BIG_ENDIAN)
3519 addr = meml, meml = memh, memh = addr;
3521 emit_move_insn (meml, dstl);
3522 emit_move_insn (memh, dsth);
3523 return;
3526 dstl = gen_reg_rtx (DImode);
3527 dsth = gen_reg_rtx (DImode);
3528 insl = gen_reg_rtx (DImode);
3529 insh = gen_reg_rtx (DImode);
3531 dsta = XEXP (dst, 0);
3532 if (GET_CODE (dsta) == LO_SUM)
3533 dsta = force_reg (Pmode, dsta);
3535 /* AND addresses cannot be in any alias set, since they may implicitly
3536 alias surrounding code. Ideally we'd have some alias set that
3537 covered all types except those with alignment 8 or higher. */
3539 meml = change_address (dst, DImode,
3540 gen_rtx_AND (DImode,
3541 plus_constant (dsta, ofs),
3542 GEN_INT (-8)));
3543 set_mem_alias_set (meml, 0);
3545 memh = change_address (dst, DImode,
3546 gen_rtx_AND (DImode,
3547 plus_constant (dsta, ofs + size - 1),
3548 GEN_INT (-8)));
3549 set_mem_alias_set (memh, 0);
3551 emit_move_insn (dsth, memh);
3552 emit_move_insn (dstl, meml);
3553 if (WORDS_BIG_ENDIAN)
3555 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3557 if (src != const0_rtx)
3559 switch ((int) size)
3561 case 2:
3562 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3563 break;
3564 case 4:
3565 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3566 break;
3567 case 8:
3568 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3569 break;
3571 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3572 GEN_INT (size*8), addr));
3575 switch ((int) size)
3577 case 2:
3578 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3579 break;
3580 case 4:
3582 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3583 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3584 break;
3586 case 8:
3587 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3588 break;
3591 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3593 else
3595 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3597 if (src != CONST0_RTX (GET_MODE (src)))
3599 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3600 GEN_INT (size*8), addr));
3602 switch ((int) size)
3604 case 2:
3605 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3606 break;
3607 case 4:
3608 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3609 break;
3610 case 8:
3611 emit_insn (gen_insql_le (insl, src, addr));
3612 break;
3616 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3618 switch ((int) size)
3620 case 2:
3621 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3622 break;
3623 case 4:
3625 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3626 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3627 break;
3629 case 8:
3630 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3631 break;
3635 if (src != CONST0_RTX (GET_MODE (src)))
3637 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3638 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3641 if (WORDS_BIG_ENDIAN)
3643 emit_move_insn (meml, dstl);
3644 emit_move_insn (memh, dsth);
3646 else
3648 /* Must store high before low for degenerate case of aligned. */
3649 emit_move_insn (memh, dsth);
3650 emit_move_insn (meml, dstl);
3654 /* The block move code tries to maximize speed by separating loads and
3655 stores at the expense of register pressure: we load all of the data
3656 before we store it back out. There are two secondary effects worth
3657 mentioning: that this speeds copying to/from aligned and unaligned
3658 buffers, and that it makes the code significantly easier to write. */
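/* A small illustration (sizes invented for exposition): copying 20
   bytes from a 64-bit-aligned source first loads two DImode words
   and one SImode word into registers, and only afterwards stores
   them according to whatever alignment the destination offers.  */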
3660 #define MAX_MOVE_WORDS 8
3662 /* Load an integral number of consecutive unaligned quadwords. */
3664 static void
3665 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3666 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3668 rtx const im8 = GEN_INT (-8);
3669 rtx const i64 = GEN_INT (64);
3670 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3671 rtx sreg, areg, tmp, smema;
3672 HOST_WIDE_INT i;
3674 smema = XEXP (smem, 0);
3675 if (GET_CODE (smema) == LO_SUM)
3676 smema = force_reg (Pmode, smema);
3678 /* Generate all the tmp registers we need. */
3679 for (i = 0; i < words; ++i)
3681 data_regs[i] = out_regs[i];
3682 ext_tmps[i] = gen_reg_rtx (DImode);
3684 data_regs[words] = gen_reg_rtx (DImode);
3686 if (ofs != 0)
3687 smem = adjust_address (smem, GET_MODE (smem), ofs);
3689 /* Load up all of the source data. */
3690 for (i = 0; i < words; ++i)
3692 tmp = change_address (smem, DImode,
3693 gen_rtx_AND (DImode,
3694 plus_constant (smema, 8*i),
3695 im8));
3696 set_mem_alias_set (tmp, 0);
3697 emit_move_insn (data_regs[i], tmp);
3700 tmp = change_address (smem, DImode,
3701 gen_rtx_AND (DImode,
3702 plus_constant (smema, 8*words - 1),
3703 im8));
3704 set_mem_alias_set (tmp, 0);
3705 emit_move_insn (data_regs[words], tmp);
3707 /* Extract the half-word fragments. Unfortunately DEC decided to make
3708 extxh with offset zero a noop instead of zeroing the register, so
3709 we must take care of that edge condition ourselves with cmov. */
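/* Concretely: if the source happens to be 8-byte aligned, (sreg & 7)
   is zero, the ext*h shift degenerates to no shift at all, and the
   high fragment comes back unzeroed; the conditional move below
   forces it to zero in exactly that case.  */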
3711 sreg = copy_addr_to_reg (smema);
3712 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3713 1, OPTAB_WIDEN);
3714 if (WORDS_BIG_ENDIAN)
3715 emit_move_insn (sreg, plus_constant (sreg, 7));
3716 for (i = 0; i < words; ++i)
3718 if (WORDS_BIG_ENDIAN)
3720 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3721 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3723 else
3725 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3726 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3728 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3729 gen_rtx_IF_THEN_ELSE (DImode,
3730 gen_rtx_EQ (DImode, areg,
3731 const0_rtx),
3732 const0_rtx, ext_tmps[i])));
3735 /* Merge the half-words into whole words. */
3736 for (i = 0; i < words; ++i)
3738 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3739 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3743 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3744 may be NULL to store zeros. */
3746 static void
3747 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3748 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3750 rtx const im8 = GEN_INT (-8);
3751 rtx const i64 = GEN_INT (64);
3752 rtx ins_tmps[MAX_MOVE_WORDS];
3753 rtx st_tmp_1, st_tmp_2, dreg;
3754 rtx st_addr_1, st_addr_2, dmema;
3755 HOST_WIDE_INT i;
3757 dmema = XEXP (dmem, 0);
3758 if (GET_CODE (dmema) == LO_SUM)
3759 dmema = force_reg (Pmode, dmema);
3761 /* Generate all the tmp registers we need. */
3762 if (data_regs != NULL)
3763 for (i = 0; i < words; ++i)
3764 ins_tmps[i] = gen_reg_rtx(DImode);
3765 st_tmp_1 = gen_reg_rtx(DImode);
3766 st_tmp_2 = gen_reg_rtx(DImode);
3768 if (ofs != 0)
3769 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3771 st_addr_2 = change_address (dmem, DImode,
3772 gen_rtx_AND (DImode,
3773 plus_constant (dmema, words*8 - 1),
3774 im8));
3775 set_mem_alias_set (st_addr_2, 0);
3777 st_addr_1 = change_address (dmem, DImode,
3778 gen_rtx_AND (DImode, dmema, im8));
3779 set_mem_alias_set (st_addr_1, 0);
3781 /* Load up the destination end bits. */
3782 emit_move_insn (st_tmp_2, st_addr_2);
3783 emit_move_insn (st_tmp_1, st_addr_1);
3785 /* Shift the input data into place. */
3786 dreg = copy_addr_to_reg (dmema);
3787 if (WORDS_BIG_ENDIAN)
3788 emit_move_insn (dreg, plus_constant (dreg, 7));
3789 if (data_regs != NULL)
3791 for (i = words-1; i >= 0; --i)
3793 if (WORDS_BIG_ENDIAN)
3795 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3796 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3798 else
3800 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3801 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3804 for (i = words-1; i > 0; --i)
3806 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3807 ins_tmps[i-1], ins_tmps[i-1], 1,
3808 OPTAB_WIDEN);
3812 /* Split and merge the ends with the destination data. */
3813 if (WORDS_BIG_ENDIAN)
3815 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3816 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3818 else
3820 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3821 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3824 if (data_regs != NULL)
3826 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3827 st_tmp_2, 1, OPTAB_WIDEN);
3828 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3829 st_tmp_1, 1, OPTAB_WIDEN);
3832 /* Store it all. */
3833 if (WORDS_BIG_ENDIAN)
3834 emit_move_insn (st_addr_1, st_tmp_1);
3835 else
3836 emit_move_insn (st_addr_2, st_tmp_2);
3837 for (i = words-1; i > 0; --i)
3839 rtx tmp = change_address (dmem, DImode,
3840 gen_rtx_AND (DImode,
3841 plus_constant(dmema,
3842 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3843 im8));
3844 set_mem_alias_set (tmp, 0);
3845 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3847 if (WORDS_BIG_ENDIAN)
3848 emit_move_insn (st_addr_2, st_tmp_2);
3849 else
3850 emit_move_insn (st_addr_1, st_tmp_1);
3854 /* Expand string/block move operations.
3856 operands[0] is the pointer to the destination.
3857 operands[1] is the pointer to the source.
3858 operands[2] is the number of bytes to move.
3859 operands[3] is the alignment. */
3862 alpha_expand_block_move (rtx operands[])
3864 rtx bytes_rtx = operands[2];
3865 rtx align_rtx = operands[3];
3866 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3867 HOST_WIDE_INT bytes = orig_bytes;
3868 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3869 HOST_WIDE_INT dst_align = src_align;
3870 rtx orig_src = operands[1];
3871 rtx orig_dst = operands[0];
3872 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3873 rtx tmp;
3874 unsigned int i, words, ofs, nregs = 0;
3876 if (orig_bytes <= 0)
3877 return 1;
3878 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3879 return 0;
3881 /* Look for additional alignment information from recorded register info. */
3883 tmp = XEXP (orig_src, 0);
3884 if (GET_CODE (tmp) == REG)
3885 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3886 else if (GET_CODE (tmp) == PLUS
3887 && GET_CODE (XEXP (tmp, 0)) == REG
3888 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3890 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3891 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3893 if (a > src_align)
3895 if (a >= 64 && c % 8 == 0)
3896 src_align = 64;
3897 else if (a >= 32 && c % 4 == 0)
3898 src_align = 32;
3899 else if (a >= 16 && c % 2 == 0)
3900 src_align = 16;
3904 tmp = XEXP (orig_dst, 0);
3905 if (GET_CODE (tmp) == REG)
3906 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3907 else if (GET_CODE (tmp) == PLUS
3908 && GET_CODE (XEXP (tmp, 0)) == REG
3909 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3911 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3912 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3914 if (a > dst_align)
3916 if (a >= 64 && c % 8 == 0)
3917 dst_align = 64;
3918 else if (a >= 32 && c % 4 == 0)
3919 dst_align = 32;
3920 else if (a >= 16 && c % 2 == 0)
3921 dst_align = 16;
3925 ofs = 0;
3926 if (src_align >= 64 && bytes >= 8)
3928 words = bytes / 8;
3930 for (i = 0; i < words; ++i)
3931 data_regs[nregs + i] = gen_reg_rtx (DImode);
3933 for (i = 0; i < words; ++i)
3934 emit_move_insn (data_regs[nregs + i],
3935 adjust_address (orig_src, DImode, ofs + i * 8));
3937 nregs += words;
3938 bytes -= words * 8;
3939 ofs += words * 8;
3942 if (src_align >= 32 && bytes >= 4)
3944 words = bytes / 4;
3946 for (i = 0; i < words; ++i)
3947 data_regs[nregs + i] = gen_reg_rtx (SImode);
3949 for (i = 0; i < words; ++i)
3950 emit_move_insn (data_regs[nregs + i],
3951 adjust_address (orig_src, SImode, ofs + i * 4));
3953 nregs += words;
3954 bytes -= words * 4;
3955 ofs += words * 4;
3958 if (bytes >= 8)
3960 words = bytes / 8;
3962 for (i = 0; i < words+1; ++i)
3963 data_regs[nregs + i] = gen_reg_rtx (DImode);
3965 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3966 words, ofs);
3968 nregs += words;
3969 bytes -= words * 8;
3970 ofs += words * 8;
3973 if (! TARGET_BWX && bytes >= 4)
3975 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3976 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3977 bytes -= 4;
3978 ofs += 4;
3981 if (bytes >= 2)
3983 if (src_align >= 16)
3985 do {
3986 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3987 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3988 bytes -= 2;
3989 ofs += 2;
3990 } while (bytes >= 2);
3992 else if (! TARGET_BWX)
3994 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3995 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3996 bytes -= 2;
3997 ofs += 2;
4001 while (bytes > 0)
4003 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
4004 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
4005 bytes -= 1;
4006 ofs += 1;
4009 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
4011 /* Now save it back out again. */
4013 i = 0, ofs = 0;
4015 /* Write out the data in whatever chunks reading the source allowed. */
4016 if (dst_align >= 64)
4018 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4020 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4021 data_regs[i]);
4022 ofs += 8;
4023 i++;
4027 if (dst_align >= 32)
4029 /* If the source has remaining DImode regs, write them out in
4030 two pieces. */
4031 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4033 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4034 NULL_RTX, 1, OPTAB_WIDEN);
4036 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4037 gen_lowpart (SImode, data_regs[i]));
4038 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4039 gen_lowpart (SImode, tmp));
4040 ofs += 8;
4041 i++;
4044 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4046 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4047 data_regs[i]);
4048 ofs += 4;
4049 i++;
4053 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4055 /* Write out a remaining block of words using unaligned methods. */
4057 for (words = 1; i + words < nregs; words++)
4058 if (GET_MODE (data_regs[i + words]) != DImode)
4059 break;
4061 if (words == 1)
4062 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4063 else
4064 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4065 words, ofs);
4067 i += words;
4068 ofs += words * 8;
4071 /* Due to the above, this won't be aligned. */
4072 /* ??? If we have more than one of these, consider constructing full
4073 words in registers and using alpha_expand_unaligned_store_words. */
4074 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4076 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4077 ofs += 4;
4078 i++;
4081 if (dst_align >= 16)
4082 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4084 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4085 i++;
4086 ofs += 2;
4088 else
4089 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4091 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4092 i++;
4093 ofs += 2;
4096 /* The remainder must be byte copies. */
4097 while (i < nregs)
4099 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4100 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4101 i++;
4102 ofs += 1;
4105 return 1;
4109 alpha_expand_block_clear (rtx operands[])
4111 rtx bytes_rtx = operands[1];
4112 rtx align_rtx = operands[3];
4113 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4114 HOST_WIDE_INT bytes = orig_bytes;
4115 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4116 HOST_WIDE_INT alignofs = 0;
4117 rtx orig_dst = operands[0];
4118 rtx tmp;
4119 int i, words, ofs = 0;
4121 if (orig_bytes <= 0)
4122 return 1;
4123 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4124 return 0;
4126 /* Look for stricter alignment. */
4127 tmp = XEXP (orig_dst, 0);
4128 if (GET_CODE (tmp) == REG)
4129 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4130 else if (GET_CODE (tmp) == PLUS
4131 && GET_CODE (XEXP (tmp, 0)) == REG
4132 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4134 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4135 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4137 if (a > align)
4139 if (a >= 64)
4140 align = a, alignofs = 8 - c % 8;
4141 else if (a >= 32)
4142 align = a, alignofs = 4 - c % 4;
4143 else if (a >= 16)
4144 align = a, alignofs = 2 - c % 2;
4148 /* Handle an unaligned prefix first. */
4150 if (alignofs > 0)
4152 #if HOST_BITS_PER_WIDE_INT >= 64
4153 /* Given that alignofs is bounded by align, the only time BWX could
4154 generate three stores is for a 7 byte fill. Prefer two individual
4155 stores over a load/mask/store sequence. */
4156 if ((!TARGET_BWX || alignofs == 7)
4157 && align >= 32
4158 && !(alignofs == 4 && bytes >= 4))
4160 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4161 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4162 rtx mem, tmp;
4163 HOST_WIDE_INT mask;
4165 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4166 set_mem_alias_set (mem, 0);
4168 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4169 if (bytes < alignofs)
4171 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4172 ofs += bytes;
4173 bytes = 0;
4175 else
4177 bytes -= alignofs;
4178 ofs += alignofs;
4180 alignofs = 0;
4182 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4183 NULL_RTX, 1, OPTAB_WIDEN);
4185 emit_move_insn (mem, tmp);
4187 #endif
4189 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4191 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4192 bytes -= 1;
4193 ofs += 1;
4194 alignofs -= 1;
4196 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4198 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4199 bytes -= 2;
4200 ofs += 2;
4201 alignofs -= 2;
4203 if (alignofs == 4 && bytes >= 4)
4205 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4206 bytes -= 4;
4207 ofs += 4;
4208 alignofs = 0;
4211 /* If we've not used the extra lead alignment information by now,
4212 we won't be able to. Downgrade align to match what's left over. */
4213 if (alignofs > 0)
4215 alignofs = alignofs & -alignofs;
4216 align = MIN (align, alignofs * BITS_PER_UNIT);
4220 /* Handle a block of contiguous long-words. */
4222 if (align >= 64 && bytes >= 8)
4224 words = bytes / 8;
4226 for (i = 0; i < words; ++i)
4227 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4228 const0_rtx);
4230 bytes -= words * 8;
4231 ofs += words * 8;
4234 /* If the block is large and appropriately aligned, emit a single
4235 store followed by a sequence of stq_u insns. */
4237 if (align >= 32 && bytes > 16)
4239 rtx orig_dsta;
4241 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4242 bytes -= 4;
4243 ofs += 4;
4245 orig_dsta = XEXP (orig_dst, 0);
4246 if (GET_CODE (orig_dsta) == LO_SUM)
4247 orig_dsta = force_reg (Pmode, orig_dsta);
4249 words = bytes / 8;
4250 for (i = 0; i < words; ++i)
4252 rtx mem
4253 = change_address (orig_dst, DImode,
4254 gen_rtx_AND (DImode,
4255 plus_constant (orig_dsta, ofs + i*8),
4256 GEN_INT (-8)));
4257 set_mem_alias_set (mem, 0);
4258 emit_move_insn (mem, const0_rtx);
4261 /* Depending on the alignment, the first stq_u may have overlapped
4262 with the initial stl, which means that the last stq_u didn't
4263 write as much as it would appear. Leave those questionable bytes
4264 unaccounted for. */
4265 bytes -= words * 8 - 4;
4266 ofs += words * 8 - 4;
4269 /* Handle a smaller block of aligned words. */
4271 if ((align >= 64 && bytes == 4)
4272 || (align == 32 && bytes >= 4))
4274 words = bytes / 4;
4276 for (i = 0; i < words; ++i)
4277 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4278 const0_rtx);
4280 bytes -= words * 4;
4281 ofs += words * 4;
4284 /* An unaligned block uses stq_u stores for as many as possible. */
4286 if (bytes >= 8)
4288 words = bytes / 8;
4290 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4292 bytes -= words * 8;
4293 ofs += words * 8;
4296 /* Next clean up any trailing pieces. */
4298 #if HOST_BITS_PER_WIDE_INT >= 64
4299 /* Count the number of bits in BYTES for which aligned stores could
4300 be emitted. */
4301 words = 0;
4302 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4303 if (bytes & i)
4304 words += 1;
4306 /* If we have appropriate alignment (and it wouldn't take too many
4307 instructions otherwise), mask out the bytes we need. */
4308 if (TARGET_BWX ? words > 2 : bytes > 0)
4310 if (align >= 64)
4312 rtx mem, tmp;
4313 HOST_WIDE_INT mask;
4315 mem = adjust_address (orig_dst, DImode, ofs);
4316 set_mem_alias_set (mem, 0);
4318 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4320 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4321 NULL_RTX, 1, OPTAB_WIDEN);
4323 emit_move_insn (mem, tmp);
4324 return 1;
4326 else if (align >= 32 && bytes < 4)
4328 rtx mem, tmp;
4329 HOST_WIDE_INT mask;
4331 mem = adjust_address (orig_dst, SImode, ofs);
4332 set_mem_alias_set (mem, 0);
4334 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4336 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4337 NULL_RTX, 1, OPTAB_WIDEN);
4339 emit_move_insn (mem, tmp);
4340 return 1;
4343 #endif
4345 if (!TARGET_BWX && bytes >= 4)
4347 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4348 bytes -= 4;
4349 ofs += 4;
4352 if (bytes >= 2)
4354 if (align >= 16)
4356 do {
4357 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4358 const0_rtx);
4359 bytes -= 2;
4360 ofs += 2;
4361 } while (bytes >= 2);
4363 else if (! TARGET_BWX)
4365 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4366 bytes -= 2;
4367 ofs += 2;
4371 while (bytes > 0)
4373 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4374 bytes -= 1;
4375 ofs += 1;
4378 return 1;
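/* As a rough illustration of the strategy above (not exhaustive): clearing
   12 bytes at a destination known to be 8-byte aligned emits one DImode
   store of zero for the first 8 bytes, after which the
   "align >= 64 && bytes == 4" case finishes the job with a single SImode
   store; no unaligned stq_u sequence is needed in that case.  */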
4381 /* Returns a mask so that zap(x, value) == x & mask. */
4383 rtx
4384 alpha_expand_zap_mask (HOST_WIDE_INT value)
4386 rtx result;
4387 int i;
4389 if (HOST_BITS_PER_WIDE_INT >= 64)
4391 HOST_WIDE_INT mask = 0;
4393 for (i = 7; i >= 0; --i)
4395 mask <<= 8;
4396 if (!((value >> i) & 1))
4397 mask |= 0xff;
4400 result = gen_int_mode (mask, DImode);
4402 else
4404 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4406 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4408 for (i = 7; i >= 4; --i)
4410 mask_hi <<= 8;
4411 if (!((value >> i) & 1))
4412 mask_hi |= 0xff;
4415 for (i = 3; i >= 0; --i)
4417 mask_lo <<= 8;
4418 if (!((value >> i) & 1))
4419 mask_lo |= 0xff;
4422 result = immed_double_const (mask_lo, mask_hi, DImode);
4425 return result;
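/* For example, value == 0x0f selects bytes 0-3 for zapping, so the mask
   built above is 0xffffffff00000000 and zap(x, 0x0f) == x & that mask;
   value == 0 selects nothing and yields an all-ones mask.  */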
4428 void
4429 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4430 enum machine_mode mode,
4431 rtx op0, rtx op1, rtx op2)
4433 op0 = gen_lowpart (mode, op0);
4435 if (op1 == const0_rtx)
4436 op1 = CONST0_RTX (mode);
4437 else
4438 op1 = gen_lowpart (mode, op1);
4440 if (op2 == const0_rtx)
4441 op2 = CONST0_RTX (mode);
4442 else
4443 op2 = gen_lowpart (mode, op2);
4445 emit_insn ((*gen) (op0, op1, op2));
4448 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4449 COND is true. Mark the jump as unlikely to be taken. */
4451 static void
4452 emit_unlikely_jump (rtx cond, rtx label)
4454 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4455 rtx x;
4457 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4458 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4459 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4462 /* A subroutine of the atomic operation splitters. Emit a load-locked
4463 instruction in MODE. */
4465 static void
4466 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4468 rtx (*fn) (rtx, rtx) = NULL;
4469 if (mode == SImode)
4470 fn = gen_load_locked_si;
4471 else if (mode == DImode)
4472 fn = gen_load_locked_di;
4473 emit_insn (fn (reg, mem));
4476 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4477 instruction in MODE. */
4479 static void
4480 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4482 rtx (*fn) (rtx, rtx, rtx) = NULL;
4483 if (mode == SImode)
4484 fn = gen_store_conditional_si;
4485 else if (mode == DImode)
4486 fn = gen_store_conditional_di;
4487 emit_insn (fn (res, mem, val));
4490 /* A subroutine of the atomic operation splitters. Emit an insxl
4491 instruction in MODE. */
4493 static rtx
4494 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4496 rtx ret = gen_reg_rtx (DImode);
4497 rtx (*fn) (rtx, rtx, rtx);
4499 if (WORDS_BIG_ENDIAN)
4501 if (mode == QImode)
4502 fn = gen_insbl_be;
4503 else
4504 fn = gen_inswl_be;
4506 else
4508 if (mode == QImode)
4509 fn = gen_insbl_le;
4510 else
4511 fn = gen_inswl_le;
4513 emit_insn (fn (ret, op1, op2));
4515 return ret;
4518 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4519 to perform. MEM is the memory on which to operate. VAL is the second
4520 operand of the binary operator. BEFORE and AFTER are optional locations to
4521 return the value of MEM either before or after the operation. SCRATCH is
4522 a scratch register. */
4524 void
4525 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4526 rtx before, rtx after, rtx scratch)
4528 enum machine_mode mode = GET_MODE (mem);
4529 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4531 emit_insn (gen_memory_barrier ());
4533 label = gen_label_rtx ();
4534 emit_label (label);
4535 label = gen_rtx_LABEL_REF (DImode, label);
4537 if (before == NULL)
4538 before = scratch;
4539 emit_load_locked (mode, before, mem);
4541 if (code == NOT)
4542 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4543 else
4544 x = gen_rtx_fmt_ee (code, mode, before, val);
4545 if (after)
4546 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4547 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4549 emit_store_conditional (mode, cond, mem, scratch);
4551 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4552 emit_unlikely_jump (x, label);
4554 emit_insn (gen_memory_barrier ());
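/* The splitter above emits, in effect, the canonical Alpha LL/SC retry
   loop (illustrative only; the actual registers depend on the operands):

	mb
     1:	ldq_l	scratch, 0(mem)		# ldl_l / stl_c for SImode
	<op>	scratch, val, scratch	# the requested binary operation
	stq_c	scratch, 0(mem)
	beq	scratch, 1b		# retry if the store-conditional failed
	mb
*/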
4557 /* Expand a compare and swap operation. */
4559 void
4560 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4561 rtx scratch)
4563 enum machine_mode mode = GET_MODE (mem);
4564 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4566 emit_insn (gen_memory_barrier ());
4568 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4569 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4570 emit_label (XEXP (label1, 0));
4572 emit_load_locked (mode, retval, mem);
4574 x = gen_lowpart (DImode, retval);
4575 if (oldval == const0_rtx)
4576 x = gen_rtx_NE (DImode, x, const0_rtx);
4577 else
4579 x = gen_rtx_EQ (DImode, x, oldval);
4580 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4581 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4583 emit_unlikely_jump (x, label2);
4585 emit_move_insn (scratch, newval);
4586 emit_store_conditional (mode, cond, mem, scratch);
4588 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4589 emit_unlikely_jump (x, label1);
4591 emit_insn (gen_memory_barrier ());
4592 emit_label (XEXP (label2, 0));
4595 void
4596 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4598 enum machine_mode mode = GET_MODE (mem);
4599 rtx addr, align, wdst;
4600 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4602 addr = force_reg (DImode, XEXP (mem, 0));
4603 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4604 NULL_RTX, 1, OPTAB_DIRECT);
4606 oldval = convert_modes (DImode, mode, oldval, 1);
4607 newval = emit_insxl (mode, newval, addr);
4609 wdst = gen_reg_rtx (DImode);
4610 if (mode == QImode)
4611 fn5 = gen_sync_compare_and_swapqi_1;
4612 else
4613 fn5 = gen_sync_compare_and_swaphi_1;
4614 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4616 emit_move_insn (dst, gen_lowpart (mode, wdst));
4619 void
4620 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4621 rtx oldval, rtx newval, rtx align,
4622 rtx scratch, rtx cond)
4624 rtx label1, label2, mem, width, mask, x;
4626 mem = gen_rtx_MEM (DImode, align);
4627 MEM_VOLATILE_P (mem) = 1;
4629 emit_insn (gen_memory_barrier ());
4630 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4631 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4632 emit_label (XEXP (label1, 0));
4634 emit_load_locked (DImode, scratch, mem);
4636 width = GEN_INT (GET_MODE_BITSIZE (mode));
4637 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4638 if (WORDS_BIG_ENDIAN)
4639 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4640 else
4641 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4643 if (oldval == const0_rtx)
4644 x = gen_rtx_NE (DImode, dest, const0_rtx);
4645 else
4647 x = gen_rtx_EQ (DImode, dest, oldval);
4648 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4649 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4651 emit_unlikely_jump (x, label2);
4653 if (WORDS_BIG_ENDIAN)
4654 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4655 else
4656 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4657 emit_insn (gen_iordi3 (scratch, scratch, newval));
4659 emit_store_conditional (DImode, scratch, mem, scratch);
4661 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4662 emit_unlikely_jump (x, label1);
4664 emit_insn (gen_memory_barrier ());
4665 emit_label (XEXP (label2, 0));
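/* In other words, the 1- and 2-byte compare-and-swap is synthesized from
   the 8-byte primitives: the expander above aligns the address down to a
   quadword and pre-shifts NEWVAL into place with insbl/inswl, and this
   splitter then ldq_l's the containing quadword, extracts the old
   byte/word for the comparison, masks it out of the quadword, ORs in the
   new value and stq_c's the whole quadword back, retrying on failure.  */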
4668 /* Expand an atomic exchange operation. */
4670 void
4671 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4673 enum machine_mode mode = GET_MODE (mem);
4674 rtx label, x, cond = gen_lowpart (DImode, scratch);
4676 emit_insn (gen_memory_barrier ());
4678 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4679 emit_label (XEXP (label, 0));
4681 emit_load_locked (mode, retval, mem);
4682 emit_move_insn (scratch, val);
4683 emit_store_conditional (mode, cond, mem, scratch);
4685 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4686 emit_unlikely_jump (x, label);
4689 void
4690 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4692 enum machine_mode mode = GET_MODE (mem);
4693 rtx addr, align, wdst;
4694 rtx (*fn4) (rtx, rtx, rtx, rtx);
4696 /* Force the address into a register. */
4697 addr = force_reg (DImode, XEXP (mem, 0));
4699 /* Align it to a multiple of 8. */
4700 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4701 NULL_RTX, 1, OPTAB_DIRECT);
4703 /* Insert val into the correct byte location within the word. */
4704 val = emit_insxl (mode, val, addr);
4706 wdst = gen_reg_rtx (DImode);
4707 if (mode == QImode)
4708 fn4 = gen_sync_lock_test_and_setqi_1;
4709 else
4710 fn4 = gen_sync_lock_test_and_sethi_1;
4711 emit_insn (fn4 (wdst, addr, val, align));
4713 emit_move_insn (dst, gen_lowpart (mode, wdst));
4716 void
4717 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4718 rtx val, rtx align, rtx scratch)
4720 rtx label, mem, width, mask, x;
4722 mem = gen_rtx_MEM (DImode, align);
4723 MEM_VOLATILE_P (mem) = 1;
4725 emit_insn (gen_memory_barrier ());
4726 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4727 emit_label (XEXP (label, 0));
4729 emit_load_locked (DImode, scratch, mem);
4731 width = GEN_INT (GET_MODE_BITSIZE (mode));
4732 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4733 if (WORDS_BIG_ENDIAN)
4735 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4736 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4738 else
4740 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4741 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4743 emit_insn (gen_iordi3 (scratch, scratch, val));
4745 emit_store_conditional (DImode, scratch, mem, scratch);
4747 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4748 emit_unlikely_jump (x, label);
4751 /* Adjust the cost of a scheduling dependency. Return the new cost of
4752 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4754 static int
4755 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4757 enum attr_type insn_type, dep_insn_type;
4759 /* If the dependence is an anti-dependence, there is no cost. For an
4760 output dependence, there is sometimes a cost, but it doesn't seem
4761 worth handling those few cases. */
4762 if (REG_NOTE_KIND (link) != 0)
4763 return cost;
4765 /* If we can't recognize the insns, we can't really do anything. */
4766 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4767 return cost;
4769 insn_type = get_attr_type (insn);
4770 dep_insn_type = get_attr_type (dep_insn);
4772 /* Bring in the user-defined memory latency. */
4773 if (dep_insn_type == TYPE_ILD
4774 || dep_insn_type == TYPE_FLD
4775 || dep_insn_type == TYPE_LDSYM)
4776 cost += alpha_memory_latency-1;
4778 /* Everything else handled in DFA bypasses now. */
4780 return cost;
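/* As an example, with alpha_memory_latency == 3, an insn that uses the
   result of an integer load, FP load or ldsym (TYPE_ILD/TYPE_FLD/TYPE_LDSYM)
   has its dependence cost bumped by 2 cycles; anti- and output dependences
   are returned unchanged.  */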
4783 /* The number of instructions that can be issued per cycle. */
4785 static int
4786 alpha_issue_rate (void)
4788 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4791 /* How many alternative schedules to try. This should be as wide as the
4792 scheduling freedom in the DFA, but no wider. Making this value too
4793 large results in extra work for the scheduler.
4795 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4796 alternative schedules. For EV5, we can choose between E0/E1 and
4797 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4799 static int
4800 alpha_multipass_dfa_lookahead (void)
4802 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4805 /* Machine-specific function data. */
4807 struct machine_function GTY(())
4809 /* For unicosmk. */
4810 /* List of call information words for calls from this function. */
4811 struct rtx_def *first_ciw;
4812 struct rtx_def *last_ciw;
4813 int ciw_count;
4815 /* List of deferred case vectors. */
4816 struct rtx_def *addr_list;
4818 /* For OSF. */
4819 const char *some_ld_name;
4821 /* For TARGET_LD_BUGGY_LDGP. */
4822 struct rtx_def *gp_save_rtx;
4825 /* How to allocate a 'struct machine_function'. */
4827 static struct machine_function *
4828 alpha_init_machine_status (void)
4830 return ((struct machine_function *)
4831 ggc_alloc_cleared (sizeof (struct machine_function)));
4834 /* Functions to save and restore alpha_return_addr_rtx. */
4836 /* Start the ball rolling with RETURN_ADDR_RTX. */
4838 rtx
4839 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4841 if (count != 0)
4842 return const0_rtx;
4844 return get_hard_reg_initial_val (Pmode, REG_RA);
4847 /* Return or create a memory slot containing the gp value for the current
4848 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4850 rtx
4851 alpha_gp_save_rtx (void)
4853 rtx seq, m = cfun->machine->gp_save_rtx;
4855 if (m == NULL)
4857 start_sequence ();
4859 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4860 m = validize_mem (m);
4861 emit_move_insn (m, pic_offset_table_rtx);
4863 seq = get_insns ();
4864 end_sequence ();
4865 emit_insn_after (seq, entry_of_function ());
4867 cfun->machine->gp_save_rtx = m;
4870 return m;
4873 static int
4874 alpha_ra_ever_killed (void)
4876 rtx top;
4878 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4879 return regs_ever_live[REG_RA];
4881 push_topmost_sequence ();
4882 top = get_insns ();
4883 pop_topmost_sequence ();
4885 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4889 /* Return the trap mode suffix applicable to the current
4890 instruction, or NULL. */
4892 static const char *
4893 get_trap_mode_suffix (void)
4895 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4897 switch (s)
4899 case TRAP_SUFFIX_NONE:
4900 return NULL;
4902 case TRAP_SUFFIX_SU:
4903 if (alpha_fptm >= ALPHA_FPTM_SU)
4904 return "su";
4905 return NULL;
4907 case TRAP_SUFFIX_SUI:
4908 if (alpha_fptm >= ALPHA_FPTM_SUI)
4909 return "sui";
4910 return NULL;
4912 case TRAP_SUFFIX_V_SV:
4913 switch (alpha_fptm)
4915 case ALPHA_FPTM_N:
4916 return NULL;
4917 case ALPHA_FPTM_U:
4918 return "v";
4919 case ALPHA_FPTM_SU:
4920 case ALPHA_FPTM_SUI:
4921 return "sv";
4922 default:
4923 gcc_unreachable ();
4926 case TRAP_SUFFIX_V_SV_SVI:
4927 switch (alpha_fptm)
4929 case ALPHA_FPTM_N:
4930 return NULL;
4931 case ALPHA_FPTM_U:
4932 return "v";
4933 case ALPHA_FPTM_SU:
4934 return "sv";
4935 case ALPHA_FPTM_SUI:
4936 return "svi";
4937 default:
4938 gcc_unreachable ();
4940 break;
4942 case TRAP_SUFFIX_U_SU_SUI:
4943 switch (alpha_fptm)
4945 case ALPHA_FPTM_N:
4946 return NULL;
4947 case ALPHA_FPTM_U:
4948 return "u";
4949 case ALPHA_FPTM_SU:
4950 return "su";
4951 case ALPHA_FPTM_SUI:
4952 return "sui";
4953 default:
4954 gcc_unreachable ();
4956 break;
4958 default:
4959 gcc_unreachable ();
4961 gcc_unreachable ();
4964 /* Return the rounding mode suffix applicable to the current
4965 instruction, or NULL. */
4967 static const char *
4968 get_round_mode_suffix (void)
4970 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4972 switch (s)
4974 case ROUND_SUFFIX_NONE:
4975 return NULL;
4976 case ROUND_SUFFIX_NORMAL:
4977 switch (alpha_fprm)
4979 case ALPHA_FPRM_NORM:
4980 return NULL;
4981 case ALPHA_FPRM_MINF:
4982 return "m";
4983 case ALPHA_FPRM_CHOP:
4984 return "c";
4985 case ALPHA_FPRM_DYN:
4986 return "d";
4987 default:
4988 gcc_unreachable ();
4990 break;
4992 case ROUND_SUFFIX_C:
4993 return "c";
4995 default:
4996 gcc_unreachable ();
4998 gcc_unreachable ();
5001 /* Locate some local-dynamic symbol still in use by this function
5002 so that we can print its name in some movdi_er_tlsldm pattern. */
5004 static int
5005 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5007 rtx x = *px;
5009 if (GET_CODE (x) == SYMBOL_REF
5010 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5012 cfun->machine->some_ld_name = XSTR (x, 0);
5013 return 1;
5016 return 0;
5019 static const char *
5020 get_some_local_dynamic_name (void)
5022 rtx insn;
5024 if (cfun->machine->some_ld_name)
5025 return cfun->machine->some_ld_name;
5027 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5028 if (INSN_P (insn)
5029 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5030 return cfun->machine->some_ld_name;
5032 gcc_unreachable ();
5035 /* Print an operand. Recognize special options, documented below. */
5037 void
5038 print_operand (FILE *file, rtx x, int code)
5040 int i;
5042 switch (code)
5044 case '~':
5045 /* Print the assembler name of the current function. */
5046 assemble_name (file, alpha_fnname);
5047 break;
5049 case '&':
5050 assemble_name (file, get_some_local_dynamic_name ());
5051 break;
5053 case '/':
5055 const char *trap = get_trap_mode_suffix ();
5056 const char *round = get_round_mode_suffix ();
5058 if (trap || round)
5059 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5060 (trap ? trap : ""), (round ? round : ""));
5061 break;
5064 case ',':
5065 /* Generates single precision instruction suffix. */
5066 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5067 break;
5069 case '-':
5070 /* Generates double precision instruction suffix. */
5071 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5072 break;
5074 case '+':
5075 /* Generates a nop after a noreturn call at the very end of the
5076 function. */
5077 if (next_real_insn (current_output_insn) == 0)
5078 fprintf (file, "\n\tnop");
5079 break;
5081 case '#':
5082 if (alpha_this_literal_sequence_number == 0)
5083 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5084 fprintf (file, "%d", alpha_this_literal_sequence_number);
5085 break;
5087 case '*':
5088 if (alpha_this_gpdisp_sequence_number == 0)
5089 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5090 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5091 break;
5093 case 'H':
5094 if (GET_CODE (x) == HIGH)
5095 output_addr_const (file, XEXP (x, 0));
5096 else
5097 output_operand_lossage ("invalid %%H value");
5098 break;
5100 case 'J':
5102 const char *lituse;
5104 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5106 x = XVECEXP (x, 0, 0);
5107 lituse = "lituse_tlsgd";
5109 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5111 x = XVECEXP (x, 0, 0);
5112 lituse = "lituse_tlsldm";
5114 else if (GET_CODE (x) == CONST_INT)
5115 lituse = "lituse_jsr";
5116 else
5118 output_operand_lossage ("invalid %%J value");
5119 break;
5122 if (x != const0_rtx)
5123 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5125 break;
5127 case 'j':
5129 const char *lituse;
5131 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5132 lituse = "lituse_jsrdirect";
5133 #else
5134 lituse = "lituse_jsr";
5135 #endif
5137 gcc_assert (INTVAL (x) != 0);
5138 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5140 break;
5141 case 'r':
5142 /* If this operand is the constant zero, write it as "$31". */
5143 if (GET_CODE (x) == REG)
5144 fprintf (file, "%s", reg_names[REGNO (x)]);
5145 else if (x == CONST0_RTX (GET_MODE (x)))
5146 fprintf (file, "$31");
5147 else
5148 output_operand_lossage ("invalid %%r value");
5149 break;
5151 case 'R':
5152 /* Similar, but for floating-point. */
5153 if (GET_CODE (x) == REG)
5154 fprintf (file, "%s", reg_names[REGNO (x)]);
5155 else if (x == CONST0_RTX (GET_MODE (x)))
5156 fprintf (file, "$f31");
5157 else
5158 output_operand_lossage ("invalid %%R value");
5159 break;
5161 case 'N':
5162 /* Write the 1's complement of a constant. */
5163 if (GET_CODE (x) != CONST_INT)
5164 output_operand_lossage ("invalid %%N value");
5166 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5167 break;
5169 case 'P':
5170 /* Write 1 << C, for a constant C. */
5171 if (GET_CODE (x) != CONST_INT)
5172 output_operand_lossage ("invalid %%P value");
5174 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5175 break;
5177 case 'h':
5178 /* Write the high-order 16 bits of a constant, sign-extended. */
5179 if (GET_CODE (x) != CONST_INT)
5180 output_operand_lossage ("invalid %%h value");
5182 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5183 break;
5185 case 'L':
5186 /* Write the low-order 16 bits of a constant, sign-extended. */
5187 if (GET_CODE (x) != CONST_INT)
5188 output_operand_lossage ("invalid %%L value");
5190 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5191 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5192 break;
5194 case 'm':
5195 /* Write mask for ZAP insn. */
5196 if (GET_CODE (x) == CONST_DOUBLE)
5198 HOST_WIDE_INT mask = 0;
5199 HOST_WIDE_INT value;
5201 value = CONST_DOUBLE_LOW (x);
5202 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5203 i++, value >>= 8)
5204 if (value & 0xff)
5205 mask |= (1 << i);
5207 value = CONST_DOUBLE_HIGH (x);
5208 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5209 i++, value >>= 8)
5210 if (value & 0xff)
5211 mask |= (1 << (i + sizeof (int)));
5213 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5216 else if (GET_CODE (x) == CONST_INT)
5218 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5220 for (i = 0; i < 8; i++, value >>= 8)
5221 if (value & 0xff)
5222 mask |= (1 << i);
5224 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5226 else
5227 output_operand_lossage ("invalid %%m value");
5228 break;
5230 case 'M':
5231 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5232 if (GET_CODE (x) != CONST_INT
5233 || (INTVAL (x) != 8 && INTVAL (x) != 16
5234 && INTVAL (x) != 32 && INTVAL (x) != 64))
5235 output_operand_lossage ("invalid %%M value");
5237 fprintf (file, "%s",
5238 (INTVAL (x) == 8 ? "b"
5239 : INTVAL (x) == 16 ? "w"
5240 : INTVAL (x) == 32 ? "l"
5241 : "q"));
5242 break;
5244 case 'U':
5245 /* Similar, except do it from the mask. */
5246 if (GET_CODE (x) == CONST_INT)
5248 HOST_WIDE_INT value = INTVAL (x);
5250 if (value == 0xff)
5252 fputc ('b', file);
5253 break;
5255 if (value == 0xffff)
5257 fputc ('w', file);
5258 break;
5260 if (value == 0xffffffff)
5262 fputc ('l', file);
5263 break;
5265 if (value == -1)
5267 fputc ('q', file);
5268 break;
5271 else if (HOST_BITS_PER_WIDE_INT == 32
5272 && GET_CODE (x) == CONST_DOUBLE
5273 && CONST_DOUBLE_LOW (x) == 0xffffffff
5274 && CONST_DOUBLE_HIGH (x) == 0)
5276 fputc ('l', file);
5277 break;
5279 output_operand_lossage ("invalid %%U value");
5280 break;
5282 case 's':
5283 /* Write the constant value divided by 8 for little-endian mode or
5284 (56 - value) / 8 for big-endian mode. */
5286 if (GET_CODE (x) != CONST_INT
5287 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5288 ? 56
5289 : 64)
5290 || (INTVAL (x) & 7) != 0)
5291 output_operand_lossage ("invalid %%s value");
5293 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5294 WORDS_BIG_ENDIAN
5295 ? (56 - INTVAL (x)) / 8
5296 : INTVAL (x) / 8);
5297 break;
5299 case 'S':
5300 /* Same, except compute (64 - c) / 8 */
5302 if (GET_CODE (x) != CONST_INT
5303 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5304 && (INTVAL (x) & 7) != 8)
5305 output_operand_lossage ("invalid %%s value");
5307 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5308 break;
5310 case 't':
5312 /* On Unicos/Mk systems: use a DEX expression if the symbol
5313 clashes with a register name. */
5314 int dex = unicosmk_need_dex (x);
5315 if (dex)
5316 fprintf (file, "DEX(%d)", dex);
5317 else
5318 output_addr_const (file, x);
5320 break;
5322 case 'C': case 'D': case 'c': case 'd':
5323 /* Write out comparison name. */
5325 enum rtx_code c = GET_CODE (x);
5327 if (!COMPARISON_P (x))
5328 output_operand_lossage ("invalid %%C value");
5330 else if (code == 'D')
5331 c = reverse_condition (c);
5332 else if (code == 'c')
5333 c = swap_condition (c);
5334 else if (code == 'd')
5335 c = swap_condition (reverse_condition (c));
5337 if (c == LEU)
5338 fprintf (file, "ule");
5339 else if (c == LTU)
5340 fprintf (file, "ult");
5341 else if (c == UNORDERED)
5342 fprintf (file, "un");
5343 else
5344 fprintf (file, "%s", GET_RTX_NAME (c));
5346 break;
5348 case 'E':
5349 /* Write the divide or modulus operator. */
5350 switch (GET_CODE (x))
5352 case DIV:
5353 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5354 break;
5355 case UDIV:
5356 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5357 break;
5358 case MOD:
5359 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5360 break;
5361 case UMOD:
5362 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5363 break;
5364 default:
5365 output_operand_lossage ("invalid %%E value");
5366 break;
5368 break;
5370 case 'A':
5371 /* Write "_u" for unaligned access. */
5372 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5373 fprintf (file, "_u");
5374 break;
5376 case 0:
5377 if (GET_CODE (x) == REG)
5378 fprintf (file, "%s", reg_names[REGNO (x)]);
5379 else if (GET_CODE (x) == MEM)
5380 output_address (XEXP (x, 0));
5381 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5383 switch (XINT (XEXP (x, 0), 1))
5385 case UNSPEC_DTPREL:
5386 case UNSPEC_TPREL:
5387 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5388 break;
5389 default:
5390 output_operand_lossage ("unknown relocation unspec");
5391 break;
5394 else
5395 output_addr_const (file, x);
5396 break;
5398 default:
5399 output_operand_lossage ("invalid %%xn code");
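/* A few concrete examples of the codes above: %P on (const_int 3) prints 8,
   %N on (const_int 5) prints -6, %M on (const_int 32) prints "l", and %m on
   (const_int 0x00ff00ff) prints 5, since only bytes 0 and 2 are nonzero.  */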
5403 void
5404 print_operand_address (FILE *file, rtx addr)
5406 int basereg = 31;
5407 HOST_WIDE_INT offset = 0;
5409 if (GET_CODE (addr) == AND)
5410 addr = XEXP (addr, 0);
5412 if (GET_CODE (addr) == PLUS
5413 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5415 offset = INTVAL (XEXP (addr, 1));
5416 addr = XEXP (addr, 0);
5419 if (GET_CODE (addr) == LO_SUM)
5421 const char *reloc16, *reloclo;
5422 rtx op1 = XEXP (addr, 1);
5424 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5426 op1 = XEXP (op1, 0);
5427 switch (XINT (op1, 1))
5429 case UNSPEC_DTPREL:
5430 reloc16 = NULL;
5431 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5432 break;
5433 case UNSPEC_TPREL:
5434 reloc16 = NULL;
5435 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5436 break;
5437 default:
5438 output_operand_lossage ("unknown relocation unspec");
5439 return;
5442 output_addr_const (file, XVECEXP (op1, 0, 0));
5444 else
5446 reloc16 = "gprel";
5447 reloclo = "gprellow";
5448 output_addr_const (file, op1);
5451 if (offset)
5452 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5454 addr = XEXP (addr, 0);
5455 switch (GET_CODE (addr))
5457 case REG:
5458 basereg = REGNO (addr);
5459 break;
5461 case SUBREG:
5462 basereg = subreg_regno (addr);
5463 break;
5465 default:
5466 gcc_unreachable ();
5469 fprintf (file, "($%d)\t\t!%s", basereg,
5470 (basereg == 29 ? reloc16 : reloclo));
5471 return;
5474 switch (GET_CODE (addr))
5476 case REG:
5477 basereg = REGNO (addr);
5478 break;
5480 case SUBREG:
5481 basereg = subreg_regno (addr);
5482 break;
5484 case CONST_INT:
5485 offset = INTVAL (addr);
5486 break;
5488 #if TARGET_ABI_OPEN_VMS
5489 case SYMBOL_REF:
5490 fprintf (file, "%s", XSTR (addr, 0));
5491 return;
5493 case CONST:
5494 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5495 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5496 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5497 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5498 INTVAL (XEXP (XEXP (addr, 0), 1)));
5499 return;
5501 #endif
5502 default:
5503 gcc_unreachable ();
5506 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5509 /* Emit RTL insns to initialize the variable parts of a trampoline at
5510 TRAMP. FNADDR is an RTX for the address of the function's pure
5511 code. CXT is an RTX for the static chain value for the function.
5513 The three offset parameters are for the individual template's
5514 layout. A JMPOFS < 0 indicates that the trampoline does not
5515 contain instructions at all.
5517 We assume here that a function will be called many more times than
5518 its address is taken (e.g., it might be passed to qsort), so we
5519 take the trouble to initialize the "hint" field in the JMP insn.
5520 Note that the hint field is PC (new) + 4 * bits 13:0. */
5522 void
5523 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5524 int fnofs, int cxtofs, int jmpofs)
5526 rtx temp, temp1, addr;
5527 /* VMS really uses DImode pointers in memory at this point. */
5528 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5530 #ifdef POINTERS_EXTEND_UNSIGNED
5531 fnaddr = convert_memory_address (mode, fnaddr);
5532 cxt = convert_memory_address (mode, cxt);
5533 #endif
5535 /* Store function address and CXT. */
5536 addr = memory_address (mode, plus_constant (tramp, fnofs));
5537 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5538 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5539 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5541 /* This has been disabled since the hint only has a 32k range, and in
5542 no existing OS is the stack within 32k of the text segment. */
5543 if (0 && jmpofs >= 0)
5545 /* Compute hint value. */
5546 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5547 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5548 OPTAB_WIDEN);
5549 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5550 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5551 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5552 GEN_INT (0x3fff), 0);
5554 /* Merge in the hint. */
5555 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5556 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5557 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5558 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5559 OPTAB_WIDEN);
5560 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5563 #ifdef ENABLE_EXECUTE_STACK
5564 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5565 0, VOIDmode, 1, tramp, Pmode);
5566 #endif
5568 if (jmpofs >= 0)
5569 emit_insn (gen_imb ());
5572 /* Determine where to put an argument to a function.
5573 Value is zero to push the argument on the stack,
5574 or a hard register in which to store the argument.
5576 MODE is the argument's machine mode.
5577 TYPE is the data type of the argument (as a tree).
5578 This is null for libcalls where that information may
5579 not be available.
5580 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5581 the preceding args and about the function being called.
5582 NAMED is nonzero if this argument is a named parameter
5583 (otherwise it is an extra parameter matching an ellipsis).
5585 On Alpha the first 6 words of args are normally in registers
5586 and the rest are pushed. */
5588 rtx
5589 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5590 int named ATTRIBUTE_UNUSED)
5592 int basereg;
5593 int num_args;
5595 /* Don't get confused and pass small structures in FP registers. */
5596 if (type && AGGREGATE_TYPE_P (type))
5597 basereg = 16;
5598 else
5600 #ifdef ENABLE_CHECKING
5601 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5602 values here. */
5603 gcc_assert (!COMPLEX_MODE_P (mode));
5604 #endif
5606 /* Set up defaults for FP operands passed in FP registers, and
5607 integral operands passed in integer registers. */
5608 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5609 basereg = 32 + 16;
5610 else
5611 basereg = 16;
5614 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5615 the three platforms, so we can't avoid conditional compilation. */
5616 #if TARGET_ABI_OPEN_VMS
5618 if (mode == VOIDmode)
5619 return alpha_arg_info_reg_val (cum);
5621 num_args = cum.num_args;
5622 if (num_args >= 6
5623 || targetm.calls.must_pass_in_stack (mode, type))
5624 return NULL_RTX;
5626 #elif TARGET_ABI_UNICOSMK
5628 int size;
5630 /* If this is the last argument, generate the call info word (CIW). */
5631 /* ??? We don't include the caller's line number in the CIW because
5632 I don't know how to determine it if debug infos are turned off. */
5633 if (mode == VOIDmode)
5635 int i;
5636 HOST_WIDE_INT lo;
5637 HOST_WIDE_INT hi;
5638 rtx ciw;
5640 lo = 0;
5642 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5643 if (cum.reg_args_type[i])
5644 lo |= (1 << (7 - i));
5646 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5647 lo |= 7;
5648 else
5649 lo |= cum.num_reg_words;
5651 #if HOST_BITS_PER_WIDE_INT == 32
5652 hi = (cum.num_args << 20) | cum.num_arg_words;
5653 #else
5654 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5655 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5656 hi = 0;
5657 #endif
5658 ciw = immed_double_const (lo, hi, DImode);
5660 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5661 UNSPEC_UMK_LOAD_CIW);
5664 size = ALPHA_ARG_SIZE (mode, type, named);
5665 num_args = cum.num_reg_words;
5666 if (cum.force_stack
5667 || cum.num_reg_words + size > 6
5668 || targetm.calls.must_pass_in_stack (mode, type))
5669 return NULL_RTX;
5670 else if (type && TYPE_MODE (type) == BLKmode)
5672 rtx reg1, reg2;
5674 reg1 = gen_rtx_REG (DImode, num_args + 16);
5675 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5677 /* The argument fits in two registers. Note that we still need to
5678 reserve a register for empty structures. */
5679 if (size == 0)
5680 return NULL_RTX;
5681 else if (size == 1)
5682 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5683 else
5685 reg2 = gen_rtx_REG (DImode, num_args + 17);
5686 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5687 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5691 #elif TARGET_ABI_OSF
5693 if (cum >= 6)
5694 return NULL_RTX;
5695 num_args = cum;
5697 /* VOID is passed as a special flag for "last argument". */
5698 if (type == void_type_node)
5699 basereg = 16;
5700 else if (targetm.calls.must_pass_in_stack (mode, type))
5701 return NULL_RTX;
5703 #else
5704 #error Unhandled ABI
5705 #endif
5707 return gen_rtx_REG (mode, num_args + basereg);
5710 static int
5711 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5712 enum machine_mode mode ATTRIBUTE_UNUSED,
5713 tree type ATTRIBUTE_UNUSED,
5714 bool named ATTRIBUTE_UNUSED)
5716 int words = 0;
5718 #if TARGET_ABI_OPEN_VMS
5719 if (cum->num_args < 6
5720 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5721 words = 6 - cum->num_args;
5722 #elif TARGET_ABI_UNICOSMK
5723 /* Never any split arguments. */
5724 #elif TARGET_ABI_OSF
5725 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5726 words = 6 - *cum;
5727 #else
5728 #error Unhandled ABI
5729 #endif
5731 return words * UNITS_PER_WORD;
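/* For instance, on OSF with five argument words already consumed (*cum == 5)
   and a two-word argument, the hook returns 8: one word still fits in the
   last argument register and the remainder goes on the stack.  */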
5735 /* Return true if TYPE must be returned in memory, instead of in registers. */
5737 static bool
5738 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5740 enum machine_mode mode = VOIDmode;
5741 int size;
5743 if (type)
5745 mode = TYPE_MODE (type);
5747 /* All aggregates are returned in memory. */
5748 if (AGGREGATE_TYPE_P (type))
5749 return true;
5752 size = GET_MODE_SIZE (mode);
5753 switch (GET_MODE_CLASS (mode))
5755 case MODE_VECTOR_FLOAT:
5756 /* Pass all float vectors in memory, like an aggregate. */
5757 return true;
5759 case MODE_COMPLEX_FLOAT:
5760 /* We judge complex floats on the size of their element,
5761 not the size of the whole type. */
5762 size = GET_MODE_UNIT_SIZE (mode);
5763 break;
5765 case MODE_INT:
5766 case MODE_FLOAT:
5767 case MODE_COMPLEX_INT:
5768 case MODE_VECTOR_INT:
5769 break;
5771 default:
5772 /* ??? We get called on all sorts of random stuff from
5773 aggregate_value_p. We must return something, but it's not
5774 clear what's safe to return. Pretend it's a struct I
5775 guess. */
5776 return true;
5779 /* Otherwise types must fit in one register. */
5780 return size > UNITS_PER_WORD;
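/* Concretely: an int or a double comes back in a register, any aggregate
   or float vector always goes to memory, a 16-byte integer (TImode) is too
   wide for one register and also goes to memory, while a complex double is
   judged by its 8-byte element and is returned in registers.  */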
5783 /* Return true if TYPE should be passed by invisible reference. */
5785 static bool
5786 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5787 enum machine_mode mode,
5788 tree type ATTRIBUTE_UNUSED,
5789 bool named ATTRIBUTE_UNUSED)
5791 return mode == TFmode || mode == TCmode;
5794 /* Define how to find the value returned by a function. VALTYPE is the
5795 data type of the value (as a tree). If the precise function being
5796 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5797 MODE is set instead of VALTYPE for libcalls.
5799 On Alpha the value is found in $0 for integer functions and
5800 $f0 for floating-point functions. */
5802 rtx
5803 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5804 enum machine_mode mode)
5806 unsigned int regnum, dummy;
5807 enum mode_class class;
5809 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5811 if (valtype)
5812 mode = TYPE_MODE (valtype);
5814 class = GET_MODE_CLASS (mode);
5815 switch (class)
5817 case MODE_INT:
5818 PROMOTE_MODE (mode, dummy, valtype);
5819 /* FALLTHRU */
5821 case MODE_COMPLEX_INT:
5822 case MODE_VECTOR_INT:
5823 regnum = 0;
5824 break;
5826 case MODE_FLOAT:
5827 regnum = 32;
5828 break;
5830 case MODE_COMPLEX_FLOAT:
5832 enum machine_mode cmode = GET_MODE_INNER (mode);
5834 return gen_rtx_PARALLEL
5835 (VOIDmode,
5836 gen_rtvec (2,
5837 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5838 const0_rtx),
5839 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5840 GEN_INT (GET_MODE_SIZE (cmode)))));
5843 default:
5844 gcc_unreachable ();
5847 return gen_rtx_REG (mode, regnum);
5850 /* TCmode complex values are passed by invisible reference. We
5851 should not split these values. */
5853 static bool
5854 alpha_split_complex_arg (tree type)
5856 return TYPE_MODE (type) != TCmode;
5859 static tree
5860 alpha_build_builtin_va_list (void)
5862 tree base, ofs, space, record, type_decl;
5864 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5865 return ptr_type_node;
5867 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5868 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5869 TREE_CHAIN (record) = type_decl;
5870 TYPE_NAME (record) = type_decl;
5872 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5874 /* Dummy field to prevent alignment warnings. */
5875 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5876 DECL_FIELD_CONTEXT (space) = record;
5877 DECL_ARTIFICIAL (space) = 1;
5878 DECL_IGNORED_P (space) = 1;
5880 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5881 integer_type_node);
5882 DECL_FIELD_CONTEXT (ofs) = record;
5883 TREE_CHAIN (ofs) = space;
5885 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5886 ptr_type_node);
5887 DECL_FIELD_CONTEXT (base) = record;
5888 TREE_CHAIN (base) = ofs;
5890 TYPE_FIELDS (record) = base;
5891 layout_type (record);
5893 va_list_gpr_counter_field = ofs;
5894 return record;
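/* The record built above is roughly equivalent to

	struct __va_list_tag
	{
	  void *__base;		(base of the argument save area)
	  int __offset;		(byte offset of the next argument)
	  int <unnamed>;	(dummy field, present only to quiet alignment warnings)
	};

   and the two named fields are what the va_arg lowering below reads and
   updates.  */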
5897 #if TARGET_ABI_OSF
5898 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5899 and constant additions. */
5901 static tree
5902 va_list_skip_additions (tree lhs)
5904 tree rhs, stmt;
5906 if (TREE_CODE (lhs) != SSA_NAME)
5907 return lhs;
5909 for (;;)
5911 stmt = SSA_NAME_DEF_STMT (lhs);
5913 if (TREE_CODE (stmt) == PHI_NODE)
5914 return stmt;
5916 if (TREE_CODE (stmt) != MODIFY_EXPR
5917 || TREE_OPERAND (stmt, 0) != lhs)
5918 return lhs;
5920 rhs = TREE_OPERAND (stmt, 1);
5921 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5922 rhs = TREE_OPERAND (rhs, 0);
5924 if ((TREE_CODE (rhs) != NOP_EXPR
5925 && TREE_CODE (rhs) != CONVERT_EXPR
5926 && (TREE_CODE (rhs) != PLUS_EXPR
5927 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5928 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5929 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5930 return rhs;
5932 lhs = TREE_OPERAND (rhs, 0);
5936 /* Check if LHS = RHS statement is
5937 LHS = *(ap.__base + ap.__offset + cst)
5939 LHS = *(ap.__base
5940 + ((ap.__offset + cst <= 47)
5941 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5942 If the former, indicate that GPR registers are needed,
5943 if the latter, indicate that FPR registers are needed.
5944 On alpha, cfun->va_list_gpr_size is used as size of the needed
5945 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5946 GPR registers are needed and bit 1 set if FPR registers are needed.
5947 Return true if va_list references should not be scanned for the current
5948 statement. */
5950 static bool
5951 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5953 tree base, offset, arg1, arg2;
5954 int offset_arg = 1;
5956 if (TREE_CODE (rhs) != INDIRECT_REF
5957 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5958 return false;
5960 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5961 if (lhs == NULL_TREE
5962 || TREE_CODE (lhs) != PLUS_EXPR)
5963 return false;
5965 base = TREE_OPERAND (lhs, 0);
5966 if (TREE_CODE (base) == SSA_NAME)
5967 base = va_list_skip_additions (base);
5969 if (TREE_CODE (base) != COMPONENT_REF
5970 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5972 base = TREE_OPERAND (lhs, 0);
5973 if (TREE_CODE (base) == SSA_NAME)
5974 base = va_list_skip_additions (base);
5976 if (TREE_CODE (base) != COMPONENT_REF
5977 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5978 return false;
5980 offset_arg = 0;
5983 base = get_base_address (base);
5984 if (TREE_CODE (base) != VAR_DECL
5985 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5986 return false;
5988 offset = TREE_OPERAND (lhs, offset_arg);
5989 if (TREE_CODE (offset) == SSA_NAME)
5990 offset = va_list_skip_additions (offset);
5992 if (TREE_CODE (offset) == PHI_NODE)
5994 HOST_WIDE_INT sub;
5996 if (PHI_NUM_ARGS (offset) != 2)
5997 goto escapes;
5999 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
6000 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
6001 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
6003 tree tem = arg1;
6004 arg1 = arg2;
6005 arg2 = tem;
6007 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
6008 goto escapes;
6010 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
6011 goto escapes;
6013 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
6014 if (TREE_CODE (arg2) == MINUS_EXPR)
6015 sub = -sub;
6016 if (sub < -48 || sub > -32)
6017 goto escapes;
6019 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
6020 if (arg1 != arg2)
6021 goto escapes;
6023 if (TREE_CODE (arg1) == SSA_NAME)
6024 arg1 = va_list_skip_additions (arg1);
6026 if (TREE_CODE (arg1) != COMPONENT_REF
6027 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6028 || get_base_address (arg1) != base)
6029 goto escapes;
6031 /* Need floating point regs. */
6032 cfun->va_list_fpr_size |= 2;
6034 else if (TREE_CODE (offset) != COMPONENT_REF
6035 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6036 || get_base_address (offset) != base)
6037 goto escapes;
6038 else
6039 /* Need general regs. */
6040 cfun->va_list_fpr_size |= 1;
6041 return false;
6043 escapes:
6044 si->va_list_escapes = true;
6045 return false;
6047 #endif
6049 /* Perform any actions needed for a function that is receiving a
6050 variable number of arguments. */
6052 static void
6053 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6054 tree type, int *pretend_size, int no_rtl)
6056 CUMULATIVE_ARGS cum = *pcum;
6058 /* Skip the current argument. */
6059 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6061 #if TARGET_ABI_UNICOSMK
6062 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6063 arguments on the stack. Unfortunately, it doesn't always store the first
6064 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6065 with stdargs as we always have at least one named argument there. */
6066 if (cum.num_reg_words < 6)
6068 if (!no_rtl)
6070 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6071 emit_insn (gen_arg_home_umk ());
6073 *pretend_size = 0;
6075 #elif TARGET_ABI_OPEN_VMS
6076 /* For VMS, we allocate space for all 6 arg registers plus a count.
6078 However, if NO registers need to be saved, don't allocate any space.
6079 This is not only because we won't need the space, but because AP
6080 includes the current_pretend_args_size and we don't want to mess up
6081 any ap-relative addresses already made. */
6082 if (cum.num_args < 6)
6084 if (!no_rtl)
6086 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6087 emit_insn (gen_arg_home ());
6089 *pretend_size = 7 * UNITS_PER_WORD;
6091 #else
6092 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6093 only push those that are remaining. However, if NO registers need to
6094 be saved, don't allocate any space. This is not only because we won't
6095 need the space, but because AP includes the current_pretend_args_size
6096 and we don't want to mess up any ap-relative addresses already made.
6098 If we are not to use the floating-point registers, save the integer
6099 registers where we would put the floating-point registers. This is
6100 not the most efficient way to implement varargs with just one register
6101 class, but it isn't worth doing anything more efficient in this rare
6102 case. */
6103 if (cum >= 6)
6104 return;
6106 if (!no_rtl)
6108 int count, set = get_varargs_alias_set ();
6109 rtx tmp;
6111 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6112 if (count > 6 - cum)
6113 count = 6 - cum;
6115 /* Detect whether integer registers or floating-point registers
6116 are needed by the detected va_arg statements. See above for
6117 how these values are computed. Note that the "escape" value
6118 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6119 these bits set. */
6120 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6122 if (cfun->va_list_fpr_size & 1)
6124 tmp = gen_rtx_MEM (BLKmode,
6125 plus_constant (virtual_incoming_args_rtx,
6126 (cum + 6) * UNITS_PER_WORD));
6127 MEM_NOTRAP_P (tmp) = 1;
6128 set_mem_alias_set (tmp, set);
6129 move_block_from_reg (16 + cum, tmp, count);
6132 if (cfun->va_list_fpr_size & 2)
6134 tmp = gen_rtx_MEM (BLKmode,
6135 plus_constant (virtual_incoming_args_rtx,
6136 cum * UNITS_PER_WORD));
6137 MEM_NOTRAP_P (tmp) = 1;
6138 set_mem_alias_set (tmp, set);
6139 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6142 *pretend_size = 12 * UNITS_PER_WORD;
6143 #endif
6146 void
6147 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6149 HOST_WIDE_INT offset;
6150 tree t, offset_field, base_field;
6152 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6153 return;
6155 if (TARGET_ABI_UNICOSMK)
6156 std_expand_builtin_va_start (valist, nextarg);
6158 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6159 up by 48, storing fp arg registers in the first 48 bytes, and the
6160 integer arg registers in the next 48 bytes. This is only done,
6161 however, if any integer registers need to be stored.
6163 If no integer registers need be stored, then we must subtract 48
6164 in order to account for the integer arg registers which are counted
6165 in argsize above, but which are not actually stored on the stack.
6166 Must further be careful here about structures straddling the last
6167 integer argument register; that futzes with pretend_args_size,
6168 which changes the meaning of AP. */
6170 if (NUM_ARGS < 6)
6171 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6172 else
6173 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6175 if (TARGET_ABI_OPEN_VMS)
6177 nextarg = plus_constant (nextarg, offset);
6178 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6179 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6180 make_tree (ptr_type_node, nextarg));
6181 TREE_SIDE_EFFECTS (t) = 1;
6183 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6185 else
6187 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6188 offset_field = TREE_CHAIN (base_field);
6190 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6191 valist, base_field, NULL_TREE);
6192 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6193 valist, offset_field, NULL_TREE);
6195 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6196 t = build2 (PLUS_EXPR, ptr_type_node, t,
6197 build_int_cst (NULL_TREE, offset));
6198 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6199 TREE_SIDE_EFFECTS (t) = 1;
6200 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6202 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6203 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6204 TREE_SIDE_EFFECTS (t) = 1;
6205 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6209 static tree
6210 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6212 tree type_size, ptr_type, addend, t, addr, internal_post;
6214 /* If the type could not be passed in registers, skip the block
6215 reserved for the registers. */
6216 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6218 t = build_int_cst (TREE_TYPE (offset), 6*8);
6219 t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
6220 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6221 gimplify_and_add (t, pre_p);
6224 addend = offset;
6225 ptr_type = build_pointer_type (type);
6227 if (TREE_CODE (type) == COMPLEX_TYPE)
6229 tree real_part, imag_part, real_temp;
6231 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6232 offset, pre_p);
6234 /* Copy the value into a new temporary, lest the formal temporary
6235 be reused out from under us. */
6236 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6238 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6239 offset, pre_p);
6241 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6243 else if (TREE_CODE (type) == REAL_TYPE)
6245 tree fpaddend, cond, fourtyeight;
6247 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6248 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6249 addend, fourtyeight);
6250 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6251 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6252 fpaddend, addend);
6255 /* Build the final address and force that value into a temporary. */
6256 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6257 fold_convert (ptr_type, addend));
6258 internal_post = NULL;
6259 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6260 append_to_statement_list (internal_post, pre_p);
6262 /* Update the offset field. */
6263 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6264 if (type_size == NULL || TREE_OVERFLOW (type_size))
6265 t = size_zero_node;
6266 else
6268 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6269 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6270 t = size_binop (MULT_EXPR, t, size_int (8));
6272 t = fold_convert (TREE_TYPE (offset), t);
6273 t = build2 (MODIFY_EXPR, void_type_node, offset,
6274 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6275 gimplify_and_add (t, pre_p);
6277 return build_va_arg_indirect_ref (addr);
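/* In C-like pseudo-code, the lowering above amounts to the following for a
   scalar (non-complex) TYPE, where __base and __offset are the va_list
   fields (illustrative only):

	if (must_pass_in_stack (TYPE))
	  __offset = MAX (__offset, 6*8);
	addr = __base + (TYPE is a REAL_TYPE && __offset < 6*8
			 ? __offset - 6*8 : __offset);
	__offset += (sizeof (TYPE) + 7) & ~7;
	result = *(TYPE *) addr;

   i.e. FP arguments are read from the FP save area 48 bytes below __base
   until the saved argument registers are exhausted.  */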
6280 static tree
6281 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6283 tree offset_field, base_field, offset, base, t, r;
6284 bool indirect;
6286 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6287 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6289 base_field = TYPE_FIELDS (va_list_type_node);
6290 offset_field = TREE_CHAIN (base_field);
6291 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6292 valist, base_field, NULL_TREE);
6293 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6294 valist, offset_field, NULL_TREE);
6296 /* Pull the fields of the structure out into temporaries. Since we never
6297 modify the base field, we can use a formal temporary. Sign-extend the
6298 offset field so that it's the proper width for pointer arithmetic. */
6299 base = get_formal_tmp_var (base_field, pre_p);
6301 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6302 offset = get_initialized_tmp_var (t, pre_p, NULL);
6304 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6305 if (indirect)
6306 type = build_pointer_type (type);
6308 /* Find the value. Note that this will be a stable indirection, or
6309 a composite of stable indirections in the case of complex. */
6310 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6312 /* Stuff the offset temporary back into its field. */
6313 t = build2 (MODIFY_EXPR, void_type_node, offset_field,
6314 fold_convert (TREE_TYPE (offset_field), offset));
6315 gimplify_and_add (t, pre_p);
6317 if (indirect)
6318 r = build_va_arg_indirect_ref (r);
6320 return r;
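/* As an illustration of the lowering above (values not taken from any
   particular function): a "long" argument is fetched from base + offset and
   offset is then advanced by 8; a "double" fetched while offset < 6*8 is
   instead read from base + offset - 48, i.e. from the floating-point
   register save area in the 48 bytes just below base; a type that must be
   passed in memory first bumps offset up to at least 48.  In every case the
   offset field advances by the argument size rounded up to a multiple of 8,
   so e.g. a 12-byte struct advances it by 16.  */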
6323 /* Builtins. */
6325 enum alpha_builtin
6327 ALPHA_BUILTIN_CMPBGE,
6328 ALPHA_BUILTIN_EXTBL,
6329 ALPHA_BUILTIN_EXTWL,
6330 ALPHA_BUILTIN_EXTLL,
6331 ALPHA_BUILTIN_EXTQL,
6332 ALPHA_BUILTIN_EXTWH,
6333 ALPHA_BUILTIN_EXTLH,
6334 ALPHA_BUILTIN_EXTQH,
6335 ALPHA_BUILTIN_INSBL,
6336 ALPHA_BUILTIN_INSWL,
6337 ALPHA_BUILTIN_INSLL,
6338 ALPHA_BUILTIN_INSQL,
6339 ALPHA_BUILTIN_INSWH,
6340 ALPHA_BUILTIN_INSLH,
6341 ALPHA_BUILTIN_INSQH,
6342 ALPHA_BUILTIN_MSKBL,
6343 ALPHA_BUILTIN_MSKWL,
6344 ALPHA_BUILTIN_MSKLL,
6345 ALPHA_BUILTIN_MSKQL,
6346 ALPHA_BUILTIN_MSKWH,
6347 ALPHA_BUILTIN_MSKLH,
6348 ALPHA_BUILTIN_MSKQH,
6349 ALPHA_BUILTIN_UMULH,
6350 ALPHA_BUILTIN_ZAP,
6351 ALPHA_BUILTIN_ZAPNOT,
6352 ALPHA_BUILTIN_AMASK,
6353 ALPHA_BUILTIN_IMPLVER,
6354 ALPHA_BUILTIN_RPCC,
6355 ALPHA_BUILTIN_THREAD_POINTER,
6356 ALPHA_BUILTIN_SET_THREAD_POINTER,
6358 /* TARGET_MAX */
6359 ALPHA_BUILTIN_MINUB8,
6360 ALPHA_BUILTIN_MINSB8,
6361 ALPHA_BUILTIN_MINUW4,
6362 ALPHA_BUILTIN_MINSW4,
6363 ALPHA_BUILTIN_MAXUB8,
6364 ALPHA_BUILTIN_MAXSB8,
6365 ALPHA_BUILTIN_MAXUW4,
6366 ALPHA_BUILTIN_MAXSW4,
6367 ALPHA_BUILTIN_PERR,
6368 ALPHA_BUILTIN_PKLB,
6369 ALPHA_BUILTIN_PKWB,
6370 ALPHA_BUILTIN_UNPKBL,
6371 ALPHA_BUILTIN_UNPKBW,
6373 /* TARGET_CIX */
6374 ALPHA_BUILTIN_CTTZ,
6375 ALPHA_BUILTIN_CTLZ,
6376 ALPHA_BUILTIN_CTPOP,
6378 ALPHA_BUILTIN_max
6381 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6382 CODE_FOR_builtin_cmpbge,
6383 CODE_FOR_builtin_extbl,
6384 CODE_FOR_builtin_extwl,
6385 CODE_FOR_builtin_extll,
6386 CODE_FOR_builtin_extql,
6387 CODE_FOR_builtin_extwh,
6388 CODE_FOR_builtin_extlh,
6389 CODE_FOR_builtin_extqh,
6390 CODE_FOR_builtin_insbl,
6391 CODE_FOR_builtin_inswl,
6392 CODE_FOR_builtin_insll,
6393 CODE_FOR_builtin_insql,
6394 CODE_FOR_builtin_inswh,
6395 CODE_FOR_builtin_inslh,
6396 CODE_FOR_builtin_insqh,
6397 CODE_FOR_builtin_mskbl,
6398 CODE_FOR_builtin_mskwl,
6399 CODE_FOR_builtin_mskll,
6400 CODE_FOR_builtin_mskql,
6401 CODE_FOR_builtin_mskwh,
6402 CODE_FOR_builtin_msklh,
6403 CODE_FOR_builtin_mskqh,
6404 CODE_FOR_umuldi3_highpart,
6405 CODE_FOR_builtin_zap,
6406 CODE_FOR_builtin_zapnot,
6407 CODE_FOR_builtin_amask,
6408 CODE_FOR_builtin_implver,
6409 CODE_FOR_builtin_rpcc,
6410 CODE_FOR_load_tp,
6411 CODE_FOR_set_tp,
6413 /* TARGET_MAX */
6414 CODE_FOR_builtin_minub8,
6415 CODE_FOR_builtin_minsb8,
6416 CODE_FOR_builtin_minuw4,
6417 CODE_FOR_builtin_minsw4,
6418 CODE_FOR_builtin_maxub8,
6419 CODE_FOR_builtin_maxsb8,
6420 CODE_FOR_builtin_maxuw4,
6421 CODE_FOR_builtin_maxsw4,
6422 CODE_FOR_builtin_perr,
6423 CODE_FOR_builtin_pklb,
6424 CODE_FOR_builtin_pkwb,
6425 CODE_FOR_builtin_unpkbl,
6426 CODE_FOR_builtin_unpkbw,
6428 /* TARGET_CIX */
6429 CODE_FOR_ctzdi2,
6430 CODE_FOR_clzdi2,
6431 CODE_FOR_popcountdi2
6434 struct alpha_builtin_def
6436 const char *name;
6437 enum alpha_builtin code;
6438 unsigned int target_mask;
6439 bool is_const;
6442 static struct alpha_builtin_def const zero_arg_builtins[] = {
6443 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6444 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6447 static struct alpha_builtin_def const one_arg_builtins[] = {
6448 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6449 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6450 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6451 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6452 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6453 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6454 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6455 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6458 static struct alpha_builtin_def const two_arg_builtins[] = {
6459 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6460 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6461 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6462 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6463 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6464 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6465 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6466 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6467 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6468 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6469 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6470 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6471 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6472 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6473 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6474 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6475 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6476 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6477 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6478 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6479 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6480 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6481 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6482 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6483 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6484 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6485 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6486 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6487 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6488 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6489 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6490 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6491 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6492 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6495 static GTY(()) tree alpha_v8qi_u;
6496 static GTY(()) tree alpha_v8qi_s;
6497 static GTY(()) tree alpha_v4hi_u;
6498 static GTY(()) tree alpha_v4hi_s;
6500 static void
6501 alpha_init_builtins (void)
6503 const struct alpha_builtin_def *p;
6504 tree dimode_integer_type_node;
6505 tree ftype, attrs[2];
6506 size_t i;
6508 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6510 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6511 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6513 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6515 p = zero_arg_builtins;
6516 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6517 if ((target_flags & p->target_mask) == p->target_mask)
6518 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6519 NULL, attrs[p->is_const]);
6521 ftype = build_function_type_list (dimode_integer_type_node,
6522 dimode_integer_type_node, NULL_TREE);
6524 p = one_arg_builtins;
6525 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6526 if ((target_flags & p->target_mask) == p->target_mask)
6527 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6528 NULL, attrs[p->is_const]);
6530 ftype = build_function_type_list (dimode_integer_type_node,
6531 dimode_integer_type_node,
6532 dimode_integer_type_node, NULL_TREE);
6534 p = two_arg_builtins;
6535 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6536 if ((target_flags & p->target_mask) == p->target_mask)
6537 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6538 NULL, attrs[p->is_const]);
6540 ftype = build_function_type (ptr_type_node, void_list_node);
6541 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6542 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6543 NULL, attrs[0]);
6545 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6546 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6547 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6548 NULL, attrs[0]);
6550 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6551 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6552 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6553 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
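/* For reference, the declarations created above give the builtins
   signatures of roughly this shape (rendering DImode informally as "long";
   nothing below is emitted by the compiler itself):

     long  __builtin_alpha_implver (void);
     long  __builtin_alpha_amask (long);
     long  __builtin_alpha_zapnot (long, long);
     void *__builtin_thread_pointer (void);
     void  __builtin_set_thread_pointer (void *);

   The MAX and CIX entries are only registered when the corresponding
   target_flags bits (MASK_MAX, MASK_CIX) are enabled.  */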
6556 /* Expand an expression EXP that calls a built-in function,
6557 with result going to TARGET if that's convenient
6558 (and in mode MODE if that's convenient).
6559 SUBTARGET may be used as the target for computing one of EXP's operands.
6560 IGNORE is nonzero if the value is to be ignored. */
6562 static rtx
6563 alpha_expand_builtin (tree exp, rtx target,
6564 rtx subtarget ATTRIBUTE_UNUSED,
6565 enum machine_mode mode ATTRIBUTE_UNUSED,
6566 int ignore ATTRIBUTE_UNUSED)
6568 #define MAX_ARGS 2
6570 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6571 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6572 tree arglist = TREE_OPERAND (exp, 1);
6573 enum insn_code icode;
6574 rtx op[MAX_ARGS], pat;
6575 int arity;
6576 bool nonvoid;
6578 if (fcode >= ALPHA_BUILTIN_max)
6579 internal_error ("bad builtin fcode");
6580 icode = code_for_builtin[fcode];
6581 if (icode == 0)
6582 internal_error ("bad builtin fcode");
6584 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6586 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6587 arglist;
6588 arglist = TREE_CHAIN (arglist), arity++)
6590 const struct insn_operand_data *insn_op;
6592 tree arg = TREE_VALUE (arglist);
6593 if (arg == error_mark_node)
6594 return NULL_RTX;
6595 if (arity > MAX_ARGS)
6596 return NULL_RTX;
6598 insn_op = &insn_data[icode].operand[arity + nonvoid];
6600 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6602 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6603 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6606 if (nonvoid)
6608 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6609 if (!target
6610 || GET_MODE (target) != tmode
6611 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6612 target = gen_reg_rtx (tmode);
6615 switch (arity)
6617 case 0:
6618 pat = GEN_FCN (icode) (target);
6619 break;
6620 case 1:
6621 if (nonvoid)
6622 pat = GEN_FCN (icode) (target, op[0]);
6623 else
6624 pat = GEN_FCN (icode) (op[0]);
6625 break;
6626 case 2:
6627 pat = GEN_FCN (icode) (target, op[0], op[1]);
6628 break;
6629 default:
6630 gcc_unreachable ();
6632 if (!pat)
6633 return NULL_RTX;
6634 emit_insn (pat);
6636 if (nonvoid)
6637 return target;
6638 else
6639 return const0_rtx;
6643 /* Several bits below assume HWI >= 64 bits. This should be enforced
6644 by config.gcc. */
6645 #if HOST_BITS_PER_WIDE_INT < 64
6646 # error "HOST_WIDE_INT too small"
6647 #endif
6649 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6650 with an 8 bit output vector. OPINT contains the integer operands; bit N
6651 of OP_CONST is set if OPINT[N] is valid. */
6653 static tree
6654 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6656 if (op_const == 3)
6658 int i, val;
6659 for (i = 0, val = 0; i < 8; ++i)
6661 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6662 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6663 if (c0 >= c1)
6664 val |= 1 << i;
6666 return build_int_cst (long_integer_type_node, val);
6668 else if (op_const == 2 && opint[1] == 0)
6669 return build_int_cst (long_integer_type_node, 0xff);
6670 return NULL;
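/* A worked example of the fold above (arbitrary constants): with both
   operands constant,

     __builtin_alpha_cmpbge (0x1122334455667788, 0x8877665544332211)

   succeeds on byte 0 (0x88 >= 0x11), byte 1 (0x77 >= 0x22), byte 2
   (0x66 >= 0x33) and byte 3 (0x55 >= 0x44) and fails on bytes 4-7, so the
   call folds to the constant 0x0f.  */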
6673 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6674 specialized form of an AND operation. Other byte manipulation instructions
6675 are defined in terms of this instruction, so this is also used as a
6676 subroutine for other builtins.
6678 OP contains the tree operands; OPINT contains the extracted integer values.
6679 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6680 OPINT may be considered. */
6682 static tree
6683 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6684 long op_const)
6686 if (op_const & 2)
6688 unsigned HOST_WIDE_INT mask = 0;
6689 int i;
6691 for (i = 0; i < 8; ++i)
6692 if ((opint[1] >> i) & 1)
6693 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6695 if (op_const & 1)
6696 return build_int_cst (long_integer_type_node, opint[0] & mask);
6698 if (op)
6699 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6700 build_int_cst (long_integer_type_node, mask)));
6702 else if ((op_const & 1) && opint[0] == 0)
6703 return build_int_cst (long_integer_type_node, 0);
6704 return NULL;
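/* A worked example of the ZAPNOT fold (arbitrary constants): bit i of the
   second operand selects byte i of the first, so

     __builtin_alpha_zapnot (0x1122334455667788, 0x0f)  ==  0x0000000055667788
     __builtin_alpha_zap    (0x1122334455667788, 0x0f)  ==  0x1122334400000000

   ZAP is handled by complementing the low eight bits of the constant mask
   and reusing this routine (see alpha_fold_builtin below).  */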
6707 /* Fold the builtins for the EXT family of instructions. */
6709 static tree
6710 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6711 long op_const, unsigned HOST_WIDE_INT bytemask,
6712 bool is_high)
6714 long zap_const = 2;
6715 tree *zap_op = NULL;
6717 if (op_const & 2)
6719 unsigned HOST_WIDE_INT loc;
6721 loc = opint[1] & 7;
6722 if (BYTES_BIG_ENDIAN)
6723 loc ^= 7;
6724 loc *= 8;
6726 if (loc != 0)
6728 if (op_const & 1)
6730 unsigned HOST_WIDE_INT temp = opint[0];
6731 if (is_high)
6732 temp <<= loc;
6733 else
6734 temp >>= loc;
6735 opint[0] = temp;
6736 zap_const = 3;
6739 else
6740 zap_op = op;
6743 opint[1] = bytemask;
6744 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
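/* A worked example of the EXT fold (arbitrary constants): the low three
   bits of the second operand give a byte shift, after which the bytes named
   by BYTEMASK are kept.  For the "low" forms this means, e.g.,

     __builtin_alpha_extbl (0x1122334455667788, 2)  ==  0x66
     __builtin_alpha_extwl (0x1122334455667788, 2)  ==  0x5566

   i.e. (x >> 16) masked down to one or two bytes.  The "high" forms shift
   left instead of right before masking.  */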
6747 /* Fold the builtins for the INS family of instructions. */
6749 static tree
6750 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6751 long op_const, unsigned HOST_WIDE_INT bytemask,
6752 bool is_high)
6754 if ((op_const & 1) && opint[0] == 0)
6755 return build_int_cst (long_integer_type_node, 0);
6757 if (op_const & 2)
6759 unsigned HOST_WIDE_INT temp, loc, byteloc;
6760 tree *zap_op = NULL;
6762 loc = opint[1] & 7;
6763 if (BYTES_BIG_ENDIAN)
6764 loc ^= 7;
6765 bytemask <<= loc;
6767 temp = opint[0];
6768 if (is_high)
6770 byteloc = (64 - (loc * 8)) & 0x3f;
6771 if (byteloc == 0)
6772 zap_op = op;
6773 else
6774 temp >>= byteloc;
6775 bytemask >>= 8;
6777 else
6779 byteloc = loc * 8;
6780 if (byteloc == 0)
6781 zap_op = op;
6782 else
6783 temp <<= byteloc;
6786 opint[0] = temp;
6787 opint[1] = bytemask;
6788 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6791 return NULL;
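/* A worked example of the INS fold (arbitrary constants): the low byte or
   bytes of the first operand are shifted to the byte position named by the
   second operand, e.g.

     __builtin_alpha_insbl (0x12, 2)    ==  0x120000
     __builtin_alpha_inswl (0x1234, 2)  ==  0x12340000

   The MSK fold below is the dual operation: it clears those byte positions,
   so __builtin_alpha_mskwl (x, 2) yields x with bytes 2 and 3 zeroed.  */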
6794 static tree
6795 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6796 long op_const, unsigned HOST_WIDE_INT bytemask,
6797 bool is_high)
6799 if (op_const & 2)
6801 unsigned HOST_WIDE_INT loc;
6803 loc = opint[1] & 7;
6804 if (BYTES_BIG_ENDIAN)
6805 loc ^= 7;
6806 bytemask <<= loc;
6808 if (is_high)
6809 bytemask >>= 8;
6811 opint[1] = bytemask ^ 0xff;
6814 return alpha_fold_builtin_zapnot (op, opint, op_const);
6817 static tree
6818 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6820 switch (op_const)
6822 case 3:
6824 unsigned HOST_WIDE_INT l;
6825 HOST_WIDE_INT h;
6827 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6829 #if HOST_BITS_PER_WIDE_INT > 64
6830 # error fixme
6831 #endif
6833 return build_int_cst (long_integer_type_node, h);
6836 case 1:
6837 opint[1] = opint[0];
6838 /* FALLTHRU */
6839 case 2:
6840 /* Note that (X*1) >> 64 == 0. */
6841 if (opint[1] == 0 || opint[1] == 1)
6842 return build_int_cst (long_integer_type_node, 0);
6843 break;
6845 return NULL;
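/* A worked example of the UMULH fold (arbitrary constants): the result is
   the high 64 bits of the unsigned 128-bit product, so

     __builtin_alpha_umulh (1UL << 63, 4)  ==  2

   and, as noted above, any product with a 0 or 1 multiplier folds to 0.  */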
6848 static tree
6849 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6851 tree op0 = fold_convert (vtype, op[0]);
6852 tree op1 = fold_convert (vtype, op[1]);
6853 tree val = fold (build2 (code, vtype, op0, op1));
6854 return fold_convert (long_integer_type_node, val);
6857 static tree
6858 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6860 unsigned HOST_WIDE_INT temp = 0;
6861 int i;
6863 if (op_const != 3)
6864 return NULL;
6866 for (i = 0; i < 8; ++i)
6868 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6869 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6870 if (a >= b)
6871 temp += a - b;
6872 else
6873 temp += b - a;
6876 return build_int_cst (long_integer_type_node, temp);
6879 static tree
6880 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6882 unsigned HOST_WIDE_INT temp;
6884 if (op_const == 0)
6885 return NULL;
6887 temp = opint[0] & 0xff;
6888 temp |= (opint[0] >> 24) & 0xff00;
6890 return build_int_cst (long_integer_type_node, temp);
6893 static tree
6894 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6896 unsigned HOST_WIDE_INT temp;
6898 if (op_const == 0)
6899 return NULL;
6901 temp = opint[0] & 0xff;
6902 temp |= (opint[0] >> 8) & 0xff00;
6903 temp |= (opint[0] >> 16) & 0xff0000;
6904 temp |= (opint[0] >> 24) & 0xff000000;
6906 return build_int_cst (long_integer_type_node, temp);
6909 static tree
6910 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6912 unsigned HOST_WIDE_INT temp;
6914 if (op_const == 0)
6915 return NULL;
6917 temp = opint[0] & 0xff;
6918 temp |= (opint[0] & 0xff00) << 24;
6920 return build_int_cst (long_integer_type_node, temp);
6923 static tree
6924 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6926 unsigned HOST_WIDE_INT temp;
6928 if (op_const == 0)
6929 return NULL;
6931 temp = opint[0] & 0xff;
6932 temp |= (opint[0] & 0x0000ff00) << 8;
6933 temp |= (opint[0] & 0x00ff0000) << 16;
6934 temp |= (opint[0] & 0xff000000) << 24;
6936 return build_int_cst (long_integer_type_node, temp);
6939 static tree
6940 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6942 unsigned HOST_WIDE_INT temp;
6944 if (op_const == 0)
6945 return NULL;
6947 if (opint[0] == 0)
6948 temp = 64;
6949 else
6950 temp = exact_log2 (opint[0] & -opint[0]);
6952 return build_int_cst (long_integer_type_node, temp);
6955 static tree
6956 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6958 unsigned HOST_WIDE_INT temp;
6960 if (op_const == 0)
6961 return NULL;
6963 if (opint[0] == 0)
6964 temp = 64;
6965 else
6966 temp = 64 - floor_log2 (opint[0]) - 1;
6968 return build_int_cst (long_integer_type_node, temp);
6971 static tree
6972 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6974 unsigned HOST_WIDE_INT temp, op;
6976 if (op_const == 0)
6977 return NULL;
6979 op = opint[0];
6980 temp = 0;
6981 while (op)
6982 temp++, op &= op - 1;
6984 return build_int_cst (long_integer_type_node, temp);
6987 /* Fold one of our builtin functions. */
6989 static tree
6990 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6992 tree op[MAX_ARGS], t;
6993 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6994 long op_const = 0, arity = 0;
6996 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6998 tree arg = TREE_VALUE (t);
6999 if (arg == error_mark_node)
7000 return NULL;
7001 if (arity >= MAX_ARGS)
7002 return NULL;
7004 op[arity] = arg;
7005 opint[arity] = 0;
7006 if (TREE_CODE (arg) == INTEGER_CST)
7008 op_const |= 1L << arity;
7009 opint[arity] = int_cst_value (arg);
7013 switch (DECL_FUNCTION_CODE (fndecl))
7015 case ALPHA_BUILTIN_CMPBGE:
7016 return alpha_fold_builtin_cmpbge (opint, op_const);
7018 case ALPHA_BUILTIN_EXTBL:
7019 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7020 case ALPHA_BUILTIN_EXTWL:
7021 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7022 case ALPHA_BUILTIN_EXTLL:
7023 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7024 case ALPHA_BUILTIN_EXTQL:
7025 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7026 case ALPHA_BUILTIN_EXTWH:
7027 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7028 case ALPHA_BUILTIN_EXTLH:
7029 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7030 case ALPHA_BUILTIN_EXTQH:
7031 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7033 case ALPHA_BUILTIN_INSBL:
7034 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7035 case ALPHA_BUILTIN_INSWL:
7036 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7037 case ALPHA_BUILTIN_INSLL:
7038 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7039 case ALPHA_BUILTIN_INSQL:
7040 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7041 case ALPHA_BUILTIN_INSWH:
7042 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7043 case ALPHA_BUILTIN_INSLH:
7044 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7045 case ALPHA_BUILTIN_INSQH:
7046 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7048 case ALPHA_BUILTIN_MSKBL:
7049 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7050 case ALPHA_BUILTIN_MSKWL:
7051 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7052 case ALPHA_BUILTIN_MSKLL:
7053 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7054 case ALPHA_BUILTIN_MSKQL:
7055 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7056 case ALPHA_BUILTIN_MSKWH:
7057 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7058 case ALPHA_BUILTIN_MSKLH:
7059 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7060 case ALPHA_BUILTIN_MSKQH:
7061 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7063 case ALPHA_BUILTIN_UMULH:
7064 return alpha_fold_builtin_umulh (opint, op_const);
7066 case ALPHA_BUILTIN_ZAP:
7067 opint[1] ^= 0xff;
7068 /* FALLTHRU */
7069 case ALPHA_BUILTIN_ZAPNOT:
7070 return alpha_fold_builtin_zapnot (op, opint, op_const);
7072 case ALPHA_BUILTIN_MINUB8:
7073 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7074 case ALPHA_BUILTIN_MINSB8:
7075 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7076 case ALPHA_BUILTIN_MINUW4:
7077 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7078 case ALPHA_BUILTIN_MINSW4:
7079 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7080 case ALPHA_BUILTIN_MAXUB8:
7081 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7082 case ALPHA_BUILTIN_MAXSB8:
7083 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7084 case ALPHA_BUILTIN_MAXUW4:
7085 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7086 case ALPHA_BUILTIN_MAXSW4:
7087 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7089 case ALPHA_BUILTIN_PERR:
7090 return alpha_fold_builtin_perr (opint, op_const);
7091 case ALPHA_BUILTIN_PKLB:
7092 return alpha_fold_builtin_pklb (opint, op_const);
7093 case ALPHA_BUILTIN_PKWB:
7094 return alpha_fold_builtin_pkwb (opint, op_const);
7095 case ALPHA_BUILTIN_UNPKBL:
7096 return alpha_fold_builtin_unpkbl (opint, op_const);
7097 case ALPHA_BUILTIN_UNPKBW:
7098 return alpha_fold_builtin_unpkbw (opint, op_const);
7100 case ALPHA_BUILTIN_CTTZ:
7101 return alpha_fold_builtin_cttz (opint, op_const);
7102 case ALPHA_BUILTIN_CTLZ:
7103 return alpha_fold_builtin_ctlz (opint, op_const);
7104 case ALPHA_BUILTIN_CTPOP:
7105 return alpha_fold_builtin_ctpop (opint, op_const);
7107 case ALPHA_BUILTIN_AMASK:
7108 case ALPHA_BUILTIN_IMPLVER:
7109 case ALPHA_BUILTIN_RPCC:
7110 case ALPHA_BUILTIN_THREAD_POINTER:
7111 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7112 /* None of these are foldable at compile-time. */
7113 default:
7114 return NULL;
7118 /* This page contains routines that are used to determine what the function
7119 prologue and epilogue code will do and write them out. */
7121 /* Compute the size of the save area in the stack. */
7123 /* These variables are used for communication between the following functions.
7124 They indicate various things about the current function being compiled
7125 that are used to tell what kind of prologue, epilogue and procedure
7126 descriptor to generate. */
7128 /* The kind of procedure (null, register, or stack frame) we need to generate. */
7129 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7130 static enum alpha_procedure_types alpha_procedure_type;
7132 /* Register number (either FP or SP) that is used to unwind the frame. */
7133 static int vms_unwind_regno;
7135 /* Register number used to save FP. We need not have one for RA since
7136 we don't modify it for register procedures. This is only defined
7137 for register frame procedures. */
7138 static int vms_save_fp_regno;
7140 /* Register number used to reference objects off our PV. */
7141 static int vms_base_regno;
7143 /* Compute register masks for saved registers. */
7145 static void
7146 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7148 unsigned long imask = 0;
7149 unsigned long fmask = 0;
7150 unsigned int i;
7152 /* When outputting a thunk, we don't have valid register life info,
7153 but assemble_start_function wants to output .frame and .mask
7154 directives. */
7155 if (current_function_is_thunk)
7157 *imaskP = 0;
7158 *fmaskP = 0;
7159 return;
7162 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7163 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7165 /* One for every register we have to save. */
7166 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7167 if (! fixed_regs[i] && ! call_used_regs[i]
7168 && regs_ever_live[i] && i != REG_RA
7169 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7171 if (i < 32)
7172 imask |= (1UL << i);
7173 else
7174 fmask |= (1UL << (i - 32));
7177 /* We need to restore these for the handler. */
7178 if (current_function_calls_eh_return)
7180 for (i = 0; ; ++i)
7182 unsigned regno = EH_RETURN_DATA_REGNO (i);
7183 if (regno == INVALID_REGNUM)
7184 break;
7185 imask |= 1UL << regno;
7189 /* If any register spilled, then spill the return address also. */
7190 /* ??? This is required by the Digital stack unwind specification
7191 and isn't needed if we're doing Dwarf2 unwinding. */
7192 if (imask || fmask || alpha_ra_ever_killed ())
7193 imask |= (1UL << REG_RA);
7195 *imaskP = imask;
7196 *fmaskP = fmask;
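/* For illustration (hard register numbers as used elsewhere in this file,
   $26 being REG_RA): a function that must save $9, $10 and $f2 ends up
   with

     imask == (1UL << 9) | (1UL << 10) | (1UL << 26)
     fmask == (1UL << 2)

   the RA bit being added by the "spill the return address also" rule
   above.  */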
7200 alpha_sa_size (void)
7202 unsigned long mask[2];
7203 int sa_size = 0;
7204 int i, j;
7206 alpha_sa_mask (&mask[0], &mask[1]);
7208 if (TARGET_ABI_UNICOSMK)
7210 if (mask[0] || mask[1])
7211 sa_size = 14;
7213 else
7215 for (j = 0; j < 2; ++j)
7216 for (i = 0; i < 32; ++i)
7217 if ((mask[j] >> i) & 1)
7218 sa_size++;
7221 if (TARGET_ABI_UNICOSMK)
7223 /* We might not need to generate a frame if we don't make any calls
7224 (including calls to __T3E_MISMATCH if this is a vararg function),
7225 don't have any local variables which require stack slots, don't
7226 use alloca and have not determined that we need a frame for other
7227 reasons. */
7229 alpha_procedure_type
7230 = (sa_size || get_frame_size() != 0
7231 || current_function_outgoing_args_size
7232 || current_function_stdarg || current_function_calls_alloca
7233 || frame_pointer_needed)
7234 ? PT_STACK : PT_REGISTER;
7236 /* Always reserve space for saving callee-saved registers if we
7237 need a frame as required by the calling convention. */
7238 if (alpha_procedure_type == PT_STACK)
7239 sa_size = 14;
7241 else if (TARGET_ABI_OPEN_VMS)
7243 /* Start by assuming we can use a register procedure if we don't
7244 make any calls (REG_RA not used) or need to save any
7245 registers and a stack procedure if we do. */
7246 if ((mask[0] >> REG_RA) & 1)
7247 alpha_procedure_type = PT_STACK;
7248 else if (get_frame_size() != 0)
7249 alpha_procedure_type = PT_REGISTER;
7250 else
7251 alpha_procedure_type = PT_NULL;
7253 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7254 made the final decision on stack procedure vs register procedure. */
7255 if (alpha_procedure_type == PT_STACK)
7256 sa_size -= 2;
7258 /* Decide whether to refer to objects off our PV via FP or PV.
7259 If we need FP for something else or if we receive a nonlocal
7260 goto (which expects PV to contain the value), we must use PV.
7261 Otherwise, start by assuming we can use FP. */
7263 vms_base_regno
7264 = (frame_pointer_needed
7265 || current_function_has_nonlocal_label
7266 || alpha_procedure_type == PT_STACK
7267 || current_function_outgoing_args_size)
7268 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7270 /* If we want to copy PV into FP, we need to find some register
7271 in which to save FP. */
7273 vms_save_fp_regno = -1;
7274 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7275 for (i = 0; i < 32; i++)
7276 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7277 vms_save_fp_regno = i;
7279 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7280 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7281 else if (alpha_procedure_type == PT_NULL)
7282 vms_base_regno = REG_PV;
7284 /* Stack unwinding should be done via FP unless we use it for PV. */
7285 vms_unwind_regno = (vms_base_regno == REG_PV
7286 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7288 /* If this is a stack procedure, allow space for saving FP and RA. */
7289 if (alpha_procedure_type == PT_STACK)
7290 sa_size += 2;
7292 else
7294 /* Our size must be even (multiple of 16 bytes). */
7295 if (sa_size & 1)
7296 sa_size++;
7299 return sa_size * 8;
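/* For illustration (the non-VMS, non-Unicos/Mk case): saving $9, $10 and
   $26 gives sa_size == 3, which the "must be even" adjustment rounds up to
   4, so 32 bytes of save area are reported and 16-byte stack alignment is
   preserved.  */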
7302 /* Define the offset between two registers, one to be eliminated,
7303 and the other its replacement, at the start of a routine. */
7305 HOST_WIDE_INT
7306 alpha_initial_elimination_offset (unsigned int from,
7307 unsigned int to ATTRIBUTE_UNUSED)
7309 HOST_WIDE_INT ret;
7311 ret = alpha_sa_size ();
7312 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7314 switch (from)
7316 case FRAME_POINTER_REGNUM:
7317 break;
7319 case ARG_POINTER_REGNUM:
7320 ret += (ALPHA_ROUND (get_frame_size ()
7321 + current_function_pretend_args_size)
7322 - current_function_pretend_args_size);
7323 break;
7325 default:
7326 gcc_unreachable ();
7329 return ret;
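/* For illustration (assuming ALPHA_ROUND rounds up to a 16-byte multiple):
   with a 32-byte save area, 16 bytes of outgoing argument space, a 40-byte
   local frame and no pretend args, the FRAME_POINTER elimination offset is
   32 + 16 == 48, and the ARG_POINTER elimination adds ALPHA_ROUND (40) == 48
   more, giving 96.  */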
7333 alpha_pv_save_size (void)
7335 alpha_sa_size ();
7336 return alpha_procedure_type == PT_STACK ? 8 : 0;
7340 alpha_using_fp (void)
7342 alpha_sa_size ();
7343 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7346 #if TARGET_ABI_OPEN_VMS
7348 const struct attribute_spec vms_attribute_table[] =
7350 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7351 { "overlaid", 0, 0, true, false, false, NULL },
7352 { "global", 0, 0, true, false, false, NULL },
7353 { "initialize", 0, 0, true, false, false, NULL },
7354 { NULL, 0, 0, false, false, false, NULL }
7357 #endif
7359 static int
7360 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7362 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7366 alpha_find_lo_sum_using_gp (rtx insn)
7368 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7371 static int
7372 alpha_does_function_need_gp (void)
7374 rtx insn;
7376 /* The GP being variable is an OSF abi thing. */
7377 if (! TARGET_ABI_OSF)
7378 return 0;
7380 /* We need the gp to load the address of __mcount. */
7381 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7382 return 1;
7384 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7385 if (current_function_is_thunk)
7386 return 1;
7388 /* The nonlocal receiver pattern assumes that the gp is valid for
7389 the nested function. Reasonable because it's almost always set
7390 correctly already. For the cases where that's wrong, make sure
7391 the nested function loads its gp on entry. */
7392 if (current_function_has_nonlocal_goto)
7393 return 1;
7395 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7396 Even if we are a static function, we still need to do this in case
7397 our address is taken and passed to something like qsort. */
7399 push_topmost_sequence ();
7400 insn = get_insns ();
7401 pop_topmost_sequence ();
7403 for (; insn; insn = NEXT_INSN (insn))
7404 if (INSN_P (insn)
7405 && GET_CODE (PATTERN (insn)) != USE
7406 && GET_CODE (PATTERN (insn)) != CLOBBER
7407 && get_attr_usegp (insn))
7408 return 1;
7410 return 0;
7414 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7415 sequences. */
7417 static rtx
7418 set_frame_related_p (void)
7420 rtx seq = get_insns ();
7421 rtx insn;
7423 end_sequence ();
7425 if (!seq)
7426 return NULL_RTX;
7428 if (INSN_P (seq))
7430 insn = seq;
7431 while (insn != NULL_RTX)
7433 RTX_FRAME_RELATED_P (insn) = 1;
7434 insn = NEXT_INSN (insn);
7436 seq = emit_insn (seq);
7438 else
7440 seq = emit_insn (seq);
7441 RTX_FRAME_RELATED_P (seq) = 1;
7443 return seq;
7446 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
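/* Typical use, as in alpha_expand_prologue below (modulo the Unicos/Mk
   bias):

     FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                 GEN_INT (-frame_size))));

   Every insn emitted inside the wrapped expression gets
   RTX_FRAME_RELATED_P set, so dwarf2out sees the stack adjustment.  */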
7448 /* Generates a store with the proper unwind info attached. VALUE is
7449 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7450 contains SP+FRAME_BIAS, and that is the unwind info that should be
7451 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7452 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7454 static void
7455 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7456 HOST_WIDE_INT base_ofs, rtx frame_reg)
7458 rtx addr, mem, insn;
7460 addr = plus_constant (base_reg, base_ofs);
7461 mem = gen_rtx_MEM (DImode, addr);
7462 set_mem_alias_set (mem, alpha_sr_alias_set);
7464 insn = emit_move_insn (mem, value);
7465 RTX_FRAME_RELATED_P (insn) = 1;
7467 if (frame_bias || value != frame_reg)
7469 if (frame_bias)
7471 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7472 mem = gen_rtx_MEM (DImode, addr);
7475 REG_NOTES (insn)
7476 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7477 gen_rtx_SET (VOIDmode, mem, frame_reg),
7478 REG_NOTES (insn));
7482 static void
7483 emit_frame_store (unsigned int regno, rtx base_reg,
7484 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7486 rtx reg = gen_rtx_REG (DImode, regno);
7487 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7490 /* Write function prologue. */
7492 /* On vms we have two kinds of functions:
7494 - stack frame (PROC_STACK)
7495 these are 'normal' functions with local vars that
7496 call other functions
7497 - register frame (PROC_REGISTER)
7498 keeps all data in registers, needs no stack
7500 We must pass this to the assembler so it can generate the
7501 proper pdsc (procedure descriptor).
7502 This is done with the '.pdesc' command.
7504 On non-VMS, we don't really differentiate between the two, as we can
7505 simply allocate stack without saving registers. */
7507 void
7508 alpha_expand_prologue (void)
7510 /* Registers to save. */
7511 unsigned long imask = 0;
7512 unsigned long fmask = 0;
7513 /* Stack space needed for pushing registers clobbered by us. */
7514 HOST_WIDE_INT sa_size;
7515 /* Complete stack size needed. */
7516 HOST_WIDE_INT frame_size;
7517 /* Offset from base reg to register save area. */
7518 HOST_WIDE_INT reg_offset;
7519 rtx sa_reg;
7520 int i;
7522 sa_size = alpha_sa_size ();
7524 frame_size = get_frame_size ();
7525 if (TARGET_ABI_OPEN_VMS)
7526 frame_size = ALPHA_ROUND (sa_size
7527 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7528 + frame_size
7529 + current_function_pretend_args_size);
7530 else if (TARGET_ABI_UNICOSMK)
7531 /* We have to allocate space for the DSIB if we generate a frame. */
7532 frame_size = ALPHA_ROUND (sa_size
7533 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7534 + ALPHA_ROUND (frame_size
7535 + current_function_outgoing_args_size);
7536 else
7537 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7538 + sa_size
7539 + ALPHA_ROUND (frame_size
7540 + current_function_pretend_args_size));
7542 if (TARGET_ABI_OPEN_VMS)
7543 reg_offset = 8;
7544 else
7545 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7547 alpha_sa_mask (&imask, &fmask);
7549 /* Emit an insn to reload GP, if needed. */
7550 if (TARGET_ABI_OSF)
7552 alpha_function_needs_gp = alpha_does_function_need_gp ();
7553 if (alpha_function_needs_gp)
7554 emit_insn (gen_prologue_ldgp ());
7557 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7558 the call to mcount ourselves, rather than having the linker do it
7559 magically in response to -pg. Since _mcount has special linkage,
7560 don't represent the call as a call. */
7561 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7562 emit_insn (gen_prologue_mcount ());
7564 if (TARGET_ABI_UNICOSMK)
7565 unicosmk_gen_dsib (&imask);
7567 /* Adjust the stack by the frame size. If the frame size is > 4096
7568 bytes, we need to be sure we probe somewhere in the first and last
7569 4096 bytes (we can probably get away without the latter test) and
7570 every 8192 bytes in between. If the frame size is > 32768, we
7571 do this in a loop. Otherwise, we generate the explicit probe
7572 instructions.
7574 Note that we are only allowed to adjust sp once in the prologue. */
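/* For example (on OSF/1, say), frame_size == 20000 with a nonempty save
   area emits probes at sp-4096 and sp-12288 and then a single
   sp = sp - 20000; with an empty save area a third probe at sp-20000 is
   added first, since frame_size > probed - 4096 at that point.  */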
7576 if (frame_size <= 32768)
7578 if (frame_size > 4096)
7580 int probed;
7582 for (probed = 4096; probed < frame_size; probed += 8192)
7583 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7584 ? -probed + 64
7585 : -probed)));
7587 /* We only have to do this probe if we aren't saving registers. */
7588 if (sa_size == 0 && frame_size > probed - 4096)
7589 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7592 if (frame_size != 0)
7593 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7594 GEN_INT (TARGET_ABI_UNICOSMK
7595 ? -frame_size + 64
7596 : -frame_size))));
7598 else
7600 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7601 number of 8192 byte blocks to probe. We then probe each block
7602 in the loop and then set SP to the proper location. If the
7603 amount remaining is > 4096, we have to do one more probe if we
7604 are not saving any registers. */
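/* For example, frame_size == 100000 gives blocks == 12 and
   leftover == 5792; $23 counts the 12 probes, $22 starts at sp+4096, and
   afterwards (in the non-NT case) sp is set to $22 - 5792, which works out
   to the original sp - 100000.  */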
7606 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7607 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7608 rtx ptr = gen_rtx_REG (DImode, 22);
7609 rtx count = gen_rtx_REG (DImode, 23);
7610 rtx seq;
7612 emit_move_insn (count, GEN_INT (blocks));
7613 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7614 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7616 /* Because of the difficulty in emitting a new basic block this
7617 late in the compilation, generate the loop as a single insn. */
7618 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7620 if (leftover > 4096 && sa_size == 0)
7622 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7623 MEM_VOLATILE_P (last) = 1;
7624 emit_move_insn (last, const0_rtx);
7627 if (TARGET_ABI_WINDOWS_NT)
7629 /* For NT stack unwind (done by 'reverse execution'), it's
7630 not OK to take the result of a loop, even though the value
7631 is already in ptr, so we reload it via a single operation
7632 and subtract it from sp.
7634 Yes, that's correct -- we have to reload the whole constant
7635 into a temporary via ldah+lda then subtract from sp. */
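/* For example, frame_size == 100000 splits as lo == -31072 (the
   sign-extended low 16 bits) and hi == 131072, so $22 is built with an
   ldah/lda pair and then subtracted from sp in a single operation.  */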
7637 HOST_WIDE_INT lo, hi;
7638 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7639 hi = frame_size - lo;
7641 emit_move_insn (ptr, GEN_INT (hi));
7642 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7643 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7644 ptr));
7646 else
7648 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7649 GEN_INT (-leftover)));
7652 /* This alternative is special, because the DWARF code cannot
7653 possibly intuit through the loop above. So we invent this
7654 note for it to look at instead. */
7655 RTX_FRAME_RELATED_P (seq) = 1;
7656 REG_NOTES (seq)
7657 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7658 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7659 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7660 GEN_INT (TARGET_ABI_UNICOSMK
7661 ? -frame_size + 64
7662 : -frame_size))),
7663 REG_NOTES (seq));
7666 if (!TARGET_ABI_UNICOSMK)
7668 HOST_WIDE_INT sa_bias = 0;
7670 /* Cope with very large offsets to the register save area. */
7671 sa_reg = stack_pointer_rtx;
7672 if (reg_offset + sa_size > 0x8000)
7674 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7675 rtx sa_bias_rtx;
7677 if (low + sa_size <= 0x8000)
7678 sa_bias = reg_offset - low, reg_offset = low;
7679 else
7680 sa_bias = reg_offset, reg_offset = 0;
7682 sa_reg = gen_rtx_REG (DImode, 24);
7683 sa_bias_rtx = GEN_INT (sa_bias);
7685 if (add_operand (sa_bias_rtx, DImode))
7686 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7687 else
7689 emit_move_insn (sa_reg, sa_bias_rtx);
7690 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
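/* For example, reg_offset == 0x9000 with a 0x100-byte save area gives
   low == -0x7000, so sa_bias becomes 0x10000 and reg_offset -0x7000:
   $24 is set to sp + 0x10000 and the saves below use displacements
   starting at -0x7000, all of which fit in a 16-bit field.  */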
7694 /* Save regs in stack order. Beginning with VMS PV. */
7695 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7696 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7698 /* Save register RA next. */
7699 if (imask & (1UL << REG_RA))
7701 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7702 imask &= ~(1UL << REG_RA);
7703 reg_offset += 8;
7706 /* Now save any other registers required to be saved. */
7707 for (i = 0; i < 31; i++)
7708 if (imask & (1UL << i))
7710 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7711 reg_offset += 8;
7714 for (i = 0; i < 31; i++)
7715 if (fmask & (1UL << i))
7717 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7718 reg_offset += 8;
7721 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7723 /* The standard frame on the T3E includes space for saving registers.
7724 We just have to use it. We don't have to save the return address and
7725 the old frame pointer here - they are saved in the DSIB. */
7727 reg_offset = -56;
7728 for (i = 9; i < 15; i++)
7729 if (imask & (1UL << i))
7731 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7732 reg_offset -= 8;
7734 for (i = 2; i < 10; i++)
7735 if (fmask & (1UL << i))
7737 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7738 reg_offset -= 8;
7742 if (TARGET_ABI_OPEN_VMS)
7744 if (alpha_procedure_type == PT_REGISTER)
7745 /* Register frame procedures save the fp.
7746 ?? Ought to have a dwarf2 save for this. */
7747 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7748 hard_frame_pointer_rtx);
7750 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7751 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7752 gen_rtx_REG (DImode, REG_PV)));
7754 if (alpha_procedure_type != PT_NULL
7755 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7756 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7758 /* If we have to allocate space for outgoing args, do it now. */
7759 if (current_function_outgoing_args_size != 0)
7761 rtx seq
7762 = emit_move_insn (stack_pointer_rtx,
7763 plus_constant
7764 (hard_frame_pointer_rtx,
7765 - (ALPHA_ROUND
7766 (current_function_outgoing_args_size))));
7768 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7769 if ! frame_pointer_needed. Setting the bit will change the CFA
7770 computation rule to use sp again, which would be wrong if we had
7771 frame_pointer_needed, as this means sp might move unpredictably
7772 later on.
7774 Also, note that
7775 frame_pointer_needed
7776 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7778 current_function_outgoing_args_size != 0
7779 => alpha_procedure_type != PT_NULL,
7781 so when we are not setting the bit here, we are guaranteed to
7782 have emitted an FRP frame pointer update just before. */
7783 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7786 else if (!TARGET_ABI_UNICOSMK)
7788 /* If we need a frame pointer, set it from the stack pointer. */
7789 if (frame_pointer_needed)
7791 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7792 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7793 else
7794 /* This must always be the last instruction in the
7795 prologue, thus we emit a special move + clobber. */
7796 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7797 stack_pointer_rtx, sa_reg)));
7801 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7802 the prologue, for exception handling reasons, we cannot do this for
7803 any insn that might fault. We could prevent this for mems with a
7804 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7805 have to prevent all such scheduling with a blockage.
7807 Linux, on the other hand, never bothered to implement OSF/1's
7808 exception handling, and so doesn't care about such things. Anyone
7809 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7811 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7812 emit_insn (gen_blockage ());
7815 /* Count the number of .file directives, so that .loc is up to date. */
7816 int num_source_filenames = 0;
7818 /* Output the textual info surrounding the prologue. */
7820 void
7821 alpha_start_function (FILE *file, const char *fnname,
7822 tree decl ATTRIBUTE_UNUSED)
7824 unsigned long imask = 0;
7825 unsigned long fmask = 0;
7826 /* Stack space needed for pushing registers clobbered by us. */
7827 HOST_WIDE_INT sa_size;
7828 /* Complete stack size needed. */
7829 unsigned HOST_WIDE_INT frame_size;
7830 /* Offset from base reg to register save area. */
7831 HOST_WIDE_INT reg_offset;
7832 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7833 int i;
7835 /* Don't emit an extern directive for functions defined in the same file. */
7836 if (TARGET_ABI_UNICOSMK)
7838 tree name_tree;
7839 name_tree = get_identifier (fnname);
7840 TREE_ASM_WRITTEN (name_tree) = 1;
7843 alpha_fnname = fnname;
7844 sa_size = alpha_sa_size ();
7846 frame_size = get_frame_size ();
7847 if (TARGET_ABI_OPEN_VMS)
7848 frame_size = ALPHA_ROUND (sa_size
7849 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7850 + frame_size
7851 + current_function_pretend_args_size);
7852 else if (TARGET_ABI_UNICOSMK)
7853 frame_size = ALPHA_ROUND (sa_size
7854 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7855 + ALPHA_ROUND (frame_size
7856 + current_function_outgoing_args_size);
7857 else
7858 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7859 + sa_size
7860 + ALPHA_ROUND (frame_size
7861 + current_function_pretend_args_size));
7863 if (TARGET_ABI_OPEN_VMS)
7864 reg_offset = 8;
7865 else
7866 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7868 alpha_sa_mask (&imask, &fmask);
7870 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7871 We have to do that before the .ent directive as we cannot switch
7872 files within procedures with native ecoff because line numbers are
7873 linked to procedure descriptors.
7874 Outputting the lineno helps debugging of one-line functions as they
7875 would otherwise get no line number at all. Please note that we would
7876 like to put out last_linenum from final.c, but it is not accessible. */
7878 if (write_symbols == SDB_DEBUG)
7880 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7881 ASM_OUTPUT_SOURCE_FILENAME (file,
7882 DECL_SOURCE_FILE (current_function_decl));
7883 #endif
7884 #ifdef SDB_OUTPUT_SOURCE_LINE
7885 if (debug_info_level != DINFO_LEVEL_TERSE)
7886 SDB_OUTPUT_SOURCE_LINE (file,
7887 DECL_SOURCE_LINE (current_function_decl));
7888 #endif
7891 /* Issue function start and label. */
7892 if (TARGET_ABI_OPEN_VMS
7893 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7895 fputs ("\t.ent ", file);
7896 assemble_name (file, fnname);
7897 putc ('\n', file);
7899 /* If the function needs GP, we'll write the "..ng" label there.
7900 Otherwise, do it here. */
7901 if (TARGET_ABI_OSF
7902 && ! alpha_function_needs_gp
7903 && ! current_function_is_thunk)
7905 putc ('$', file);
7906 assemble_name (file, fnname);
7907 fputs ("..ng:\n", file);
7911 strcpy (entry_label, fnname);
7912 if (TARGET_ABI_OPEN_VMS)
7913 strcat (entry_label, "..en");
7915 /* For public functions, the label must be globalized by appending an
7916 additional colon. */
7917 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7918 strcat (entry_label, ":");
7920 ASM_OUTPUT_LABEL (file, entry_label);
7921 inside_function = TRUE;
7923 if (TARGET_ABI_OPEN_VMS)
7924 fprintf (file, "\t.base $%d\n", vms_base_regno);
7926 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7927 && !flag_inhibit_size_directive)
7929 /* Set flags in procedure descriptor to request IEEE-conformant
7930 math-library routines. The value we set it to is PDSC_EXC_IEEE
7931 (/usr/include/pdsc.h). */
7932 fputs ("\t.eflag 48\n", file);
7935 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7936 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7937 alpha_arg_offset = -frame_size + 48;
7939 /* Describe our frame. If the frame size is too large for a signed
7940 32-bit integer, print it as zero to avoid an assembler error. We won't be
7941 properly describing such a frame, but that's the best we can do. */
7942 if (TARGET_ABI_UNICOSMK)
7944 else if (TARGET_ABI_OPEN_VMS)
7945 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7946 HOST_WIDE_INT_PRINT_DEC "\n",
7947 vms_unwind_regno,
7948 frame_size >= (1UL << 31) ? 0 : frame_size,
7949 reg_offset);
7950 else if (!flag_inhibit_size_directive)
7951 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7952 (frame_pointer_needed
7953 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7954 frame_size >= (1UL << 31) ? 0 : frame_size,
7955 current_function_pretend_args_size);
7957 /* Describe which registers were spilled. */
7958 if (TARGET_ABI_UNICOSMK)
7960 else if (TARGET_ABI_OPEN_VMS)
7962 if (imask)
7963 /* ??? Does VMS care if mask contains ra? The old code didn't
7964 set it, so I don't set it here either. */
7965 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7966 if (fmask)
7967 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7968 if (alpha_procedure_type == PT_REGISTER)
7969 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7971 else if (!flag_inhibit_size_directive)
7973 if (imask)
7975 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7976 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7978 for (i = 0; i < 32; ++i)
7979 if (imask & (1UL << i))
7980 reg_offset += 8;
7983 if (fmask)
7984 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7985 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
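/* The non-VMS output above looks, for a small OSF/1 function with a
   96-byte frame that saves $9 and $26 at the base of the save area,
   roughly like

     .frame $30,96,$26,0
     .mask 0x4000200,-96

   (an illustrative rendering, not copied from any particular
   compilation).  */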
7988 #if TARGET_ABI_OPEN_VMS
7989 /* Ifdef'ed because link_section is only available then. */
7990 switch_to_section (readonly_data_section);
7991 fprintf (file, "\t.align 3\n");
7992 assemble_name (file, fnname); fputs ("..na:\n", file);
7993 fputs ("\t.ascii \"", file);
7994 assemble_name (file, fnname);
7995 fputs ("\\0\"\n", file);
7996 alpha_need_linkage (fnname, 1);
7997 switch_to_section (text_section);
7998 #endif
8001 /* Emit the .prologue note at the scheduled end of the prologue. */
8003 static void
8004 alpha_output_function_end_prologue (FILE *file)
8006 if (TARGET_ABI_UNICOSMK)
8008 else if (TARGET_ABI_OPEN_VMS)
8009 fputs ("\t.prologue\n", file);
8010 else if (TARGET_ABI_WINDOWS_NT)
8011 fputs ("\t.prologue 0\n", file);
8012 else if (!flag_inhibit_size_directive)
8013 fprintf (file, "\t.prologue %d\n",
8014 alpha_function_needs_gp || current_function_is_thunk);
8017 /* Write function epilogue. */
8019 /* ??? At some point we will want to support full unwind, and so will
8020 need to mark the epilogue as well. At the moment, we just confuse
8021 dwarf2out. */
8022 #undef FRP
8023 #define FRP(exp) exp
8025 void
8026 alpha_expand_epilogue (void)
8028 /* Registers to save. */
8029 unsigned long imask = 0;
8030 unsigned long fmask = 0;
8031 /* Stack space needed for pushing registers clobbered by us. */
8032 HOST_WIDE_INT sa_size;
8033 /* Complete stack size needed. */
8034 HOST_WIDE_INT frame_size;
8035 /* Offset from base reg to register save area. */
8036 HOST_WIDE_INT reg_offset;
8037 int fp_is_frame_pointer, fp_offset;
8038 rtx sa_reg, sa_reg_exp = NULL;
8039 rtx sp_adj1, sp_adj2, mem;
8040 rtx eh_ofs;
8041 int i;
8043 sa_size = alpha_sa_size ();
8045 frame_size = get_frame_size ();
8046 if (TARGET_ABI_OPEN_VMS)
8047 frame_size = ALPHA_ROUND (sa_size
8048 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8049 + frame_size
8050 + current_function_pretend_args_size);
8051 else if (TARGET_ABI_UNICOSMK)
8052 frame_size = ALPHA_ROUND (sa_size
8053 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8054 + ALPHA_ROUND (frame_size
8055 + current_function_outgoing_args_size);
8056 else
8057 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8058 + sa_size
8059 + ALPHA_ROUND (frame_size
8060 + current_function_pretend_args_size));
8062 if (TARGET_ABI_OPEN_VMS)
8064 if (alpha_procedure_type == PT_STACK)
8065 reg_offset = 8;
8066 else
8067 reg_offset = 0;
8069 else
8070 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8072 alpha_sa_mask (&imask, &fmask);
8074 fp_is_frame_pointer
8075 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8076 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8077 fp_offset = 0;
8078 sa_reg = stack_pointer_rtx;
8080 if (current_function_calls_eh_return)
8081 eh_ofs = EH_RETURN_STACKADJ_RTX;
8082 else
8083 eh_ofs = NULL_RTX;
8085 if (!TARGET_ABI_UNICOSMK && sa_size)
8087 /* If we have a frame pointer, restore SP from it. */
8088 if ((TARGET_ABI_OPEN_VMS
8089 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8090 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8091 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8093 /* Cope with very large offsets to the register save area. */
8094 if (reg_offset + sa_size > 0x8000)
8096 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8097 HOST_WIDE_INT bias;
8099 if (low + sa_size <= 0x8000)
8100 bias = reg_offset - low, reg_offset = low;
8101 else
8102 bias = reg_offset, reg_offset = 0;
8104 sa_reg = gen_rtx_REG (DImode, 22);
8105 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8107 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8110 /* Restore registers in order, excepting a true frame pointer. */
8112 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8113 if (! eh_ofs)
8114 set_mem_alias_set (mem, alpha_sr_alias_set);
8115 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8117 reg_offset += 8;
8118 imask &= ~(1UL << REG_RA);
8120 for (i = 0; i < 31; ++i)
8121 if (imask & (1UL << i))
8123 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8124 fp_offset = reg_offset;
8125 else
8127 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8128 set_mem_alias_set (mem, alpha_sr_alias_set);
8129 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8131 reg_offset += 8;
8134 for (i = 0; i < 31; ++i)
8135 if (fmask & (1UL << i))
8137 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8138 set_mem_alias_set (mem, alpha_sr_alias_set);
8139 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8140 reg_offset += 8;
8143 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8145 /* Restore callee-saved general-purpose registers. */
8147 reg_offset = -56;
8149 for (i = 9; i < 15; i++)
8150 if (imask & (1UL << i))
8152 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8153 reg_offset));
8154 set_mem_alias_set (mem, alpha_sr_alias_set);
8155 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8156 reg_offset -= 8;
8159 for (i = 2; i < 10; i++)
8160 if (fmask & (1UL << i))
8162 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8163 reg_offset));
8164 set_mem_alias_set (mem, alpha_sr_alias_set);
8165 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8166 reg_offset -= 8;
8169 /* Restore the return address from the DSIB. */
8171 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8172 set_mem_alias_set (mem, alpha_sr_alias_set);
8173 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8176 if (frame_size || eh_ofs)
8178 sp_adj1 = stack_pointer_rtx;
8180 if (eh_ofs)
8182 sp_adj1 = gen_rtx_REG (DImode, 23);
8183 emit_move_insn (sp_adj1,
8184 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8187 /* If the stack size is large, begin computation into a temporary
8188 register so as not to interfere with a potential fp restore,
8189 which must be consecutive with an SP restore. */
8190 if (frame_size < 32768
8191 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8192 sp_adj2 = GEN_INT (frame_size);
8193 else if (TARGET_ABI_UNICOSMK)
8195 sp_adj1 = gen_rtx_REG (DImode, 23);
8196 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8197 sp_adj2 = const0_rtx;
8199 else if (frame_size < 0x40007fffL)
8201 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8203 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8204 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8205 sp_adj1 = sa_reg;
8206 else
8208 sp_adj1 = gen_rtx_REG (DImode, 23);
8209 FRP (emit_move_insn (sp_adj1, sp_adj2));
8211 sp_adj2 = GEN_INT (low);
8213 else
8215 rtx tmp = gen_rtx_REG (DImode, 23);
8216 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8217 3, false));
8218 if (!sp_adj2)
8220 /* We can't drop new things to memory this late, afaik,
8221 so build it up by pieces. */
8222 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8223 -(frame_size < 0)));
8224 gcc_assert (sp_adj2);
8228 /* From now on, things must be in order. So emit blockages. */
8230 /* Restore the frame pointer. */
8231 if (TARGET_ABI_UNICOSMK)
8233 emit_insn (gen_blockage ());
8234 mem = gen_rtx_MEM (DImode,
8235 plus_constant (hard_frame_pointer_rtx, -16));
8236 set_mem_alias_set (mem, alpha_sr_alias_set);
8237 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8239 else if (fp_is_frame_pointer)
8241 emit_insn (gen_blockage ());
8242 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8243 set_mem_alias_set (mem, alpha_sr_alias_set);
8244 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8246 else if (TARGET_ABI_OPEN_VMS)
8248 emit_insn (gen_blockage ());
8249 FRP (emit_move_insn (hard_frame_pointer_rtx,
8250 gen_rtx_REG (DImode, vms_save_fp_regno)));
8253 /* Restore the stack pointer. */
8254 emit_insn (gen_blockage ());
8255 if (sp_adj2 == const0_rtx)
8256 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8257 else
8258 FRP (emit_move_insn (stack_pointer_rtx,
8259 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8261 else
8263 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8265 emit_insn (gen_blockage ());
8266 FRP (emit_move_insn (hard_frame_pointer_rtx,
8267 gen_rtx_REG (DImode, vms_save_fp_regno)));
8269 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8271 /* Decrement the frame pointer if the function does not have a
8272 frame. */
8274 emit_insn (gen_blockage ());
8275 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8276 hard_frame_pointer_rtx, constm1_rtx)));
8281 /* Output the rest of the textual info surrounding the epilogue. */
8283 void
8284 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8286 #if TARGET_ABI_OPEN_VMS
8287 alpha_write_linkage (file, fnname, decl);
8288 #endif
8290 /* End the function. */
8291 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8293 fputs ("\t.end ", file);
8294 assemble_name (file, fnname);
8295 putc ('\n', file);
8297 inside_function = FALSE;
8299 /* Output jump tables and the static subroutine information block. */
8300 if (TARGET_ABI_UNICOSMK)
8302 unicosmk_output_ssib (file, fnname);
8303 unicosmk_output_deferred_case_vectors (file);
8307 #if TARGET_ABI_OSF
8308 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8310 In order to avoid the hordes of differences between generated code
8311 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8312 lots of code loading up large constants, generate rtl and emit it
8313 instead of going straight to text.
8315 Not sure why this idea hasn't been explored before... */
8317 static void
8318 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8319 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8320 tree function)
8322 HOST_WIDE_INT hi, lo;
8323 rtx this, insn, funexp;
8325 reset_block_changes ();
8327 /* We always require a valid GP. */
8328 emit_insn (gen_prologue_ldgp ());
8329 emit_note (NOTE_INSN_PROLOGUE_END);
8331 /* Find the "this" pointer. If the function returns a structure,
8332 the structure return pointer is in $16. */
8333 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8334 this = gen_rtx_REG (Pmode, 17);
8335 else
8336 this = gen_rtx_REG (Pmode, 16);
8338 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8339 entire constant for the add. */
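/* Worked example (added for illustration): for delta == 0x12345678 the
   split below gives lo == 0x5678 and hi == 0x12340000, so the
   adjustment fits an ldah/lda pair.  For delta == 0x18000, lo becomes
   -0x8000 and hi becomes 0x20000; hi + lo still equals delta, so the
   two-insn form is used with a negative low part.  Deltas that cannot
   be split this way fall through to the full constant load.  */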
8340 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8341 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8342 if (hi + lo == delta)
8344 if (hi)
8345 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8346 if (lo)
8347 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8349 else
8351 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8352 delta, -(delta < 0));
8353 emit_insn (gen_adddi3 (this, this, tmp));
8356 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8357 if (vcall_offset)
8359 rtx tmp, tmp2;
8361 tmp = gen_rtx_REG (Pmode, 0);
8362 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8364 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8365 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8366 if (hi + lo == vcall_offset)
8368 if (hi)
8369 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8371 else
8373 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8374 vcall_offset, -(vcall_offset < 0));
8375 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8376 lo = 0;
8378 if (lo)
8379 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8380 else
8381 tmp2 = tmp;
8382 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8384 emit_insn (gen_adddi3 (this, this, tmp));
8387 /* Generate a tail call to the target function. */
8388 if (! TREE_USED (function))
8390 assemble_external (function);
8391 TREE_USED (function) = 1;
8393 funexp = XEXP (DECL_RTL (function), 0);
8394 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8395 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8396 SIBLING_CALL_P (insn) = 1;
8398 /* Run just enough of rest_of_compilation to get the insns emitted.
8399 There's not really enough bulk here to make other passes such as
8400 instruction scheduling worth while. Note that use_thunk calls
8401 assemble_start_function and assemble_end_function. */
8402 insn = get_insns ();
8403 insn_locators_initialize ();
8404 shorten_branches (insn);
8405 final_start_function (insn, file, 1);
8406 final (insn, file, 1);
8407 final_end_function ();
8409 #endif /* TARGET_ABI_OSF */
8411 /* Debugging support. */
8413 #include "gstab.h"
8415 /* Count the number of sdb-related labels that are generated (to find block
8416 start and end boundaries). */
8418 int sdb_label_count = 0;
8420 /* Name of the file containing the current function. */
8422 static const char *current_function_file = "";
8424 /* Offsets to alpha virtual arg/local debugging pointers. */
8426 long alpha_arg_offset;
8427 long alpha_auto_offset;
8429 /* Emit a new filename to a stream. */
8431 void
8432 alpha_output_filename (FILE *stream, const char *name)
8434 static int first_time = TRUE;
8436 if (first_time)
8438 first_time = FALSE;
8439 ++num_source_filenames;
8440 current_function_file = name;
8441 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8442 output_quoted_string (stream, name);
8443 fprintf (stream, "\n");
8444 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8445 fprintf (stream, "\t#@stabs\n");
8448 else if (write_symbols == DBX_DEBUG)
8449 /* dbxout.c will emit an appropriate .stabs directive. */
8450 return;
8452 else if (name != current_function_file
8453 && strcmp (name, current_function_file) != 0)
8455 if (inside_function && ! TARGET_GAS)
8456 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8457 else
8459 ++num_source_filenames;
8460 current_function_file = name;
8461 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8464 output_quoted_string (stream, name);
8465 fprintf (stream, "\n");
8469 /* Structure to show the current status of registers and memory. */
8471 struct shadow_summary
8473 struct {
8474 unsigned int i : 31; /* Mask of int regs */
8475 unsigned int fp : 31; /* Mask of fp regs */
8476 unsigned int mem : 1; /* mem == imem | fpmem */
8477 } used, defd;
8480 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8481 to the summary structure. SET is nonzero if the insn is setting the
8482 object, otherwise zero. */
8484 static void
8485 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8487 const char *format_ptr;
8488 int i, j;
8490 if (x == 0)
8491 return;
8493 switch (GET_CODE (x))
8495 /* ??? Note that this case would be incorrect if the Alpha had a
8496 ZERO_EXTRACT in SET_DEST. */
8497 case SET:
8498 summarize_insn (SET_SRC (x), sum, 0);
8499 summarize_insn (SET_DEST (x), sum, 1);
8500 break;
8502 case CLOBBER:
8503 summarize_insn (XEXP (x, 0), sum, 1);
8504 break;
8506 case USE:
8507 summarize_insn (XEXP (x, 0), sum, 0);
8508 break;
8510 case ASM_OPERANDS:
8511 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8512 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8513 break;
8515 case PARALLEL:
8516 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8517 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8518 break;
8520 case SUBREG:
8521 summarize_insn (SUBREG_REG (x), sum, 0);
8522 break;
8524 case REG:
8526 int regno = REGNO (x);
8527 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8529 if (regno == 31 || regno == 63)
8530 break;
8532 if (set)
8534 if (regno < 32)
8535 sum->defd.i |= mask;
8536 else
8537 sum->defd.fp |= mask;
8539 else
8541 if (regno < 32)
8542 sum->used.i |= mask;
8543 else
8544 sum->used.fp |= mask;
8547 break;
8549 case MEM:
8550 if (set)
8551 sum->defd.mem = 1;
8552 else
8553 sum->used.mem = 1;
8555 /* Find the regs used in memory address computation: */
8556 summarize_insn (XEXP (x, 0), sum, 0);
8557 break;
8559 case CONST_INT: case CONST_DOUBLE:
8560 case SYMBOL_REF: case LABEL_REF: case CONST:
8561 case SCRATCH: case ASM_INPUT:
8562 break;
8564 /* Handle common unary and binary ops for efficiency. */
8565 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8566 case MOD: case UDIV: case UMOD: case AND: case IOR:
8567 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8568 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8569 case NE: case EQ: case GE: case GT: case LE:
8570 case LT: case GEU: case GTU: case LEU: case LTU:
8571 summarize_insn (XEXP (x, 0), sum, 0);
8572 summarize_insn (XEXP (x, 1), sum, 0);
8573 break;
8575 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8576 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8577 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8578 case SQRT: case FFS:
8579 summarize_insn (XEXP (x, 0), sum, 0);
8580 break;
8582 default:
8583 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8584 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8585 switch (format_ptr[i])
8587 case 'e':
8588 summarize_insn (XEXP (x, i), sum, 0);
8589 break;
8591 case 'E':
8592 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8593 summarize_insn (XVECEXP (x, i, j), sum, 0);
8594 break;
8596 case 'i':
8597 break;
8599 default:
8600 gcc_unreachable ();
8605 /* Ensure a sufficient number of `trapb' insns are in the code when
8606 the user requests code with a trap precision of functions or
8607 instructions.
8609 In naive mode, when the user requests a trap-precision of
8610 "instruction", a trapb is needed after every instruction that may
8611 generate a trap. This ensures that the code is resumption safe but
8612 it is also slow.
8614 When optimizations are turned on, we delay issuing a trapb as long
8615 as possible. In this context, a trap shadow is the sequence of
8616 instructions that starts with a (potentially) trap generating
8617 instruction and extends to the next trapb or call_pal instruction
8618 (but GCC never generates call_pal by itself). We can delay (and
8619 therefore sometimes omit) a trapb subject to the following
8620 conditions:
8622 (a) On entry to the trap shadow, if any Alpha register or memory
8623 location contains a value that is used as an operand value by some
8624 instruction in the trap shadow (live on entry), then no instruction
8625 in the trap shadow may modify the register or memory location.
8627 (b) Within the trap shadow, the computation of the base register
8628 for a memory load or store instruction may not involve using the
8629 result of an instruction that might generate an UNPREDICTABLE
8630 result.
8632 (c) Within the trap shadow, no register may be used more than once
8633 as a destination register. (This is to make life easier for the
8634 trap-handler.)
8636 (d) The trap shadow may not include any branch instructions. */
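/* Hypothetical example (added for illustration):

     addt  $f0,$f1,$f2     ; may trap, opens a shadow
     mult  $f2,$f3,$f4     ; still inside the shadow
     addt  $f5,$f6,$f2     ; redefines $f2

   The third instruction reuses $f2 as a destination inside the shadow
   opened by the first, so a trapb must be emitted before it;
   alpha_handle_trap_shadows below detects this by intersecting the
   defd masks built by summarize_insn, which corresponds to rule (c).  */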
8638 static void
8639 alpha_handle_trap_shadows (void)
8641 struct shadow_summary shadow;
8642 int trap_pending, exception_nesting;
8643 rtx i, n;
8645 trap_pending = 0;
8646 exception_nesting = 0;
8647 shadow.used.i = 0;
8648 shadow.used.fp = 0;
8649 shadow.used.mem = 0;
8650 shadow.defd = shadow.used;
8652 for (i = get_insns (); i ; i = NEXT_INSN (i))
8654 if (GET_CODE (i) == NOTE)
8656 switch (NOTE_LINE_NUMBER (i))
8658 case NOTE_INSN_EH_REGION_BEG:
8659 exception_nesting++;
8660 if (trap_pending)
8661 goto close_shadow;
8662 break;
8664 case NOTE_INSN_EH_REGION_END:
8665 exception_nesting--;
8666 if (trap_pending)
8667 goto close_shadow;
8668 break;
8670 case NOTE_INSN_EPILOGUE_BEG:
8671 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8672 goto close_shadow;
8673 break;
8676 else if (trap_pending)
8678 if (alpha_tp == ALPHA_TP_FUNC)
8680 if (GET_CODE (i) == JUMP_INSN
8681 && GET_CODE (PATTERN (i)) == RETURN)
8682 goto close_shadow;
8684 else if (alpha_tp == ALPHA_TP_INSN)
8686 if (optimize > 0)
8688 struct shadow_summary sum;
8690 sum.used.i = 0;
8691 sum.used.fp = 0;
8692 sum.used.mem = 0;
8693 sum.defd = sum.used;
8695 switch (GET_CODE (i))
8697 case INSN:
8698 /* Annoyingly, get_attr_trap will die on these. */
8699 if (GET_CODE (PATTERN (i)) == USE
8700 || GET_CODE (PATTERN (i)) == CLOBBER)
8701 break;
8703 summarize_insn (PATTERN (i), &sum, 0);
8705 if ((sum.defd.i & shadow.defd.i)
8706 || (sum.defd.fp & shadow.defd.fp))
8708 /* (c) would be violated */
8709 goto close_shadow;
8712 /* Combine shadow with summary of current insn: */
8713 shadow.used.i |= sum.used.i;
8714 shadow.used.fp |= sum.used.fp;
8715 shadow.used.mem |= sum.used.mem;
8716 shadow.defd.i |= sum.defd.i;
8717 shadow.defd.fp |= sum.defd.fp;
8718 shadow.defd.mem |= sum.defd.mem;
8720 if ((sum.defd.i & shadow.used.i)
8721 || (sum.defd.fp & shadow.used.fp)
8722 || (sum.defd.mem & shadow.used.mem))
8724 /* (a) would be violated (also takes care of (b)) */
8725 gcc_assert (get_attr_trap (i) != TRAP_YES
8726 || (!(sum.defd.i & sum.used.i)
8727 && !(sum.defd.fp & sum.used.fp)));
8729 goto close_shadow;
8731 break;
8733 case JUMP_INSN:
8734 case CALL_INSN:
8735 case CODE_LABEL:
8736 goto close_shadow;
8738 default:
8739 gcc_unreachable ();
8742 else
8744 close_shadow:
8745 n = emit_insn_before (gen_trapb (), i);
8746 PUT_MODE (n, TImode);
8747 PUT_MODE (i, TImode);
8748 trap_pending = 0;
8749 shadow.used.i = 0;
8750 shadow.used.fp = 0;
8751 shadow.used.mem = 0;
8752 shadow.defd = shadow.used;
8757 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8758 && GET_CODE (i) == INSN
8759 && GET_CODE (PATTERN (i)) != USE
8760 && GET_CODE (PATTERN (i)) != CLOBBER
8761 && get_attr_trap (i) == TRAP_YES)
8763 if (optimize && !trap_pending)
8764 summarize_insn (PATTERN (i), &shadow, 0);
8765 trap_pending = 1;
8770 /* Alpha can only issue instruction groups simultaneously if they are
8771 suitably aligned. This is very processor-specific. */
8772 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8773 that are marked "fake". These instructions do not exist on that target,
8774 but it is possible to see these insns with deranged combinations of
8775 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8776 choose a result at random. */
8778 enum alphaev4_pipe {
8779 EV4_STOP = 0,
8780 EV4_IB0 = 1,
8781 EV4_IB1 = 2,
8782 EV4_IBX = 4
8785 enum alphaev5_pipe {
8786 EV5_STOP = 0,
8787 EV5_NONE = 1,
8788 EV5_E01 = 2,
8789 EV5_E0 = 4,
8790 EV5_E1 = 8,
8791 EV5_FAM = 16,
8792 EV5_FA = 32,
8793 EV5_FM = 64
8796 static enum alphaev4_pipe
8797 alphaev4_insn_pipe (rtx insn)
8799 if (recog_memoized (insn) < 0)
8800 return EV4_STOP;
8801 if (get_attr_length (insn) != 4)
8802 return EV4_STOP;
8804 switch (get_attr_type (insn))
8806 case TYPE_ILD:
8807 case TYPE_LDSYM:
8808 case TYPE_FLD:
8809 case TYPE_LD_L:
8810 return EV4_IBX;
8812 case TYPE_IADD:
8813 case TYPE_ILOG:
8814 case TYPE_ICMOV:
8815 case TYPE_ICMP:
8816 case TYPE_FST:
8817 case TYPE_SHIFT:
8818 case TYPE_IMUL:
8819 case TYPE_FBR:
8820 case TYPE_MVI: /* fake */
8821 return EV4_IB0;
8823 case TYPE_IST:
8824 case TYPE_MISC:
8825 case TYPE_IBR:
8826 case TYPE_JSR:
8827 case TYPE_CALLPAL:
8828 case TYPE_FCPYS:
8829 case TYPE_FCMOV:
8830 case TYPE_FADD:
8831 case TYPE_FDIV:
8832 case TYPE_FMUL:
8833 case TYPE_ST_C:
8834 case TYPE_MB:
8835 case TYPE_FSQRT: /* fake */
8836 case TYPE_FTOI: /* fake */
8837 case TYPE_ITOF: /* fake */
8838 return EV4_IB1;
8840 default:
8841 gcc_unreachable ();
8845 static enum alphaev5_pipe
8846 alphaev5_insn_pipe (rtx insn)
8848 if (recog_memoized (insn) < 0)
8849 return EV5_STOP;
8850 if (get_attr_length (insn) != 4)
8851 return EV5_STOP;
8853 switch (get_attr_type (insn))
8855 case TYPE_ILD:
8856 case TYPE_FLD:
8857 case TYPE_LDSYM:
8858 case TYPE_IADD:
8859 case TYPE_ILOG:
8860 case TYPE_ICMOV:
8861 case TYPE_ICMP:
8862 return EV5_E01;
8864 case TYPE_IST:
8865 case TYPE_FST:
8866 case TYPE_SHIFT:
8867 case TYPE_IMUL:
8868 case TYPE_MISC:
8869 case TYPE_MVI:
8870 case TYPE_LD_L:
8871 case TYPE_ST_C:
8872 case TYPE_MB:
8873 case TYPE_FTOI: /* fake */
8874 case TYPE_ITOF: /* fake */
8875 return EV5_E0;
8877 case TYPE_IBR:
8878 case TYPE_JSR:
8879 case TYPE_CALLPAL:
8880 return EV5_E1;
8882 case TYPE_FCPYS:
8883 return EV5_FAM;
8885 case TYPE_FBR:
8886 case TYPE_FCMOV:
8887 case TYPE_FADD:
8888 case TYPE_FDIV:
8889 case TYPE_FSQRT: /* fake */
8890 return EV5_FA;
8892 case TYPE_FMUL:
8893 return EV5_FM;
8895 default:
8896 gcc_unreachable ();
8900 /* IN_USE is a mask of the slots currently filled within the insn group.
8901 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8902 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8904 LEN is, of course, the length of the group in bytes. */
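/* Example of the encoding (added): after slotting one EV4_IBX insn,
   in_use == EV4_IB0|EV4_IBX; if an EV4_IB0 insn arrives next, the IBX
   insn is assumed to move over to IB1, so in_use also gains EV4_IB1 and
   both slots of the group are considered filled.  */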
8906 static rtx
8907 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8909 int len, in_use;
8911 len = in_use = 0;
8913 if (! INSN_P (insn)
8914 || GET_CODE (PATTERN (insn)) == CLOBBER
8915 || GET_CODE (PATTERN (insn)) == USE)
8916 goto next_and_done;
8918 while (1)
8920 enum alphaev4_pipe pipe;
8922 pipe = alphaev4_insn_pipe (insn);
8923 switch (pipe)
8925 case EV4_STOP:
8926 /* Force complex instructions to start new groups. */
8927 if (in_use)
8928 goto done;
8930 /* If this is a completely unrecognized insn, it's an asm.
8931 We don't know how long it is, so record length as -1 to
8932 signal a needed realignment. */
8933 if (recog_memoized (insn) < 0)
8934 len = -1;
8935 else
8936 len = get_attr_length (insn);
8937 goto next_and_done;
8939 case EV4_IBX:
8940 if (in_use & EV4_IB0)
8942 if (in_use & EV4_IB1)
8943 goto done;
8944 in_use |= EV4_IB1;
8946 else
8947 in_use |= EV4_IB0 | EV4_IBX;
8948 break;
8950 case EV4_IB0:
8951 if (in_use & EV4_IB0)
8953 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8954 goto done;
8955 in_use |= EV4_IB1;
8957 in_use |= EV4_IB0;
8958 break;
8960 case EV4_IB1:
8961 if (in_use & EV4_IB1)
8962 goto done;
8963 in_use |= EV4_IB1;
8964 break;
8966 default:
8967 gcc_unreachable ();
8969 len += 4;
8971 /* Haifa doesn't do well scheduling branches. */
8972 if (GET_CODE (insn) == JUMP_INSN)
8973 goto next_and_done;
8975 next:
8976 insn = next_nonnote_insn (insn);
8978 if (!insn || ! INSN_P (insn))
8979 goto done;
8981 /* Let Haifa tell us where it thinks insn group boundaries are. */
8982 if (GET_MODE (insn) == TImode)
8983 goto done;
8985 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8986 goto next;
8989 next_and_done:
8990 insn = next_nonnote_insn (insn);
8992 done:
8993 *plen = len;
8994 *pin_use = in_use;
8995 return insn;
8998 /* IN_USE is a mask of the slots currently filled within the insn group.
8999 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9000 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9002 LEN is, of course, the length of the group in bytes. */
9004 static rtx
9005 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9007 int len, in_use;
9009 len = in_use = 0;
9011 if (! INSN_P (insn)
9012 || GET_CODE (PATTERN (insn)) == CLOBBER
9013 || GET_CODE (PATTERN (insn)) == USE)
9014 goto next_and_done;
9016 while (1)
9018 enum alphaev5_pipe pipe;
9020 pipe = alphaev5_insn_pipe (insn);
9021 switch (pipe)
9023 case EV5_STOP:
9024 /* Force complex instructions to start new groups. */
9025 if (in_use)
9026 goto done;
9028 /* If this is a completely unrecognized insn, it's an asm.
9029 We don't know how long it is, so record length as -1 to
9030 signal a needed realignment. */
9031 if (recog_memoized (insn) < 0)
9032 len = -1;
9033 else
9034 len = get_attr_length (insn);
9035 goto next_and_done;
9037 /* ??? Most of the places below, we would like to assert never
9038 happen, as it would indicate an error either in Haifa, or
9039 in the scheduling description. Unfortunately, Haifa never
9040 schedules the last instruction of the BB, so we don't have
9041 an accurate TI bit to go off. */
9042 case EV5_E01:
9043 if (in_use & EV5_E0)
9045 if (in_use & EV5_E1)
9046 goto done;
9047 in_use |= EV5_E1;
9049 else
9050 in_use |= EV5_E0 | EV5_E01;
9051 break;
9053 case EV5_E0:
9054 if (in_use & EV5_E0)
9056 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9057 goto done;
9058 in_use |= EV5_E1;
9060 in_use |= EV5_E0;
9061 break;
9063 case EV5_E1:
9064 if (in_use & EV5_E1)
9065 goto done;
9066 in_use |= EV5_E1;
9067 break;
9069 case EV5_FAM:
9070 if (in_use & EV5_FA)
9072 if (in_use & EV5_FM)
9073 goto done;
9074 in_use |= EV5_FM;
9076 else
9077 in_use |= EV5_FA | EV5_FAM;
9078 break;
9080 case EV5_FA:
9081 if (in_use & EV5_FA)
9082 goto done;
9083 in_use |= EV5_FA;
9084 break;
9086 case EV5_FM:
9087 if (in_use & EV5_FM)
9088 goto done;
9089 in_use |= EV5_FM;
9090 break;
9092 case EV5_NONE:
9093 break;
9095 default:
9096 gcc_unreachable ();
9098 len += 4;
9100 /* Haifa doesn't do well scheduling branches. */
9101 /* ??? If this is predicted not-taken, slotting continues, except
9102 that no more IBR, FBR, or JSR insns may be slotted. */
9103 if (GET_CODE (insn) == JUMP_INSN)
9104 goto next_and_done;
9106 next:
9107 insn = next_nonnote_insn (insn);
9109 if (!insn || ! INSN_P (insn))
9110 goto done;
9112 /* Let Haifa tell us where it thinks insn group boundaries are. */
9113 if (GET_MODE (insn) == TImode)
9114 goto done;
9116 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9117 goto next;
9120 next_and_done:
9121 insn = next_nonnote_insn (insn);
9123 done:
9124 *plen = len;
9125 *pin_use = in_use;
9126 return insn;
9129 static rtx
9130 alphaev4_next_nop (int *pin_use)
9132 int in_use = *pin_use;
9133 rtx nop;
9135 if (!(in_use & EV4_IB0))
9137 in_use |= EV4_IB0;
9138 nop = gen_nop ();
9140 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9142 in_use |= EV4_IB1;
9143 nop = gen_nop ();
9145 else if (TARGET_FP && !(in_use & EV4_IB1))
9147 in_use |= EV4_IB1;
9148 nop = gen_fnop ();
9150 else
9151 nop = gen_unop ();
9153 *pin_use = in_use;
9154 return nop;
9157 static rtx
9158 alphaev5_next_nop (int *pin_use)
9160 int in_use = *pin_use;
9161 rtx nop;
9163 if (!(in_use & EV5_E1))
9165 in_use |= EV5_E1;
9166 nop = gen_nop ();
9168 else if (TARGET_FP && !(in_use & EV5_FA))
9170 in_use |= EV5_FA;
9171 nop = gen_fnop ();
9173 else if (TARGET_FP && !(in_use & EV5_FM))
9175 in_use |= EV5_FM;
9176 nop = gen_fnop ();
9178 else
9179 nop = gen_unop ();
9181 *pin_use = in_use;
9182 return nop;
9185 /* The instruction group alignment main loop. */
9187 static void
9188 alpha_align_insns (unsigned int max_align,
9189 rtx (*next_group) (rtx, int *, int *),
9190 rtx (*next_nop) (int *))
9192 /* ALIGN is the known alignment for the insn group. */
9193 unsigned int align;
9194 /* OFS is the offset of the current insn in the insn group. */
9195 int ofs;
9196 int prev_in_use, in_use, len, ldgp;
9197 rtx i, next;
9199 /* Let shorten branches care for assigning alignments to code labels. */
9200 shorten_branches (get_insns ());
9202 if (align_functions < 4)
9203 align = 4;
9204 else if ((unsigned int) align_functions < max_align)
9205 align = align_functions;
9206 else
9207 align = max_align;
9209 ofs = prev_in_use = 0;
9210 i = get_insns ();
9211 if (GET_CODE (i) == NOTE)
9212 i = next_nonnote_insn (i);
9214 ldgp = alpha_function_needs_gp ? 8 : 0;
9216 while (i)
9218 next = (*next_group) (i, &in_use, &len);
9220 /* When we see a label, resync alignment etc. */
9221 if (GET_CODE (i) == CODE_LABEL)
9223 unsigned int new_align = 1 << label_to_alignment (i);
9225 if (new_align >= align)
9227 align = new_align < max_align ? new_align : max_align;
9228 ofs = 0;
9231 else if (ofs & (new_align-1))
9232 ofs = (ofs | (new_align-1)) + 1;
9233 gcc_assert (!len);
9236 /* Handle complex instructions specially. */
9237 else if (in_use == 0)
9239 /* Asms will have length < 0. This is a signal that we have
9240 lost alignment knowledge. Assume, however, that the asm
9241 will not mis-align instructions. */
9242 if (len < 0)
9244 ofs = 0;
9245 align = 4;
9246 len = 0;
9250 /* If the known alignment is smaller than the recognized insn group,
9251 realign the output. */
9252 else if ((int) align < len)
9254 unsigned int new_log_align = len > 8 ? 4 : 3;
9255 rtx prev, where;
9257 where = prev = prev_nonnote_insn (i);
9258 if (!where || GET_CODE (where) != CODE_LABEL)
9259 where = i;
9261 /* Can't realign between a call and its gp reload. */
9262 if (! (TARGET_EXPLICIT_RELOCS
9263 && prev && GET_CODE (prev) == CALL_INSN))
9265 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9266 align = 1 << new_log_align;
9267 ofs = 0;
9271 /* We may not insert padding inside the initial ldgp sequence. */
9272 else if (ldgp > 0)
9273 ldgp -= len;
9275 /* If the group won't fit in the same INT16 as the previous,
9276 we need to add padding to keep the group together. Rather
9277 than simply leaving the insn filling to the assembler, we
9278 can make use of the knowledge of what sorts of instructions
9279 were issued in the previous group to make sure that all of
9280 the added nops are really free. */
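/* Worked example (added): with align == 16, ofs == 8 and a new group of
   len == 12, the group would straddle the 16-byte boundary, so
   nop_count == (16 - 8) / 4 == 2 nops are emitted to push the group to
   the next aligned address.  */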
9281 else if (ofs + len > (int) align)
9283 int nop_count = (align - ofs) / 4;
9284 rtx where;
9286 /* Insert nops before labels, branches, and calls to truly merge
9287 the execution of the nops with the previous instruction group. */
9288 where = prev_nonnote_insn (i);
9289 if (where)
9291 if (GET_CODE (where) == CODE_LABEL)
9293 rtx where2 = prev_nonnote_insn (where);
9294 if (where2 && GET_CODE (where2) == JUMP_INSN)
9295 where = where2;
9297 else if (GET_CODE (where) == INSN)
9298 where = i;
9300 else
9301 where = i;
9304 emit_insn_before ((*next_nop)(&prev_in_use), where);
9305 while (--nop_count);
9306 ofs = 0;
9309 ofs = (ofs + len) & (align - 1);
9310 prev_in_use = in_use;
9311 i = next;
9315 /* Machine dependent reorg pass. */
9317 static void
9318 alpha_reorg (void)
9320 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9321 alpha_handle_trap_shadows ();
9323 /* Due to the number of extra trapb insns, don't bother fixing up
9324 alignment when trap precision is instruction. Moreover, we can
9325 only do our job when sched2 is run. */
9326 if (optimize && !optimize_size
9327 && alpha_tp != ALPHA_TP_INSN
9328 && flag_schedule_insns_after_reload)
9330 if (alpha_tune == PROCESSOR_EV4)
9331 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9332 else if (alpha_tune == PROCESSOR_EV5)
9333 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9337 #if !TARGET_ABI_UNICOSMK
9339 #ifdef HAVE_STAMP_H
9340 #include <stamp.h>
9341 #endif
9343 static void
9344 alpha_file_start (void)
9346 #ifdef OBJECT_FORMAT_ELF
9347 /* If emitting dwarf2 debug information, we cannot generate a .file
9348 directive to start the file, as it will conflict with dwarf2out
9349 file numbers. So it's only useful when emitting mdebug output. */
9350 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9351 #endif
9353 default_file_start ();
9354 #ifdef MS_STAMP
9355 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9356 #endif
9358 fputs ("\t.set noreorder\n", asm_out_file);
9359 fputs ("\t.set volatile\n", asm_out_file);
9360 if (!TARGET_ABI_OPEN_VMS)
9361 fputs ("\t.set noat\n", asm_out_file);
9362 if (TARGET_EXPLICIT_RELOCS)
9363 fputs ("\t.set nomacro\n", asm_out_file);
9364 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9366 const char *arch;
9368 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9369 arch = "ev6";
9370 else if (TARGET_MAX)
9371 arch = "pca56";
9372 else if (TARGET_BWX)
9373 arch = "ev56";
9374 else if (alpha_cpu == PROCESSOR_EV5)
9375 arch = "ev5";
9376 else
9377 arch = "ev4";
9379 fprintf (asm_out_file, "\t.arch %s\n", arch);
9382 #endif
9384 #ifdef OBJECT_FORMAT_ELF
9386 /* Return a section for X. The only special thing we do here is to
9387 honor small data. */
9389 static section *
9390 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9391 unsigned HOST_WIDE_INT align)
9393 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9394 /* ??? Consider using mergeable sdata sections. */
9395 return sdata_section;
9396 else
9397 return default_elf_select_rtx_section (mode, x, align);
9400 #endif /* OBJECT_FORMAT_ELF */
9402 /* Structure to collect function names for final output in link section. */
9403 /* Note that items marked with GTY can't be ifdef'ed out. */
9405 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9406 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9408 struct alpha_links GTY(())
9410 int num;
9411 rtx linkage;
9412 enum links_kind lkind;
9413 enum reloc_kind rkind;
9416 struct alpha_funcs GTY(())
9418 int num;
9419 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9420 links;
9423 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9424 splay_tree alpha_links_tree;
9425 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9426 splay_tree alpha_funcs_tree;
9428 static GTY(()) int alpha_funcs_num;
9430 #if TARGET_ABI_OPEN_VMS
9432 /* Return the VMS argument type corresponding to MODE. */
9434 enum avms_arg_type
9435 alpha_arg_type (enum machine_mode mode)
9437 switch (mode)
9439 case SFmode:
9440 return TARGET_FLOAT_VAX ? FF : FS;
9441 case DFmode:
9442 return TARGET_FLOAT_VAX ? FD : FT;
9443 default:
9444 return I64;
9448 /* Return an rtx for an integer representing the VMS Argument Information
9449 register value. */
9452 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9454 unsigned HOST_WIDE_INT regval = cum.num_args;
9455 int i;
9457 for (i = 0; i < 6; i++)
9458 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9460 return GEN_INT (regval);
9463 /* Make (or fake) .linkage entry for function call.
9465 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9467 Return a SYMBOL_REF rtx for the linkage. */
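/* Example (added for illustration): the first reference to "foo"
   creates the linkage symbol "$foo..lk" in the code below; later
   references to the same name reuse the cached entry found in
   alpha_links_tree instead of creating a new one.  */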
9470 alpha_need_linkage (const char *name, int is_local)
9472 splay_tree_node node;
9473 struct alpha_links *al;
9475 if (name[0] == '*')
9476 name++;
9478 if (is_local)
9480 struct alpha_funcs *cfaf;
9482 if (!alpha_funcs_tree)
9483 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9484 splay_tree_compare_pointers);
9486 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9488 cfaf->links = 0;
9489 cfaf->num = ++alpha_funcs_num;
9491 splay_tree_insert (alpha_funcs_tree,
9492 (splay_tree_key) current_function_decl,
9493 (splay_tree_value) cfaf);
9496 if (alpha_links_tree)
9498 /* Is this name already defined? */
9500 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9501 if (node)
9503 al = (struct alpha_links *) node->value;
9504 if (is_local)
9506 /* Defined here but external assumed. */
9507 if (al->lkind == KIND_EXTERN)
9508 al->lkind = KIND_LOCAL;
9510 else
9512 /* Used here but unused assumed. */
9513 if (al->lkind == KIND_UNUSED)
9514 al->lkind = KIND_LOCAL;
9516 return al->linkage;
9519 else
9520 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9522 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9523 name = ggc_strdup (name);
9525 /* Assume external if no definition. */
9526 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9528 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9529 get_identifier (name);
9531 /* Construct a SYMBOL_REF for us to call. */
9533 size_t name_len = strlen (name);
9534 char *linksym = alloca (name_len + 6);
9535 linksym[0] = '$';
9536 memcpy (linksym + 1, name, name_len);
9537 memcpy (linksym + 1 + name_len, "..lk", 5);
9538 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9539 ggc_alloc_string (linksym, name_len + 5));
9542 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9543 (splay_tree_value) al);
9545 return al->linkage;
9549 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9551 splay_tree_node cfunnode;
9552 struct alpha_funcs *cfaf;
9553 struct alpha_links *al;
9554 const char *name = XSTR (linkage, 0);
9556 cfaf = (struct alpha_funcs *) 0;
9557 al = (struct alpha_links *) 0;
9559 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9560 cfaf = (struct alpha_funcs *) cfunnode->value;
9562 if (cfaf->links)
9564 splay_tree_node lnode;
9566 /* Is this name already defined? */
9568 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9569 if (lnode)
9570 al = (struct alpha_links *) lnode->value;
9572 else
9573 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9575 if (!al)
9577 size_t name_len;
9578 size_t buflen;
9579 char buf [512];
9580 char *linksym;
9581 splay_tree_node node = 0;
9582 struct alpha_links *anl;
9584 if (name[0] == '*')
9585 name++;
9587 name_len = strlen (name);
9589 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9590 al->num = cfaf->num;
9592 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9593 if (node)
9595 anl = (struct alpha_links *) node->value;
9596 al->lkind = anl->lkind;
9599 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9600 buflen = strlen (buf);
9601 linksym = alloca (buflen + 1);
9602 memcpy (linksym, buf, buflen + 1);
9604 al->linkage = gen_rtx_SYMBOL_REF
9605 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9607 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9608 (splay_tree_value) al);
9611 if (rflag)
9612 al->rkind = KIND_CODEADDR;
9613 else
9614 al->rkind = KIND_LINKAGE;
9616 if (lflag)
9617 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9618 else
9619 return al->linkage;
9622 static int
9623 alpha_write_one_linkage (splay_tree_node node, void *data)
9625 const char *const name = (const char *) node->key;
9626 struct alpha_links *link = (struct alpha_links *) node->value;
9627 FILE *stream = (FILE *) data;
9629 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9630 if (link->rkind == KIND_CODEADDR)
9632 if (link->lkind == KIND_LOCAL)
9634 /* Local and used */
9635 fprintf (stream, "\t.quad %s..en\n", name);
9637 else
9639 /* External and used, request code address. */
9640 fprintf (stream, "\t.code_address %s\n", name);
9643 else
9645 if (link->lkind == KIND_LOCAL)
9647 /* Local and used, build linkage pair. */
9648 fprintf (stream, "\t.quad %s..en\n", name);
9649 fprintf (stream, "\t.quad %s\n", name);
9651 else
9653 /* External and used, request linkage pair. */
9654 fprintf (stream, "\t.linkage %s\n", name);
9658 return 0;
9661 static void
9662 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9664 splay_tree_node node;
9665 struct alpha_funcs *func;
9667 fprintf (stream, "\t.link\n");
9668 fprintf (stream, "\t.align 3\n");
9669 in_section = NULL;
9671 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9672 func = (struct alpha_funcs *) node->value;
9674 fputs ("\t.name ", stream);
9675 assemble_name (stream, funname);
9676 fputs ("..na\n", stream);
9677 ASM_OUTPUT_LABEL (stream, funname);
9678 fprintf (stream, "\t.pdesc ");
9679 assemble_name (stream, funname);
9680 fprintf (stream, "..en,%s\n",
9681 alpha_procedure_type == PT_STACK ? "stack"
9682 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9684 if (func->links)
9686 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9687 /* splay_tree_delete (func->links); */
9691 /* Given a decl, a section name, and whether the decl initializer
9692 has relocs, choose attributes for the section. */
9694 #define SECTION_VMS_OVERLAY SECTION_FORGET
9695 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9696 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9698 static unsigned int
9699 vms_section_type_flags (tree decl, const char *name, int reloc)
9701 unsigned int flags = default_section_type_flags (decl, name, reloc);
9703 if (decl && DECL_ATTRIBUTES (decl)
9704 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9705 flags |= SECTION_VMS_OVERLAY;
9706 if (decl && DECL_ATTRIBUTES (decl)
9707 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9708 flags |= SECTION_VMS_GLOBAL;
9709 if (decl && DECL_ATTRIBUTES (decl)
9710 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9711 flags |= SECTION_VMS_INITIALIZE;
9713 return flags;
9716 /* Switch to an arbitrary section NAME with attributes as specified
9717 by FLAGS. ALIGN specifies any known alignment requirements for
9718 the section; 0 if the default should be used. */
9720 static void
9721 vms_asm_named_section (const char *name, unsigned int flags,
9722 tree decl ATTRIBUTE_UNUSED)
9724 fputc ('\n', asm_out_file);
9725 fprintf (asm_out_file, ".section\t%s", name);
9727 if (flags & SECTION_VMS_OVERLAY)
9728 fprintf (asm_out_file, ",OVR");
9729 if (flags & SECTION_VMS_GLOBAL)
9730 fprintf (asm_out_file, ",GBL");
9731 if (flags & SECTION_VMS_INITIALIZE)
9732 fprintf (asm_out_file, ",NOMOD");
9733 if (flags & SECTION_DEBUG)
9734 fprintf (asm_out_file, ",NOWRT");
9736 fputc ('\n', asm_out_file);
9739 /* Record an element in the table of global constructors. SYMBOL is
9740 a SYMBOL_REF of the function to be called; PRIORITY is a number
9741 between 0 and MAX_INIT_PRIORITY.
9743 Differs from default_ctors_section_asm_out_constructor in that the
9744 width of the .ctors entry is always 64 bits, rather than the 32 bits
9745 used by a normal pointer. */
9747 static void
9748 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9750 switch_to_section (ctors_section);
9751 assemble_align (BITS_PER_WORD);
9752 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9755 static void
9756 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9758 switch_to_section (dtors_section);
9759 assemble_align (BITS_PER_WORD);
9760 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9762 #else
9765 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9766 int is_local ATTRIBUTE_UNUSED)
9768 return NULL_RTX;
9772 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9773 tree cfundecl ATTRIBUTE_UNUSED,
9774 int lflag ATTRIBUTE_UNUSED,
9775 int rflag ATTRIBUTE_UNUSED)
9777 return NULL_RTX;
9780 #endif /* TARGET_ABI_OPEN_VMS */
9782 #if TARGET_ABI_UNICOSMK
9784 /* This evaluates to true if we do not know how to pass TYPE solely in
9785 registers. This is the case for all arguments that do not fit in two
9786 registers. */
9788 static bool
9789 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9791 if (type == NULL)
9792 return false;
9794 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9795 return true;
9796 if (TREE_ADDRESSABLE (type))
9797 return true;
9799 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9802 /* Define the offset between two registers, one to be eliminated, and the
9803 other its replacement, at the start of a routine. */
9806 unicosmk_initial_elimination_offset (int from, int to)
9808 int fixed_size;
9810 fixed_size = alpha_sa_size();
9811 if (fixed_size != 0)
9812 fixed_size += 48;
9814 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9815 return -fixed_size;
9816 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9817 return 0;
9818 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9819 return (ALPHA_ROUND (current_function_outgoing_args_size)
9820 + ALPHA_ROUND (get_frame_size()));
9821 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9822 return (ALPHA_ROUND (fixed_size)
9823 + ALPHA_ROUND (get_frame_size()
9824 + current_function_outgoing_args_size));
9825 else
9826 gcc_unreachable ();
9829 /* Output the module name for .ident and .end directives. We have to strip
9830 directories and make sure that the module name starts with a letter
9831 or '$'. */
9833 static void
9834 unicosmk_output_module_name (FILE *file)
9836 const char *name = lbasename (main_input_filename);
9837 unsigned len = strlen (name);
9838 char *clean_name = alloca (len + 2);
9839 char *ptr = clean_name;
9841 /* CAM only accepts module names that start with a letter or '$'. We
9842 prefix the module name with a '$' if necessary. */
9844 if (!ISALPHA (*name))
9845 *ptr++ = '$';
9846 memcpy (ptr, name, len + 1);
9847 clean_symbol_name (clean_name);
9848 fputs (clean_name, file);
9851 /* Output the definition of a common variable. */
9853 void
9854 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9856 tree name_tree;
9857 printf ("T3E__: common %s\n", name);
9859 in_section = NULL;
9860 fputs("\t.endp\n\n\t.psect ", file);
9861 assemble_name(file, name);
9862 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9863 fprintf(file, "\t.byte\t0:%d\n", size);
9865 /* Mark the symbol as defined in this module. */
9866 name_tree = get_identifier (name);
9867 TREE_ASM_WRITTEN (name_tree) = 1;
9870 #define SECTION_PUBLIC SECTION_MACH_DEP
9871 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9872 static int current_section_align;
9874 /* A get_unnamed_section callback for switching to the text section. */
9876 static void
9877 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9879 static int count = 0;
9880 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9883 /* A get_unnamed_section callback for switching to the data section. */
9885 static void
9886 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9888 static int count = 1;
9889 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9892 /* Implement TARGET_ASM_INIT_SECTIONS.
9894 The Cray assembler is really weird with respect to sections. It has only
9895 named sections and you can't reopen a section once it has been closed.
9896 This means that we have to generate unique names whenever we want to
9897 reenter the text or the data section. */
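/* For illustration (added): each return to the text section therefore
   emits a fresh psect such as "gcc@text___0", "gcc@text___1", ..., and
   the data section likewise cycles through "gcc@data___1",
   "gcc@data___2", ..., via the unnamed-section callbacks above.  */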
9899 static void
9900 unicosmk_init_sections (void)
9902 text_section = get_unnamed_section (SECTION_CODE,
9903 unicosmk_output_text_section_asm_op,
9904 NULL);
9905 data_section = get_unnamed_section (SECTION_WRITE,
9906 unicosmk_output_data_section_asm_op,
9907 NULL);
9908 readonly_data_section = data_section;
9911 static unsigned int
9912 unicosmk_section_type_flags (tree decl, const char *name,
9913 int reloc ATTRIBUTE_UNUSED)
9915 unsigned int flags = default_section_type_flags (decl, name, reloc);
9917 if (!decl)
9918 return flags;
9920 if (TREE_CODE (decl) == FUNCTION_DECL)
9922 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9923 if (align_functions_log > current_section_align)
9924 current_section_align = align_functions_log;
9926 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9927 flags |= SECTION_MAIN;
9929 else
9930 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9932 if (TREE_PUBLIC (decl))
9933 flags |= SECTION_PUBLIC;
9935 return flags;
9938 /* Generate a section name for decl and associate it with the
9939 declaration. */
9941 static void
9942 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9944 const char *name;
9945 int len;
9947 gcc_assert (decl);
9949 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9950 name = default_strip_name_encoding (name);
9951 len = strlen (name);
9953 if (TREE_CODE (decl) == FUNCTION_DECL)
9955 char *string;
9957 /* It is essential that we prefix the section name here because
9958 otherwise the section names generated for constructors and
9959 destructors confuse collect2. */
9961 string = alloca (len + 6);
9962 sprintf (string, "code@%s", name);
9963 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9965 else if (TREE_PUBLIC (decl))
9966 DECL_SECTION_NAME (decl) = build_string (len, name);
9967 else
9969 char *string;
9971 string = alloca (len + 6);
9972 sprintf (string, "data@%s", name);
9973 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9977 /* Switch to an arbitrary section NAME with attributes as specified
9978 by FLAGS. ALIGN specifies any known alignment requirements for
9979 the section; 0 if the default should be used. */
9981 static void
9982 unicosmk_asm_named_section (const char *name, unsigned int flags,
9983 tree decl ATTRIBUTE_UNUSED)
9985 const char *kind;
9987 /* Close the previous section. */
9989 fputs ("\t.endp\n\n", asm_out_file);
9991 /* Find out what kind of section we are opening. */
9993 if (flags & SECTION_MAIN)
9994 fputs ("\t.start\tmain\n", asm_out_file);
9996 if (flags & SECTION_CODE)
9997 kind = "code";
9998 else if (flags & SECTION_PUBLIC)
9999 kind = "common";
10000 else
10001 kind = "data";
10003 if (current_section_align != 0)
10004 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10005 current_section_align, kind);
10006 else
10007 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10010 static void
10011 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10013 if (DECL_P (decl)
10014 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10015 unicosmk_unique_section (decl, 0);
10018 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10019 in code sections because .align fills unused space with zeroes. */
10021 void
10022 unicosmk_output_align (FILE *file, int align)
10024 if (inside_function)
10025 fprintf (file, "\tgcc@code@align\t%d\n", align);
10026 else
10027 fprintf (file, "\t.align\t%d\n", align);
10030 /* Add a case vector to the current function's list of deferred case
10031 vectors. Case vectors have to be put into a separate section because CAM
10032 does not allow data definitions in code sections. */
10034 void
10035 unicosmk_defer_case_vector (rtx lab, rtx vec)
10037 struct machine_function *machine = cfun->machine;
10039 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10040 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10041 machine->addr_list);
10044 /* Output a case vector. */
10046 static void
10047 unicosmk_output_addr_vec (FILE *file, rtx vec)
10049 rtx lab = XEXP (vec, 0);
10050 rtx body = XEXP (vec, 1);
10051 int vlen = XVECLEN (body, 0);
10052 int idx;
10054 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10056 for (idx = 0; idx < vlen; idx++)
10058 ASM_OUTPUT_ADDR_VEC_ELT
10059 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10063 /* Output current function's deferred case vectors. */
10065 static void
10066 unicosmk_output_deferred_case_vectors (FILE *file)
10068 struct machine_function *machine = cfun->machine;
10069 rtx t;
10071 if (machine->addr_list == NULL_RTX)
10072 return;
10074 switch_to_section (data_section);
10075 for (t = machine->addr_list; t; t = XEXP (t, 1))
10076 unicosmk_output_addr_vec (file, XEXP (t, 0));
10079 /* Generate the name of the SSIB section for the current function. */
10081 #define SSIB_PREFIX "__SSIB_"
10082 #define SSIB_PREFIX_LEN 7
10084 static const char *
10085 unicosmk_ssib_name (void)
10087 /* This is ok since CAM won't be able to deal with names longer than that
10088 anyway. */
10090 static char name[256];
10092 rtx x;
10093 const char *fnname;
10094 int len;
10096 x = DECL_RTL (cfun->decl);
10097 gcc_assert (GET_CODE (x) == MEM);
10098 x = XEXP (x, 0);
10099 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10100 fnname = XSTR (x, 0);
10102 len = strlen (fnname);
10103 if (len + SSIB_PREFIX_LEN > 255)
10104 len = 255 - SSIB_PREFIX_LEN;
10106 strcpy (name, SSIB_PREFIX);
10107 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10108 name[len + SSIB_PREFIX_LEN] = 0;
10110 return name;
10113 /* Set up the dynamic subprogram information block (DSIB) and update the
10114 frame pointer register ($15) for subroutines which have a frame. If the
10115 subroutine doesn't have a frame, simply increment $15. */
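/* Layout sketch (added, inferred from the stores below): the 64-byte
   DSIB sits just below the new frame pointer; the return address is
   saved at sp+56, the old frame pointer at sp+48, the SSIB pointer at
   sp+32 and the CIW index (register $25) at sp+24, after which $15 is
   set to sp+64.  */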
10117 static void
10118 unicosmk_gen_dsib (unsigned long *imaskP)
10120 if (alpha_procedure_type == PT_STACK)
10122 const char *ssib_name;
10123 rtx mem;
10125 /* Allocate 64 bytes for the DSIB. */
10127 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10128 GEN_INT (-64))));
10129 emit_insn (gen_blockage ());
10131 /* Save the return address. */
10133 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10134 set_mem_alias_set (mem, alpha_sr_alias_set);
10135 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10136 (*imaskP) &= ~(1UL << REG_RA);
10138 /* Save the old frame pointer. */
10140 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10141 set_mem_alias_set (mem, alpha_sr_alias_set);
10142 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10143 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10145 emit_insn (gen_blockage ());
10147 /* Store the SSIB pointer. */
10149 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10150 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10151 set_mem_alias_set (mem, alpha_sr_alias_set);
10153 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10154 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10155 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10157 /* Save the CIW index. */
10159 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10160 set_mem_alias_set (mem, alpha_sr_alias_set);
10161 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10163 emit_insn (gen_blockage ());
10165 /* Set the new frame pointer. */
10167 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10168 stack_pointer_rtx, GEN_INT (64))));
10171 else
10173 /* Increment the frame pointer register to indicate that we do not
10174 have a frame. */
10176 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10177 hard_frame_pointer_rtx, const1_rtx)));
10181 /* Output the static subroutine information block for the current
10182 function. */
10184 static void
10185 unicosmk_output_ssib (FILE *file, const char *fnname)
10187 int len;
10188 int i;
10189 rtx x;
10190 rtx ciw;
10191 struct machine_function *machine = cfun->machine;
10193 in_section = NULL;
10194 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10195 unicosmk_ssib_name ());
10197 /* Some required stuff and the function name length. */
10199 len = strlen (fnname);
10200 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10202 /* Saved registers
10203 ??? We don't do that yet. */
10205 fputs ("\t.quad\t0\n", file);
10207 /* Function address. */
10209 fputs ("\t.quad\t", file);
10210 assemble_name (file, fnname);
10211 putc ('\n', file);
10213 fputs ("\t.quad\t0\n", file);
10214 fputs ("\t.quad\t0\n", file);
10216 /* Function name.
10217 ??? We do it the same way Cray CC does it but this could be
10218 simplified. */
10220 for( i = 0; i < len; i++ )
10221 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10222 if( (len % 8) == 0 )
10223 fputs ("\t.quad\t0\n", file);
10224 else
10225 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10227 /* All call information words used in the function. */
10229 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10231 ciw = XEXP (x, 0);
10232 #if HOST_BITS_PER_WIDE_INT == 32
10233 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10234 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10235 #else
10236 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10237 #endif
10241 /* Add a call information word (CIW) to the list of the current function's
10242 CIWs and return its index.
10244 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10247 unicosmk_add_call_info_word (rtx x)
10249 rtx node;
10250 struct machine_function *machine = cfun->machine;
10252 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10253 if (machine->first_ciw == NULL_RTX)
10254 machine->first_ciw = node;
10255 else
10256 XEXP (machine->last_ciw, 1) = node;
10258 machine->last_ciw = node;
10259 ++machine->ciw_count;
10261 return GEN_INT (machine->ciw_count
10262 + strlen (current_function_name ())/8 + 5);
10265 /* The Cray assembler doesn't accept extern declarations for symbols which
10266 are defined in the same file. We have to keep track of all global
10267 symbols which are referenced and/or defined in a source file and output
10268 extern declarations for those which are referenced but not defined at
10269 the end of the file. */
10271 /* List of identifiers for which an extern declaration might have to be
10272 emitted. */
10273 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10275 struct unicosmk_extern_list
10277 struct unicosmk_extern_list *next;
10278 const char *name;
10281 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10283 /* Output extern declarations which are required for every asm file. */
10285 static void
10286 unicosmk_output_default_externs (FILE *file)
10288 static const char *const externs[] =
10289 { "__T3E_MISMATCH" };
10291 int i;
10292 int n;
10294 n = ARRAY_SIZE (externs);
10296 for (i = 0; i < n; i++)
10297 fprintf (file, "\t.extern\t%s\n", externs[i]);
10300 /* Output extern declarations for global symbols which have been
10301 referenced but not defined. */
10303 static void
10304 unicosmk_output_externs (FILE *file)
10306 struct unicosmk_extern_list *p;
10307 const char *real_name;
10308 int len;
10309 tree name_tree;
10311 len = strlen (user_label_prefix);
10312 for (p = unicosmk_extern_head; p != 0; p = p->next)
10314 /* We have to strip the encoding and possibly remove user_label_prefix
10315 from the identifier in order to handle -fleading-underscore and
10316 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10317 real_name = default_strip_name_encoding (p->name);
10318 if (len && p->name[0] == '*'
10319 && !memcmp (real_name, user_label_prefix, len))
10320 real_name += len;
10322 name_tree = get_identifier (real_name);
10323 if (! TREE_ASM_WRITTEN (name_tree))
10325 TREE_ASM_WRITTEN (name_tree) = 1;
10326 fputs ("\t.extern\t", file);
10327 assemble_name (file, p->name);
10328 putc ('\n', file);
10333 /* Record an extern. */
10335 void
10336 unicosmk_add_extern (const char *name)
10338 struct unicosmk_extern_list *p;
10340 p = (struct unicosmk_extern_list *)
10341 xmalloc (sizeof (struct unicosmk_extern_list));
10342 p->next = unicosmk_extern_head;
10343 p->name = name;
10344 unicosmk_extern_head = p;
10347 /* The Cray assembler generates incorrect code if identifiers which
10348 conflict with register names are used as instruction operands. We have
10349 to replace such identifiers with DEX expressions. */
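/* Example (added): a global object named "r5" or "f10" clashes with
   CAM's register spelling, so such names are assigned a DEX number and
   bound to it in the .dexstart/.dexend block emitted by
   unicosmk_output_dex below.  */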
10351 /* Structure to collect identifiers which have been replaced by DEX
10352 expressions. */
10353 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10355 struct unicosmk_dex {
10356 struct unicosmk_dex *next;
10357 const char *name;
10360 /* List of identifiers which have been replaced by DEX expressions. The DEX
10361 number is determined by the position in the list. */
10363 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10365 /* The number of elements in the DEX list. */
10367 static int unicosmk_dex_count = 0;
10369 /* Check if NAME must be replaced by a DEX expression. */
10371 static int
10372 unicosmk_special_name (const char *name)
10374 if (name[0] == '*')
10375 ++name;
10377 if (name[0] == '$')
10378 ++name;
10380 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10381 return 0;
10383   switch (name[1])
10384     {
10385 case '1': case '2':
10386 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10388 case '3':
10389 return (name[2] == '\0'
10390 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10392 default:
10393       return (ISDIGIT (name[1]) && name[2] == '\0');
10394     }
10395 }
10397 /* Return the DEX number if X must be replaced by a DEX expression and 0
10398 otherwise. */
10400 static int
10401 unicosmk_need_dex (rtx x)
10402 {
10403 struct unicosmk_dex *dex;
10404 const char *name;
10405 int i;
10407 if (GET_CODE (x) != SYMBOL_REF)
10408 return 0;
10410 name = XSTR (x,0);
10411 if (! unicosmk_special_name (name))
10412 return 0;
10414 i = unicosmk_dex_count;
10415   for (dex = unicosmk_dex_list; dex; dex = dex->next)
10416     {
10417 if (! strcmp (name, dex->name))
10418 return i;
10419       --i;
10420     }
10422 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10423 dex->name = name;
10424 dex->next = unicosmk_dex_list;
10425 unicosmk_dex_list = dex;
10427 ++unicosmk_dex_count;
10428   return unicosmk_dex_count;
10429 }
10431 /* Output the DEX definitions for this file. */
10433 static void
10434 unicosmk_output_dex (FILE *file)
10435 {
10436 struct unicosmk_dex *dex;
10437 int i;
10439 if (unicosmk_dex_list == NULL)
10440 return;
10442 fprintf (file, "\t.dexstart\n");
10444 i = unicosmk_dex_count;
10445   for (dex = unicosmk_dex_list; dex; dex = dex->next)
10446     {
10447 fprintf (file, "\tDEX (%d) = ", i);
10448 assemble_name (file, dex->name);
10449 putc ('\n', file);
10450       --i;
10451     }
10453   fprintf (file, "\t.dexend\n");
10454 }
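/* For instance, if "f10" were recorded first and "r5" second, the block
   emitted above would read:

	.dexstart
	DEX (2) = r5
	DEX (1) = f10
	.dexend  */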
10456 /* Output text to appear at the beginning of an assembler file. */
10458 static void
10459 unicosmk_file_start (void)
10460 {
10461 int i;
10463 fputs ("\t.ident\t", asm_out_file);
10464 unicosmk_output_module_name (asm_out_file);
10465 fputs ("\n\n", asm_out_file);
10467 /* The Unicos/Mk assembler uses different register names. Instead of trying
10468 to support them, we simply use micro definitions. */
10470 /* CAM has different register names: rN for the integer register N and fN
10471 for the floating-point register N. Instead of trying to use these in
10472 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10473 register. */
10475 for (i = 0; i < 32; ++i)
10476 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10478 for (i = 0; i < 32; ++i)
10479 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10481 putc ('\n', asm_out_file);
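  /* The two loops above emit one micro definition per register, e.g.
     "$7 <- r7" and "$f7 <- f7".  */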
10483 /* The .align directive fills unused space with zeroes, which does not work
10484 in code sections. We define the macro 'gcc@code@align' which uses nops
10485 instead. Note that it assumes that code sections always have the
10486 biggest possible alignment since . refers to the current offset from
10487 the beginning of the section. */
10489 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10490 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10491 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10492 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10493 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10494 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10495 fputs ("\t.endr\n", asm_out_file);
10496 fputs ("\t.endif\n", asm_out_file);
10497 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
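  /* "bis r31,r31,r31" is the canonical Alpha integer no-op (register 31
     always reads as zero and writes to it are discarded), so the .repeat
     loop in the macro pads code sections with harmless instructions.  */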
10499 /* Output extern declarations which should always be visible. */
10500 unicosmk_output_default_externs (asm_out_file);
10502 /* Open a dummy section. We always need to be inside a section for the
10503 section-switching code to work correctly.
10504 ??? This should be a module id or something like that. I still have to
10505 figure out what the rules for those are. */
10506   fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10507 }
10509 /* Output text to appear at the end of an assembler file. This includes all
10510 pending extern declarations and DEX expressions. */
10512 static void
10513 unicosmk_file_end (void)
10514 {
10515 fputs ("\t.endp\n\n", asm_out_file);
10517 /* Output all pending externs. */
10519 unicosmk_output_externs (asm_out_file);
10521 /* Output dex definitions used for functions whose names conflict with
10522 register names. */
10524 unicosmk_output_dex (asm_out_file);
10526 fputs ("\t.end\t", asm_out_file);
10527 unicosmk_output_module_name (asm_out_file);
10528   putc ('\n', asm_out_file);
10529 }
10531 #else
10533 static void
10534 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10535 {}
10537 static void
10538 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10539 {}
10541 static void
10542 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10543 		      const char * fnname ATTRIBUTE_UNUSED)
10544 {}
10546 rtx
10547 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10548 {
10549   return NULL_RTX;
10550 }
10552 static int
10553 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10554 {
10555   return 0;
10556 }
10558 #endif /* TARGET_ABI_UNICOSMK */
10560 static void
10561 alpha_init_libfuncs (void)
10562 {
10563   if (TARGET_ABI_UNICOSMK)
10564     {
10565 /* Prevent gcc from generating calls to __divsi3. */
10566 set_optab_libfunc (sdiv_optab, SImode, 0);
10567 set_optab_libfunc (udiv_optab, SImode, 0);
10569 /* Use the functions provided by the system library
10570 for DImode integer division. */
10571 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10572       set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10573     }
10574   else if (TARGET_ABI_OPEN_VMS)
10575     {
10576 /* Use the VMS runtime library functions for division and
10577 remainder. */
10578 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10579 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10580 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10581 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10582 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10583 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10584 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10585       set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10586     }
10587 }
10590 /* Initialize the GCC target structure. */
10591 #if TARGET_ABI_OPEN_VMS
10592 # undef TARGET_ATTRIBUTE_TABLE
10593 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10594 # undef TARGET_SECTION_TYPE_FLAGS
10595 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10596 #endif
10598 #undef TARGET_IN_SMALL_DATA_P
10599 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10601 #if TARGET_ABI_UNICOSMK
10602 # undef TARGET_INSERT_ATTRIBUTES
10603 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10604 # undef TARGET_SECTION_TYPE_FLAGS
10605 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10606 # undef TARGET_ASM_UNIQUE_SECTION
10607 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10608 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10609 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10610 # undef TARGET_ASM_GLOBALIZE_LABEL
10611 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10612 # undef TARGET_MUST_PASS_IN_STACK
10613 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10614 #endif
10616 #undef TARGET_ASM_ALIGNED_HI_OP
10617 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10618 #undef TARGET_ASM_ALIGNED_DI_OP
10619 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10621 /* Default unaligned ops are provided for ELF systems. To get unaligned
10622 data for non-ELF systems, we have to turn off auto alignment. */
10623 #ifndef OBJECT_FORMAT_ELF
10624 #undef TARGET_ASM_UNALIGNED_HI_OP
10625 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10626 #undef TARGET_ASM_UNALIGNED_SI_OP
10627 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10628 #undef TARGET_ASM_UNALIGNED_DI_OP
10629 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10630 #endif
10632 #ifdef OBJECT_FORMAT_ELF
10633 #undef TARGET_ASM_SELECT_RTX_SECTION
10634 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10635 #endif
10637 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10638 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10640 #undef TARGET_INIT_LIBFUNCS
10641 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10643 #if TARGET_ABI_UNICOSMK
10644 #undef TARGET_ASM_FILE_START
10645 #define TARGET_ASM_FILE_START unicosmk_file_start
10646 #undef TARGET_ASM_FILE_END
10647 #define TARGET_ASM_FILE_END unicosmk_file_end
10648 #else
10649 #undef TARGET_ASM_FILE_START
10650 #define TARGET_ASM_FILE_START alpha_file_start
10651 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10652 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10653 #endif
10655 #undef TARGET_SCHED_ADJUST_COST
10656 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10657 #undef TARGET_SCHED_ISSUE_RATE
10658 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10659 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10660 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10661 alpha_multipass_dfa_lookahead
10663 #undef TARGET_HAVE_TLS
10664 #define TARGET_HAVE_TLS HAVE_AS_TLS
10666 #undef TARGET_INIT_BUILTINS
10667 #define TARGET_INIT_BUILTINS alpha_init_builtins
10668 #undef TARGET_EXPAND_BUILTIN
10669 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10670 #undef TARGET_FOLD_BUILTIN
10671 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10673 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10674 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10675 #undef TARGET_CANNOT_COPY_INSN_P
10676 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10677 #undef TARGET_CANNOT_FORCE_CONST_MEM
10678 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10680 #if TARGET_ABI_OSF
10681 #undef TARGET_ASM_OUTPUT_MI_THUNK
10682 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10683 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10684 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10685 #undef TARGET_STDARG_OPTIMIZE_HOOK
10686 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10687 #endif
10689 #undef TARGET_RTX_COSTS
10690 #define TARGET_RTX_COSTS alpha_rtx_costs
10691 #undef TARGET_ADDRESS_COST
10692 #define TARGET_ADDRESS_COST hook_int_rtx_0
10694 #undef TARGET_MACHINE_DEPENDENT_REORG
10695 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10697 #undef TARGET_PROMOTE_FUNCTION_ARGS
10698 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10699 #undef TARGET_PROMOTE_FUNCTION_RETURN
10700 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10701 #undef TARGET_PROMOTE_PROTOTYPES
10702 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10703 #undef TARGET_RETURN_IN_MEMORY
10704 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10705 #undef TARGET_PASS_BY_REFERENCE
10706 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10707 #undef TARGET_SETUP_INCOMING_VARARGS
10708 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10709 #undef TARGET_STRICT_ARGUMENT_NAMING
10710 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10711 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10712 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10713 #undef TARGET_SPLIT_COMPLEX_ARG
10714 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10715 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10716 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10717 #undef TARGET_ARG_PARTIAL_BYTES
10718 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10720 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10721 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10722 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10723 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10725 #undef TARGET_BUILD_BUILTIN_VA_LIST
10726 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10728 /* The Alpha architecture does not require sequential consistency. See
10729 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10730 for an example of how it can be violated in practice. */
10731 #undef TARGET_RELAXED_ORDERING
10732 #define TARGET_RELAXED_ORDERING true
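/* Illustrative sketch: even data-dependent loads may complete out of order
   on Alpha, so in a producer/consumer pattern along the lines of

	writer:  obj->data = 42;  wmb ();  ptr = obj;
	reader:  p = ptr;  if (p) use (p->data);

   the reader can still see a stale p->data unless it issues its own read
   barrier between the two loads.  Hence language runtimes must not assume
   sequentially consistent behaviour on this target.  */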
10734 #undef TARGET_DEFAULT_TARGET_FLAGS
10735 #define TARGET_DEFAULT_TARGET_FLAGS \
10736 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10737 #undef TARGET_HANDLE_OPTION
10738 #define TARGET_HANDLE_OPTION alpha_handle_option
10740 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10741 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
10742 #define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
10743 #endif
10745 struct gcc_target targetm = TARGET_INITIALIZER;
10748 #include "gt-alpha.h"