* config/alpha/alpha.c (alpha_start_function): Use switch_to_section.
[official-gcc.git] / gcc / config / alpha / alpha.c
blob244aa271204240e47afd56a7e007eaa8b71d8c9c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
59 /* Specify which cpu to schedule for. */
60 enum processor_type alpha_tune;
62 /* Which cpu we're generating code for. */
63 enum processor_type alpha_cpu;
65 static const char * const alpha_cpu_name[] =
67 "ev4", "ev5", "ev6"
70 /* Specify how accurate floating-point traps need to be. */
72 enum alpha_trap_precision alpha_tp;
74 /* Specify the floating-point rounding mode. */
76 enum alpha_fp_rounding_mode alpha_fprm;
78 /* Specify which things cause traps. */
80 enum alpha_fp_trap_mode alpha_fptm;
82 /* Save information from a "cmpxx" operation until the branch or scc is
83 emitted. */
85 struct alpha_compare alpha_compare;
87 /* Nonzero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
90 static int inside_function = FALSE;
92 /* The number of cycles of latency we should assume on memory reads. */
94 int alpha_memory_latency = 3;
96 /* Whether the function needs the GP. */
98 static int alpha_function_needs_gp;
100 /* The alias set for prologue/epilogue register save/restore. */
102 static GTY(()) int alpha_sr_alias_set;
104 /* The assembler name of the current function. */
106 static const char *alpha_fnname;
108 /* The next explicit relocation sequence number. */
109 extern GTY(()) int alpha_next_sequence_number;
110 int alpha_next_sequence_number = 1;
112 /* The literal and gpdisp sequence numbers for this insn, as printed
113 by %# and %* respectively. */
114 extern GTY(()) int alpha_this_literal_sequence_number;
115 extern GTY(()) int alpha_this_gpdisp_sequence_number;
116 int alpha_this_literal_sequence_number;
117 int alpha_this_gpdisp_sequence_number;
119 /* Costs of various operations on the different architectures. */
121 struct alpha_rtx_cost_data
123 unsigned char fp_add;
124 unsigned char fp_mult;
125 unsigned char fp_div_sf;
126 unsigned char fp_div_df;
127 unsigned char int_mult_si;
128 unsigned char int_mult_di;
129 unsigned char int_shift;
130 unsigned char int_cmov;
131 unsigned short int_div;
134 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
136 { /* EV4 */
137 COSTS_N_INSNS (6), /* fp_add */
138 COSTS_N_INSNS (6), /* fp_mult */
139 COSTS_N_INSNS (34), /* fp_div_sf */
140 COSTS_N_INSNS (63), /* fp_div_df */
141 COSTS_N_INSNS (23), /* int_mult_si */
142 COSTS_N_INSNS (23), /* int_mult_di */
143 COSTS_N_INSNS (2), /* int_shift */
144 COSTS_N_INSNS (2), /* int_cmov */
145 COSTS_N_INSNS (97), /* int_div */
147 { /* EV5 */
148 COSTS_N_INSNS (4), /* fp_add */
149 COSTS_N_INSNS (4), /* fp_mult */
150 COSTS_N_INSNS (15), /* fp_div_sf */
151 COSTS_N_INSNS (22), /* fp_div_df */
152 COSTS_N_INSNS (8), /* int_mult_si */
153 COSTS_N_INSNS (12), /* int_mult_di */
154 COSTS_N_INSNS (1) + 1, /* int_shift */
155 COSTS_N_INSNS (1), /* int_cmov */
156 COSTS_N_INSNS (83), /* int_div */
158 { /* EV6 */
159 COSTS_N_INSNS (4), /* fp_add */
160 COSTS_N_INSNS (4), /* fp_mult */
161 COSTS_N_INSNS (12), /* fp_div_sf */
162 COSTS_N_INSNS (15), /* fp_div_df */
163 COSTS_N_INSNS (7), /* int_mult_si */
164 COSTS_N_INSNS (7), /* int_mult_di */
165 COSTS_N_INSNS (1), /* int_shift */
166 COSTS_N_INSNS (2), /* int_cmov */
167 COSTS_N_INSNS (86), /* int_div */
171 /* Similar but tuned for code size instead of execution latency. The
172 extra +N is fractional cost tuning based on latency. It's used to
173 encourage use of cheaper insns like shift, but only if there's just
174 one of them. */
176 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
178 COSTS_N_INSNS (1), /* fp_add */
179 COSTS_N_INSNS (1), /* fp_mult */
180 COSTS_N_INSNS (1), /* fp_div_sf */
181 COSTS_N_INSNS (1) + 1, /* fp_div_df */
182 COSTS_N_INSNS (1) + 1, /* int_mult_si */
183 COSTS_N_INSNS (1) + 2, /* int_mult_di */
184 COSTS_N_INSNS (1), /* int_shift */
185 COSTS_N_INSNS (1), /* int_cmov */
186 COSTS_N_INSNS (6), /* int_div */
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
192 #else
193 #define NUM_ARGS current_function_args_info
194 #endif
196 #define REG_PV 27
197 #define REG_RA 26
199 /* Declarations of static functions. */
200 static struct machine_function *alpha_init_machine_status (void);
201 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
203 #if TARGET_ABI_OPEN_VMS
204 static void alpha_write_linkage (FILE *, const char *, tree);
205 #endif
207 static void unicosmk_output_deferred_case_vectors (FILE *);
208 static void unicosmk_gen_dsib (unsigned long *);
209 static void unicosmk_output_ssib (FILE *, const char *);
210 static int unicosmk_need_dex (rtx);
212 /* Implement TARGET_HANDLE_OPTION. */
214 static bool
215 alpha_handle_option (size_t code, const char *arg, int value)
217 switch (code)
219 case OPT_mfp_regs:
220 if (value == 0)
221 target_flags |= MASK_SOFT_FP;
222 break;
224 case OPT_mieee:
225 case OPT_mieee_with_inexact:
226 target_flags |= MASK_IEEE_CONFORMANT;
227 break;
229 case OPT_mtls_size_:
230 if (value != 16 && value != 32 && value != 64)
231 error ("bad value %qs for -mtls-size switch", arg);
232 break;
235 return true;
238 /* Parse target option strings. */
240 void
241 override_options (void)
243 static const struct cpu_table {
244 const char *const name;
245 const enum processor_type processor;
246 const int flags;
247 } cpu_table[] = {
248 { "ev4", PROCESSOR_EV4, 0 },
249 { "ev45", PROCESSOR_EV4, 0 },
250 { "21064", PROCESSOR_EV4, 0 },
251 { "ev5", PROCESSOR_EV5, 0 },
252 { "21164", PROCESSOR_EV5, 0 },
253 { "ev56", PROCESSOR_EV5, MASK_BWX },
254 { "21164a", PROCESSOR_EV5, MASK_BWX },
255 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
256 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
257 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
258 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
259 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
260 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
261 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
262 { 0, 0, 0 }
265 int i;
267 /* Unicos/Mk doesn't have shared libraries. */
268 if (TARGET_ABI_UNICOSMK && flag_pic)
270 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
271 (flag_pic > 1) ? "PIC" : "pic");
272 flag_pic = 0;
275 /* On Unicos/Mk, the native compiler consistently generates /d suffices for
276 floating-point instructions. Make that the default for this target. */
277 if (TARGET_ABI_UNICOSMK)
278 alpha_fprm = ALPHA_FPRM_DYN;
279 else
280 alpha_fprm = ALPHA_FPRM_NORM;
282 alpha_tp = ALPHA_TP_PROG;
283 alpha_fptm = ALPHA_FPTM_N;
285 /* We cannot use su and sui qualifiers for conversion instructions on
286 Unicos/Mk. I'm not sure if this is due to assembler or hardware
287 limitations. Right now, we issue a warning if -mieee is specified
288 and then ignore it; eventually, we should either get it right or
289 disable the option altogether. */
291 if (TARGET_IEEE)
293 if (TARGET_ABI_UNICOSMK)
294 warning (0, "-mieee not supported on Unicos/Mk");
295 else
297 alpha_tp = ALPHA_TP_INSN;
298 alpha_fptm = ALPHA_FPTM_SU;
302 if (TARGET_IEEE_WITH_INEXACT)
304 if (TARGET_ABI_UNICOSMK)
305 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
306 else
308 alpha_tp = ALPHA_TP_INSN;
309 alpha_fptm = ALPHA_FPTM_SUI;
313 if (alpha_tp_string)
315 if (! strcmp (alpha_tp_string, "p"))
316 alpha_tp = ALPHA_TP_PROG;
317 else if (! strcmp (alpha_tp_string, "f"))
318 alpha_tp = ALPHA_TP_FUNC;
319 else if (! strcmp (alpha_tp_string, "i"))
320 alpha_tp = ALPHA_TP_INSN;
321 else
322 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
325 if (alpha_fprm_string)
327 if (! strcmp (alpha_fprm_string, "n"))
328 alpha_fprm = ALPHA_FPRM_NORM;
329 else if (! strcmp (alpha_fprm_string, "m"))
330 alpha_fprm = ALPHA_FPRM_MINF;
331 else if (! strcmp (alpha_fprm_string, "c"))
332 alpha_fprm = ALPHA_FPRM_CHOP;
333 else if (! strcmp (alpha_fprm_string,"d"))
334 alpha_fprm = ALPHA_FPRM_DYN;
335 else
336 error ("bad value %qs for -mfp-rounding-mode switch",
337 alpha_fprm_string);
340 if (alpha_fptm_string)
342 if (strcmp (alpha_fptm_string, "n") == 0)
343 alpha_fptm = ALPHA_FPTM_N;
344 else if (strcmp (alpha_fptm_string, "u") == 0)
345 alpha_fptm = ALPHA_FPTM_U;
346 else if (strcmp (alpha_fptm_string, "su") == 0)
347 alpha_fptm = ALPHA_FPTM_SU;
348 else if (strcmp (alpha_fptm_string, "sui") == 0)
349 alpha_fptm = ALPHA_FPTM_SUI;
350 else
351 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
354 if (alpha_cpu_string)
356 for (i = 0; cpu_table [i].name; i++)
357 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
359 alpha_tune = alpha_cpu = cpu_table [i].processor;
360 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
361 target_flags |= cpu_table [i].flags;
362 break;
364 if (! cpu_table [i].name)
365 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
368 if (alpha_tune_string)
370 for (i = 0; cpu_table [i].name; i++)
371 if (! strcmp (alpha_tune_string, cpu_table [i].name))
373 alpha_tune = cpu_table [i].processor;
374 break;
376 if (! cpu_table [i].name)
377 error ("bad value %qs for -mcpu switch", alpha_tune_string);
380 /* Do some sanity checks on the above options. */
382 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
384 warning (0, "trap mode not supported on Unicos/Mk");
385 alpha_fptm = ALPHA_FPTM_N;
388 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
389 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
391 warning (0, "fp software completion requires -mtrap-precision=i");
392 alpha_tp = ALPHA_TP_INSN;
395 if (alpha_cpu == PROCESSOR_EV6)
397 /* Except for EV6 pass 1 (not released), we always have precise
398 arithmetic traps. Which means we can do software completion
399 without minding trap shadows. */
400 alpha_tp = ALPHA_TP_PROG;
403 if (TARGET_FLOAT_VAX)
405 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
407 warning (0, "rounding mode not supported for VAX floats");
408 alpha_fprm = ALPHA_FPRM_NORM;
410 if (alpha_fptm == ALPHA_FPTM_SUI)
412 warning (0, "trap mode not supported for VAX floats");
413 alpha_fptm = ALPHA_FPTM_SU;
415 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
416 warning (0, "128-bit long double not supported for VAX floats");
417 target_flags &= ~MASK_LONG_DOUBLE_128;
421 char *end;
422 int lat;
424 if (!alpha_mlat_string)
425 alpha_mlat_string = "L1";
427 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
428 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
430 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
431 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
432 && alpha_mlat_string[2] == '\0')
434 static int const cache_latency[][4] =
436 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
437 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
438 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
441 lat = alpha_mlat_string[1] - '0';
442 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
444 warning (0, "L%d cache latency unknown for %s",
445 lat, alpha_cpu_name[alpha_tune]);
446 lat = 3;
448 else
449 lat = cache_latency[alpha_tune][lat-1];
451 else if (! strcmp (alpha_mlat_string, "main"))
453 /* Most current memories have about 370ns latency. This is
454 a reasonable guess for a fast cpu. */
455 lat = 150;
457 else
459 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
460 lat = 3;
463 alpha_memory_latency = lat;
466 /* Default the definition of "small data" to 8 bytes. */
467 if (!g_switch_set)
468 g_switch_value = 8;
470 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
471 if (flag_pic == 1)
472 target_flags |= MASK_SMALL_DATA;
473 else if (flag_pic == 2)
474 target_flags &= ~MASK_SMALL_DATA;
476 /* Align labels and loops for optimal branching. */
477 /* ??? Kludge these by not doing anything if we don't optimize and also if
478 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
479 if (optimize > 0 && write_symbols != SDB_DEBUG)
481 if (align_loops <= 0)
482 align_loops = 16;
483 if (align_jumps <= 0)
484 align_jumps = 16;
486 if (align_functions <= 0)
487 align_functions = 16;
489 /* Acquire a unique set number for our register saves and restores. */
490 alpha_sr_alias_set = new_alias_set ();
492 /* Register variables and functions with the garbage collector. */
494 /* Set up function hooks. */
495 init_machine_status = alpha_init_machine_status;
497 /* Tell the compiler when we're using VAX floating point. */
498 if (TARGET_FLOAT_VAX)
500 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
501 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
502 REAL_MODE_FORMAT (TFmode) = NULL;
506 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
509 zap_mask (HOST_WIDE_INT value)
511 int i;
513 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
514 i++, value >>= 8)
515 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
516 return 0;
518 return 1;
521 /* Return true if OP is valid for a particular TLS relocation.
522 We are already guaranteed that OP is a CONST. */
525 tls_symbolic_operand_1 (rtx op, int size, int unspec)
527 op = XEXP (op, 0);
529 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
530 return 0;
531 op = XVECEXP (op, 0, 0);
533 if (GET_CODE (op) != SYMBOL_REF)
534 return 0;
536 switch (SYMBOL_REF_TLS_MODEL (op))
538 case TLS_MODEL_LOCAL_DYNAMIC:
539 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
540 case TLS_MODEL_INITIAL_EXEC:
541 return unspec == UNSPEC_TPREL && size == 64;
542 case TLS_MODEL_LOCAL_EXEC:
543 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
544 default:
545 gcc_unreachable ();
549 /* Used by aligned_memory_operand and unaligned_memory_operand to
550 resolve what reload is going to do with OP if it's a register. */
553 resolve_reload_operand (rtx op)
555 if (reload_in_progress)
557 rtx tmp = op;
558 if (GET_CODE (tmp) == SUBREG)
559 tmp = SUBREG_REG (tmp);
560 if (GET_CODE (tmp) == REG
561 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
563 op = reg_equiv_memory_loc[REGNO (tmp)];
564 if (op == 0)
565 return 0;
568 return op;
571 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
572 the range defined for C in [I-P]. */
574 bool
575 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
577 switch (c)
579 case 'I':
580 /* An unsigned 8 bit constant. */
581 return (unsigned HOST_WIDE_INT) value < 0x100;
582 case 'J':
583 /* The constant zero. */
584 return value == 0;
585 case 'K':
586 /* A signed 16 bit constant. */
587 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
588 case 'L':
589 /* A shifted signed 16 bit constant appropriate for LDAH. */
590 return ((value & 0xffff) == 0
591 && ((value) >> 31 == -1 || value >> 31 == 0));
592 case 'M':
593 /* A constant that can be AND'ed with using a ZAP insn. */
594 return zap_mask (value);
595 case 'N':
596 /* A complemented unsigned 8 bit constant. */
597 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
598 case 'O':
599 /* A negated unsigned 8 bit constant. */
600 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
601 case 'P':
602 /* The constant 1, 2 or 3. */
603 return value == 1 || value == 2 || value == 3;
605 default:
606 return false;
610 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
611 matches for C in [GH]. */
613 bool
614 alpha_const_double_ok_for_letter_p (rtx value, int c)
616 switch (c)
618 case 'G':
619 /* The floating point zero constant. */
620 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
621 && value == CONST0_RTX (GET_MODE (value)));
623 case 'H':
624 /* A valid operand of a ZAP insn. */
625 return (GET_MODE (value) == VOIDmode
626 && zap_mask (CONST_DOUBLE_LOW (value))
627 && zap_mask (CONST_DOUBLE_HIGH (value)));
629 default:
630 return false;
634 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
635 matches for C. */
637 bool
638 alpha_extra_constraint (rtx value, int c)
640 switch (c)
642 case 'Q':
643 return normal_memory_operand (value, VOIDmode);
644 case 'R':
645 return direct_call_operand (value, Pmode);
646 case 'S':
647 return (GET_CODE (value) == CONST_INT
648 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
649 case 'T':
650 return GET_CODE (value) == HIGH;
651 case 'U':
652 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
653 case 'W':
654 return (GET_CODE (value) == CONST_VECTOR
655 && value == CONST0_RTX (GET_MODE (value)));
656 default:
657 return false;
661 /* The scalar modes supported differs from the default check-what-c-supports
662 version in that sometimes TFmode is available even when long double
663 indicates only DFmode. On unicosmk, we have the situation that HImode
664 doesn't map to any C type, but of course we still support that. */
666 static bool
667 alpha_scalar_mode_supported_p (enum machine_mode mode)
669 switch (mode)
671 case QImode:
672 case HImode:
673 case SImode:
674 case DImode:
675 case TImode: /* via optabs.c */
676 return true;
678 case SFmode:
679 case DFmode:
680 return true;
682 case TFmode:
683 return TARGET_HAS_XFLOATING_LIBS;
685 default:
686 return false;
690 /* Alpha implements a couple of integer vector mode operations when
691 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
692 which allows the vectorizer to operate on e.g. move instructions,
693 or when expand_vector_operations can do something useful. */
695 static bool
696 alpha_vector_mode_supported_p (enum machine_mode mode)
698 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
701 /* Return 1 if this function can directly return via $26. */
704 direct_return (void)
706 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
707 && reload_completed
708 && alpha_sa_size () == 0
709 && get_frame_size () == 0
710 && current_function_outgoing_args_size == 0
711 && current_function_pretend_args_size == 0);
714 /* Return the ADDR_VEC associated with a tablejump insn. */
717 alpha_tablejump_addr_vec (rtx insn)
719 rtx tmp;
721 tmp = JUMP_LABEL (insn);
722 if (!tmp)
723 return NULL_RTX;
724 tmp = NEXT_INSN (tmp);
725 if (!tmp)
726 return NULL_RTX;
727 if (GET_CODE (tmp) == JUMP_INSN
728 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
729 return PATTERN (tmp);
730 return NULL_RTX;
733 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
736 alpha_tablejump_best_label (rtx insn)
738 rtx jump_table = alpha_tablejump_addr_vec (insn);
739 rtx best_label = NULL_RTX;
741 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
742 there for edge frequency counts from profile data. */
744 if (jump_table)
746 int n_labels = XVECLEN (jump_table, 1);
747 int best_count = -1;
748 int i, j;
750 for (i = 0; i < n_labels; i++)
752 int count = 1;
754 for (j = i + 1; j < n_labels; j++)
755 if (XEXP (XVECEXP (jump_table, 1, i), 0)
756 == XEXP (XVECEXP (jump_table, 1, j), 0))
757 count++;
759 if (count > best_count)
760 best_count = count, best_label = XVECEXP (jump_table, 1, i);
764 return best_label ? best_label : const0_rtx;
767 /* Return the TLS model to use for SYMBOL. */
769 static enum tls_model
770 tls_symbolic_operand_type (rtx symbol)
772 enum tls_model model;
774 if (GET_CODE (symbol) != SYMBOL_REF)
775 return 0;
776 model = SYMBOL_REF_TLS_MODEL (symbol);
778 /* Local-exec with a 64-bit size is the same code as initial-exec. */
779 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
780 model = TLS_MODEL_INITIAL_EXEC;
782 return model;
785 /* Return true if the function DECL will share the same GP as any
786 function in the current unit of translation. */
788 static bool
789 decl_has_samegp (tree decl)
791 /* Functions that are not local can be overridden, and thus may
792 not share the same gp. */
793 if (!(*targetm.binds_local_p) (decl))
794 return false;
796 /* If -msmall-data is in effect, assume that there is only one GP
797 for the module, and so any local symbol has this property. We
798 need explicit relocations to be able to enforce this for symbols
799 not defined in this unit of translation, however. */
800 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
801 return true;
803 /* Functions that are not external are defined in this UoT. */
804 /* ??? Irritatingly, static functions not yet emitted are still
805 marked "external". Apply this to non-static functions only. */
806 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
809 /* Return true if EXP should be placed in the small data section. */
811 static bool
812 alpha_in_small_data_p (tree exp)
814 /* We want to merge strings, so we never consider them small data. */
815 if (TREE_CODE (exp) == STRING_CST)
816 return false;
818 /* Functions are never in the small data area. Duh. */
819 if (TREE_CODE (exp) == FUNCTION_DECL)
820 return false;
822 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
824 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
825 if (strcmp (section, ".sdata") == 0
826 || strcmp (section, ".sbss") == 0)
827 return true;
829 else
831 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
833 /* If this is an incomplete type with size 0, then we can't put it
834 in sdata because it might be too big when completed. */
835 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
836 return true;
839 return false;
842 #if TARGET_ABI_OPEN_VMS
843 static bool
844 alpha_linkage_symbol_p (const char *symname)
846 int symlen = strlen (symname);
848 if (symlen > 4)
849 return strcmp (&symname [symlen - 4], "..lk") == 0;
851 return false;
854 #define LINKAGE_SYMBOL_REF_P(X) \
855 ((GET_CODE (X) == SYMBOL_REF \
856 && alpha_linkage_symbol_p (XSTR (X, 0))) \
857 || (GET_CODE (X) == CONST \
858 && GET_CODE (XEXP (X, 0)) == PLUS \
859 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
860 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
861 #endif
863 /* legitimate_address_p recognizes an RTL expression that is a valid
864 memory address for an instruction. The MODE argument is the
865 machine mode for the MEM expression that wants to use this address.
867 For Alpha, we have either a constant address or the sum of a
868 register and a constant address, or just a register. For DImode,
869 any of those forms can be surrounded with an AND that clear the
870 low-order three bits; this is an "unaligned" access. */
872 bool
873 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
875 /* If this is an ldq_u type address, discard the outer AND. */
876 if (mode == DImode
877 && GET_CODE (x) == AND
878 && GET_CODE (XEXP (x, 1)) == CONST_INT
879 && INTVAL (XEXP (x, 1)) == -8)
880 x = XEXP (x, 0);
882 /* Discard non-paradoxical subregs. */
883 if (GET_CODE (x) == SUBREG
884 && (GET_MODE_SIZE (GET_MODE (x))
885 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
886 x = SUBREG_REG (x);
888 /* Unadorned general registers are valid. */
889 if (REG_P (x)
890 && (strict
891 ? STRICT_REG_OK_FOR_BASE_P (x)
892 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
893 return true;
895 /* Constant addresses (i.e. +/- 32k) are valid. */
896 if (CONSTANT_ADDRESS_P (x))
897 return true;
899 #if TARGET_ABI_OPEN_VMS
900 if (LINKAGE_SYMBOL_REF_P (x))
901 return true;
902 #endif
904 /* Register plus a small constant offset is valid. */
905 if (GET_CODE (x) == PLUS)
907 rtx ofs = XEXP (x, 1);
908 x = XEXP (x, 0);
910 /* Discard non-paradoxical subregs. */
911 if (GET_CODE (x) == SUBREG
912 && (GET_MODE_SIZE (GET_MODE (x))
913 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
914 x = SUBREG_REG (x);
916 if (REG_P (x))
918 if (! strict
919 && NONSTRICT_REG_OK_FP_BASE_P (x)
920 && GET_CODE (ofs) == CONST_INT)
921 return true;
922 if ((strict
923 ? STRICT_REG_OK_FOR_BASE_P (x)
924 : NONSTRICT_REG_OK_FOR_BASE_P (x))
925 && CONSTANT_ADDRESS_P (ofs))
926 return true;
930 /* If we're managing explicit relocations, LO_SUM is valid, as
931 are small data symbols. */
932 else if (TARGET_EXPLICIT_RELOCS)
934 if (small_symbolic_operand (x, Pmode))
935 return true;
937 if (GET_CODE (x) == LO_SUM)
939 rtx ofs = XEXP (x, 1);
940 x = XEXP (x, 0);
942 /* Discard non-paradoxical subregs. */
943 if (GET_CODE (x) == SUBREG
944 && (GET_MODE_SIZE (GET_MODE (x))
945 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
946 x = SUBREG_REG (x);
948 /* Must have a valid base register. */
949 if (! (REG_P (x)
950 && (strict
951 ? STRICT_REG_OK_FOR_BASE_P (x)
952 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
953 return false;
955 /* The symbol must be local. */
956 if (local_symbolic_operand (ofs, Pmode)
957 || dtp32_symbolic_operand (ofs, Pmode)
958 || tp32_symbolic_operand (ofs, Pmode))
959 return true;
963 return false;
966 /* Build the SYMBOL_REF for __tls_get_addr. */
968 static GTY(()) rtx tls_get_addr_libfunc;
970 static rtx
971 get_tls_get_addr (void)
973 if (!tls_get_addr_libfunc)
974 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
975 return tls_get_addr_libfunc;
978 /* Try machine-dependent ways of modifying an illegitimate address
979 to be legitimate. If we find one, return the new, valid address. */
982 alpha_legitimize_address (rtx x, rtx scratch,
983 enum machine_mode mode ATTRIBUTE_UNUSED)
985 HOST_WIDE_INT addend;
987 /* If the address is (plus reg const_int) and the CONST_INT is not a
988 valid offset, compute the high part of the constant and add it to
989 the register. Then our address is (plus temp low-part-const). */
990 if (GET_CODE (x) == PLUS
991 && GET_CODE (XEXP (x, 0)) == REG
992 && GET_CODE (XEXP (x, 1)) == CONST_INT
993 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
995 addend = INTVAL (XEXP (x, 1));
996 x = XEXP (x, 0);
997 goto split_addend;
1000 /* If the address is (const (plus FOO const_int)), find the low-order
1001 part of the CONST_INT. Then load FOO plus any high-order part of the
1002 CONST_INT into a register. Our address is (plus reg low-part-const).
1003 This is done to reduce the number of GOT entries. */
1004 if (!no_new_pseudos
1005 && GET_CODE (x) == CONST
1006 && GET_CODE (XEXP (x, 0)) == PLUS
1007 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1009 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1010 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1011 goto split_addend;
1014 /* If we have a (plus reg const), emit the load as in (2), then add
1015 the two registers, and finally generate (plus reg low-part-const) as
1016 our address. */
1017 if (!no_new_pseudos
1018 && GET_CODE (x) == PLUS
1019 && GET_CODE (XEXP (x, 0)) == REG
1020 && GET_CODE (XEXP (x, 1)) == CONST
1021 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1022 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1024 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1025 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1026 XEXP (XEXP (XEXP (x, 1), 0), 0),
1027 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1028 goto split_addend;
1031 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1032 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1034 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1036 switch (tls_symbolic_operand_type (x))
1038 case TLS_MODEL_NONE:
1039 break;
1041 case TLS_MODEL_GLOBAL_DYNAMIC:
1042 start_sequence ();
1044 r0 = gen_rtx_REG (Pmode, 0);
1045 r16 = gen_rtx_REG (Pmode, 16);
1046 tga = get_tls_get_addr ();
1047 dest = gen_reg_rtx (Pmode);
1048 seq = GEN_INT (alpha_next_sequence_number++);
1050 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1051 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1052 insn = emit_call_insn (insn);
1053 CONST_OR_PURE_CALL_P (insn) = 1;
1054 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1056 insn = get_insns ();
1057 end_sequence ();
1059 emit_libcall_block (insn, dest, r0, x);
1060 return dest;
1062 case TLS_MODEL_LOCAL_DYNAMIC:
1063 start_sequence ();
1065 r0 = gen_rtx_REG (Pmode, 0);
1066 r16 = gen_rtx_REG (Pmode, 16);
1067 tga = get_tls_get_addr ();
1068 scratch = gen_reg_rtx (Pmode);
1069 seq = GEN_INT (alpha_next_sequence_number++);
1071 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1072 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1073 insn = emit_call_insn (insn);
1074 CONST_OR_PURE_CALL_P (insn) = 1;
1075 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1077 insn = get_insns ();
1078 end_sequence ();
1080 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1081 UNSPEC_TLSLDM_CALL);
1082 emit_libcall_block (insn, scratch, r0, eqv);
1084 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1085 eqv = gen_rtx_CONST (Pmode, eqv);
1087 if (alpha_tls_size == 64)
1089 dest = gen_reg_rtx (Pmode);
1090 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1091 emit_insn (gen_adddi3 (dest, dest, scratch));
1092 return dest;
1094 if (alpha_tls_size == 32)
1096 insn = gen_rtx_HIGH (Pmode, eqv);
1097 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1098 scratch = gen_reg_rtx (Pmode);
1099 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1101 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1103 case TLS_MODEL_INITIAL_EXEC:
1104 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1105 eqv = gen_rtx_CONST (Pmode, eqv);
1106 tp = gen_reg_rtx (Pmode);
1107 scratch = gen_reg_rtx (Pmode);
1108 dest = gen_reg_rtx (Pmode);
1110 emit_insn (gen_load_tp (tp));
1111 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1112 emit_insn (gen_adddi3 (dest, tp, scratch));
1113 return dest;
1115 case TLS_MODEL_LOCAL_EXEC:
1116 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1117 eqv = gen_rtx_CONST (Pmode, eqv);
1118 tp = gen_reg_rtx (Pmode);
1120 emit_insn (gen_load_tp (tp));
1121 if (alpha_tls_size == 32)
1123 insn = gen_rtx_HIGH (Pmode, eqv);
1124 insn = gen_rtx_PLUS (Pmode, tp, insn);
1125 tp = gen_reg_rtx (Pmode);
1126 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1128 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1130 default:
1131 gcc_unreachable ();
1134 if (local_symbolic_operand (x, Pmode))
1136 if (small_symbolic_operand (x, Pmode))
1137 return x;
1138 else
1140 if (!no_new_pseudos)
1141 scratch = gen_reg_rtx (Pmode);
1142 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1143 gen_rtx_HIGH (Pmode, x)));
1144 return gen_rtx_LO_SUM (Pmode, scratch, x);
1149 return NULL;
1151 split_addend:
1153 HOST_WIDE_INT low, high;
1155 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1156 addend -= low;
1157 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1158 addend -= high;
1160 if (addend)
1161 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1162 (no_new_pseudos ? scratch : NULL_RTX),
1163 1, OPTAB_LIB_WIDEN);
1164 if (high)
1165 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1166 (no_new_pseudos ? scratch : NULL_RTX),
1167 1, OPTAB_LIB_WIDEN);
1169 return plus_constant (x, low);
1173 /* Primarily this is required for TLS symbols, but given that our move
1174 patterns *ought* to be able to handle any symbol at any time, we
1175 should never be spilling symbolic operands to the constant pool, ever. */
1177 static bool
1178 alpha_cannot_force_const_mem (rtx x)
1180 enum rtx_code code = GET_CODE (x);
1181 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1184 /* We do not allow indirect calls to be optimized into sibling calls, nor
1185 can we allow a call to a function with a different GP to be optimized
1186 into a sibcall. */
1188 static bool
1189 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1191 /* Can't do indirect tail calls, since we don't know if the target
1192 uses the same GP. */
1193 if (!decl)
1194 return false;
1196 /* Otherwise, we can make a tail call if the target function shares
1197 the same GP. */
1198 return decl_has_samegp (decl);
1202 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1204 rtx x = *px;
1206 /* Don't re-split. */
1207 if (GET_CODE (x) == LO_SUM)
1208 return -1;
1210 return small_symbolic_operand (x, Pmode) != 0;
1213 static int
1214 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1216 rtx x = *px;
1218 /* Don't re-split. */
1219 if (GET_CODE (x) == LO_SUM)
1220 return -1;
1222 if (small_symbolic_operand (x, Pmode))
1224 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1225 *px = x;
1226 return -1;
1229 return 0;
1233 split_small_symbolic_operand (rtx x)
1235 x = copy_insn (x);
1236 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1237 return x;
1240 /* Indicate that INSN cannot be duplicated. This is true for any insn
1241 that we've marked with gpdisp relocs, since those have to stay in
1242 1-1 correspondence with one another.
1244 Technically we could copy them if we could set up a mapping from one
1245 sequence number to another, across the set of insns to be duplicated.
1246 This seems overly complicated and error-prone since interblock motion
1247 from sched-ebb could move one of the pair of insns to a different block.
1249 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1250 then they'll be in a different block from their ldgp. Which could lead
1251 the bb reorder code to think that it would be ok to copy just the block
1252 containing the call and branch to the block containing the ldgp. */
1254 static bool
1255 alpha_cannot_copy_insn_p (rtx insn)
1257 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1258 return false;
1259 if (recog_memoized (insn) >= 0)
1260 return get_attr_cannot_copy (insn);
1261 else
1262 return false;
1266 /* Try a machine-dependent way of reloading an illegitimate address
1267 operand. If we find one, push the reload and return the new rtx. */
1270 alpha_legitimize_reload_address (rtx x,
1271 enum machine_mode mode ATTRIBUTE_UNUSED,
1272 int opnum, int type,
1273 int ind_levels ATTRIBUTE_UNUSED)
1275 /* We must recognize output that we have already generated ourselves. */
1276 if (GET_CODE (x) == PLUS
1277 && GET_CODE (XEXP (x, 0)) == PLUS
1278 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1279 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1280 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1282 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1283 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1284 opnum, type);
1285 return x;
1288 /* We wish to handle large displacements off a base register by
1289 splitting the addend across an ldah and the mem insn. This
1290 cuts number of extra insns needed from 3 to 1. */
1291 if (GET_CODE (x) == PLUS
1292 && GET_CODE (XEXP (x, 0)) == REG
1293 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1294 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1295 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1297 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1298 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1299 HOST_WIDE_INT high
1300 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1302 /* Check for 32-bit overflow. */
1303 if (high + low != val)
1304 return NULL_RTX;
1306 /* Reload the high part into a base reg; leave the low part
1307 in the mem directly. */
1308 x = gen_rtx_PLUS (GET_MODE (x),
1309 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1310 GEN_INT (high)),
1311 GEN_INT (low));
1313 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1314 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1315 opnum, type);
1316 return x;
1319 return NULL_RTX;
1322 /* Compute a (partial) cost for rtx X. Return true if the complete
1323 cost has been computed, and false if subexpressions should be
1324 scanned. In either case, *TOTAL contains the cost result. */
1326 static bool
1327 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1329 enum machine_mode mode = GET_MODE (x);
1330 bool float_mode_p = FLOAT_MODE_P (mode);
1331 const struct alpha_rtx_cost_data *cost_data;
1333 if (optimize_size)
1334 cost_data = &alpha_rtx_cost_size;
1335 else
1336 cost_data = &alpha_rtx_cost_data[alpha_tune];
1338 switch (code)
1340 case CONST_INT:
1341 /* If this is an 8-bit constant, return zero since it can be used
1342 nearly anywhere with no cost. If it is a valid operand for an
1343 ADD or AND, likewise return 0 if we know it will be used in that
1344 context. Otherwise, return 2 since it might be used there later.
1345 All other constants take at least two insns. */
1346 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1348 *total = 0;
1349 return true;
1351 /* FALLTHRU */
1353 case CONST_DOUBLE:
1354 if (x == CONST0_RTX (mode))
1355 *total = 0;
1356 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1357 || (outer_code == AND && and_operand (x, VOIDmode)))
1358 *total = 0;
1359 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1360 *total = 2;
1361 else
1362 *total = COSTS_N_INSNS (2);
1363 return true;
1365 case CONST:
1366 case SYMBOL_REF:
1367 case LABEL_REF:
1368 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1369 *total = COSTS_N_INSNS (outer_code != MEM);
1370 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1371 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1372 else if (tls_symbolic_operand_type (x))
1373 /* Estimate of cost for call_pal rduniq. */
1374 /* ??? How many insns do we emit here? More than one... */
1375 *total = COSTS_N_INSNS (15);
1376 else
1377 /* Otherwise we do a load from the GOT. */
1378 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1379 return true;
1381 case HIGH:
1382 /* This is effectively an add_operand. */
1383 *total = 2;
1384 return true;
1386 case PLUS:
1387 case MINUS:
1388 if (float_mode_p)
1389 *total = cost_data->fp_add;
1390 else if (GET_CODE (XEXP (x, 0)) == MULT
1391 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1393 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1394 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1395 return true;
1397 return false;
1399 case MULT:
1400 if (float_mode_p)
1401 *total = cost_data->fp_mult;
1402 else if (mode == DImode)
1403 *total = cost_data->int_mult_di;
1404 else
1405 *total = cost_data->int_mult_si;
1406 return false;
1408 case ASHIFT:
1409 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1410 && INTVAL (XEXP (x, 1)) <= 3)
1412 *total = COSTS_N_INSNS (1);
1413 return false;
1415 /* FALLTHRU */
1417 case ASHIFTRT:
1418 case LSHIFTRT:
1419 *total = cost_data->int_shift;
1420 return false;
1422 case IF_THEN_ELSE:
1423 if (float_mode_p)
1424 *total = cost_data->fp_add;
1425 else
1426 *total = cost_data->int_cmov;
1427 return false;
1429 case DIV:
1430 case UDIV:
1431 case MOD:
1432 case UMOD:
1433 if (!float_mode_p)
1434 *total = cost_data->int_div;
1435 else if (mode == SFmode)
1436 *total = cost_data->fp_div_sf;
1437 else
1438 *total = cost_data->fp_div_df;
1439 return false;
1441 case MEM:
1442 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1443 return true;
1445 case NEG:
1446 if (! float_mode_p)
1448 *total = COSTS_N_INSNS (1);
1449 return false;
1451 /* FALLTHRU */
1453 case ABS:
1454 if (! float_mode_p)
1456 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1457 return false;
1459 /* FALLTHRU */
1461 case FLOAT:
1462 case UNSIGNED_FLOAT:
1463 case FIX:
1464 case UNSIGNED_FIX:
1465 case FLOAT_TRUNCATE:
1466 *total = cost_data->fp_add;
1467 return false;
1469 case FLOAT_EXTEND:
1470 if (GET_CODE (XEXP (x, 0)) == MEM)
1471 *total = 0;
1472 else
1473 *total = cost_data->fp_add;
1474 return false;
1476 default:
1477 return false;
1481 /* REF is an alignable memory location. Place an aligned SImode
1482 reference into *PALIGNED_MEM and the number of bits to shift into
1483 *PBITNUM. SCRATCH is a free register for use in reloading out
1484 of range stack slots. */
1486 void
1487 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1489 rtx base;
1490 HOST_WIDE_INT disp, offset;
1492 gcc_assert (GET_CODE (ref) == MEM);
1494 if (reload_in_progress
1495 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1497 base = find_replacement (&XEXP (ref, 0));
1498 gcc_assert (memory_address_p (GET_MODE (ref), base));
1500 else
1501 base = XEXP (ref, 0);
1503 if (GET_CODE (base) == PLUS)
1504 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1505 else
1506 disp = 0;
1508 /* Find the byte offset within an aligned word. If the memory itself is
1509 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1510 will have examined the base register and determined it is aligned, and
1511 thus displacements from it are naturally alignable. */
1512 if (MEM_ALIGN (ref) >= 32)
1513 offset = 0;
1514 else
1515 offset = disp & 3;
1517 /* Access the entire aligned word. */
1518 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1520 /* Convert the byte offset within the word to a bit offset. */
1521 if (WORDS_BIG_ENDIAN)
1522 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1523 else
1524 offset *= 8;
1525 *pbitnum = GEN_INT (offset);
1528 /* Similar, but just get the address. Handle the two reload cases.
1529 Add EXTRA_OFFSET to the address we return. */
1532 get_unaligned_address (rtx ref, int extra_offset)
1534 rtx base;
1535 HOST_WIDE_INT offset = 0;
1537 gcc_assert (GET_CODE (ref) == MEM);
1539 if (reload_in_progress
1540 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1542 base = find_replacement (&XEXP (ref, 0));
1544 gcc_assert (memory_address_p (GET_MODE (ref), base));
1546 else
1547 base = XEXP (ref, 0);
1549 if (GET_CODE (base) == PLUS)
1550 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1552 return plus_constant (base, offset + extra_offset);
1555 /* On the Alpha, all (non-symbolic) constants except zero go into
1556 a floating-point register via memory. Note that we cannot
1557 return anything that is not a subset of CLASS, and that some
1558 symbolic constants cannot be dropped to memory. */
1560 enum reg_class
1561 alpha_preferred_reload_class(rtx x, enum reg_class class)
1563 /* Zero is present in any register class. */
1564 if (x == CONST0_RTX (GET_MODE (x)))
1565 return class;
1567 /* These sorts of constants we can easily drop to memory. */
1568 if (GET_CODE (x) == CONST_INT
1569 || GET_CODE (x) == CONST_DOUBLE
1570 || GET_CODE (x) == CONST_VECTOR)
1572 if (class == FLOAT_REGS)
1573 return NO_REGS;
1574 if (class == ALL_REGS)
1575 return GENERAL_REGS;
1576 return class;
1579 /* All other kinds of constants should not (and in the case of HIGH
1580 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1581 secondary reload. */
1582 if (CONSTANT_P (x))
1583 return (class == ALL_REGS ? GENERAL_REGS : class);
1585 return class;
1588 /* Loading and storing HImode or QImode values to and from memory
1589 usually requires a scratch register. The exceptions are loading
1590 QImode and HImode from an aligned address to a general register
1591 unless byte instructions are permitted.
1593 We also cannot load an unaligned address or a paradoxical SUBREG
1594 into an FP register.
1596 We also cannot do integral arithmetic into FP regs, as might result
1597 from register elimination into a DImode fp register. */
1599 enum reg_class
1600 alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
1601 rtx x, int in)
1603 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1605 if (GET_CODE (x) == MEM
1606 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1607 || (GET_CODE (x) == SUBREG
1608 && (GET_CODE (SUBREG_REG (x)) == MEM
1609 || (GET_CODE (SUBREG_REG (x)) == REG
1610 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1612 if (!in || !aligned_memory_operand(x, mode))
1613 return GENERAL_REGS;
1617 if (class == FLOAT_REGS)
1619 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1620 return GENERAL_REGS;
1622 if (GET_CODE (x) == SUBREG
1623 && (GET_MODE_SIZE (GET_MODE (x))
1624 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1625 return GENERAL_REGS;
1627 if (in && INTEGRAL_MODE_P (mode)
1628 && ! (memory_operand (x, mode) || x == const0_rtx))
1629 return GENERAL_REGS;
1632 return NO_REGS;
1635 /* Subfunction of the following function. Update the flags of any MEM
1636 found in part of X. */
1638 static int
1639 alpha_set_memflags_1 (rtx *xp, void *data)
1641 rtx x = *xp, orig = (rtx) data;
1643 if (GET_CODE (x) != MEM)
1644 return 0;
1646 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1647 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1648 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1649 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1650 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1652 /* Sadly, we cannot use alias sets because the extra aliasing
1653 produced by the AND interferes. Given that two-byte quantities
1654 are the only thing we would be able to differentiate anyway,
1655 there does not seem to be any point in convoluting the early
1656 out of the alias check. */
1658 return -1;
1661 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1662 generated to perform a memory operation, look for any MEMs in either
1663 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1664 volatile flags from REF into each of the MEMs found. If REF is not
1665 a MEM, don't do anything. */
1667 void
1668 alpha_set_memflags (rtx insn, rtx ref)
1670 rtx *base_ptr;
1672 if (GET_CODE (ref) != MEM)
1673 return;
1675 /* This is only called from alpha.md, after having had something
1676 generated from one of the insn patterns. So if everything is
1677 zero, the pattern is already up-to-date. */
1678 if (!MEM_VOLATILE_P (ref)
1679 && !MEM_IN_STRUCT_P (ref)
1680 && !MEM_SCALAR_P (ref)
1681 && !MEM_NOTRAP_P (ref)
1682 && !MEM_READONLY_P (ref))
1683 return;
1685 if (INSN_P (insn))
1686 base_ptr = &PATTERN (insn);
1687 else
1688 base_ptr = &insn;
1689 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1692 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1693 int, bool);
1695 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1696 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1697 and return pc_rtx if successful. */
1699 static rtx
1700 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1701 HOST_WIDE_INT c, int n, bool no_output)
1703 HOST_WIDE_INT new;
1704 int i, bits;
1705 /* Use a pseudo if highly optimizing and still generating RTL. */
1706 rtx subtarget
1707 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1708 rtx temp, insn;
1710 /* If this is a sign-extended 32-bit constant, we can do this in at most
1711 three insns, so do it if we have enough insns left. We always have
1712 a sign-extended 32-bit constant when compiling on a narrow machine. */
1714 if (HOST_BITS_PER_WIDE_INT != 64
1715 || c >> 31 == -1 || c >> 31 == 0)
1717 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1718 HOST_WIDE_INT tmp1 = c - low;
1719 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1720 HOST_WIDE_INT extra = 0;
1722 /* If HIGH will be interpreted as negative but the constant is
1723 positive, we must adjust it to do two ldha insns. */
1725 if ((high & 0x8000) != 0 && c >= 0)
1727 extra = 0x4000;
1728 tmp1 -= 0x40000000;
1729 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1732 if (c == low || (low == 0 && extra == 0))
1734 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1735 but that meant that we can't handle INT_MIN on 32-bit machines
1736 (like NT/Alpha), because we recurse indefinitely through
1737 emit_move_insn to gen_movdi. So instead, since we know exactly
1738 what we want, create it explicitly. */
1740 if (no_output)
1741 return pc_rtx;
1742 if (target == NULL)
1743 target = gen_reg_rtx (mode);
1744 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1745 return target;
1747 else if (n >= 2 + (extra != 0))
1749 if (no_output)
1750 return pc_rtx;
1751 if (no_new_pseudos)
1753 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1754 temp = target;
1756 else
1757 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1758 subtarget, mode);
1760 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1761 This means that if we go through expand_binop, we'll try to
1762 generate extensions, etc, which will require new pseudos, which
1763 will fail during some split phases. The SImode add patterns
1764 still exist, but are not named. So build the insns by hand. */
1766 if (extra != 0)
1768 if (! subtarget)
1769 subtarget = gen_reg_rtx (mode);
1770 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1771 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1772 emit_insn (insn);
1773 temp = subtarget;
1776 if (target == NULL)
1777 target = gen_reg_rtx (mode);
1778 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1779 insn = gen_rtx_SET (VOIDmode, target, insn);
1780 emit_insn (insn);
1781 return target;
1785 /* If we couldn't do it that way, try some other methods. But if we have
1786 no instructions left, don't bother. Likewise, if this is SImode and
1787 we can't make pseudos, we can't do anything since the expand_binop
1788 and expand_unop calls will widen and try to make pseudos. */
1790 if (n == 1 || (mode == SImode && no_new_pseudos))
1791 return 0;
1793 /* Next, see if we can load a related constant and then shift and possibly
1794 negate it to get the constant we want. Try this once each increasing
1795 numbers of insns. */
1797 for (i = 1; i < n; i++)
1799 /* First, see if minus some low bits, we've an easy load of
1800 high bits. */
1802 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1803 if (new != 0)
1805 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1806 if (temp)
1808 if (no_output)
1809 return temp;
1810 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1811 target, 0, OPTAB_WIDEN);
1815 /* Next try complementing. */
1816 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1817 if (temp)
1819 if (no_output)
1820 return temp;
1821 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1824 /* Next try to form a constant and do a left shift. We can do this
1825 if some low-order bits are zero; the exact_log2 call below tells
1826 us that information. The bits we are shifting out could be any
1827 value, but here we'll just try the 0- and sign-extended forms of
1828 the constant. To try to increase the chance of having the same
1829 constant in more than one insn, start at the highest number of
1830 bits to shift, but try all possibilities in case a ZAPNOT will
1831 be useful. */
1833 bits = exact_log2 (c & -c);
1834 if (bits > 0)
1835 for (; bits > 0; bits--)
1837 new = c >> bits;
1838 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1839 if (!temp && c < 0)
1841 new = (unsigned HOST_WIDE_INT)c >> bits;
1842 temp = alpha_emit_set_const (subtarget, mode, new,
1843 i, no_output);
1845 if (temp)
1847 if (no_output)
1848 return temp;
1849 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1850 target, 0, OPTAB_WIDEN);
1854 /* Now try high-order zero bits. Here we try the shifted-in bits as
1855 all zero and all ones. Be careful to avoid shifting outside the
1856 mode and to avoid shifting outside the host wide int size. */
1857 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1858 confuse the recursive call and set all of the high 32 bits. */
1860 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1861 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1862 if (bits > 0)
1863 for (; bits > 0; bits--)
1865 new = c << bits;
1866 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1867 if (!temp)
1869 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1870 temp = alpha_emit_set_const (subtarget, mode, new,
1871 i, no_output);
1873 if (temp)
1875 if (no_output)
1876 return temp;
1877 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1878 target, 1, OPTAB_WIDEN);
1882 /* Now try high-order 1 bits. We get that with a sign-extension.
1883 But one bit isn't enough here. Be careful to avoid shifting outside
1884 the mode and to avoid shifting outside the host wide int size. */
1886 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1887 - floor_log2 (~ c) - 2);
1888 if (bits > 0)
1889 for (; bits > 0; bits--)
1891 new = c << bits;
1892 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1893 if (!temp)
1895 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1896 temp = alpha_emit_set_const (subtarget, mode, new,
1897 i, no_output);
1899 if (temp)
1901 if (no_output)
1902 return temp;
1903 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1904 target, 0, OPTAB_WIDEN);
1909 #if HOST_BITS_PER_WIDE_INT == 64
1910 /* Finally, see if can load a value into the target that is the same as the
1911 constant except that all bytes that are 0 are changed to be 0xff. If we
1912 can, then we can do a ZAPNOT to obtain the desired constant. */
1914 new = c;
1915 for (i = 0; i < 64; i += 8)
1916 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1917 new |= (HOST_WIDE_INT) 0xff << i;
1919 /* We are only called for SImode and DImode. If this is SImode, ensure that
1920 we are sign extended to a full word. */
1922 if (mode == SImode)
1923 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1925 if (new != c)
1927 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1928 if (temp)
1930 if (no_output)
1931 return temp;
1932 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1933 target, 0, OPTAB_WIDEN);
1936 #endif
1938 return 0;
1941 /* Try to output insns to set TARGET equal to the constant C if it can be
1942 done in less than N insns. Do all computations in MODE. Returns the place
1943 where the output has been placed if it can be done and the insns have been
1944 emitted. If it would take more than N insns, zero is returned and no
1945 insns and emitted. */
1947 static rtx
1948 alpha_emit_set_const (rtx target, enum machine_mode mode,
1949 HOST_WIDE_INT c, int n, bool no_output)
1951 enum machine_mode orig_mode = mode;
1952 rtx orig_target = target;
1953 rtx result = 0;
1954 int i;
1956 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1957 can't load this constant in one insn, do this in DImode. */
1958 if (no_new_pseudos && mode == SImode
1959 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1961 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1962 if (result)
1963 return result;
1965 target = no_output ? NULL : gen_lowpart (DImode, target);
1966 mode = DImode;
1968 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1970 target = no_output ? NULL : gen_lowpart (DImode, target);
1971 mode = DImode;
1974 /* Try 1 insn, then 2, then up to N. */
1975 for (i = 1; i <= n; i++)
1977 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1978 if (result)
1980 rtx insn, set;
1982 if (no_output)
1983 return result;
1985 insn = get_last_insn ();
1986 set = single_set (insn);
1987 if (! CONSTANT_P (SET_SRC (set)))
1988 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1989 break;
1993 /* Allow for the case where we changed the mode of TARGET. */
1994 if (result)
1996 if (result == target)
1997 result = orig_target;
1998 else if (mode != orig_mode)
1999 result = gen_lowpart (orig_mode, result);
2002 return result;
2005 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2006 fall back to a straight forward decomposition. We do this to avoid
2007 exponential run times encountered when looking for longer sequences
2008 with alpha_emit_set_const. */
2010 static rtx
2011 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2013 HOST_WIDE_INT d1, d2, d3, d4;
2015 /* Decompose the entire word */
2016 #if HOST_BITS_PER_WIDE_INT >= 64
2017 gcc_assert (c2 == -(c1 < 0));
2018 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2019 c1 -= d1;
2020 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2021 c1 = (c1 - d2) >> 32;
2022 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2023 c1 -= d3;
2024 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2025 gcc_assert (c1 == d4);
2026 #else
2027 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2028 c1 -= d1;
2029 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2030 gcc_assert (c1 == d2);
2031 c2 += (d2 < 0);
2032 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2033 c2 -= d3;
2034 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2035 gcc_assert (c2 == d4);
2036 #endif
2038 /* Construct the high word */
2039 if (d4)
2041 emit_move_insn (target, GEN_INT (d4));
2042 if (d3)
2043 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2045 else
2046 emit_move_insn (target, GEN_INT (d3));
2048 /* Shift it into place */
2049 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2051 /* Add in the low bits. */
2052 if (d2)
2053 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2054 if (d1)
2055 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2057 return target;
2060 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2061 the low 64 bits. */
2063 static void
2064 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2066 HOST_WIDE_INT i0, i1;
2068 if (GET_CODE (x) == CONST_VECTOR)
2069 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2072 if (GET_CODE (x) == CONST_INT)
2074 i0 = INTVAL (x);
2075 i1 = -(i0 < 0);
2077 else if (HOST_BITS_PER_WIDE_INT >= 64)
2079 i0 = CONST_DOUBLE_LOW (x);
2080 i1 = -(i0 < 0);
2082 else
2084 i0 = CONST_DOUBLE_LOW (x);
2085 i1 = CONST_DOUBLE_HIGH (x);
2088 *p0 = i0;
2089 *p1 = i1;
2092 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2093 are willing to load the value into a register via a move pattern.
2094 Normally this is all symbolic constants, integral constants that
2095 take three or fewer instructions, and floating-point zero. */
2097 bool
2098 alpha_legitimate_constant_p (rtx x)
2100 enum machine_mode mode = GET_MODE (x);
2101 HOST_WIDE_INT i0, i1;
2103 switch (GET_CODE (x))
2105 case CONST:
2106 case LABEL_REF:
2107 case SYMBOL_REF:
2108 case HIGH:
2109 return true;
2111 case CONST_DOUBLE:
2112 if (x == CONST0_RTX (mode))
2113 return true;
2114 if (FLOAT_MODE_P (mode))
2115 return false;
2116 goto do_integer;
2118 case CONST_VECTOR:
2119 if (x == CONST0_RTX (mode))
2120 return true;
2121 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2122 return false;
2123 if (GET_MODE_SIZE (mode) != 8)
2124 return false;
2125 goto do_integer;
2127 case CONST_INT:
2128 do_integer:
2129 if (TARGET_BUILD_CONSTANTS)
2130 return true;
2131 alpha_extract_integer (x, &i0, &i1);
2132 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2133 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2134 return false;
2136 default:
2137 return false;
2141 /* Operand 1 is known to be a constant, and should require more than one
2142 instruction to load. Emit that multi-part load. */
2144 bool
2145 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2147 HOST_WIDE_INT i0, i1;
2148 rtx temp = NULL_RTX;
2150 alpha_extract_integer (operands[1], &i0, &i1);
2152 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2153 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2155 if (!temp && TARGET_BUILD_CONSTANTS)
2156 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2158 if (temp)
2160 if (!rtx_equal_p (operands[0], temp))
2161 emit_move_insn (operands[0], temp);
2162 return true;
2165 return false;
2168 /* Expand a move instruction; return true if all work is done.
2169 We don't handle non-bwx subword loads here. */
2171 bool
2172 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2174 /* If the output is not a register, the input must be. */
2175 if (GET_CODE (operands[0]) == MEM
2176 && ! reg_or_0_operand (operands[1], mode))
2177 operands[1] = force_reg (mode, operands[1]);
2179 /* Allow legitimize_address to perform some simplifications. */
2180 if (mode == Pmode && symbolic_operand (operands[1], mode))
2182 rtx tmp;
2184 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2185 if (tmp)
2187 if (tmp == operands[0])
2188 return true;
2189 operands[1] = tmp;
2190 return false;
2194 /* Early out for non-constants and valid constants. */
2195 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2196 return false;
2198 /* Split large integers. */
2199 if (GET_CODE (operands[1]) == CONST_INT
2200 || GET_CODE (operands[1]) == CONST_DOUBLE
2201 || GET_CODE (operands[1]) == CONST_VECTOR)
2203 if (alpha_split_const_mov (mode, operands))
2204 return true;
2207 /* Otherwise we've nothing left but to drop the thing to memory. */
2208 operands[1] = force_const_mem (mode, operands[1]);
2209 if (reload_in_progress)
2211 emit_move_insn (operands[0], XEXP (operands[1], 0));
2212 operands[1] = copy_rtx (operands[1]);
2213 XEXP (operands[1], 0) = operands[0];
2215 else
2216 operands[1] = validize_mem (operands[1]);
2217 return false;
2220 /* Expand a non-bwx QImode or HImode move instruction;
2221 return true if all work is done. */
2223 bool
2224 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2226 /* If the output is not a register, the input must be. */
2227 if (GET_CODE (operands[0]) == MEM)
2228 operands[1] = force_reg (mode, operands[1]);
2230 /* Handle four memory cases, unaligned and aligned for either the input
2231 or the output. The only case where we can be called during reload is
2232 for aligned loads; all other cases require temporaries. */
2234 if (GET_CODE (operands[1]) == MEM
2235 || (GET_CODE (operands[1]) == SUBREG
2236 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2237 || (reload_in_progress && GET_CODE (operands[1]) == REG
2238 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2239 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2240 && GET_CODE (SUBREG_REG (operands[1])) == REG
2241 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2243 if (aligned_memory_operand (operands[1], mode))
2245 if (reload_in_progress)
2247 emit_insn ((mode == QImode
2248 ? gen_reload_inqi_help
2249 : gen_reload_inhi_help)
2250 (operands[0], operands[1],
2251 gen_rtx_REG (SImode, REGNO (operands[0]))));
2253 else
2255 rtx aligned_mem, bitnum;
2256 rtx scratch = gen_reg_rtx (SImode);
2257 rtx subtarget;
2258 bool copyout;
2260 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2262 subtarget = operands[0];
2263 if (GET_CODE (subtarget) == REG)
2264 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2265 else
2266 subtarget = gen_reg_rtx (DImode), copyout = true;
2268 emit_insn ((mode == QImode
2269 ? gen_aligned_loadqi
2270 : gen_aligned_loadhi)
2271 (subtarget, aligned_mem, bitnum, scratch));
2273 if (copyout)
2274 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2277 else
2279 /* Don't pass these as parameters since that makes the generated
2280 code depend on parameter evaluation order which will cause
2281 bootstrap failures. */
2283 rtx temp1, temp2, seq, subtarget;
2284 bool copyout;
2286 temp1 = gen_reg_rtx (DImode);
2287 temp2 = gen_reg_rtx (DImode);
2289 subtarget = operands[0];
2290 if (GET_CODE (subtarget) == REG)
2291 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2292 else
2293 subtarget = gen_reg_rtx (DImode), copyout = true;
2295 seq = ((mode == QImode
2296 ? gen_unaligned_loadqi
2297 : gen_unaligned_loadhi)
2298 (subtarget, get_unaligned_address (operands[1], 0),
2299 temp1, temp2));
2300 alpha_set_memflags (seq, operands[1]);
2301 emit_insn (seq);
2303 if (copyout)
2304 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2306 return true;
2309 if (GET_CODE (operands[0]) == MEM
2310 || (GET_CODE (operands[0]) == SUBREG
2311 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2312 || (reload_in_progress && GET_CODE (operands[0]) == REG
2313 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2314 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2315 && GET_CODE (SUBREG_REG (operands[0])) == REG
2316 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2318 if (aligned_memory_operand (operands[0], mode))
2320 rtx aligned_mem, bitnum;
2321 rtx temp1 = gen_reg_rtx (SImode);
2322 rtx temp2 = gen_reg_rtx (SImode);
2324 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2326 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2327 temp1, temp2));
2329 else
2331 rtx temp1 = gen_reg_rtx (DImode);
2332 rtx temp2 = gen_reg_rtx (DImode);
2333 rtx temp3 = gen_reg_rtx (DImode);
2334 rtx seq = ((mode == QImode
2335 ? gen_unaligned_storeqi
2336 : gen_unaligned_storehi)
2337 (get_unaligned_address (operands[0], 0),
2338 operands[1], temp1, temp2, temp3));
2340 alpha_set_memflags (seq, operands[0]);
2341 emit_insn (seq);
2343 return true;
2346 return false;
2349 /* Implement the movmisalign patterns. One of the operands is a memory
2350 that is not naturally aligned. Emit instructions to load it. */
2352 void
2353 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2355 /* Honor misaligned loads, for those we promised to do so. */
2356 if (MEM_P (operands[1]))
2358 rtx tmp;
2360 if (register_operand (operands[0], mode))
2361 tmp = operands[0];
2362 else
2363 tmp = gen_reg_rtx (mode);
2365 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2366 if (tmp != operands[0])
2367 emit_move_insn (operands[0], tmp);
2369 else if (MEM_P (operands[0]))
2371 if (!reg_or_0_operand (operands[1], mode))
2372 operands[1] = force_reg (mode, operands[1]);
2373 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2375 else
2376 gcc_unreachable ();
2379 /* Generate an unsigned DImode to FP conversion. This is the same code
2380 optabs would emit if we didn't have TFmode patterns.
2382 For SFmode, this is the only construction I've found that can pass
2383 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2384 intermediates will work, because you'll get intermediate rounding
2385 that ruins the end result. Some of this could be fixed by turning
2386 on round-to-positive-infinity, but that requires diddling the fpsr,
2387 which kills performance. I tried turning this around and converting
2388 to a negative number, so that I could turn on /m, but either I did
2389 it wrong or there's something else cause I wound up with the exact
2390 same single-bit error. There is a branch-less form of this same code:
2392 srl $16,1,$1
2393 and $16,1,$2
2394 cmplt $16,0,$3
2395 or $1,$2,$2
2396 cmovge $16,$16,$2
2397 itoft $3,$f10
2398 itoft $2,$f11
2399 cvtqs $f11,$f11
2400 adds $f11,$f11,$f0
2401 fcmoveq $f10,$f11,$f0
2403 I'm not using it because it's the same number of instructions as
2404 this branch-full form, and it has more serialized long latency
2405 instructions on the critical path.
2407 For DFmode, we can avoid rounding errors by breaking up the word
2408 into two pieces, converting them separately, and adding them back:
2410 LC0: .long 0,0x5f800000
2412 itoft $16,$f11
2413 lda $2,LC0
2414 cmplt $16,0,$1
2415 cpyse $f11,$f31,$f10
2416 cpyse $f31,$f11,$f11
2417 s4addq $1,$2,$1
2418 lds $f12,0($1)
2419 cvtqt $f10,$f10
2420 cvtqt $f11,$f11
2421 addt $f12,$f10,$f0
2422 addt $f0,$f11,$f0
2424 This doesn't seem to be a clear-cut win over the optabs form.
2425 It probably all depends on the distribution of numbers being
2426 converted -- in the optabs form, all but high-bit-set has a
2427 much lower minimum execution time. */
2429 void
2430 alpha_emit_floatuns (rtx operands[2])
2432 rtx neglab, donelab, i0, i1, f0, in, out;
2433 enum machine_mode mode;
2435 out = operands[0];
2436 in = force_reg (DImode, operands[1]);
2437 mode = GET_MODE (out);
2438 neglab = gen_label_rtx ();
2439 donelab = gen_label_rtx ();
2440 i0 = gen_reg_rtx (DImode);
2441 i1 = gen_reg_rtx (DImode);
2442 f0 = gen_reg_rtx (mode);
2444 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2446 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2447 emit_jump_insn (gen_jump (donelab));
2448 emit_barrier ();
2450 emit_label (neglab);
2452 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2453 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2454 emit_insn (gen_iordi3 (i0, i0, i1));
2455 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2456 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2458 emit_label (donelab);
2461 /* Generate the comparison for a conditional branch. */
2464 alpha_emit_conditional_branch (enum rtx_code code)
2466 enum rtx_code cmp_code, branch_code;
2467 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2468 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2469 rtx tem;
2471 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2473 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2474 op1 = const0_rtx;
2475 alpha_compare.fp_p = 0;
2478 /* The general case: fold the comparison code to the types of compares
2479 that we have, choosing the branch as necessary. */
2480 switch (code)
2482 case EQ: case LE: case LT: case LEU: case LTU:
2483 case UNORDERED:
2484 /* We have these compares: */
2485 cmp_code = code, branch_code = NE;
2486 break;
2488 case NE:
2489 case ORDERED:
2490 /* These must be reversed. */
2491 cmp_code = reverse_condition (code), branch_code = EQ;
2492 break;
2494 case GE: case GT: case GEU: case GTU:
2495 /* For FP, we swap them, for INT, we reverse them. */
2496 if (alpha_compare.fp_p)
2498 cmp_code = swap_condition (code);
2499 branch_code = NE;
2500 tem = op0, op0 = op1, op1 = tem;
2502 else
2504 cmp_code = reverse_condition (code);
2505 branch_code = EQ;
2507 break;
2509 default:
2510 gcc_unreachable ();
2513 if (alpha_compare.fp_p)
2515 cmp_mode = DFmode;
2516 if (flag_unsafe_math_optimizations)
2518 /* When we are not as concerned about non-finite values, and we
2519 are comparing against zero, we can branch directly. */
2520 if (op1 == CONST0_RTX (DFmode))
2521 cmp_code = UNKNOWN, branch_code = code;
2522 else if (op0 == CONST0_RTX (DFmode))
2524 /* Undo the swap we probably did just above. */
2525 tem = op0, op0 = op1, op1 = tem;
2526 branch_code = swap_condition (cmp_code);
2527 cmp_code = UNKNOWN;
2530 else
2532 /* ??? We mark the branch mode to be CCmode to prevent the
2533 compare and branch from being combined, since the compare
2534 insn follows IEEE rules that the branch does not. */
2535 branch_mode = CCmode;
2538 else
2540 cmp_mode = DImode;
2542 /* The following optimizations are only for signed compares. */
2543 if (code != LEU && code != LTU && code != GEU && code != GTU)
2545 /* Whee. Compare and branch against 0 directly. */
2546 if (op1 == const0_rtx)
2547 cmp_code = UNKNOWN, branch_code = code;
2549 /* If the constants doesn't fit into an immediate, but can
2550 be generated by lda/ldah, we adjust the argument and
2551 compare against zero, so we can use beq/bne directly. */
2552 /* ??? Don't do this when comparing against symbols, otherwise
2553 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2554 be declared false out of hand (at least for non-weak). */
2555 else if (GET_CODE (op1) == CONST_INT
2556 && (code == EQ || code == NE)
2557 && !(symbolic_operand (op0, VOIDmode)
2558 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2560 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2562 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2563 && (CONST_OK_FOR_LETTER_P (n, 'K')
2564 || CONST_OK_FOR_LETTER_P (n, 'L')))
2566 cmp_code = PLUS, branch_code = code;
2567 op1 = GEN_INT (n);
2572 if (!reg_or_0_operand (op0, DImode))
2573 op0 = force_reg (DImode, op0);
2574 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2575 op1 = force_reg (DImode, op1);
2578 /* Emit an initial compare instruction, if necessary. */
2579 tem = op0;
2580 if (cmp_code != UNKNOWN)
2582 tem = gen_reg_rtx (cmp_mode);
2583 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2586 /* Zero the operands. */
2587 memset (&alpha_compare, 0, sizeof (alpha_compare));
2589 /* Return the branch comparison. */
2590 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2593 /* Certain simplifications can be done to make invalid setcc operations
2594 valid. Return the final comparison, or NULL if we can't work. */
2597 alpha_emit_setcc (enum rtx_code code)
2599 enum rtx_code cmp_code;
2600 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2601 int fp_p = alpha_compare.fp_p;
2602 rtx tmp;
2604 /* Zero the operands. */
2605 memset (&alpha_compare, 0, sizeof (alpha_compare));
2607 if (fp_p && GET_MODE (op0) == TFmode)
2609 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2610 op1 = const0_rtx;
2611 fp_p = 0;
2614 if (fp_p && !TARGET_FIX)
2615 return NULL_RTX;
2617 /* The general case: fold the comparison code to the types of compares
2618 that we have, choosing the branch as necessary. */
2620 cmp_code = UNKNOWN;
2621 switch (code)
2623 case EQ: case LE: case LT: case LEU: case LTU:
2624 case UNORDERED:
2625 /* We have these compares. */
2626 if (fp_p)
2627 cmp_code = code, code = NE;
2628 break;
2630 case NE:
2631 if (!fp_p && op1 == const0_rtx)
2632 break;
2633 /* FALLTHRU */
2635 case ORDERED:
2636 cmp_code = reverse_condition (code);
2637 code = EQ;
2638 break;
2640 case GE: case GT: case GEU: case GTU:
2641 /* These normally need swapping, but for integer zero we have
2642 special patterns that recognize swapped operands. */
2643 if (!fp_p && op1 == const0_rtx)
2644 break;
2645 code = swap_condition (code);
2646 if (fp_p)
2647 cmp_code = code, code = NE;
2648 tmp = op0, op0 = op1, op1 = tmp;
2649 break;
2651 default:
2652 gcc_unreachable ();
2655 if (!fp_p)
2657 if (!register_operand (op0, DImode))
2658 op0 = force_reg (DImode, op0);
2659 if (!reg_or_8bit_operand (op1, DImode))
2660 op1 = force_reg (DImode, op1);
2663 /* Emit an initial compare instruction, if necessary. */
2664 if (cmp_code != UNKNOWN)
2666 enum machine_mode mode = fp_p ? DFmode : DImode;
2668 tmp = gen_reg_rtx (mode);
2669 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2670 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2672 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2673 op1 = const0_rtx;
2676 /* Return the setcc comparison. */
2677 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2681 /* Rewrite a comparison against zero CMP of the form
2682 (CODE (cc0) (const_int 0)) so it can be written validly in
2683 a conditional move (if_then_else CMP ...).
2684 If both of the operands that set cc0 are nonzero we must emit
2685 an insn to perform the compare (it can't be done within
2686 the conditional move). */
2689 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2691 enum rtx_code code = GET_CODE (cmp);
2692 enum rtx_code cmov_code = NE;
2693 rtx op0 = alpha_compare.op0;
2694 rtx op1 = alpha_compare.op1;
2695 int fp_p = alpha_compare.fp_p;
2696 enum machine_mode cmp_mode
2697 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2698 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2699 enum machine_mode cmov_mode = VOIDmode;
2700 int local_fast_math = flag_unsafe_math_optimizations;
2701 rtx tem;
2703 /* Zero the operands. */
2704 memset (&alpha_compare, 0, sizeof (alpha_compare));
2706 if (fp_p != FLOAT_MODE_P (mode))
2708 enum rtx_code cmp_code;
2710 if (! TARGET_FIX)
2711 return 0;
2713 /* If we have fp<->int register move instructions, do a cmov by
2714 performing the comparison in fp registers, and move the
2715 zero/nonzero value to integer registers, where we can then
2716 use a normal cmov, or vice-versa. */
2718 switch (code)
2720 case EQ: case LE: case LT: case LEU: case LTU:
2721 /* We have these compares. */
2722 cmp_code = code, code = NE;
2723 break;
2725 case NE:
2726 /* This must be reversed. */
2727 cmp_code = EQ, code = EQ;
2728 break;
2730 case GE: case GT: case GEU: case GTU:
2731 /* These normally need swapping, but for integer zero we have
2732 special patterns that recognize swapped operands. */
2733 if (!fp_p && op1 == const0_rtx)
2734 cmp_code = code, code = NE;
2735 else
2737 cmp_code = swap_condition (code);
2738 code = NE;
2739 tem = op0, op0 = op1, op1 = tem;
2741 break;
2743 default:
2744 gcc_unreachable ();
2747 tem = gen_reg_rtx (cmp_op_mode);
2748 emit_insn (gen_rtx_SET (VOIDmode, tem,
2749 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2750 op0, op1)));
2752 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2753 op0 = gen_lowpart (cmp_op_mode, tem);
2754 op1 = CONST0_RTX (cmp_op_mode);
2755 fp_p = !fp_p;
2756 local_fast_math = 1;
2759 /* We may be able to use a conditional move directly.
2760 This avoids emitting spurious compares. */
2761 if (signed_comparison_operator (cmp, VOIDmode)
2762 && (!fp_p || local_fast_math)
2763 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2764 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2766 /* We can't put the comparison inside the conditional move;
2767 emit a compare instruction and put that inside the
2768 conditional move. Make sure we emit only comparisons we have;
2769 swap or reverse as necessary. */
2771 if (no_new_pseudos)
2772 return NULL_RTX;
2774 switch (code)
2776 case EQ: case LE: case LT: case LEU: case LTU:
2777 /* We have these compares: */
2778 break;
2780 case NE:
2781 /* This must be reversed. */
2782 code = reverse_condition (code);
2783 cmov_code = EQ;
2784 break;
2786 case GE: case GT: case GEU: case GTU:
2787 /* These must be swapped. */
2788 if (op1 != CONST0_RTX (cmp_mode))
2790 code = swap_condition (code);
2791 tem = op0, op0 = op1, op1 = tem;
2793 break;
2795 default:
2796 gcc_unreachable ();
2799 if (!fp_p)
2801 if (!reg_or_0_operand (op0, DImode))
2802 op0 = force_reg (DImode, op0);
2803 if (!reg_or_8bit_operand (op1, DImode))
2804 op1 = force_reg (DImode, op1);
2807 /* ??? We mark the branch mode to be CCmode to prevent the compare
2808 and cmov from being combined, since the compare insn follows IEEE
2809 rules that the cmov does not. */
2810 if (fp_p && !local_fast_math)
2811 cmov_mode = CCmode;
2813 tem = gen_reg_rtx (cmp_op_mode);
2814 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2815 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2818 /* Simplify a conditional move of two constants into a setcc with
2819 arithmetic. This is done with a splitter since combine would
2820 just undo the work if done during code generation. It also catches
2821 cases we wouldn't have before cse. */
2824 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2825 rtx t_rtx, rtx f_rtx)
2827 HOST_WIDE_INT t, f, diff;
2828 enum machine_mode mode;
2829 rtx target, subtarget, tmp;
2831 mode = GET_MODE (dest);
2832 t = INTVAL (t_rtx);
2833 f = INTVAL (f_rtx);
2834 diff = t - f;
2836 if (((code == NE || code == EQ) && diff < 0)
2837 || (code == GE || code == GT))
2839 code = reverse_condition (code);
2840 diff = t, t = f, f = diff;
2841 diff = t - f;
2844 subtarget = target = dest;
2845 if (mode != DImode)
2847 target = gen_lowpart (DImode, dest);
2848 if (! no_new_pseudos)
2849 subtarget = gen_reg_rtx (DImode);
2850 else
2851 subtarget = target;
2853 /* Below, we must be careful to use copy_rtx on target and subtarget
2854 in intermediate insns, as they may be a subreg rtx, which may not
2855 be shared. */
2857 if (f == 0 && exact_log2 (diff) > 0
2858 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2859 viable over a longer latency cmove. On EV5, the E0 slot is a
2860 scarce resource, and on EV4 shift has the same latency as a cmove. */
2861 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2863 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2864 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2866 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2867 GEN_INT (exact_log2 (t)));
2868 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2870 else if (f == 0 && t == -1)
2872 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2873 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2875 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2877 else if (diff == 1 || diff == 4 || diff == 8)
2879 rtx add_op;
2881 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2882 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2884 if (diff == 1)
2885 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2886 else
2888 add_op = GEN_INT (f);
2889 if (sext_add_operand (add_op, mode))
2891 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2892 GEN_INT (diff));
2893 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2894 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2896 else
2897 return 0;
2900 else
2901 return 0;
2903 return 1;
2906 /* Look up the function X_floating library function name for the
2907 given operation. */
2909 struct xfloating_op GTY(())
2911 const enum rtx_code code;
2912 const char *const GTY((skip)) osf_func;
2913 const char *const GTY((skip)) vms_func;
2914 rtx libcall;
2917 static GTY(()) struct xfloating_op xfloating_ops[] =
2919 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2920 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2921 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2922 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2923 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2924 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2925 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2926 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2927 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2928 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2929 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2930 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2931 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2932 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2933 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2936 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2938 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2939 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2942 static rtx
2943 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2945 struct xfloating_op *ops = xfloating_ops;
2946 long n = ARRAY_SIZE (xfloating_ops);
2947 long i;
2949 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2951 /* How irritating. Nothing to key off for the main table. */
2952 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2954 ops = vax_cvt_ops;
2955 n = ARRAY_SIZE (vax_cvt_ops);
2958 for (i = 0; i < n; ++i, ++ops)
2959 if (ops->code == code)
2961 rtx func = ops->libcall;
2962 if (!func)
2964 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2965 ? ops->vms_func : ops->osf_func);
2966 ops->libcall = func;
2968 return func;
2971 gcc_unreachable ();
2974 /* Most X_floating operations take the rounding mode as an argument.
2975 Compute that here. */
2977 static int
2978 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2979 enum alpha_fp_rounding_mode round)
2981 int mode;
2983 switch (round)
2985 case ALPHA_FPRM_NORM:
2986 mode = 2;
2987 break;
2988 case ALPHA_FPRM_MINF:
2989 mode = 1;
2990 break;
2991 case ALPHA_FPRM_CHOP:
2992 mode = 0;
2993 break;
2994 case ALPHA_FPRM_DYN:
2995 mode = 4;
2996 break;
2997 default:
2998 gcc_unreachable ();
3000 /* XXX For reference, round to +inf is mode = 3. */
3003 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3004 mode |= 0x10000;
3006 return mode;
3009 /* Emit an X_floating library function call.
3011 Note that these functions do not follow normal calling conventions:
3012 TFmode arguments are passed in two integer registers (as opposed to
3013 indirect); TFmode return values appear in R16+R17.
3015 FUNC is the function to call.
3016 TARGET is where the output belongs.
3017 OPERANDS are the inputs.
3018 NOPERANDS is the count of inputs.
3019 EQUIV is the expression equivalent for the function.
3022 static void
3023 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3024 int noperands, rtx equiv)
3026 rtx usage = NULL_RTX, tmp, reg;
3027 int regno = 16, i;
3029 start_sequence ();
3031 for (i = 0; i < noperands; ++i)
3033 switch (GET_MODE (operands[i]))
3035 case TFmode:
3036 reg = gen_rtx_REG (TFmode, regno);
3037 regno += 2;
3038 break;
3040 case DFmode:
3041 reg = gen_rtx_REG (DFmode, regno + 32);
3042 regno += 1;
3043 break;
3045 case VOIDmode:
3046 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3047 /* FALLTHRU */
3048 case DImode:
3049 reg = gen_rtx_REG (DImode, regno);
3050 regno += 1;
3051 break;
3053 default:
3054 gcc_unreachable ();
3057 emit_move_insn (reg, operands[i]);
3058 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3061 switch (GET_MODE (target))
3063 case TFmode:
3064 reg = gen_rtx_REG (TFmode, 16);
3065 break;
3066 case DFmode:
3067 reg = gen_rtx_REG (DFmode, 32);
3068 break;
3069 case DImode:
3070 reg = gen_rtx_REG (DImode, 0);
3071 break;
3072 default:
3073 gcc_unreachable ();
3076 tmp = gen_rtx_MEM (QImode, func);
3077 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3078 const0_rtx, const0_rtx));
3079 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3080 CONST_OR_PURE_CALL_P (tmp) = 1;
3082 tmp = get_insns ();
3083 end_sequence ();
3085 emit_libcall_block (tmp, target, reg, equiv);
3088 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3090 void
3091 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3093 rtx func;
3094 int mode;
3095 rtx out_operands[3];
3097 func = alpha_lookup_xfloating_lib_func (code);
3098 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3100 out_operands[0] = operands[1];
3101 out_operands[1] = operands[2];
3102 out_operands[2] = GEN_INT (mode);
3103 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3104 gen_rtx_fmt_ee (code, TFmode, operands[1],
3105 operands[2]));
3108 /* Emit an X_floating library function call for a comparison. */
3110 static rtx
3111 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3113 enum rtx_code cmp_code, res_code;
3114 rtx func, out, operands[2];
3116 /* X_floating library comparison functions return
3117 -1 unordered
3118 0 false
3119 1 true
3120 Convert the compare against the raw return value. */
3122 cmp_code = *pcode;
3123 switch (cmp_code)
3125 case UNORDERED:
3126 cmp_code = EQ;
3127 res_code = LT;
3128 break;
3129 case ORDERED:
3130 cmp_code = EQ;
3131 res_code = GE;
3132 break;
3133 case NE:
3134 res_code = NE;
3135 break;
3136 case EQ:
3137 case LT:
3138 case GT:
3139 case LE:
3140 case GE:
3141 res_code = GT;
3142 break;
3143 default:
3144 gcc_unreachable ();
3146 *pcode = res_code;
3148 func = alpha_lookup_xfloating_lib_func (cmp_code);
3150 operands[0] = op0;
3151 operands[1] = op1;
3152 out = gen_reg_rtx (DImode);
3154 /* ??? Strange mode for equiv because what's actually returned
3155 is -1,0,1, not a proper boolean value. */
3156 alpha_emit_xfloating_libcall (func, out, operands, 2,
3157 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3159 return out;
3162 /* Emit an X_floating library function call for a conversion. */
3164 void
3165 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3167 int noperands = 1, mode;
3168 rtx out_operands[2];
3169 rtx func;
3170 enum rtx_code code = orig_code;
3172 if (code == UNSIGNED_FIX)
3173 code = FIX;
3175 func = alpha_lookup_xfloating_lib_func (code);
3177 out_operands[0] = operands[1];
3179 switch (code)
3181 case FIX:
3182 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3183 out_operands[1] = GEN_INT (mode);
3184 noperands = 2;
3185 break;
3186 case FLOAT_TRUNCATE:
3187 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3188 out_operands[1] = GEN_INT (mode);
3189 noperands = 2;
3190 break;
3191 default:
3192 break;
3195 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3196 gen_rtx_fmt_e (orig_code,
3197 GET_MODE (operands[0]),
3198 operands[1]));
3201 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3202 OP[0] into OP[0,1]. Naturally, output operand ordering is
3203 little-endian. */
3205 void
3206 alpha_split_tfmode_pair (rtx operands[4])
3208 switch (GET_CODE (operands[1]))
3210 case REG:
3211 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3212 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3213 break;
3215 case MEM:
3216 operands[3] = adjust_address (operands[1], DImode, 8);
3217 operands[2] = adjust_address (operands[1], DImode, 0);
3218 break;
3220 case CONST_DOUBLE:
3221 gcc_assert (operands[1] == CONST0_RTX (TFmode));
3222 operands[2] = operands[3] = const0_rtx;
3223 break;
3225 default:
3226 gcc_unreachable ();
3229 switch (GET_CODE (operands[0]))
3231 case REG:
3232 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3233 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3234 break;
3236 case MEM:
3237 operands[1] = adjust_address (operands[0], DImode, 8);
3238 operands[0] = adjust_address (operands[0], DImode, 0);
3239 break;
3241 default:
3242 gcc_unreachable ();
3246 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3247 op2 is a register containing the sign bit, operation is the
3248 logical operation to be performed. */
3250 void
3251 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3253 rtx high_bit = operands[2];
3254 rtx scratch;
3255 int move;
3257 alpha_split_tfmode_pair (operands);
3259 /* Detect three flavors of operand overlap. */
3260 move = 1;
3261 if (rtx_equal_p (operands[0], operands[2]))
3262 move = 0;
3263 else if (rtx_equal_p (operands[1], operands[2]))
3265 if (rtx_equal_p (operands[0], high_bit))
3266 move = 2;
3267 else
3268 move = -1;
3271 if (move < 0)
3272 emit_move_insn (operands[0], operands[2]);
3274 /* ??? If the destination overlaps both source tf and high_bit, then
3275 assume source tf is dead in its entirety and use the other half
3276 for a scratch register. Otherwise "scratch" is just the proper
3277 destination register. */
3278 scratch = operands[move < 2 ? 1 : 3];
3280 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3282 if (move > 0)
3284 emit_move_insn (operands[0], operands[2]);
3285 if (move > 1)
3286 emit_move_insn (operands[1], scratch);
3290 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3291 unaligned data:
3293 unsigned: signed:
3294 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3295 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3296 lda r3,X(r11) lda r3,X+2(r11)
3297 extwl r1,r3,r1 extql r1,r3,r1
3298 extwh r2,r3,r2 extqh r2,r3,r2
3299 or r1.r2.r1 or r1,r2,r1
3300 sra r1,48,r1
3302 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3303 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3304 lda r3,X(r11) lda r3,X(r11)
3305 extll r1,r3,r1 extll r1,r3,r1
3306 extlh r2,r3,r2 extlh r2,r3,r2
3307 or r1.r2.r1 addl r1,r2,r1
3309 quad: ldq_u r1,X(r11)
3310 ldq_u r2,X+7(r11)
3311 lda r3,X(r11)
3312 extql r1,r3,r1
3313 extqh r2,r3,r2
3314 or r1.r2.r1
3317 void
3318 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3319 HOST_WIDE_INT ofs, int sign)
3321 rtx meml, memh, addr, extl, exth, tmp, mema;
3322 enum machine_mode mode;
3324 if (TARGET_BWX && size == 2)
3326 meml = adjust_address (mem, QImode, ofs);
3327 memh = adjust_address (mem, QImode, ofs+1);
3328 if (BYTES_BIG_ENDIAN)
3329 tmp = meml, meml = memh, memh = tmp;
3330 extl = gen_reg_rtx (DImode);
3331 exth = gen_reg_rtx (DImode);
3332 emit_insn (gen_zero_extendqidi2 (extl, meml));
3333 emit_insn (gen_zero_extendqidi2 (exth, memh));
3334 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3335 NULL, 1, OPTAB_LIB_WIDEN);
3336 addr = expand_simple_binop (DImode, IOR, extl, exth,
3337 NULL, 1, OPTAB_LIB_WIDEN);
3339 if (sign && GET_MODE (tgt) != HImode)
3341 addr = gen_lowpart (HImode, addr);
3342 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3344 else
3346 if (GET_MODE (tgt) != DImode)
3347 addr = gen_lowpart (GET_MODE (tgt), addr);
3348 emit_move_insn (tgt, addr);
3350 return;
3353 meml = gen_reg_rtx (DImode);
3354 memh = gen_reg_rtx (DImode);
3355 addr = gen_reg_rtx (DImode);
3356 extl = gen_reg_rtx (DImode);
3357 exth = gen_reg_rtx (DImode);
3359 mema = XEXP (mem, 0);
3360 if (GET_CODE (mema) == LO_SUM)
3361 mema = force_reg (Pmode, mema);
3363 /* AND addresses cannot be in any alias set, since they may implicitly
3364 alias surrounding code. Ideally we'd have some alias set that
3365 covered all types except those with alignment 8 or higher. */
3367 tmp = change_address (mem, DImode,
3368 gen_rtx_AND (DImode,
3369 plus_constant (mema, ofs),
3370 GEN_INT (-8)));
3371 set_mem_alias_set (tmp, 0);
3372 emit_move_insn (meml, tmp);
3374 tmp = change_address (mem, DImode,
3375 gen_rtx_AND (DImode,
3376 plus_constant (mema, ofs + size - 1),
3377 GEN_INT (-8)));
3378 set_mem_alias_set (tmp, 0);
3379 emit_move_insn (memh, tmp);
3381 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3383 emit_move_insn (addr, plus_constant (mema, -1));
3385 emit_insn (gen_extqh_be (extl, meml, addr));
3386 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3388 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3389 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3390 addr, 1, OPTAB_WIDEN);
3392 else if (sign && size == 2)
3394 emit_move_insn (addr, plus_constant (mema, ofs+2));
3396 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3397 emit_insn (gen_extqh_le (exth, memh, addr));
3399 /* We must use tgt here for the target. Alpha-vms port fails if we use
3400 addr for the target, because addr is marked as a pointer and combine
3401 knows that pointers are always sign-extended 32 bit values. */
3402 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3403 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3404 addr, 1, OPTAB_WIDEN);
3406 else
3408 if (WORDS_BIG_ENDIAN)
3410 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3411 switch ((int) size)
3413 case 2:
3414 emit_insn (gen_extwh_be (extl, meml, addr));
3415 mode = HImode;
3416 break;
3418 case 4:
3419 emit_insn (gen_extlh_be (extl, meml, addr));
3420 mode = SImode;
3421 break;
3423 case 8:
3424 emit_insn (gen_extqh_be (extl, meml, addr));
3425 mode = DImode;
3426 break;
3428 default:
3429 gcc_unreachable ();
3431 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3433 else
3435 emit_move_insn (addr, plus_constant (mema, ofs));
3436 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3437 switch ((int) size)
3439 case 2:
3440 emit_insn (gen_extwh_le (exth, memh, addr));
3441 mode = HImode;
3442 break;
3444 case 4:
3445 emit_insn (gen_extlh_le (exth, memh, addr));
3446 mode = SImode;
3447 break;
3449 case 8:
3450 emit_insn (gen_extqh_le (exth, memh, addr));
3451 mode = DImode;
3452 break;
3454 default:
3455 gcc_unreachable ();
3459 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3460 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3461 sign, OPTAB_WIDEN);
3464 if (addr != tgt)
3465 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3468 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3470 void
3471 alpha_expand_unaligned_store (rtx dst, rtx src,
3472 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3474 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3476 if (TARGET_BWX && size == 2)
3478 if (src != const0_rtx)
3480 dstl = gen_lowpart (QImode, src);
3481 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3482 NULL, 1, OPTAB_LIB_WIDEN);
3483 dsth = gen_lowpart (QImode, dsth);
3485 else
3486 dstl = dsth = const0_rtx;
3488 meml = adjust_address (dst, QImode, ofs);
3489 memh = adjust_address (dst, QImode, ofs+1);
3490 if (BYTES_BIG_ENDIAN)
3491 addr = meml, meml = memh, memh = addr;
3493 emit_move_insn (meml, dstl);
3494 emit_move_insn (memh, dsth);
3495 return;
3498 dstl = gen_reg_rtx (DImode);
3499 dsth = gen_reg_rtx (DImode);
3500 insl = gen_reg_rtx (DImode);
3501 insh = gen_reg_rtx (DImode);
3503 dsta = XEXP (dst, 0);
3504 if (GET_CODE (dsta) == LO_SUM)
3505 dsta = force_reg (Pmode, dsta);
3507 /* AND addresses cannot be in any alias set, since they may implicitly
3508 alias surrounding code. Ideally we'd have some alias set that
3509 covered all types except those with alignment 8 or higher. */
3511 meml = change_address (dst, DImode,
3512 gen_rtx_AND (DImode,
3513 plus_constant (dsta, ofs),
3514 GEN_INT (-8)));
3515 set_mem_alias_set (meml, 0);
3517 memh = change_address (dst, DImode,
3518 gen_rtx_AND (DImode,
3519 plus_constant (dsta, ofs + size - 1),
3520 GEN_INT (-8)));
3521 set_mem_alias_set (memh, 0);
3523 emit_move_insn (dsth, memh);
3524 emit_move_insn (dstl, meml);
3525 if (WORDS_BIG_ENDIAN)
3527 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3529 if (src != const0_rtx)
3531 switch ((int) size)
3533 case 2:
3534 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3535 break;
3536 case 4:
3537 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3538 break;
3539 case 8:
3540 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3541 break;
3543 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3544 GEN_INT (size*8), addr));
3547 switch ((int) size)
3549 case 2:
3550 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3551 break;
3552 case 4:
3554 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3555 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3556 break;
3558 case 8:
3559 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3560 break;
3563 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3565 else
3567 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3569 if (src != CONST0_RTX (GET_MODE (src)))
3571 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3572 GEN_INT (size*8), addr));
3574 switch ((int) size)
3576 case 2:
3577 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3578 break;
3579 case 4:
3580 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3581 break;
3582 case 8:
3583 emit_insn (gen_insql_le (insl, src, addr));
3584 break;
3588 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3590 switch ((int) size)
3592 case 2:
3593 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3594 break;
3595 case 4:
3597 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3598 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3599 break;
3601 case 8:
3602 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3603 break;
3607 if (src != CONST0_RTX (GET_MODE (src)))
3609 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3610 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3613 if (WORDS_BIG_ENDIAN)
3615 emit_move_insn (meml, dstl);
3616 emit_move_insn (memh, dsth);
3618 else
3620 /* Must store high before low for degenerate case of aligned. */
3621 emit_move_insn (memh, dsth);
3622 emit_move_insn (meml, dstl);
3626 /* The block move code tries to maximize speed by separating loads and
3627 stores at the expense of register pressure: we load all of the data
3628 before we store it back out. There are two secondary effects worth
3629 mentioning, that this speeds copying to/from aligned and unaligned
3630 buffers, and that it makes the code significantly easier to write. */
3632 #define MAX_MOVE_WORDS 8
3634 /* Load an integral number of consecutive unaligned quadwords. */
3636 static void
3637 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3638 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3640 rtx const im8 = GEN_INT (-8);
3641 rtx const i64 = GEN_INT (64);
3642 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3643 rtx sreg, areg, tmp, smema;
3644 HOST_WIDE_INT i;
3646 smema = XEXP (smem, 0);
3647 if (GET_CODE (smema) == LO_SUM)
3648 smema = force_reg (Pmode, smema);
3650 /* Generate all the tmp registers we need. */
3651 for (i = 0; i < words; ++i)
3653 data_regs[i] = out_regs[i];
3654 ext_tmps[i] = gen_reg_rtx (DImode);
3656 data_regs[words] = gen_reg_rtx (DImode);
3658 if (ofs != 0)
3659 smem = adjust_address (smem, GET_MODE (smem), ofs);
3661 /* Load up all of the source data. */
3662 for (i = 0; i < words; ++i)
3664 tmp = change_address (smem, DImode,
3665 gen_rtx_AND (DImode,
3666 plus_constant (smema, 8*i),
3667 im8));
3668 set_mem_alias_set (tmp, 0);
3669 emit_move_insn (data_regs[i], tmp);
3672 tmp = change_address (smem, DImode,
3673 gen_rtx_AND (DImode,
3674 plus_constant (smema, 8*words - 1),
3675 im8));
3676 set_mem_alias_set (tmp, 0);
3677 emit_move_insn (data_regs[words], tmp);
3679 /* Extract the half-word fragments. Unfortunately DEC decided to make
3680 extxh with offset zero a noop instead of zeroing the register, so
3681 we must take care of that edge condition ourselves with cmov. */
3683 sreg = copy_addr_to_reg (smema);
3684 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3685 1, OPTAB_WIDEN);
3686 if (WORDS_BIG_ENDIAN)
3687 emit_move_insn (sreg, plus_constant (sreg, 7));
3688 for (i = 0; i < words; ++i)
3690 if (WORDS_BIG_ENDIAN)
3692 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3693 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3695 else
3697 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3698 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3700 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3701 gen_rtx_IF_THEN_ELSE (DImode,
3702 gen_rtx_EQ (DImode, areg,
3703 const0_rtx),
3704 const0_rtx, ext_tmps[i])));
3707 /* Merge the half-words into whole words. */
3708 for (i = 0; i < words; ++i)
3710 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3711 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3715 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3716 may be NULL to store zeros. */
3718 static void
3719 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3720 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3722 rtx const im8 = GEN_INT (-8);
3723 rtx const i64 = GEN_INT (64);
3724 rtx ins_tmps[MAX_MOVE_WORDS];
3725 rtx st_tmp_1, st_tmp_2, dreg;
3726 rtx st_addr_1, st_addr_2, dmema;
3727 HOST_WIDE_INT i;
3729 dmema = XEXP (dmem, 0);
3730 if (GET_CODE (dmema) == LO_SUM)
3731 dmema = force_reg (Pmode, dmema);
3733 /* Generate all the tmp registers we need. */
3734 if (data_regs != NULL)
3735 for (i = 0; i < words; ++i)
3736 ins_tmps[i] = gen_reg_rtx(DImode);
3737 st_tmp_1 = gen_reg_rtx(DImode);
3738 st_tmp_2 = gen_reg_rtx(DImode);
3740 if (ofs != 0)
3741 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3743 st_addr_2 = change_address (dmem, DImode,
3744 gen_rtx_AND (DImode,
3745 plus_constant (dmema, words*8 - 1),
3746 im8));
3747 set_mem_alias_set (st_addr_2, 0);
3749 st_addr_1 = change_address (dmem, DImode,
3750 gen_rtx_AND (DImode, dmema, im8));
3751 set_mem_alias_set (st_addr_1, 0);
3753 /* Load up the destination end bits. */
3754 emit_move_insn (st_tmp_2, st_addr_2);
3755 emit_move_insn (st_tmp_1, st_addr_1);
3757 /* Shift the input data into place. */
3758 dreg = copy_addr_to_reg (dmema);
3759 if (WORDS_BIG_ENDIAN)
3760 emit_move_insn (dreg, plus_constant (dreg, 7));
3761 if (data_regs != NULL)
3763 for (i = words-1; i >= 0; --i)
3765 if (WORDS_BIG_ENDIAN)
3767 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3768 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3770 else
3772 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3773 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3776 for (i = words-1; i > 0; --i)
3778 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3779 ins_tmps[i-1], ins_tmps[i-1], 1,
3780 OPTAB_WIDEN);
3784 /* Split and merge the ends with the destination data. */
3785 if (WORDS_BIG_ENDIAN)
3787 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3788 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3790 else
3792 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3793 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3796 if (data_regs != NULL)
3798 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3799 st_tmp_2, 1, OPTAB_WIDEN);
3800 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3801 st_tmp_1, 1, OPTAB_WIDEN);
3804 /* Store it all. */
3805 if (WORDS_BIG_ENDIAN)
3806 emit_move_insn (st_addr_1, st_tmp_1);
3807 else
3808 emit_move_insn (st_addr_2, st_tmp_2);
3809 for (i = words-1; i > 0; --i)
3811 rtx tmp = change_address (dmem, DImode,
3812 gen_rtx_AND (DImode,
3813 plus_constant(dmema,
3814 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3815 im8));
3816 set_mem_alias_set (tmp, 0);
3817 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3819 if (WORDS_BIG_ENDIAN)
3820 emit_move_insn (st_addr_2, st_tmp_2);
3821 else
3822 emit_move_insn (st_addr_1, st_tmp_1);
3826 /* Expand string/block move operations.
3828 operands[0] is the pointer to the destination.
3829 operands[1] is the pointer to the source.
3830 operands[2] is the number of bytes to move.
3831 operands[3] is the alignment. */
3834 alpha_expand_block_move (rtx operands[])
3836 rtx bytes_rtx = operands[2];
3837 rtx align_rtx = operands[3];
3838 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3839 HOST_WIDE_INT bytes = orig_bytes;
3840 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3841 HOST_WIDE_INT dst_align = src_align;
3842 rtx orig_src = operands[1];
3843 rtx orig_dst = operands[0];
3844 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3845 rtx tmp;
3846 unsigned int i, words, ofs, nregs = 0;
3848 if (orig_bytes <= 0)
3849 return 1;
3850 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3851 return 0;
3853 /* Look for additional alignment information from recorded register info. */
3855 tmp = XEXP (orig_src, 0);
3856 if (GET_CODE (tmp) == REG)
3857 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3858 else if (GET_CODE (tmp) == PLUS
3859 && GET_CODE (XEXP (tmp, 0)) == REG
3860 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3862 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3863 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3865 if (a > src_align)
3867 if (a >= 64 && c % 8 == 0)
3868 src_align = 64;
3869 else if (a >= 32 && c % 4 == 0)
3870 src_align = 32;
3871 else if (a >= 16 && c % 2 == 0)
3872 src_align = 16;
3876 tmp = XEXP (orig_dst, 0);
3877 if (GET_CODE (tmp) == REG)
3878 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3879 else if (GET_CODE (tmp) == PLUS
3880 && GET_CODE (XEXP (tmp, 0)) == REG
3881 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3883 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3884 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3886 if (a > dst_align)
3888 if (a >= 64 && c % 8 == 0)
3889 dst_align = 64;
3890 else if (a >= 32 && c % 4 == 0)
3891 dst_align = 32;
3892 else if (a >= 16 && c % 2 == 0)
3893 dst_align = 16;
3897 ofs = 0;
3898 if (src_align >= 64 && bytes >= 8)
3900 words = bytes / 8;
3902 for (i = 0; i < words; ++i)
3903 data_regs[nregs + i] = gen_reg_rtx (DImode);
3905 for (i = 0; i < words; ++i)
3906 emit_move_insn (data_regs[nregs + i],
3907 adjust_address (orig_src, DImode, ofs + i * 8));
3909 nregs += words;
3910 bytes -= words * 8;
3911 ofs += words * 8;
3914 if (src_align >= 32 && bytes >= 4)
3916 words = bytes / 4;
3918 for (i = 0; i < words; ++i)
3919 data_regs[nregs + i] = gen_reg_rtx (SImode);
3921 for (i = 0; i < words; ++i)
3922 emit_move_insn (data_regs[nregs + i],
3923 adjust_address (orig_src, SImode, ofs + i * 4));
3925 nregs += words;
3926 bytes -= words * 4;
3927 ofs += words * 4;
3930 if (bytes >= 8)
3932 words = bytes / 8;
3934 for (i = 0; i < words+1; ++i)
3935 data_regs[nregs + i] = gen_reg_rtx (DImode);
3937 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3938 words, ofs);
3940 nregs += words;
3941 bytes -= words * 8;
3942 ofs += words * 8;
3945 if (! TARGET_BWX && bytes >= 4)
3947 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3948 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3949 bytes -= 4;
3950 ofs += 4;
3953 if (bytes >= 2)
3955 if (src_align >= 16)
3957 do {
3958 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3959 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3960 bytes -= 2;
3961 ofs += 2;
3962 } while (bytes >= 2);
3964 else if (! TARGET_BWX)
3966 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3967 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3968 bytes -= 2;
3969 ofs += 2;
3973 while (bytes > 0)
3975 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3976 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3977 bytes -= 1;
3978 ofs += 1;
3981 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3983 /* Now save it back out again. */
3985 i = 0, ofs = 0;
3987 /* Write out the data in whatever chunks reading the source allowed. */
3988 if (dst_align >= 64)
3990 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3992 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3993 data_regs[i]);
3994 ofs += 8;
3995 i++;
3999 if (dst_align >= 32)
4001 /* If the source has remaining DImode regs, write them out in
4002 two pieces. */
4003 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4005 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4006 NULL_RTX, 1, OPTAB_WIDEN);
4008 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4009 gen_lowpart (SImode, data_regs[i]));
4010 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4011 gen_lowpart (SImode, tmp));
4012 ofs += 8;
4013 i++;
4016 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4018 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4019 data_regs[i]);
4020 ofs += 4;
4021 i++;
4025 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4027 /* Write out a remaining block of words using unaligned methods. */
4029 for (words = 1; i + words < nregs; words++)
4030 if (GET_MODE (data_regs[i + words]) != DImode)
4031 break;
4033 if (words == 1)
4034 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4035 else
4036 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4037 words, ofs);
4039 i += words;
4040 ofs += words * 8;
4043 /* Due to the above, this won't be aligned. */
4044 /* ??? If we have more than one of these, consider constructing full
4045 words in registers and using alpha_expand_unaligned_store_words. */
4046 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4048 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4049 ofs += 4;
4050 i++;
4053 if (dst_align >= 16)
4054 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4056 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4057 i++;
4058 ofs += 2;
4060 else
4061 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4063 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4064 i++;
4065 ofs += 2;
4068 /* The remainder must be byte copies. */
4069 while (i < nregs)
4071 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4072 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4073 i++;
4074 ofs += 1;
4077 return 1;
4081 alpha_expand_block_clear (rtx operands[])
4083 rtx bytes_rtx = operands[1];
4084 rtx align_rtx = operands[3];
4085 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4086 HOST_WIDE_INT bytes = orig_bytes;
4087 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4088 HOST_WIDE_INT alignofs = 0;
4089 rtx orig_dst = operands[0];
4090 rtx tmp;
4091 int i, words, ofs = 0;
4093 if (orig_bytes <= 0)
4094 return 1;
4095 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4096 return 0;
4098 /* Look for stricter alignment. */
4099 tmp = XEXP (orig_dst, 0);
4100 if (GET_CODE (tmp) == REG)
4101 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4102 else if (GET_CODE (tmp) == PLUS
4103 && GET_CODE (XEXP (tmp, 0)) == REG
4104 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4106 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4107 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4109 if (a > align)
4111 if (a >= 64)
4112 align = a, alignofs = 8 - c % 8;
4113 else if (a >= 32)
4114 align = a, alignofs = 4 - c % 4;
4115 else if (a >= 16)
4116 align = a, alignofs = 2 - c % 2;
4120 /* Handle an unaligned prefix first. */
4122 if (alignofs > 0)
4124 #if HOST_BITS_PER_WIDE_INT >= 64
4125 /* Given that alignofs is bounded by align, the only time BWX could
4126 generate three stores is for a 7 byte fill. Prefer two individual
4127 stores over a load/mask/store sequence. */
4128 if ((!TARGET_BWX || alignofs == 7)
4129 && align >= 32
4130 && !(alignofs == 4 && bytes >= 4))
4132 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4133 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4134 rtx mem, tmp;
4135 HOST_WIDE_INT mask;
4137 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4138 set_mem_alias_set (mem, 0);
4140 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4141 if (bytes < alignofs)
4143 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4144 ofs += bytes;
4145 bytes = 0;
4147 else
4149 bytes -= alignofs;
4150 ofs += alignofs;
4152 alignofs = 0;
4154 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4155 NULL_RTX, 1, OPTAB_WIDEN);
4157 emit_move_insn (mem, tmp);
4159 #endif
4161 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4163 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4164 bytes -= 1;
4165 ofs += 1;
4166 alignofs -= 1;
4168 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4170 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4171 bytes -= 2;
4172 ofs += 2;
4173 alignofs -= 2;
4175 if (alignofs == 4 && bytes >= 4)
4177 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4178 bytes -= 4;
4179 ofs += 4;
4180 alignofs = 0;
4183 /* If we've not used the extra lead alignment information by now,
4184 we won't be able to. Downgrade align to match what's left over. */
4185 if (alignofs > 0)
4187 alignofs = alignofs & -alignofs;
4188 align = MIN (align, alignofs * BITS_PER_UNIT);
4192 /* Handle a block of contiguous long-words. */
4194 if (align >= 64 && bytes >= 8)
4196 words = bytes / 8;
4198 for (i = 0; i < words; ++i)
4199 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4200 const0_rtx);
4202 bytes -= words * 8;
4203 ofs += words * 8;
4206 /* If the block is large and appropriately aligned, emit a single
4207 store followed by a sequence of stq_u insns. */
4209 if (align >= 32 && bytes > 16)
4211 rtx orig_dsta;
4213 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4214 bytes -= 4;
4215 ofs += 4;
4217 orig_dsta = XEXP (orig_dst, 0);
4218 if (GET_CODE (orig_dsta) == LO_SUM)
4219 orig_dsta = force_reg (Pmode, orig_dsta);
4221 words = bytes / 8;
4222 for (i = 0; i < words; ++i)
4224 rtx mem
4225 = change_address (orig_dst, DImode,
4226 gen_rtx_AND (DImode,
4227 plus_constant (orig_dsta, ofs + i*8),
4228 GEN_INT (-8)));
4229 set_mem_alias_set (mem, 0);
4230 emit_move_insn (mem, const0_rtx);
4233 /* Depending on the alignment, the first stq_u may have overlapped
4234 with the initial stl, which means that the last stq_u didn't
4235 write as much as it would appear. Leave those questionable bytes
4236 unaccounted for. */
4237 bytes -= words * 8 - 4;
4238 ofs += words * 8 - 4;
4241 /* Handle a smaller block of aligned words. */
4243 if ((align >= 64 && bytes == 4)
4244 || (align == 32 && bytes >= 4))
4246 words = bytes / 4;
4248 for (i = 0; i < words; ++i)
4249 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4250 const0_rtx);
4252 bytes -= words * 4;
4253 ofs += words * 4;
4256 /* An unaligned block uses stq_u stores for as many as possible. */
4258 if (bytes >= 8)
4260 words = bytes / 8;
4262 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4264 bytes -= words * 8;
4265 ofs += words * 8;
4268 /* Next clean up any trailing pieces. */
4270 #if HOST_BITS_PER_WIDE_INT >= 64
4271 /* Count the number of bits in BYTES for which aligned stores could
4272 be emitted. */
4273 words = 0;
4274 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4275 if (bytes & i)
4276 words += 1;
4278 /* If we have appropriate alignment (and it wouldn't take too many
4279 instructions otherwise), mask out the bytes we need. */
4280 if (TARGET_BWX ? words > 2 : bytes > 0)
4282 if (align >= 64)
4284 rtx mem, tmp;
4285 HOST_WIDE_INT mask;
4287 mem = adjust_address (orig_dst, DImode, ofs);
4288 set_mem_alias_set (mem, 0);
4290 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4292 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4293 NULL_RTX, 1, OPTAB_WIDEN);
4295 emit_move_insn (mem, tmp);
4296 return 1;
4298 else if (align >= 32 && bytes < 4)
4300 rtx mem, tmp;
4301 HOST_WIDE_INT mask;
4303 mem = adjust_address (orig_dst, SImode, ofs);
4304 set_mem_alias_set (mem, 0);
4306 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4308 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4309 NULL_RTX, 1, OPTAB_WIDEN);
4311 emit_move_insn (mem, tmp);
4312 return 1;
4315 #endif
4317 if (!TARGET_BWX && bytes >= 4)
4319 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4320 bytes -= 4;
4321 ofs += 4;
4324 if (bytes >= 2)
4326 if (align >= 16)
4328 do {
4329 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4330 const0_rtx);
4331 bytes -= 2;
4332 ofs += 2;
4333 } while (bytes >= 2);
4335 else if (! TARGET_BWX)
4337 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4338 bytes -= 2;
4339 ofs += 2;
4343 while (bytes > 0)
4345 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4346 bytes -= 1;
4347 ofs += 1;
4350 return 1;
4353 /* Returns a mask so that zap(x, value) == x & mask. */
4356 alpha_expand_zap_mask (HOST_WIDE_INT value)
4358 rtx result;
4359 int i;
4361 if (HOST_BITS_PER_WIDE_INT >= 64)
4363 HOST_WIDE_INT mask = 0;
4365 for (i = 7; i >= 0; --i)
4367 mask <<= 8;
4368 if (!((value >> i) & 1))
4369 mask |= 0xff;
4372 result = gen_int_mode (mask, DImode);
4374 else
4376 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4378 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4380 for (i = 7; i >= 4; --i)
4382 mask_hi <<= 8;
4383 if (!((value >> i) & 1))
4384 mask_hi |= 0xff;
4387 for (i = 3; i >= 0; --i)
4389 mask_lo <<= 8;
4390 if (!((value >> i) & 1))
4391 mask_lo |= 0xff;
4394 result = immed_double_const (mask_lo, mask_hi, DImode);
4397 return result;
4400 void
4401 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4402 enum machine_mode mode,
4403 rtx op0, rtx op1, rtx op2)
4405 op0 = gen_lowpart (mode, op0);
4407 if (op1 == const0_rtx)
4408 op1 = CONST0_RTX (mode);
4409 else
4410 op1 = gen_lowpart (mode, op1);
4412 if (op2 == const0_rtx)
4413 op2 = CONST0_RTX (mode);
4414 else
4415 op2 = gen_lowpart (mode, op2);
4417 emit_insn ((*gen) (op0, op1, op2));
4420 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4421 COND is true. Mark the jump as unlikely to be taken. */
4423 static void
4424 emit_unlikely_jump (rtx cond, rtx label)
4426 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4427 rtx x;
4429 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4430 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4431 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4434 /* A subroutine of the atomic operation splitters. Emit a load-locked
4435 instruction in MODE. */
4437 static void
4438 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4440 rtx (*fn) (rtx, rtx) = NULL;
4441 if (mode == SImode)
4442 fn = gen_load_locked_si;
4443 else if (mode == DImode)
4444 fn = gen_load_locked_di;
4445 emit_insn (fn (reg, mem));
4448 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4449 instruction in MODE. */
4451 static void
4452 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4454 rtx (*fn) (rtx, rtx, rtx) = NULL;
4455 if (mode == SImode)
4456 fn = gen_store_conditional_si;
4457 else if (mode == DImode)
4458 fn = gen_store_conditional_di;
4459 emit_insn (fn (res, mem, val));
4462 /* A subroutine of the atomic operation splitters. Emit an insxl
4463 instruction in MODE. */
4465 static rtx
4466 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4468 rtx ret = gen_reg_rtx (DImode);
4469 rtx (*fn) (rtx, rtx, rtx);
4471 if (WORDS_BIG_ENDIAN)
4473 if (mode == QImode)
4474 fn = gen_insbl_be;
4475 else
4476 fn = gen_inswl_be;
4478 else
4480 if (mode == QImode)
4481 fn = gen_insbl_le;
4482 else
4483 fn = gen_inswl_le;
4485 emit_insn (fn (ret, op1, op2));
4487 return ret;
4490 /* Expand an an atomic fetch-and-operate pattern. CODE is the binary operation
4491 to perform. MEM is the memory on which to operate. VAL is the second
4492 operand of the binary operator. BEFORE and AFTER are optional locations to
4493 return the value of MEM either before of after the operation. SCRATCH is
4494 a scratch register. */
4496 void
4497 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4498 rtx before, rtx after, rtx scratch)
4500 enum machine_mode mode = GET_MODE (mem);
4501 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4503 emit_insn (gen_memory_barrier ());
4505 label = gen_label_rtx ();
4506 emit_label (label);
4507 label = gen_rtx_LABEL_REF (DImode, label);
4509 if (before == NULL)
4510 before = scratch;
4511 emit_load_locked (mode, before, mem);
4513 if (code == NOT)
4514 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4515 else
4516 x = gen_rtx_fmt_ee (code, mode, before, val);
4517 if (after)
4518 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4519 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4521 emit_store_conditional (mode, cond, mem, scratch);
4523 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4524 emit_unlikely_jump (x, label);
4526 emit_insn (gen_memory_barrier ());
4529 /* Expand a compare and swap operation. */
4531 void
4532 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4533 rtx scratch)
4535 enum machine_mode mode = GET_MODE (mem);
4536 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4538 emit_insn (gen_memory_barrier ());
4540 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4541 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4542 emit_label (XEXP (label1, 0));
4544 emit_load_locked (mode, retval, mem);
4546 x = gen_lowpart (DImode, retval);
4547 if (oldval == const0_rtx)
4548 x = gen_rtx_NE (DImode, x, const0_rtx);
4549 else
4551 x = gen_rtx_EQ (DImode, x, oldval);
4552 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4553 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4555 emit_unlikely_jump (x, label2);
4557 emit_move_insn (scratch, newval);
4558 emit_store_conditional (mode, cond, mem, scratch);
4560 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4561 emit_unlikely_jump (x, label1);
4563 emit_insn (gen_memory_barrier ());
4564 emit_label (XEXP (label2, 0));
4567 void
4568 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4570 enum machine_mode mode = GET_MODE (mem);
4571 rtx addr, align, wdst;
4572 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4574 addr = force_reg (DImode, XEXP (mem, 0));
4575 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4576 NULL_RTX, 1, OPTAB_DIRECT);
4578 oldval = convert_modes (DImode, mode, oldval, 1);
4579 newval = emit_insxl (mode, newval, addr);
4581 wdst = gen_reg_rtx (DImode);
4582 if (mode == QImode)
4583 fn5 = gen_sync_compare_and_swapqi_1;
4584 else
4585 fn5 = gen_sync_compare_and_swaphi_1;
4586 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4588 emit_move_insn (dst, gen_lowpart (mode, wdst));
4591 void
4592 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4593 rtx oldval, rtx newval, rtx align,
4594 rtx scratch, rtx cond)
4596 rtx label1, label2, mem, width, mask, x;
4598 mem = gen_rtx_MEM (DImode, align);
4599 MEM_VOLATILE_P (mem) = 1;
4601 emit_insn (gen_memory_barrier ());
4602 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4603 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4604 emit_label (XEXP (label1, 0));
4606 emit_load_locked (DImode, scratch, mem);
4608 width = GEN_INT (GET_MODE_BITSIZE (mode));
4609 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4610 if (WORDS_BIG_ENDIAN)
4611 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4612 else
4613 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4615 if (oldval == const0_rtx)
4616 x = gen_rtx_NE (DImode, dest, const0_rtx);
4617 else
4619 x = gen_rtx_EQ (DImode, dest, oldval);
4620 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4621 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4623 emit_unlikely_jump (x, label2);
4625 if (WORDS_BIG_ENDIAN)
4626 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4627 else
4628 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4629 emit_insn (gen_iordi3 (scratch, scratch, newval));
4631 emit_store_conditional (DImode, scratch, mem, scratch);
4633 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4634 emit_unlikely_jump (x, label1);
4636 emit_insn (gen_memory_barrier ());
4637 emit_label (XEXP (label2, 0));
4640 /* Expand an atomic exchange operation. */
4642 void
4643 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4645 enum machine_mode mode = GET_MODE (mem);
4646 rtx label, x, cond = gen_lowpart (DImode, scratch);
4648 emit_insn (gen_memory_barrier ());
4650 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4651 emit_label (XEXP (label, 0));
4653 emit_load_locked (mode, retval, mem);
4654 emit_move_insn (scratch, val);
4655 emit_store_conditional (mode, cond, mem, scratch);
4657 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4658 emit_unlikely_jump (x, label);
4661 void
4662 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4664 enum machine_mode mode = GET_MODE (mem);
4665 rtx addr, align, wdst;
4666 rtx (*fn4) (rtx, rtx, rtx, rtx);
4668 /* Force the address into a register. */
4669 addr = force_reg (DImode, XEXP (mem, 0));
4671 /* Align it to a multiple of 8. */
4672 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4673 NULL_RTX, 1, OPTAB_DIRECT);
4675 /* Insert val into the correct byte location within the word. */
4676 val = emit_insxl (mode, val, addr);
4678 wdst = gen_reg_rtx (DImode);
4679 if (mode == QImode)
4680 fn4 = gen_sync_lock_test_and_setqi_1;
4681 else
4682 fn4 = gen_sync_lock_test_and_sethi_1;
4683 emit_insn (fn4 (wdst, addr, val, align));
4685 emit_move_insn (dst, gen_lowpart (mode, wdst));
4688 void
4689 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4690 rtx val, rtx align, rtx scratch)
4692 rtx label, mem, width, mask, x;
4694 mem = gen_rtx_MEM (DImode, align);
4695 MEM_VOLATILE_P (mem) = 1;
4697 emit_insn (gen_memory_barrier ());
4698 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4699 emit_label (XEXP (label, 0));
4701 emit_load_locked (DImode, scratch, mem);
4703 width = GEN_INT (GET_MODE_BITSIZE (mode));
4704 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4705 if (WORDS_BIG_ENDIAN)
4707 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4708 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4710 else
4712 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4713 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4715 emit_insn (gen_iordi3 (scratch, scratch, val));
4717 emit_store_conditional (DImode, scratch, mem, scratch);
4719 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4720 emit_unlikely_jump (x, label);
4723 /* Adjust the cost of a scheduling dependency. Return the new cost of
4724 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4726 static int
4727 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4729 enum attr_type insn_type, dep_insn_type;
4731 /* If the dependence is an anti-dependence, there is no cost. For an
4732 output dependence, there is sometimes a cost, but it doesn't seem
4733 worth handling those few cases. */
4734 if (REG_NOTE_KIND (link) != 0)
4735 return cost;
4737 /* If we can't recognize the insns, we can't really do anything. */
4738 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4739 return cost;
4741 insn_type = get_attr_type (insn);
4742 dep_insn_type = get_attr_type (dep_insn);
4744 /* Bring in the user-defined memory latency. */
4745 if (dep_insn_type == TYPE_ILD
4746 || dep_insn_type == TYPE_FLD
4747 || dep_insn_type == TYPE_LDSYM)
4748 cost += alpha_memory_latency-1;
4750 /* Everything else handled in DFA bypasses now. */
4752 return cost;
4755 /* The number of instructions that can be issued per cycle. */
4757 static int
4758 alpha_issue_rate (void)
4760 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4763 /* How many alternative schedules to try. This should be as wide as the
4764 scheduling freedom in the DFA, but no wider. Making this value too
4765 large results extra work for the scheduler.
4767 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4768 alternative schedules. For EV5, we can choose between E0/E1 and
4769 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4771 static int
4772 alpha_multipass_dfa_lookahead (void)
4774 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4777 /* Machine-specific function data. */
4779 struct machine_function GTY(())
4781 /* For unicosmk. */
4782 /* List of call information words for calls from this function. */
4783 struct rtx_def *first_ciw;
4784 struct rtx_def *last_ciw;
4785 int ciw_count;
4787 /* List of deferred case vectors. */
4788 struct rtx_def *addr_list;
4790 /* For OSF. */
4791 const char *some_ld_name;
4793 /* For TARGET_LD_BUGGY_LDGP. */
4794 struct rtx_def *gp_save_rtx;
4797 /* How to allocate a 'struct machine_function'. */
4799 static struct machine_function *
4800 alpha_init_machine_status (void)
4802 return ((struct machine_function *)
4803 ggc_alloc_cleared (sizeof (struct machine_function)));
4806 /* Functions to save and restore alpha_return_addr_rtx. */
4808 /* Start the ball rolling with RETURN_ADDR_RTX. */
4811 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4813 if (count != 0)
4814 return const0_rtx;
4816 return get_hard_reg_initial_val (Pmode, REG_RA);
4819 /* Return or create a memory slot containing the gp value for the current
4820 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4823 alpha_gp_save_rtx (void)
4825 rtx seq, m = cfun->machine->gp_save_rtx;
4827 if (m == NULL)
4829 start_sequence ();
4831 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4832 m = validize_mem (m);
4833 emit_move_insn (m, pic_offset_table_rtx);
4835 seq = get_insns ();
4836 end_sequence ();
4837 emit_insn_after (seq, entry_of_function ());
4839 cfun->machine->gp_save_rtx = m;
4842 return m;
4845 static int
4846 alpha_ra_ever_killed (void)
4848 rtx top;
4850 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4851 return regs_ever_live[REG_RA];
4853 push_topmost_sequence ();
4854 top = get_insns ();
4855 pop_topmost_sequence ();
4857 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4861 /* Return the trap mode suffix applicable to the current
4862 instruction, or NULL. */
4864 static const char *
4865 get_trap_mode_suffix (void)
4867 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4869 switch (s)
4871 case TRAP_SUFFIX_NONE:
4872 return NULL;
4874 case TRAP_SUFFIX_SU:
4875 if (alpha_fptm >= ALPHA_FPTM_SU)
4876 return "su";
4877 return NULL;
4879 case TRAP_SUFFIX_SUI:
4880 if (alpha_fptm >= ALPHA_FPTM_SUI)
4881 return "sui";
4882 return NULL;
4884 case TRAP_SUFFIX_V_SV:
4885 switch (alpha_fptm)
4887 case ALPHA_FPTM_N:
4888 return NULL;
4889 case ALPHA_FPTM_U:
4890 return "v";
4891 case ALPHA_FPTM_SU:
4892 case ALPHA_FPTM_SUI:
4893 return "sv";
4894 default:
4895 gcc_unreachable ();
4898 case TRAP_SUFFIX_V_SV_SVI:
4899 switch (alpha_fptm)
4901 case ALPHA_FPTM_N:
4902 return NULL;
4903 case ALPHA_FPTM_U:
4904 return "v";
4905 case ALPHA_FPTM_SU:
4906 return "sv";
4907 case ALPHA_FPTM_SUI:
4908 return "svi";
4909 default:
4910 gcc_unreachable ();
4912 break;
4914 case TRAP_SUFFIX_U_SU_SUI:
4915 switch (alpha_fptm)
4917 case ALPHA_FPTM_N:
4918 return NULL;
4919 case ALPHA_FPTM_U:
4920 return "u";
4921 case ALPHA_FPTM_SU:
4922 return "su";
4923 case ALPHA_FPTM_SUI:
4924 return "sui";
4925 default:
4926 gcc_unreachable ();
4928 break;
4930 default:
4931 gcc_unreachable ();
4933 gcc_unreachable ();
4936 /* Return the rounding mode suffix applicable to the current
4937 instruction, or NULL. */
4939 static const char *
4940 get_round_mode_suffix (void)
4942 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4944 switch (s)
4946 case ROUND_SUFFIX_NONE:
4947 return NULL;
4948 case ROUND_SUFFIX_NORMAL:
4949 switch (alpha_fprm)
4951 case ALPHA_FPRM_NORM:
4952 return NULL;
4953 case ALPHA_FPRM_MINF:
4954 return "m";
4955 case ALPHA_FPRM_CHOP:
4956 return "c";
4957 case ALPHA_FPRM_DYN:
4958 return "d";
4959 default:
4960 gcc_unreachable ();
4962 break;
4964 case ROUND_SUFFIX_C:
4965 return "c";
4967 default:
4968 gcc_unreachable ();
4970 gcc_unreachable ();
4973 /* Locate some local-dynamic symbol still in use by this function
4974 so that we can print its name in some movdi_er_tlsldm pattern. */
4976 static int
4977 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4979 rtx x = *px;
4981 if (GET_CODE (x) == SYMBOL_REF
4982 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4984 cfun->machine->some_ld_name = XSTR (x, 0);
4985 return 1;
4988 return 0;
4991 static const char *
4992 get_some_local_dynamic_name (void)
4994 rtx insn;
4996 if (cfun->machine->some_ld_name)
4997 return cfun->machine->some_ld_name;
4999 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5000 if (INSN_P (insn)
5001 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5002 return cfun->machine->some_ld_name;
5004 gcc_unreachable ();
5007 /* Print an operand. Recognize special options, documented below. */
5009 void
5010 print_operand (FILE *file, rtx x, int code)
5012 int i;
5014 switch (code)
5016 case '~':
5017 /* Print the assembler name of the current function. */
5018 assemble_name (file, alpha_fnname);
5019 break;
5021 case '&':
5022 assemble_name (file, get_some_local_dynamic_name ());
5023 break;
5025 case '/':
5027 const char *trap = get_trap_mode_suffix ();
5028 const char *round = get_round_mode_suffix ();
5030 if (trap || round)
5031 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5032 (trap ? trap : ""), (round ? round : ""));
5033 break;
5036 case ',':
5037 /* Generates single precision instruction suffix. */
5038 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5039 break;
5041 case '-':
5042 /* Generates double precision instruction suffix. */
5043 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5044 break;
5046 case '+':
5047 /* Generates a nop after a noreturn call at the very end of the
5048 function. */
5049 if (next_real_insn (current_output_insn) == 0)
5050 fprintf (file, "\n\tnop");
5051 break;
5053 case '#':
5054 if (alpha_this_literal_sequence_number == 0)
5055 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5056 fprintf (file, "%d", alpha_this_literal_sequence_number);
5057 break;
5059 case '*':
5060 if (alpha_this_gpdisp_sequence_number == 0)
5061 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5062 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5063 break;
5065 case 'H':
5066 if (GET_CODE (x) == HIGH)
5067 output_addr_const (file, XEXP (x, 0));
5068 else
5069 output_operand_lossage ("invalid %%H value");
5070 break;
5072 case 'J':
5074 const char *lituse;
5076 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5078 x = XVECEXP (x, 0, 0);
5079 lituse = "lituse_tlsgd";
5081 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5083 x = XVECEXP (x, 0, 0);
5084 lituse = "lituse_tlsldm";
5086 else if (GET_CODE (x) == CONST_INT)
5087 lituse = "lituse_jsr";
5088 else
5090 output_operand_lossage ("invalid %%J value");
5091 break;
5094 if (x != const0_rtx)
5095 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5097 break;
5099 case 'j':
5101 const char *lituse;
5103 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5104 lituse = "lituse_jsrdirect";
5105 #else
5106 lituse = "lituse_jsr";
5107 #endif
5109 gcc_assert (INTVAL (x) != 0);
5110 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5112 break;
5113 case 'r':
5114 /* If this operand is the constant zero, write it as "$31". */
5115 if (GET_CODE (x) == REG)
5116 fprintf (file, "%s", reg_names[REGNO (x)]);
5117 else if (x == CONST0_RTX (GET_MODE (x)))
5118 fprintf (file, "$31");
5119 else
5120 output_operand_lossage ("invalid %%r value");
5121 break;
5123 case 'R':
5124 /* Similar, but for floating-point. */
5125 if (GET_CODE (x) == REG)
5126 fprintf (file, "%s", reg_names[REGNO (x)]);
5127 else if (x == CONST0_RTX (GET_MODE (x)))
5128 fprintf (file, "$f31");
5129 else
5130 output_operand_lossage ("invalid %%R value");
5131 break;
5133 case 'N':
5134 /* Write the 1's complement of a constant. */
5135 if (GET_CODE (x) != CONST_INT)
5136 output_operand_lossage ("invalid %%N value");
5138 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5139 break;
5141 case 'P':
5142 /* Write 1 << C, for a constant C. */
5143 if (GET_CODE (x) != CONST_INT)
5144 output_operand_lossage ("invalid %%P value");
5146 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5147 break;
5149 case 'h':
5150 /* Write the high-order 16 bits of a constant, sign-extended. */
5151 if (GET_CODE (x) != CONST_INT)
5152 output_operand_lossage ("invalid %%h value");
5154 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5155 break;
5157 case 'L':
5158 /* Write the low-order 16 bits of a constant, sign-extended. */
5159 if (GET_CODE (x) != CONST_INT)
5160 output_operand_lossage ("invalid %%L value");
5162 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5163 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5164 break;
5166 case 'm':
5167 /* Write mask for ZAP insn. */
5168 if (GET_CODE (x) == CONST_DOUBLE)
5170 HOST_WIDE_INT mask = 0;
5171 HOST_WIDE_INT value;
5173 value = CONST_DOUBLE_LOW (x);
5174 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5175 i++, value >>= 8)
5176 if (value & 0xff)
5177 mask |= (1 << i);
5179 value = CONST_DOUBLE_HIGH (x);
5180 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5181 i++, value >>= 8)
5182 if (value & 0xff)
5183 mask |= (1 << (i + sizeof (int)));
5185 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5188 else if (GET_CODE (x) == CONST_INT)
5190 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5192 for (i = 0; i < 8; i++, value >>= 8)
5193 if (value & 0xff)
5194 mask |= (1 << i);
5196 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5198 else
5199 output_operand_lossage ("invalid %%m value");
5200 break;
5202 case 'M':
5203 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5204 if (GET_CODE (x) != CONST_INT
5205 || (INTVAL (x) != 8 && INTVAL (x) != 16
5206 && INTVAL (x) != 32 && INTVAL (x) != 64))
5207 output_operand_lossage ("invalid %%M value");
5209 fprintf (file, "%s",
5210 (INTVAL (x) == 8 ? "b"
5211 : INTVAL (x) == 16 ? "w"
5212 : INTVAL (x) == 32 ? "l"
5213 : "q"));
5214 break;
5216 case 'U':
5217 /* Similar, except do it from the mask. */
5218 if (GET_CODE (x) == CONST_INT)
5220 HOST_WIDE_INT value = INTVAL (x);
5222 if (value == 0xff)
5224 fputc ('b', file);
5225 break;
5227 if (value == 0xffff)
5229 fputc ('w', file);
5230 break;
5232 if (value == 0xffffffff)
5234 fputc ('l', file);
5235 break;
5237 if (value == -1)
5239 fputc ('q', file);
5240 break;
5243 else if (HOST_BITS_PER_WIDE_INT == 32
5244 && GET_CODE (x) == CONST_DOUBLE
5245 && CONST_DOUBLE_LOW (x) == 0xffffffff
5246 && CONST_DOUBLE_HIGH (x) == 0)
5248 fputc ('l', file);
5249 break;
5251 output_operand_lossage ("invalid %%U value");
5252 break;
5254 case 's':
5255 /* Write the constant value divided by 8 for little-endian mode or
5256 (56 - value) / 8 for big-endian mode. */
5258 if (GET_CODE (x) != CONST_INT
5259 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5260 ? 56
5261 : 64)
5262 || (INTVAL (x) & 7) != 0)
5263 output_operand_lossage ("invalid %%s value");
5265 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5266 WORDS_BIG_ENDIAN
5267 ? (56 - INTVAL (x)) / 8
5268 : INTVAL (x) / 8);
5269 break;
5271 case 'S':
5272 /* Same, except compute (64 - c) / 8 */
5274 if (GET_CODE (x) != CONST_INT
5275 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5276 && (INTVAL (x) & 7) != 8)
5277 output_operand_lossage ("invalid %%s value");
5279 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5280 break;
5282 case 't':
5284 /* On Unicos/Mk systems: use a DEX expression if the symbol
5285 clashes with a register name. */
5286 int dex = unicosmk_need_dex (x);
5287 if (dex)
5288 fprintf (file, "DEX(%d)", dex);
5289 else
5290 output_addr_const (file, x);
5292 break;
5294 case 'C': case 'D': case 'c': case 'd':
5295 /* Write out comparison name. */
5297 enum rtx_code c = GET_CODE (x);
5299 if (!COMPARISON_P (x))
5300 output_operand_lossage ("invalid %%C value");
5302 else if (code == 'D')
5303 c = reverse_condition (c);
5304 else if (code == 'c')
5305 c = swap_condition (c);
5306 else if (code == 'd')
5307 c = swap_condition (reverse_condition (c));
5309 if (c == LEU)
5310 fprintf (file, "ule");
5311 else if (c == LTU)
5312 fprintf (file, "ult");
5313 else if (c == UNORDERED)
5314 fprintf (file, "un");
5315 else
5316 fprintf (file, "%s", GET_RTX_NAME (c));
5318 break;
5320 case 'E':
5321 /* Write the divide or modulus operator. */
5322 switch (GET_CODE (x))
5324 case DIV:
5325 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5326 break;
5327 case UDIV:
5328 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5329 break;
5330 case MOD:
5331 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5332 break;
5333 case UMOD:
5334 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5335 break;
5336 default:
5337 output_operand_lossage ("invalid %%E value");
5338 break;
5340 break;
5342 case 'A':
5343 /* Write "_u" for unaligned access. */
5344 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5345 fprintf (file, "_u");
5346 break;
5348 case 0:
5349 if (GET_CODE (x) == REG)
5350 fprintf (file, "%s", reg_names[REGNO (x)]);
5351 else if (GET_CODE (x) == MEM)
5352 output_address (XEXP (x, 0));
5353 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5355 switch (XINT (XEXP (x, 0), 1))
5357 case UNSPEC_DTPREL:
5358 case UNSPEC_TPREL:
5359 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5360 break;
5361 default:
5362 output_operand_lossage ("unknown relocation unspec");
5363 break;
5366 else
5367 output_addr_const (file, x);
5368 break;
5370 default:
5371 output_operand_lossage ("invalid %%xn code");
5375 void
5376 print_operand_address (FILE *file, rtx addr)
5378 int basereg = 31;
5379 HOST_WIDE_INT offset = 0;
5381 if (GET_CODE (addr) == AND)
5382 addr = XEXP (addr, 0);
5384 if (GET_CODE (addr) == PLUS
5385 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5387 offset = INTVAL (XEXP (addr, 1));
5388 addr = XEXP (addr, 0);
5391 if (GET_CODE (addr) == LO_SUM)
5393 const char *reloc16, *reloclo;
5394 rtx op1 = XEXP (addr, 1);
5396 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5398 op1 = XEXP (op1, 0);
5399 switch (XINT (op1, 1))
5401 case UNSPEC_DTPREL:
5402 reloc16 = NULL;
5403 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5404 break;
5405 case UNSPEC_TPREL:
5406 reloc16 = NULL;
5407 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5408 break;
5409 default:
5410 output_operand_lossage ("unknown relocation unspec");
5411 return;
5414 output_addr_const (file, XVECEXP (op1, 0, 0));
5416 else
5418 reloc16 = "gprel";
5419 reloclo = "gprellow";
5420 output_addr_const (file, op1);
5423 if (offset)
5424 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5426 addr = XEXP (addr, 0);
5427 switch (GET_CODE (addr))
5429 case REG:
5430 basereg = REGNO (addr);
5431 break;
5433 case SUBREG:
5434 basereg = subreg_regno (addr);
5435 break;
5437 default:
5438 gcc_unreachable ();
5441 fprintf (file, "($%d)\t\t!%s", basereg,
5442 (basereg == 29 ? reloc16 : reloclo));
5443 return;
5446 switch (GET_CODE (addr))
5448 case REG:
5449 basereg = REGNO (addr);
5450 break;
5452 case SUBREG:
5453 basereg = subreg_regno (addr);
5454 break;
5456 case CONST_INT:
5457 offset = INTVAL (addr);
5458 break;
5460 #if TARGET_ABI_OPEN_VMS
5461 case SYMBOL_REF:
5462 fprintf (file, "%s", XSTR (addr, 0));
5463 return;
5465 case CONST:
5466 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5467 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5468 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5469 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5470 INTVAL (XEXP (XEXP (addr, 0), 1)));
5471 return;
5473 #endif
5474 default:
5475 gcc_unreachable ();
5478 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5481 /* Emit RTL insns to initialize the variable parts of a trampoline at
5482 TRAMP. FNADDR is an RTX for the address of the function's pure
5483 code. CXT is an RTX for the static chain value for the function.
5485 The three offset parameters are for the individual template's
5486 layout. A JMPOFS < 0 indicates that the trampoline does not
5487 contain instructions at all.
5489 We assume here that a function will be called many more times than
5490 its address is taken (e.g., it might be passed to qsort), so we
5491 take the trouble to initialize the "hint" field in the JMP insn.
5492 Note that the hint field is PC (new) + 4 * bits 13:0. */
5494 void
5495 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5496 int fnofs, int cxtofs, int jmpofs)
5498 rtx temp, temp1, addr;
5499 /* VMS really uses DImode pointers in memory at this point. */
5500 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5502 #ifdef POINTERS_EXTEND_UNSIGNED
5503 fnaddr = convert_memory_address (mode, fnaddr);
5504 cxt = convert_memory_address (mode, cxt);
5505 #endif
5507 /* Store function address and CXT. */
5508 addr = memory_address (mode, plus_constant (tramp, fnofs));
5509 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5510 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5511 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5513 /* This has been disabled since the hint only has a 32k range, and in
5514 no existing OS is the stack within 32k of the text segment. */
5515 if (0 && jmpofs >= 0)
5517 /* Compute hint value. */
5518 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5519 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5520 OPTAB_WIDEN);
5521 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5522 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5523 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5524 GEN_INT (0x3fff), 0);
5526 /* Merge in the hint. */
5527 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5528 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5529 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5530 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5531 OPTAB_WIDEN);
5532 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5535 #ifdef ENABLE_EXECUTE_STACK
5536 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5537 0, VOIDmode, 1, tramp, Pmode);
5538 #endif
5540 if (jmpofs >= 0)
5541 emit_insn (gen_imb ());
5544 /* Determine where to put an argument to a function.
5545 Value is zero to push the argument on the stack,
5546 or a hard register in which to store the argument.
5548 MODE is the argument's machine mode.
5549 TYPE is the data type of the argument (as a tree).
5550 This is null for libcalls where that information may
5551 not be available.
5552 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5553 the preceding args and about the function being called.
5554 NAMED is nonzero if this argument is a named parameter
5555 (otherwise it is an extra parameter matching an ellipsis).
5557 On Alpha the first 6 words of args are normally in registers
5558 and the rest are pushed. */
5561 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5562 int named ATTRIBUTE_UNUSED)
5564 int basereg;
5565 int num_args;
5567 /* Don't get confused and pass small structures in FP registers. */
5568 if (type && AGGREGATE_TYPE_P (type))
5569 basereg = 16;
5570 else
5572 #ifdef ENABLE_CHECKING
5573 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5574 values here. */
5575 gcc_assert (!COMPLEX_MODE_P (mode));
5576 #endif
5578 /* Set up defaults for FP operands passed in FP registers, and
5579 integral operands passed in integer registers. */
5580 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5581 basereg = 32 + 16;
5582 else
5583 basereg = 16;
5586 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5587 the three platforms, so we can't avoid conditional compilation. */
5588 #if TARGET_ABI_OPEN_VMS
5590 if (mode == VOIDmode)
5591 return alpha_arg_info_reg_val (cum);
5593 num_args = cum.num_args;
5594 if (num_args >= 6
5595 || targetm.calls.must_pass_in_stack (mode, type))
5596 return NULL_RTX;
5598 #elif TARGET_ABI_UNICOSMK
5600 int size;
5602 /* If this is the last argument, generate the call info word (CIW). */
5603 /* ??? We don't include the caller's line number in the CIW because
5604 I don't know how to determine it if debug infos are turned off. */
5605 if (mode == VOIDmode)
5607 int i;
5608 HOST_WIDE_INT lo;
5609 HOST_WIDE_INT hi;
5610 rtx ciw;
5612 lo = 0;
5614 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5615 if (cum.reg_args_type[i])
5616 lo |= (1 << (7 - i));
5618 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5619 lo |= 7;
5620 else
5621 lo |= cum.num_reg_words;
5623 #if HOST_BITS_PER_WIDE_INT == 32
5624 hi = (cum.num_args << 20) | cum.num_arg_words;
5625 #else
5626 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5627 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5628 hi = 0;
5629 #endif
5630 ciw = immed_double_const (lo, hi, DImode);
5632 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5633 UNSPEC_UMK_LOAD_CIW);
5636 size = ALPHA_ARG_SIZE (mode, type, named);
5637 num_args = cum.num_reg_words;
5638 if (cum.force_stack
5639 || cum.num_reg_words + size > 6
5640 || targetm.calls.must_pass_in_stack (mode, type))
5641 return NULL_RTX;
5642 else if (type && TYPE_MODE (type) == BLKmode)
5644 rtx reg1, reg2;
5646 reg1 = gen_rtx_REG (DImode, num_args + 16);
5647 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5649 /* The argument fits in two registers. Note that we still need to
5650 reserve a register for empty structures. */
5651 if (size == 0)
5652 return NULL_RTX;
5653 else if (size == 1)
5654 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5655 else
5657 reg2 = gen_rtx_REG (DImode, num_args + 17);
5658 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5659 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5663 #elif TARGET_ABI_OSF
5665 if (cum >= 6)
5666 return NULL_RTX;
5667 num_args = cum;
5669 /* VOID is passed as a special flag for "last argument". */
5670 if (type == void_type_node)
5671 basereg = 16;
5672 else if (targetm.calls.must_pass_in_stack (mode, type))
5673 return NULL_RTX;
5675 #else
5676 #error Unhandled ABI
5677 #endif
5679 return gen_rtx_REG (mode, num_args + basereg);
5682 static int
5683 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5684 enum machine_mode mode ATTRIBUTE_UNUSED,
5685 tree type ATTRIBUTE_UNUSED,
5686 bool named ATTRIBUTE_UNUSED)
5688 int words = 0;
5690 #if TARGET_ABI_OPEN_VMS
5691 if (cum->num_args < 6
5692 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5693 words = 6 - cum->num_args;
5694 #elif TARGET_ABI_UNICOSMK
5695 /* Never any split arguments. */
5696 #elif TARGET_ABI_OSF
5697 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5698 words = 6 - *cum;
5699 #else
5700 #error Unhandled ABI
5701 #endif
5703 return words * UNITS_PER_WORD;
5707 /* Return true if TYPE must be returned in memory, instead of in registers. */
5709 static bool
5710 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5712 enum machine_mode mode = VOIDmode;
5713 int size;
5715 if (type)
5717 mode = TYPE_MODE (type);
5719 /* All aggregates are returned in memory. */
5720 if (AGGREGATE_TYPE_P (type))
5721 return true;
5724 size = GET_MODE_SIZE (mode);
5725 switch (GET_MODE_CLASS (mode))
5727 case MODE_VECTOR_FLOAT:
5728 /* Pass all float vectors in memory, like an aggregate. */
5729 return true;
5731 case MODE_COMPLEX_FLOAT:
5732 /* We judge complex floats on the size of their element,
5733 not the size of the whole type. */
5734 size = GET_MODE_UNIT_SIZE (mode);
5735 break;
5737 case MODE_INT:
5738 case MODE_FLOAT:
5739 case MODE_COMPLEX_INT:
5740 case MODE_VECTOR_INT:
5741 break;
5743 default:
5744 /* ??? We get called on all sorts of random stuff from
5745 aggregate_value_p. We must return something, but it's not
5746 clear what's safe to return. Pretend it's a struct I
5747 guess. */
5748 return true;
5751 /* Otherwise types must fit in one register. */
5752 return size > UNITS_PER_WORD;
5755 /* Return true if TYPE should be passed by invisible reference. */
5757 static bool
5758 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5759 enum machine_mode mode,
5760 tree type ATTRIBUTE_UNUSED,
5761 bool named ATTRIBUTE_UNUSED)
5763 return mode == TFmode || mode == TCmode;
5766 /* Define how to find the value returned by a function. VALTYPE is the
5767 data type of the value (as a tree). If the precise function being
5768 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5769 MODE is set instead of VALTYPE for libcalls.
5771 On Alpha the value is found in $0 for integer functions and
5772 $f0 for floating-point functions. */
5775 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5776 enum machine_mode mode)
5778 unsigned int regnum, dummy;
5779 enum mode_class class;
5781 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5783 if (valtype)
5784 mode = TYPE_MODE (valtype);
5786 class = GET_MODE_CLASS (mode);
5787 switch (class)
5789 case MODE_INT:
5790 PROMOTE_MODE (mode, dummy, valtype);
5791 /* FALLTHRU */
5793 case MODE_COMPLEX_INT:
5794 case MODE_VECTOR_INT:
5795 regnum = 0;
5796 break;
5798 case MODE_FLOAT:
5799 regnum = 32;
5800 break;
5802 case MODE_COMPLEX_FLOAT:
5804 enum machine_mode cmode = GET_MODE_INNER (mode);
5806 return gen_rtx_PARALLEL
5807 (VOIDmode,
5808 gen_rtvec (2,
5809 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5810 const0_rtx),
5811 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5812 GEN_INT (GET_MODE_SIZE (cmode)))));
5815 default:
5816 gcc_unreachable ();
5819 return gen_rtx_REG (mode, regnum);
5822 /* TCmode complex values are passed by invisible reference. We
5823 should not split these values. */
5825 static bool
5826 alpha_split_complex_arg (tree type)
5828 return TYPE_MODE (type) != TCmode;
5831 static tree
5832 alpha_build_builtin_va_list (void)
5834 tree base, ofs, space, record, type_decl;
5836 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5837 return ptr_type_node;
5839 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5840 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5841 TREE_CHAIN (record) = type_decl;
5842 TYPE_NAME (record) = type_decl;
5844 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5846 /* Dummy field to prevent alignment warnings. */
5847 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5848 DECL_FIELD_CONTEXT (space) = record;
5849 DECL_ARTIFICIAL (space) = 1;
5850 DECL_IGNORED_P (space) = 1;
5852 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5853 integer_type_node);
5854 DECL_FIELD_CONTEXT (ofs) = record;
5855 TREE_CHAIN (ofs) = space;
5857 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5858 ptr_type_node);
5859 DECL_FIELD_CONTEXT (base) = record;
5860 TREE_CHAIN (base) = ofs;
5862 TYPE_FIELDS (record) = base;
5863 layout_type (record);
5865 va_list_gpr_counter_field = ofs;
5866 return record;
5869 #if TARGET_ABI_OSF
5870 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5871 and constant additions. */
5873 static tree
5874 va_list_skip_additions (tree lhs)
5876 tree rhs, stmt;
5878 if (TREE_CODE (lhs) != SSA_NAME)
5879 return lhs;
5881 for (;;)
5883 stmt = SSA_NAME_DEF_STMT (lhs);
5885 if (TREE_CODE (stmt) == PHI_NODE)
5886 return stmt;
5888 if (TREE_CODE (stmt) != MODIFY_EXPR
5889 || TREE_OPERAND (stmt, 0) != lhs)
5890 return lhs;
5892 rhs = TREE_OPERAND (stmt, 1);
5893 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5894 rhs = TREE_OPERAND (rhs, 0);
5896 if ((TREE_CODE (rhs) != NOP_EXPR
5897 && TREE_CODE (rhs) != CONVERT_EXPR
5898 && (TREE_CODE (rhs) != PLUS_EXPR
5899 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5900 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5901 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5902 return rhs;
5904 lhs = TREE_OPERAND (rhs, 0);
5908 /* Check if LHS = RHS statement is
5909 LHS = *(ap.__base + ap.__offset + cst)
5911 LHS = *(ap.__base
5912 + ((ap.__offset + cst <= 47)
5913 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5914 If the former, indicate that GPR registers are needed,
5915 if the latter, indicate that FPR registers are needed.
5916 On alpha, cfun->va_list_gpr_size is used as size of the needed
5917 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5918 GPR registers are needed and bit 1 set if FPR registers are needed.
5919 Return true if va_list references should not be scanned for the current
5920 statement. */
5922 static bool
5923 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5925 tree base, offset, arg1, arg2;
5926 int offset_arg = 1;
5928 if (TREE_CODE (rhs) != INDIRECT_REF
5929 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5930 return false;
5932 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5933 if (lhs == NULL_TREE
5934 || TREE_CODE (lhs) != PLUS_EXPR)
5935 return false;
5937 base = TREE_OPERAND (lhs, 0);
5938 if (TREE_CODE (base) == SSA_NAME)
5939 base = va_list_skip_additions (base);
5941 if (TREE_CODE (base) != COMPONENT_REF
5942 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5944 base = TREE_OPERAND (lhs, 0);
5945 if (TREE_CODE (base) == SSA_NAME)
5946 base = va_list_skip_additions (base);
5948 if (TREE_CODE (base) != COMPONENT_REF
5949 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5950 return false;
5952 offset_arg = 0;
5955 base = get_base_address (base);
5956 if (TREE_CODE (base) != VAR_DECL
5957 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5958 return false;
5960 offset = TREE_OPERAND (lhs, offset_arg);
5961 if (TREE_CODE (offset) == SSA_NAME)
5962 offset = va_list_skip_additions (offset);
5964 if (TREE_CODE (offset) == PHI_NODE)
5966 HOST_WIDE_INT sub;
5968 if (PHI_NUM_ARGS (offset) != 2)
5969 goto escapes;
5971 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5972 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5973 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5975 tree tem = arg1;
5976 arg1 = arg2;
5977 arg2 = tem;
5979 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5980 goto escapes;
5982 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5983 goto escapes;
5985 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5986 if (TREE_CODE (arg2) == MINUS_EXPR)
5987 sub = -sub;
5988 if (sub < -48 || sub > -32)
5989 goto escapes;
5991 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5992 if (arg1 != arg2)
5993 goto escapes;
5995 if (TREE_CODE (arg1) == SSA_NAME)
5996 arg1 = va_list_skip_additions (arg1);
5998 if (TREE_CODE (arg1) != COMPONENT_REF
5999 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6000 || get_base_address (arg1) != base)
6001 goto escapes;
6003 /* Need floating point regs. */
6004 cfun->va_list_fpr_size |= 2;
6006 else if (TREE_CODE (offset) != COMPONENT_REF
6007 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6008 || get_base_address (offset) != base)
6009 goto escapes;
6010 else
6011 /* Need general regs. */
6012 cfun->va_list_fpr_size |= 1;
6013 return false;
6015 escapes:
6016 si->va_list_escapes = true;
6017 return false;
6019 #endif
6021 /* Perform any needed actions needed for a function that is receiving a
6022 variable number of arguments. */
6024 static void
6025 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6026 tree type, int *pretend_size, int no_rtl)
6028 CUMULATIVE_ARGS cum = *pcum;
6030 /* Skip the current argument. */
6031 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6033 #if TARGET_ABI_UNICOSMK
6034 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6035 arguments on the stack. Unfortunately, it doesn't always store the first
6036 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6037 with stdargs as we always have at least one named argument there. */
6038 if (cum.num_reg_words < 6)
6040 if (!no_rtl)
6042 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6043 emit_insn (gen_arg_home_umk ());
6045 *pretend_size = 0;
6047 #elif TARGET_ABI_OPEN_VMS
6048 /* For VMS, we allocate space for all 6 arg registers plus a count.
6050 However, if NO registers need to be saved, don't allocate any space.
6051 This is not only because we won't need the space, but because AP
6052 includes the current_pretend_args_size and we don't want to mess up
6053 any ap-relative addresses already made. */
6054 if (cum.num_args < 6)
6056 if (!no_rtl)
6058 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6059 emit_insn (gen_arg_home ());
6061 *pretend_size = 7 * UNITS_PER_WORD;
6063 #else
6064 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6065 only push those that are remaining. However, if NO registers need to
6066 be saved, don't allocate any space. This is not only because we won't
6067 need the space, but because AP includes the current_pretend_args_size
6068 and we don't want to mess up any ap-relative addresses already made.
6070 If we are not to use the floating-point registers, save the integer
6071 registers where we would put the floating-point registers. This is
6072 not the most efficient way to implement varargs with just one register
6073 class, but it isn't worth doing anything more efficient in this rare
6074 case. */
6075 if (cum >= 6)
6076 return;
6078 if (!no_rtl)
6080 int count, set = get_varargs_alias_set ();
6081 rtx tmp;
6083 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6084 if (count > 6 - cum)
6085 count = 6 - cum;
6087 /* Detect whether integer registers or floating-point registers
6088 are needed by the detected va_arg statements. See above for
6089 how these values are computed. Note that the "escape" value
6090 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6091 these bits set. */
6092 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6094 if (cfun->va_list_fpr_size & 1)
6096 tmp = gen_rtx_MEM (BLKmode,
6097 plus_constant (virtual_incoming_args_rtx,
6098 (cum + 6) * UNITS_PER_WORD));
6099 MEM_NOTRAP_P (tmp) = 1;
6100 set_mem_alias_set (tmp, set);
6101 move_block_from_reg (16 + cum, tmp, count);
6104 if (cfun->va_list_fpr_size & 2)
6106 tmp = gen_rtx_MEM (BLKmode,
6107 plus_constant (virtual_incoming_args_rtx,
6108 cum * UNITS_PER_WORD));
6109 MEM_NOTRAP_P (tmp) = 1;
6110 set_mem_alias_set (tmp, set);
6111 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6114 *pretend_size = 12 * UNITS_PER_WORD;
6115 #endif
6118 void
6119 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6121 HOST_WIDE_INT offset;
6122 tree t, offset_field, base_field;
6124 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6125 return;
6127 if (TARGET_ABI_UNICOSMK)
6128 std_expand_builtin_va_start (valist, nextarg);
6130 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6131 up by 48, storing fp arg registers in the first 48 bytes, and the
6132 integer arg registers in the next 48 bytes. This is only done,
6133 however, if any integer registers need to be stored.
6135 If no integer registers need be stored, then we must subtract 48
6136 in order to account for the integer arg registers which are counted
6137 in argsize above, but which are not actually stored on the stack.
6138 Must further be careful here about structures straddling the last
6139 integer argument register; that futzes with pretend_args_size,
6140 which changes the meaning of AP. */
6142 if (NUM_ARGS < 6)
6143 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6144 else
6145 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6147 if (TARGET_ABI_OPEN_VMS)
6149 nextarg = plus_constant (nextarg, offset);
6150 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6151 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6152 make_tree (ptr_type_node, nextarg));
6153 TREE_SIDE_EFFECTS (t) = 1;
6155 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6157 else
6159 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6160 offset_field = TREE_CHAIN (base_field);
6162 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6163 valist, base_field, NULL_TREE);
6164 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6165 valist, offset_field, NULL_TREE);
6167 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6168 t = build2 (PLUS_EXPR, ptr_type_node, t,
6169 build_int_cst (NULL_TREE, offset));
6170 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6171 TREE_SIDE_EFFECTS (t) = 1;
6172 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6174 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6175 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6176 TREE_SIDE_EFFECTS (t) = 1;
6177 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6181 static tree
6182 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6184 tree type_size, ptr_type, addend, t, addr, internal_post;
6186 /* If the type could not be passed in registers, skip the block
6187 reserved for the registers. */
6188 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6190 t = build_int_cst (TREE_TYPE (offset), 6*8);
6191 t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
6192 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6193 gimplify_and_add (t, pre_p);
6196 addend = offset;
6197 ptr_type = build_pointer_type (type);
6199 if (TREE_CODE (type) == COMPLEX_TYPE)
6201 tree real_part, imag_part, real_temp;
6203 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6204 offset, pre_p);
6206 /* Copy the value into a new temporary, lest the formal temporary
6207 be reused out from under us. */
6208 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6210 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6211 offset, pre_p);
6213 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6215 else if (TREE_CODE (type) == REAL_TYPE)
6217 tree fpaddend, cond, fourtyeight;
6219 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6220 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6221 addend, fourtyeight);
6222 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6223 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6224 fpaddend, addend);
6227 /* Build the final address and force that value into a temporary. */
6228 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6229 fold_convert (ptr_type, addend));
6230 internal_post = NULL;
6231 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6232 append_to_statement_list (internal_post, pre_p);
6234 /* Update the offset field. */
6235 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6236 if (type_size == NULL || TREE_OVERFLOW (type_size))
6237 t = size_zero_node;
6238 else
6240 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6241 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6242 t = size_binop (MULT_EXPR, t, size_int (8));
6244 t = fold_convert (TREE_TYPE (offset), t);
6245 t = build2 (MODIFY_EXPR, void_type_node, offset,
6246 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6247 gimplify_and_add (t, pre_p);
6249 return build_va_arg_indirect_ref (addr);
6252 static tree
6253 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6255 tree offset_field, base_field, offset, base, t, r;
6256 bool indirect;
6258 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6259 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6261 base_field = TYPE_FIELDS (va_list_type_node);
6262 offset_field = TREE_CHAIN (base_field);
6263 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6264 valist, base_field, NULL_TREE);
6265 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6266 valist, offset_field, NULL_TREE);
6268 /* Pull the fields of the structure out into temporaries. Since we never
6269 modify the base field, we can use a formal temporary. Sign-extend the
6270 offset field so that it's the proper width for pointer arithmetic. */
6271 base = get_formal_tmp_var (base_field, pre_p);
6273 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6274 offset = get_initialized_tmp_var (t, pre_p, NULL);
6276 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6277 if (indirect)
6278 type = build_pointer_type (type);
6280 /* Find the value. Note that this will be a stable indirection, or
6281 a composite of stable indirections in the case of complex. */
6282 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6284 /* Stuff the offset temporary back into its field. */
6285 t = build2 (MODIFY_EXPR, void_type_node, offset_field,
6286 fold_convert (TREE_TYPE (offset_field), offset));
6287 gimplify_and_add (t, pre_p);
6289 if (indirect)
6290 r = build_va_arg_indirect_ref (r);
6292 return r;
6295 /* Builtins. */
6297 enum alpha_builtin
6299 ALPHA_BUILTIN_CMPBGE,
6300 ALPHA_BUILTIN_EXTBL,
6301 ALPHA_BUILTIN_EXTWL,
6302 ALPHA_BUILTIN_EXTLL,
6303 ALPHA_BUILTIN_EXTQL,
6304 ALPHA_BUILTIN_EXTWH,
6305 ALPHA_BUILTIN_EXTLH,
6306 ALPHA_BUILTIN_EXTQH,
6307 ALPHA_BUILTIN_INSBL,
6308 ALPHA_BUILTIN_INSWL,
6309 ALPHA_BUILTIN_INSLL,
6310 ALPHA_BUILTIN_INSQL,
6311 ALPHA_BUILTIN_INSWH,
6312 ALPHA_BUILTIN_INSLH,
6313 ALPHA_BUILTIN_INSQH,
6314 ALPHA_BUILTIN_MSKBL,
6315 ALPHA_BUILTIN_MSKWL,
6316 ALPHA_BUILTIN_MSKLL,
6317 ALPHA_BUILTIN_MSKQL,
6318 ALPHA_BUILTIN_MSKWH,
6319 ALPHA_BUILTIN_MSKLH,
6320 ALPHA_BUILTIN_MSKQH,
6321 ALPHA_BUILTIN_UMULH,
6322 ALPHA_BUILTIN_ZAP,
6323 ALPHA_BUILTIN_ZAPNOT,
6324 ALPHA_BUILTIN_AMASK,
6325 ALPHA_BUILTIN_IMPLVER,
6326 ALPHA_BUILTIN_RPCC,
6327 ALPHA_BUILTIN_THREAD_POINTER,
6328 ALPHA_BUILTIN_SET_THREAD_POINTER,
6330 /* TARGET_MAX */
6331 ALPHA_BUILTIN_MINUB8,
6332 ALPHA_BUILTIN_MINSB8,
6333 ALPHA_BUILTIN_MINUW4,
6334 ALPHA_BUILTIN_MINSW4,
6335 ALPHA_BUILTIN_MAXUB8,
6336 ALPHA_BUILTIN_MAXSB8,
6337 ALPHA_BUILTIN_MAXUW4,
6338 ALPHA_BUILTIN_MAXSW4,
6339 ALPHA_BUILTIN_PERR,
6340 ALPHA_BUILTIN_PKLB,
6341 ALPHA_BUILTIN_PKWB,
6342 ALPHA_BUILTIN_UNPKBL,
6343 ALPHA_BUILTIN_UNPKBW,
6345 /* TARGET_CIX */
6346 ALPHA_BUILTIN_CTTZ,
6347 ALPHA_BUILTIN_CTLZ,
6348 ALPHA_BUILTIN_CTPOP,
6350 ALPHA_BUILTIN_max
6353 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6354 CODE_FOR_builtin_cmpbge,
6355 CODE_FOR_builtin_extbl,
6356 CODE_FOR_builtin_extwl,
6357 CODE_FOR_builtin_extll,
6358 CODE_FOR_builtin_extql,
6359 CODE_FOR_builtin_extwh,
6360 CODE_FOR_builtin_extlh,
6361 CODE_FOR_builtin_extqh,
6362 CODE_FOR_builtin_insbl,
6363 CODE_FOR_builtin_inswl,
6364 CODE_FOR_builtin_insll,
6365 CODE_FOR_builtin_insql,
6366 CODE_FOR_builtin_inswh,
6367 CODE_FOR_builtin_inslh,
6368 CODE_FOR_builtin_insqh,
6369 CODE_FOR_builtin_mskbl,
6370 CODE_FOR_builtin_mskwl,
6371 CODE_FOR_builtin_mskll,
6372 CODE_FOR_builtin_mskql,
6373 CODE_FOR_builtin_mskwh,
6374 CODE_FOR_builtin_msklh,
6375 CODE_FOR_builtin_mskqh,
6376 CODE_FOR_umuldi3_highpart,
6377 CODE_FOR_builtin_zap,
6378 CODE_FOR_builtin_zapnot,
6379 CODE_FOR_builtin_amask,
6380 CODE_FOR_builtin_implver,
6381 CODE_FOR_builtin_rpcc,
6382 CODE_FOR_load_tp,
6383 CODE_FOR_set_tp,
6385 /* TARGET_MAX */
6386 CODE_FOR_builtin_minub8,
6387 CODE_FOR_builtin_minsb8,
6388 CODE_FOR_builtin_minuw4,
6389 CODE_FOR_builtin_minsw4,
6390 CODE_FOR_builtin_maxub8,
6391 CODE_FOR_builtin_maxsb8,
6392 CODE_FOR_builtin_maxuw4,
6393 CODE_FOR_builtin_maxsw4,
6394 CODE_FOR_builtin_perr,
6395 CODE_FOR_builtin_pklb,
6396 CODE_FOR_builtin_pkwb,
6397 CODE_FOR_builtin_unpkbl,
6398 CODE_FOR_builtin_unpkbw,
6400 /* TARGET_CIX */
6401 CODE_FOR_ctzdi2,
6402 CODE_FOR_clzdi2,
6403 CODE_FOR_popcountdi2
6406 struct alpha_builtin_def
6408 const char *name;
6409 enum alpha_builtin code;
6410 unsigned int target_mask;
6411 bool is_const;
6414 static struct alpha_builtin_def const zero_arg_builtins[] = {
6415 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6416 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6419 static struct alpha_builtin_def const one_arg_builtins[] = {
6420 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6421 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6422 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6423 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6424 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6425 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6426 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6427 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6430 static struct alpha_builtin_def const two_arg_builtins[] = {
6431 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6432 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6433 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6434 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6435 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6436 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6437 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6438 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6439 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6440 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6441 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6442 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6443 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6444 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6445 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6446 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6447 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6448 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6449 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6450 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6451 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6452 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6453 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6454 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6455 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6456 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6457 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6458 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6459 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6460 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6461 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6462 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6463 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6464 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6467 static GTY(()) tree alpha_v8qi_u;
6468 static GTY(()) tree alpha_v8qi_s;
6469 static GTY(()) tree alpha_v4hi_u;
6470 static GTY(()) tree alpha_v4hi_s;
6472 static void
6473 alpha_init_builtins (void)
6475 const struct alpha_builtin_def *p;
6476 tree dimode_integer_type_node;
6477 tree ftype, attrs[2];
6478 size_t i;
6480 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6482 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6483 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6485 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6487 p = zero_arg_builtins;
6488 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6489 if ((target_flags & p->target_mask) == p->target_mask)
6490 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6491 NULL, attrs[p->is_const]);
6493 ftype = build_function_type_list (dimode_integer_type_node,
6494 dimode_integer_type_node, NULL_TREE);
6496 p = one_arg_builtins;
6497 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6498 if ((target_flags & p->target_mask) == p->target_mask)
6499 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6500 NULL, attrs[p->is_const]);
6502 ftype = build_function_type_list (dimode_integer_type_node,
6503 dimode_integer_type_node,
6504 dimode_integer_type_node, NULL_TREE);
6506 p = two_arg_builtins;
6507 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6508 if ((target_flags & p->target_mask) == p->target_mask)
6509 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6510 NULL, attrs[p->is_const]);
6512 ftype = build_function_type (ptr_type_node, void_list_node);
6513 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6514 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6515 NULL, attrs[0]);
6517 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6518 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6519 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6520 NULL, attrs[0]);
6522 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6523 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6524 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6525 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6528 /* Expand an expression EXP that calls a built-in function,
6529 with result going to TARGET if that's convenient
6530 (and in mode MODE if that's convenient).
6531 SUBTARGET may be used as the target for computing one of EXP's operands.
6532 IGNORE is nonzero if the value is to be ignored. */
6534 static rtx
6535 alpha_expand_builtin (tree exp, rtx target,
6536 rtx subtarget ATTRIBUTE_UNUSED,
6537 enum machine_mode mode ATTRIBUTE_UNUSED,
6538 int ignore ATTRIBUTE_UNUSED)
6540 #define MAX_ARGS 2
6542 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6543 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6544 tree arglist = TREE_OPERAND (exp, 1);
6545 enum insn_code icode;
6546 rtx op[MAX_ARGS], pat;
6547 int arity;
6548 bool nonvoid;
6550 if (fcode >= ALPHA_BUILTIN_max)
6551 internal_error ("bad builtin fcode");
6552 icode = code_for_builtin[fcode];
6553 if (icode == 0)
6554 internal_error ("bad builtin fcode");
6556 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6558 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6559 arglist;
6560 arglist = TREE_CHAIN (arglist), arity++)
6562 const struct insn_operand_data *insn_op;
6564 tree arg = TREE_VALUE (arglist);
6565 if (arg == error_mark_node)
6566 return NULL_RTX;
6567 if (arity > MAX_ARGS)
6568 return NULL_RTX;
6570 insn_op = &insn_data[icode].operand[arity + nonvoid];
6572 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6574 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6575 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6578 if (nonvoid)
6580 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6581 if (!target
6582 || GET_MODE (target) != tmode
6583 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6584 target = gen_reg_rtx (tmode);
6587 switch (arity)
6589 case 0:
6590 pat = GEN_FCN (icode) (target);
6591 break;
6592 case 1:
6593 if (nonvoid)
6594 pat = GEN_FCN (icode) (target, op[0]);
6595 else
6596 pat = GEN_FCN (icode) (op[0]);
6597 break;
6598 case 2:
6599 pat = GEN_FCN (icode) (target, op[0], op[1]);
6600 break;
6601 default:
6602 gcc_unreachable ();
6604 if (!pat)
6605 return NULL_RTX;
6606 emit_insn (pat);
6608 if (nonvoid)
6609 return target;
6610 else
6611 return const0_rtx;
6615 /* Several bits below assume HWI >= 64 bits. This should be enforced
6616 by config.gcc. */
6617 #if HOST_BITS_PER_WIDE_INT < 64
6618 # error "HOST_WIDE_INT too small"
6619 #endif
6621 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6622 with an 8 bit output vector. OPINT contains the integer operands; bit N
6623 of OP_CONST is set if OPINT[N] is valid. */
6625 static tree
6626 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6628 if (op_const == 3)
6630 int i, val;
6631 for (i = 0, val = 0; i < 8; ++i)
6633 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6634 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6635 if (c0 >= c1)
6636 val |= 1 << i;
6638 return build_int_cst (long_integer_type_node, val);
6640 else if (op_const == 2 && opint[1] == 0)
6641 return build_int_cst (long_integer_type_node, 0xff);
6642 return NULL;
6645 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6646 specialized form of an AND operation. Other byte manipulation instructions
6647 are defined in terms of this instruction, so this is also used as a
6648 subroutine for other builtins.
6650 OP contains the tree operands; OPINT contains the extracted integer values.
6651 Bit N of OP_CONST it set if OPINT[N] is valid. OP may be null if only
6652 OPINT may be considered. */
6654 static tree
6655 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6656 long op_const)
6658 if (op_const & 2)
6660 unsigned HOST_WIDE_INT mask = 0;
6661 int i;
6663 for (i = 0; i < 8; ++i)
6664 if ((opint[1] >> i) & 1)
6665 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6667 if (op_const & 1)
6668 return build_int_cst (long_integer_type_node, opint[0] & mask);
6670 if (op)
6671 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6672 build_int_cst (long_integer_type_node, mask)));
6674 else if ((op_const & 1) && opint[0] == 0)
6675 return build_int_cst (long_integer_type_node, 0);
6676 return NULL;
6679 /* Fold the builtins for the EXT family of instructions. */
6681 static tree
6682 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6683 long op_const, unsigned HOST_WIDE_INT bytemask,
6684 bool is_high)
6686 long zap_const = 2;
6687 tree *zap_op = NULL;
6689 if (op_const & 2)
6691 unsigned HOST_WIDE_INT loc;
6693 loc = opint[1] & 7;
6694 if (BYTES_BIG_ENDIAN)
6695 loc ^= 7;
6696 loc *= 8;
6698 if (loc != 0)
6700 if (op_const & 1)
6702 unsigned HOST_WIDE_INT temp = opint[0];
6703 if (is_high)
6704 temp <<= loc;
6705 else
6706 temp >>= loc;
6707 opint[0] = temp;
6708 zap_const = 3;
6711 else
6712 zap_op = op;
6715 opint[1] = bytemask;
6716 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6719 /* Fold the builtins for the INS family of instructions. */
6721 static tree
6722 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6723 long op_const, unsigned HOST_WIDE_INT bytemask,
6724 bool is_high)
6726 if ((op_const & 1) && opint[0] == 0)
6727 return build_int_cst (long_integer_type_node, 0);
6729 if (op_const & 2)
6731 unsigned HOST_WIDE_INT temp, loc, byteloc;
6732 tree *zap_op = NULL;
6734 loc = opint[1] & 7;
6735 if (BYTES_BIG_ENDIAN)
6736 loc ^= 7;
6737 bytemask <<= loc;
6739 temp = opint[0];
6740 if (is_high)
6742 byteloc = (64 - (loc * 8)) & 0x3f;
6743 if (byteloc == 0)
6744 zap_op = op;
6745 else
6746 temp >>= byteloc;
6747 bytemask >>= 8;
6749 else
6751 byteloc = loc * 8;
6752 if (byteloc == 0)
6753 zap_op = op;
6754 else
6755 temp <<= byteloc;
6758 opint[0] = temp;
6759 opint[1] = bytemask;
6760 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6763 return NULL;
6766 static tree
6767 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6768 long op_const, unsigned HOST_WIDE_INT bytemask,
6769 bool is_high)
6771 if (op_const & 2)
6773 unsigned HOST_WIDE_INT loc;
6775 loc = opint[1] & 7;
6776 if (BYTES_BIG_ENDIAN)
6777 loc ^= 7;
6778 bytemask <<= loc;
6780 if (is_high)
6781 bytemask >>= 8;
6783 opint[1] = bytemask ^ 0xff;
6786 return alpha_fold_builtin_zapnot (op, opint, op_const);
6789 static tree
6790 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6792 switch (op_const)
6794 case 3:
6796 unsigned HOST_WIDE_INT l;
6797 HOST_WIDE_INT h;
6799 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6801 #if HOST_BITS_PER_WIDE_INT > 64
6802 # error fixme
6803 #endif
6805 return build_int_cst (long_integer_type_node, h);
6808 case 1:
6809 opint[1] = opint[0];
6810 /* FALLTHRU */
6811 case 2:
6812 /* Note that (X*1) >> 64 == 0. */
6813 if (opint[1] == 0 || opint[1] == 1)
6814 return build_int_cst (long_integer_type_node, 0);
6815 break;
6817 return NULL;
6820 static tree
6821 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6823 tree op0 = fold_convert (vtype, op[0]);
6824 tree op1 = fold_convert (vtype, op[1]);
6825 tree val = fold (build2 (code, vtype, op0, op1));
6826 return fold_convert (long_integer_type_node, val);
6829 static tree
6830 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6832 unsigned HOST_WIDE_INT temp = 0;
6833 int i;
6835 if (op_const != 3)
6836 return NULL;
6838 for (i = 0; i < 8; ++i)
6840 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6841 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6842 if (a >= b)
6843 temp += a - b;
6844 else
6845 temp += b - a;
6848 return build_int_cst (long_integer_type_node, temp);
6851 static tree
6852 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6854 unsigned HOST_WIDE_INT temp;
6856 if (op_const == 0)
6857 return NULL;
6859 temp = opint[0] & 0xff;
6860 temp |= (opint[0] >> 24) & 0xff00;
6862 return build_int_cst (long_integer_type_node, temp);
6865 static tree
6866 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6868 unsigned HOST_WIDE_INT temp;
6870 if (op_const == 0)
6871 return NULL;
6873 temp = opint[0] & 0xff;
6874 temp |= (opint[0] >> 8) & 0xff00;
6875 temp |= (opint[0] >> 16) & 0xff0000;
6876 temp |= (opint[0] >> 24) & 0xff000000;
6878 return build_int_cst (long_integer_type_node, temp);
6881 static tree
6882 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6884 unsigned HOST_WIDE_INT temp;
6886 if (op_const == 0)
6887 return NULL;
6889 temp = opint[0] & 0xff;
6890 temp |= (opint[0] & 0xff00) << 24;
6892 return build_int_cst (long_integer_type_node, temp);
6895 static tree
6896 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6898 unsigned HOST_WIDE_INT temp;
6900 if (op_const == 0)
6901 return NULL;
6903 temp = opint[0] & 0xff;
6904 temp |= (opint[0] & 0x0000ff00) << 8;
6905 temp |= (opint[0] & 0x00ff0000) << 16;
6906 temp |= (opint[0] & 0xff000000) << 24;
6908 return build_int_cst (long_integer_type_node, temp);
6911 static tree
6912 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6914 unsigned HOST_WIDE_INT temp;
6916 if (op_const == 0)
6917 return NULL;
6919 if (opint[0] == 0)
6920 temp = 64;
6921 else
6922 temp = exact_log2 (opint[0] & -opint[0]);
6924 return build_int_cst (long_integer_type_node, temp);
6927 static tree
6928 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6930 unsigned HOST_WIDE_INT temp;
6932 if (op_const == 0)
6933 return NULL;
6935 if (opint[0] == 0)
6936 temp = 64;
6937 else
6938 temp = 64 - floor_log2 (opint[0]) - 1;
6940 return build_int_cst (long_integer_type_node, temp);
6943 static tree
6944 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6946 unsigned HOST_WIDE_INT temp, op;
6948 if (op_const == 0)
6949 return NULL;
6951 op = opint[0];
6952 temp = 0;
6953 while (op)
6954 temp++, op &= op - 1;
6956 return build_int_cst (long_integer_type_node, temp);
6959 /* Fold one of our builtin functions. */
6961 static tree
6962 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6964 tree op[MAX_ARGS], t;
6965 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6966 long op_const = 0, arity = 0;
6968 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6970 tree arg = TREE_VALUE (t);
6971 if (arg == error_mark_node)
6972 return NULL;
6973 if (arity >= MAX_ARGS)
6974 return NULL;
6976 op[arity] = arg;
6977 opint[arity] = 0;
6978 if (TREE_CODE (arg) == INTEGER_CST)
6980 op_const |= 1L << arity;
6981 opint[arity] = int_cst_value (arg);
6985 switch (DECL_FUNCTION_CODE (fndecl))
6987 case ALPHA_BUILTIN_CMPBGE:
6988 return alpha_fold_builtin_cmpbge (opint, op_const);
6990 case ALPHA_BUILTIN_EXTBL:
6991 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6992 case ALPHA_BUILTIN_EXTWL:
6993 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6994 case ALPHA_BUILTIN_EXTLL:
6995 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6996 case ALPHA_BUILTIN_EXTQL:
6997 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6998 case ALPHA_BUILTIN_EXTWH:
6999 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7000 case ALPHA_BUILTIN_EXTLH:
7001 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7002 case ALPHA_BUILTIN_EXTQH:
7003 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7005 case ALPHA_BUILTIN_INSBL:
7006 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7007 case ALPHA_BUILTIN_INSWL:
7008 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7009 case ALPHA_BUILTIN_INSLL:
7010 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7011 case ALPHA_BUILTIN_INSQL:
7012 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7013 case ALPHA_BUILTIN_INSWH:
7014 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7015 case ALPHA_BUILTIN_INSLH:
7016 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7017 case ALPHA_BUILTIN_INSQH:
7018 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7020 case ALPHA_BUILTIN_MSKBL:
7021 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7022 case ALPHA_BUILTIN_MSKWL:
7023 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7024 case ALPHA_BUILTIN_MSKLL:
7025 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7026 case ALPHA_BUILTIN_MSKQL:
7027 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7028 case ALPHA_BUILTIN_MSKWH:
7029 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7030 case ALPHA_BUILTIN_MSKLH:
7031 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7032 case ALPHA_BUILTIN_MSKQH:
7033 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7035 case ALPHA_BUILTIN_UMULH:
7036 return alpha_fold_builtin_umulh (opint, op_const);
7038 case ALPHA_BUILTIN_ZAP:
7039 opint[1] ^= 0xff;
7040 /* FALLTHRU */
7041 case ALPHA_BUILTIN_ZAPNOT:
7042 return alpha_fold_builtin_zapnot (op, opint, op_const);
7044 case ALPHA_BUILTIN_MINUB8:
7045 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7046 case ALPHA_BUILTIN_MINSB8:
7047 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7048 case ALPHA_BUILTIN_MINUW4:
7049 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7050 case ALPHA_BUILTIN_MINSW4:
7051 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7052 case ALPHA_BUILTIN_MAXUB8:
7053 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7054 case ALPHA_BUILTIN_MAXSB8:
7055 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7056 case ALPHA_BUILTIN_MAXUW4:
7057 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7058 case ALPHA_BUILTIN_MAXSW4:
7059 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7061 case ALPHA_BUILTIN_PERR:
7062 return alpha_fold_builtin_perr (opint, op_const);
7063 case ALPHA_BUILTIN_PKLB:
7064 return alpha_fold_builtin_pklb (opint, op_const);
7065 case ALPHA_BUILTIN_PKWB:
7066 return alpha_fold_builtin_pkwb (opint, op_const);
7067 case ALPHA_BUILTIN_UNPKBL:
7068 return alpha_fold_builtin_unpkbl (opint, op_const);
7069 case ALPHA_BUILTIN_UNPKBW:
7070 return alpha_fold_builtin_unpkbw (opint, op_const);
7072 case ALPHA_BUILTIN_CTTZ:
7073 return alpha_fold_builtin_cttz (opint, op_const);
7074 case ALPHA_BUILTIN_CTLZ:
7075 return alpha_fold_builtin_ctlz (opint, op_const);
7076 case ALPHA_BUILTIN_CTPOP:
7077 return alpha_fold_builtin_ctpop (opint, op_const);
7079 case ALPHA_BUILTIN_AMASK:
7080 case ALPHA_BUILTIN_IMPLVER:
7081 case ALPHA_BUILTIN_RPCC:
7082 case ALPHA_BUILTIN_THREAD_POINTER:
7083 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7084 /* None of these are foldable at compile-time. */
7085 default:
7086 return NULL;
7090 /* This page contains routines that are used to determine what the function
7091 prologue and epilogue code will do and write them out. */
7093 /* Compute the size of the save area in the stack. */
7095 /* These variables are used for communication between the following functions.
7096 They indicate various things about the current function being compiled
7097 that are used to tell what kind of prologue, epilogue and procedure
7098 descriptor to generate. */
7100 /* Nonzero if we need a stack procedure. */
7101 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7102 static enum alpha_procedure_types alpha_procedure_type;
7104 /* Register number (either FP or SP) that is used to unwind the frame. */
7105 static int vms_unwind_regno;
7107 /* Register number used to save FP. We need not have one for RA since
7108 we don't modify it for register procedures. This is only defined
7109 for register frame procedures. */
7110 static int vms_save_fp_regno;
7112 /* Register number used to reference objects off our PV. */
7113 static int vms_base_regno;
7115 /* Compute register masks for saved registers. */
7117 static void
7118 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7120 unsigned long imask = 0;
7121 unsigned long fmask = 0;
7122 unsigned int i;
7124 /* When outputting a thunk, we don't have valid register life info,
7125 but assemble_start_function wants to output .frame and .mask
7126 directives. */
7127 if (current_function_is_thunk)
7129 *imaskP = 0;
7130 *fmaskP = 0;
7131 return;
7134 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7135 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7137 /* One for every register we have to save. */
7138 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7139 if (! fixed_regs[i] && ! call_used_regs[i]
7140 && regs_ever_live[i] && i != REG_RA
7141 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7143 if (i < 32)
7144 imask |= (1UL << i);
7145 else
7146 fmask |= (1UL << (i - 32));
7149 /* We need to restore these for the handler. */
7150 if (current_function_calls_eh_return)
7152 for (i = 0; ; ++i)
7154 unsigned regno = EH_RETURN_DATA_REGNO (i);
7155 if (regno == INVALID_REGNUM)
7156 break;
7157 imask |= 1UL << regno;
7161 /* If any register spilled, then spill the return address also. */
7162 /* ??? This is required by the Digital stack unwind specification
7163 and isn't needed if we're doing Dwarf2 unwinding. */
7164 if (imask || fmask || alpha_ra_ever_killed ())
7165 imask |= (1UL << REG_RA);
7167 *imaskP = imask;
7168 *fmaskP = fmask;
7172 alpha_sa_size (void)
7174 unsigned long mask[2];
7175 int sa_size = 0;
7176 int i, j;
7178 alpha_sa_mask (&mask[0], &mask[1]);
7180 if (TARGET_ABI_UNICOSMK)
7182 if (mask[0] || mask[1])
7183 sa_size = 14;
7185 else
7187 for (j = 0; j < 2; ++j)
7188 for (i = 0; i < 32; ++i)
7189 if ((mask[j] >> i) & 1)
7190 sa_size++;
7193 if (TARGET_ABI_UNICOSMK)
7195 /* We might not need to generate a frame if we don't make any calls
7196 (including calls to __T3E_MISMATCH if this is a vararg function),
7197 don't have any local variables which require stack slots, don't
7198 use alloca and have not determined that we need a frame for other
7199 reasons. */
7201 alpha_procedure_type
7202 = (sa_size || get_frame_size() != 0
7203 || current_function_outgoing_args_size
7204 || current_function_stdarg || current_function_calls_alloca
7205 || frame_pointer_needed)
7206 ? PT_STACK : PT_REGISTER;
7208 /* Always reserve space for saving callee-saved registers if we
7209 need a frame as required by the calling convention. */
7210 if (alpha_procedure_type == PT_STACK)
7211 sa_size = 14;
7213 else if (TARGET_ABI_OPEN_VMS)
7215 /* Start by assuming we can use a register procedure if we don't
7216 make any calls (REG_RA not used) or need to save any
7217 registers and a stack procedure if we do. */
7218 if ((mask[0] >> REG_RA) & 1)
7219 alpha_procedure_type = PT_STACK;
7220 else if (get_frame_size() != 0)
7221 alpha_procedure_type = PT_REGISTER;
7222 else
7223 alpha_procedure_type = PT_NULL;
7225 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7226 made the final decision on stack procedure vs register procedure. */
7227 if (alpha_procedure_type == PT_STACK)
7228 sa_size -= 2;
7230 /* Decide whether to refer to objects off our PV via FP or PV.
7231 If we need FP for something else or if we receive a nonlocal
7232 goto (which expects PV to contain the value), we must use PV.
7233 Otherwise, start by assuming we can use FP. */
7235 vms_base_regno
7236 = (frame_pointer_needed
7237 || current_function_has_nonlocal_label
7238 || alpha_procedure_type == PT_STACK
7239 || current_function_outgoing_args_size)
7240 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7242 /* If we want to copy PV into FP, we need to find some register
7243 in which to save FP. */
7245 vms_save_fp_regno = -1;
7246 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7247 for (i = 0; i < 32; i++)
7248 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7249 vms_save_fp_regno = i;
7251 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7252 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7253 else if (alpha_procedure_type == PT_NULL)
7254 vms_base_regno = REG_PV;
7256 /* Stack unwinding should be done via FP unless we use it for PV. */
7257 vms_unwind_regno = (vms_base_regno == REG_PV
7258 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7260 /* If this is a stack procedure, allow space for saving FP and RA. */
7261 if (alpha_procedure_type == PT_STACK)
7262 sa_size += 2;
7264 else
7266 /* Our size must be even (multiple of 16 bytes). */
7267 if (sa_size & 1)
7268 sa_size++;
7271 return sa_size * 8;
7274 /* Define the offset between two registers, one to be eliminated,
7275 and the other its replacement, at the start of a routine. */
7277 HOST_WIDE_INT
7278 alpha_initial_elimination_offset (unsigned int from,
7279 unsigned int to ATTRIBUTE_UNUSED)
7281 HOST_WIDE_INT ret;
7283 ret = alpha_sa_size ();
7284 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7286 switch (from)
7288 case FRAME_POINTER_REGNUM:
7289 break;
7291 case ARG_POINTER_REGNUM:
7292 ret += (ALPHA_ROUND (get_frame_size ()
7293 + current_function_pretend_args_size)
7294 - current_function_pretend_args_size);
7295 break;
7297 default:
7298 gcc_unreachable ();
7301 return ret;
7305 alpha_pv_save_size (void)
7307 alpha_sa_size ();
7308 return alpha_procedure_type == PT_STACK ? 8 : 0;
7312 alpha_using_fp (void)
7314 alpha_sa_size ();
7315 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7318 #if TARGET_ABI_OPEN_VMS
7320 const struct attribute_spec vms_attribute_table[] =
7322 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7323 { "overlaid", 0, 0, true, false, false, NULL },
7324 { "global", 0, 0, true, false, false, NULL },
7325 { "initialize", 0, 0, true, false, false, NULL },
7326 { NULL, 0, 0, false, false, false, NULL }
7329 #endif
7331 static int
7332 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7334 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7338 alpha_find_lo_sum_using_gp (rtx insn)
7340 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7343 static int
7344 alpha_does_function_need_gp (void)
7346 rtx insn;
7348 /* The GP being variable is an OSF abi thing. */
7349 if (! TARGET_ABI_OSF)
7350 return 0;
7352 /* We need the gp to load the address of __mcount. */
7353 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7354 return 1;
7356 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7357 if (current_function_is_thunk)
7358 return 1;
7360 /* The nonlocal receiver pattern assumes that the gp is valid for
7361 the nested function. Reasonable because it's almost always set
7362 correctly already. For the cases where that's wrong, make sure
7363 the nested function loads its gp on entry. */
7364 if (current_function_has_nonlocal_goto)
7365 return 1;
7367 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7368 Even if we are a static function, we still need to do this in case
7369 our address is taken and passed to something like qsort. */
7371 push_topmost_sequence ();
7372 insn = get_insns ();
7373 pop_topmost_sequence ();
7375 for (; insn; insn = NEXT_INSN (insn))
7376 if (INSN_P (insn)
7377 && GET_CODE (PATTERN (insn)) != USE
7378 && GET_CODE (PATTERN (insn)) != CLOBBER
7379 && get_attr_usegp (insn))
7380 return 1;
7382 return 0;
7386 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7387 sequences. */
7389 static rtx
7390 set_frame_related_p (void)
7392 rtx seq = get_insns ();
7393 rtx insn;
7395 end_sequence ();
7397 if (!seq)
7398 return NULL_RTX;
7400 if (INSN_P (seq))
7402 insn = seq;
7403 while (insn != NULL_RTX)
7405 RTX_FRAME_RELATED_P (insn) = 1;
7406 insn = NEXT_INSN (insn);
7408 seq = emit_insn (seq);
7410 else
7412 seq = emit_insn (seq);
7413 RTX_FRAME_RELATED_P (seq) = 1;
7415 return seq;
7418 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7420 /* Generates a store with the proper unwind info attached. VALUE is
7421 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7422 contains SP+FRAME_BIAS, and that is the unwind info that should be
7423 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7424 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7426 static void
7427 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7428 HOST_WIDE_INT base_ofs, rtx frame_reg)
7430 rtx addr, mem, insn;
7432 addr = plus_constant (base_reg, base_ofs);
7433 mem = gen_rtx_MEM (DImode, addr);
7434 set_mem_alias_set (mem, alpha_sr_alias_set);
7436 insn = emit_move_insn (mem, value);
7437 RTX_FRAME_RELATED_P (insn) = 1;
7439 if (frame_bias || value != frame_reg)
7441 if (frame_bias)
7443 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7444 mem = gen_rtx_MEM (DImode, addr);
7447 REG_NOTES (insn)
7448 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7449 gen_rtx_SET (VOIDmode, mem, frame_reg),
7450 REG_NOTES (insn));
7454 static void
7455 emit_frame_store (unsigned int regno, rtx base_reg,
7456 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7458 rtx reg = gen_rtx_REG (DImode, regno);
7459 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7462 /* Write function prologue. */
7464 /* On vms we have two kinds of functions:
7466 - stack frame (PROC_STACK)
7467 these are 'normal' functions with local vars and which are
7468 calling other functions
7469 - register frame (PROC_REGISTER)
7470 keeps all data in registers, needs no stack
7472 We must pass this to the assembler so it can generate the
7473 proper pdsc (procedure descriptor)
7474 This is done with the '.pdesc' command.
7476 On not-vms, we don't really differentiate between the two, as we can
7477 simply allocate stack without saving registers. */
7479 void
7480 alpha_expand_prologue (void)
7482 /* Registers to save. */
7483 unsigned long imask = 0;
7484 unsigned long fmask = 0;
7485 /* Stack space needed for pushing registers clobbered by us. */
7486 HOST_WIDE_INT sa_size;
7487 /* Complete stack size needed. */
7488 HOST_WIDE_INT frame_size;
7489 /* Offset from base reg to register save area. */
7490 HOST_WIDE_INT reg_offset;
7491 rtx sa_reg;
7492 int i;
7494 sa_size = alpha_sa_size ();
7496 frame_size = get_frame_size ();
7497 if (TARGET_ABI_OPEN_VMS)
7498 frame_size = ALPHA_ROUND (sa_size
7499 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7500 + frame_size
7501 + current_function_pretend_args_size);
7502 else if (TARGET_ABI_UNICOSMK)
7503 /* We have to allocate space for the DSIB if we generate a frame. */
7504 frame_size = ALPHA_ROUND (sa_size
7505 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7506 + ALPHA_ROUND (frame_size
7507 + current_function_outgoing_args_size);
7508 else
7509 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7510 + sa_size
7511 + ALPHA_ROUND (frame_size
7512 + current_function_pretend_args_size));
7514 if (TARGET_ABI_OPEN_VMS)
7515 reg_offset = 8;
7516 else
7517 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7519 alpha_sa_mask (&imask, &fmask);
7521 /* Emit an insn to reload GP, if needed. */
7522 if (TARGET_ABI_OSF)
7524 alpha_function_needs_gp = alpha_does_function_need_gp ();
7525 if (alpha_function_needs_gp)
7526 emit_insn (gen_prologue_ldgp ());
7529 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7530 the call to mcount ourselves, rather than having the linker do it
7531 magically in response to -pg. Since _mcount has special linkage,
7532 don't represent the call as a call. */
7533 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7534 emit_insn (gen_prologue_mcount ());
7536 if (TARGET_ABI_UNICOSMK)
7537 unicosmk_gen_dsib (&imask);
7539 /* Adjust the stack by the frame size. If the frame size is > 4096
7540 bytes, we need to be sure we probe somewhere in the first and last
7541 4096 bytes (we can probably get away without the latter test) and
7542 every 8192 bytes in between. If the frame size is > 32768, we
7543 do this in a loop. Otherwise, we generate the explicit probe
7544 instructions.
7546 Note that we are only allowed to adjust sp once in the prologue. */
7548 if (frame_size <= 32768)
7550 if (frame_size > 4096)
7552 int probed;
7554 for (probed = 4096; probed < frame_size; probed += 8192)
7555 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7556 ? -probed + 64
7557 : -probed)));
7559 /* We only have to do this probe if we aren't saving registers. */
7560 if (sa_size == 0 && frame_size > probed - 4096)
7561 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7564 if (frame_size != 0)
7565 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7566 GEN_INT (TARGET_ABI_UNICOSMK
7567 ? -frame_size + 64
7568 : -frame_size))));
7570 else
7572 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7573 number of 8192 byte blocks to probe. We then probe each block
7574 in the loop and then set SP to the proper location. If the
7575 amount remaining is > 4096, we have to do one more probe if we
7576 are not saving any registers. */
7578 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7579 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7580 rtx ptr = gen_rtx_REG (DImode, 22);
7581 rtx count = gen_rtx_REG (DImode, 23);
7582 rtx seq;
7584 emit_move_insn (count, GEN_INT (blocks));
7585 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7586 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7588 /* Because of the difficulty in emitting a new basic block this
7589 late in the compilation, generate the loop as a single insn. */
7590 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7592 if (leftover > 4096 && sa_size == 0)
7594 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7595 MEM_VOLATILE_P (last) = 1;
7596 emit_move_insn (last, const0_rtx);
7599 if (TARGET_ABI_WINDOWS_NT)
7601 /* For NT stack unwind (done by 'reverse execution'), it's
7602 not OK to take the result of a loop, even though the value
7603 is already in ptr, so we reload it via a single operation
7604 and subtract it to sp.
7606 Yes, that's correct -- we have to reload the whole constant
7607 into a temporary via ldah+lda then subtract from sp. */
7609 HOST_WIDE_INT lo, hi;
7610 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7611 hi = frame_size - lo;
7613 emit_move_insn (ptr, GEN_INT (hi));
7614 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7615 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7616 ptr));
7618 else
7620 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7621 GEN_INT (-leftover)));
7624 /* This alternative is special, because the DWARF code cannot
7625 possibly intuit through the loop above. So we invent this
7626 note it looks at instead. */
7627 RTX_FRAME_RELATED_P (seq) = 1;
7628 REG_NOTES (seq)
7629 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7630 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7631 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7632 GEN_INT (TARGET_ABI_UNICOSMK
7633 ? -frame_size + 64
7634 : -frame_size))),
7635 REG_NOTES (seq));
7638 if (!TARGET_ABI_UNICOSMK)
7640 HOST_WIDE_INT sa_bias = 0;
7642 /* Cope with very large offsets to the register save area. */
7643 sa_reg = stack_pointer_rtx;
7644 if (reg_offset + sa_size > 0x8000)
7646 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7647 rtx sa_bias_rtx;
7649 if (low + sa_size <= 0x8000)
7650 sa_bias = reg_offset - low, reg_offset = low;
7651 else
7652 sa_bias = reg_offset, reg_offset = 0;
7654 sa_reg = gen_rtx_REG (DImode, 24);
7655 sa_bias_rtx = GEN_INT (sa_bias);
7657 if (add_operand (sa_bias_rtx, DImode))
7658 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7659 else
7661 emit_move_insn (sa_reg, sa_bias_rtx);
7662 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7666 /* Save regs in stack order. Beginning with VMS PV. */
7667 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7668 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7670 /* Save register RA next. */
7671 if (imask & (1UL << REG_RA))
7673 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7674 imask &= ~(1UL << REG_RA);
7675 reg_offset += 8;
7678 /* Now save any other registers required to be saved. */
7679 for (i = 0; i < 31; i++)
7680 if (imask & (1UL << i))
7682 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7683 reg_offset += 8;
7686 for (i = 0; i < 31; i++)
7687 if (fmask & (1UL << i))
7689 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7690 reg_offset += 8;
7693 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7695 /* The standard frame on the T3E includes space for saving registers.
7696 We just have to use it. We don't have to save the return address and
7697 the old frame pointer here - they are saved in the DSIB. */
7699 reg_offset = -56;
7700 for (i = 9; i < 15; i++)
7701 if (imask & (1UL << i))
7703 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7704 reg_offset -= 8;
7706 for (i = 2; i < 10; i++)
7707 if (fmask & (1UL << i))
7709 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7710 reg_offset -= 8;
7714 if (TARGET_ABI_OPEN_VMS)
7716 if (alpha_procedure_type == PT_REGISTER)
7717 /* Register frame procedures save the fp.
7718 ?? Ought to have a dwarf2 save for this. */
7719 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7720 hard_frame_pointer_rtx);
7722 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7723 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7724 gen_rtx_REG (DImode, REG_PV)));
7726 if (alpha_procedure_type != PT_NULL
7727 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7728 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7730 /* If we have to allocate space for outgoing args, do it now. */
7731 if (current_function_outgoing_args_size != 0)
7733 rtx seq
7734 = emit_move_insn (stack_pointer_rtx,
7735 plus_constant
7736 (hard_frame_pointer_rtx,
7737 - (ALPHA_ROUND
7738 (current_function_outgoing_args_size))));
7740 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7741 if ! frame_pointer_needed. Setting the bit will change the CFA
7742 computation rule to use sp again, which would be wrong if we had
7743 frame_pointer_needed, as this means sp might move unpredictably
7744 later on.
7746 Also, note that
7747 frame_pointer_needed
7748 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7750 current_function_outgoing_args_size != 0
7751 => alpha_procedure_type != PT_NULL,
7753 so when we are not setting the bit here, we are guaranteed to
7754 have emitted an FRP frame pointer update just before. */
7755 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7758 else if (!TARGET_ABI_UNICOSMK)
7760 /* If we need a frame pointer, set it from the stack pointer. */
7761 if (frame_pointer_needed)
7763 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7764 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7765 else
7766 /* This must always be the last instruction in the
7767 prologue, thus we emit a special move + clobber. */
7768 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7769 stack_pointer_rtx, sa_reg)));
7773 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7774 the prologue, for exception handling reasons, we cannot do this for
7775 any insn that might fault. We could prevent this for mems with a
7776 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7777 have to prevent all such scheduling with a blockage.
7779 Linux, on the other hand, never bothered to implement OSF/1's
7780 exception handling, and so doesn't care about such things. Anyone
7781 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7783 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7784 emit_insn (gen_blockage ());
7787 /* Count the number of .file directives, so that .loc is up to date. */
7788 int num_source_filenames = 0;
7790 /* Output the textual info surrounding the prologue. */
7792 void
7793 alpha_start_function (FILE *file, const char *fnname,
7794 tree decl ATTRIBUTE_UNUSED)
7796 unsigned long imask = 0;
7797 unsigned long fmask = 0;
7798 /* Stack space needed for pushing registers clobbered by us. */
7799 HOST_WIDE_INT sa_size;
7800 /* Complete stack size needed. */
7801 unsigned HOST_WIDE_INT frame_size;
7802 /* Offset from base reg to register save area. */
7803 HOST_WIDE_INT reg_offset;
7804 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7805 int i;
7807 /* Don't emit an extern directive for functions defined in the same file. */
7808 if (TARGET_ABI_UNICOSMK)
7810 tree name_tree;
7811 name_tree = get_identifier (fnname);
7812 TREE_ASM_WRITTEN (name_tree) = 1;
7815 alpha_fnname = fnname;
7816 sa_size = alpha_sa_size ();
7818 frame_size = get_frame_size ();
7819 if (TARGET_ABI_OPEN_VMS)
7820 frame_size = ALPHA_ROUND (sa_size
7821 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7822 + frame_size
7823 + current_function_pretend_args_size);
7824 else if (TARGET_ABI_UNICOSMK)
7825 frame_size = ALPHA_ROUND (sa_size
7826 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7827 + ALPHA_ROUND (frame_size
7828 + current_function_outgoing_args_size);
7829 else
7830 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7831 + sa_size
7832 + ALPHA_ROUND (frame_size
7833 + current_function_pretend_args_size));
7835 if (TARGET_ABI_OPEN_VMS)
7836 reg_offset = 8;
7837 else
7838 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7840 alpha_sa_mask (&imask, &fmask);
7842 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7843 We have to do that before the .ent directive as we cannot switch
7844 files within procedures with native ecoff because line numbers are
7845 linked to procedure descriptors.
7846 Outputting the lineno helps debugging of one line functions as they
7847 would otherwise get no line number at all. Please note that we would
7848 like to put out last_linenum from final.c, but it is not accessible. */
7850 if (write_symbols == SDB_DEBUG)
7852 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7853 ASM_OUTPUT_SOURCE_FILENAME (file,
7854 DECL_SOURCE_FILE (current_function_decl));
7855 #endif
7856 #ifdef SDB_OUTPUT_SOURCE_LINE
7857 if (debug_info_level != DINFO_LEVEL_TERSE)
7858 SDB_OUTPUT_SOURCE_LINE (file,
7859 DECL_SOURCE_LINE (current_function_decl));
7860 #endif
7863 /* Issue function start and label. */
7864 if (TARGET_ABI_OPEN_VMS
7865 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7867 fputs ("\t.ent ", file);
7868 assemble_name (file, fnname);
7869 putc ('\n', file);
7871 /* If the function needs GP, we'll write the "..ng" label there.
7872 Otherwise, do it here. */
7873 if (TARGET_ABI_OSF
7874 && ! alpha_function_needs_gp
7875 && ! current_function_is_thunk)
7877 putc ('$', file);
7878 assemble_name (file, fnname);
7879 fputs ("..ng:\n", file);
7883 strcpy (entry_label, fnname);
7884 if (TARGET_ABI_OPEN_VMS)
7885 strcat (entry_label, "..en");
7887 /* For public functions, the label must be globalized by appending an
7888 additional colon. */
7889 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7890 strcat (entry_label, ":");
7892 ASM_OUTPUT_LABEL (file, entry_label);
7893 inside_function = TRUE;
7895 if (TARGET_ABI_OPEN_VMS)
7896 fprintf (file, "\t.base $%d\n", vms_base_regno);
7898 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7899 && !flag_inhibit_size_directive)
7901 /* Set flags in procedure descriptor to request IEEE-conformant
7902 math-library routines. The value we set it to is PDSC_EXC_IEEE
7903 (/usr/include/pdsc.h). */
7904 fputs ("\t.eflag 48\n", file);
7907 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7908 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7909 alpha_arg_offset = -frame_size + 48;
7911 /* Describe our frame. If the frame size is larger than an integer,
7912 print it as zero to avoid an assembler error. We won't be
7913 properly describing such a frame, but that's the best we can do. */
7914 if (TARGET_ABI_UNICOSMK)
7916 else if (TARGET_ABI_OPEN_VMS)
7917 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7918 HOST_WIDE_INT_PRINT_DEC "\n",
7919 vms_unwind_regno,
7920 frame_size >= (1UL << 31) ? 0 : frame_size,
7921 reg_offset);
7922 else if (!flag_inhibit_size_directive)
7923 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7924 (frame_pointer_needed
7925 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7926 frame_size >= (1UL << 31) ? 0 : frame_size,
7927 current_function_pretend_args_size);
7929 /* Describe which registers were spilled. */
7930 if (TARGET_ABI_UNICOSMK)
7932 else if (TARGET_ABI_OPEN_VMS)
7934 if (imask)
7935 /* ??? Does VMS care if mask contains ra? The old code didn't
7936 set it, so I don't here. */
7937 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7938 if (fmask)
7939 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7940 if (alpha_procedure_type == PT_REGISTER)
7941 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7943 else if (!flag_inhibit_size_directive)
7945 if (imask)
7947 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7948 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7950 for (i = 0; i < 32; ++i)
7951 if (imask & (1UL << i))
7952 reg_offset += 8;
7955 if (fmask)
7956 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7957 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7960 #if TARGET_ABI_OPEN_VMS
7961 /* Ifdef'ed cause link_section are only available then. */
7962 switch_to_section (readonly_data_section);
7963 fprintf (file, "\t.align 3\n");
7964 assemble_name (file, fnname); fputs ("..na:\n", file);
7965 fputs ("\t.ascii \"", file);
7966 assemble_name (file, fnname);
7967 fputs ("\\0\"\n", file);
7968 alpha_need_linkage (fnname, 1);
7969 switch_to_section (text_section);
7970 #endif
7973 /* Emit the .prologue note at the scheduled end of the prologue. */
7975 static void
7976 alpha_output_function_end_prologue (FILE *file)
7978 if (TARGET_ABI_UNICOSMK)
7980 else if (TARGET_ABI_OPEN_VMS)
7981 fputs ("\t.prologue\n", file);
7982 else if (TARGET_ABI_WINDOWS_NT)
7983 fputs ("\t.prologue 0\n", file);
7984 else if (!flag_inhibit_size_directive)
7985 fprintf (file, "\t.prologue %d\n",
7986 alpha_function_needs_gp || current_function_is_thunk);
7989 /* Write function epilogue. */
7991 /* ??? At some point we will want to support full unwind, and so will
7992 need to mark the epilogue as well. At the moment, we just confuse
7993 dwarf2out. */
7994 #undef FRP
7995 #define FRP(exp) exp
7997 void
7998 alpha_expand_epilogue (void)
8000 /* Registers to save. */
8001 unsigned long imask = 0;
8002 unsigned long fmask = 0;
8003 /* Stack space needed for pushing registers clobbered by us. */
8004 HOST_WIDE_INT sa_size;
8005 /* Complete stack size needed. */
8006 HOST_WIDE_INT frame_size;
8007 /* Offset from base reg to register save area. */
8008 HOST_WIDE_INT reg_offset;
8009 int fp_is_frame_pointer, fp_offset;
8010 rtx sa_reg, sa_reg_exp = NULL;
8011 rtx sp_adj1, sp_adj2, mem;
8012 rtx eh_ofs;
8013 int i;
8015 sa_size = alpha_sa_size ();
8017 frame_size = get_frame_size ();
8018 if (TARGET_ABI_OPEN_VMS)
8019 frame_size = ALPHA_ROUND (sa_size
8020 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8021 + frame_size
8022 + current_function_pretend_args_size);
8023 else if (TARGET_ABI_UNICOSMK)
8024 frame_size = ALPHA_ROUND (sa_size
8025 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8026 + ALPHA_ROUND (frame_size
8027 + current_function_outgoing_args_size);
8028 else
8029 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8030 + sa_size
8031 + ALPHA_ROUND (frame_size
8032 + current_function_pretend_args_size));
8034 if (TARGET_ABI_OPEN_VMS)
8036 if (alpha_procedure_type == PT_STACK)
8037 reg_offset = 8;
8038 else
8039 reg_offset = 0;
8041 else
8042 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8044 alpha_sa_mask (&imask, &fmask);
8046 fp_is_frame_pointer
8047 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8048 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8049 fp_offset = 0;
8050 sa_reg = stack_pointer_rtx;
8052 if (current_function_calls_eh_return)
8053 eh_ofs = EH_RETURN_STACKADJ_RTX;
8054 else
8055 eh_ofs = NULL_RTX;
8057 if (!TARGET_ABI_UNICOSMK && sa_size)
8059 /* If we have a frame pointer, restore SP from it. */
8060 if ((TARGET_ABI_OPEN_VMS
8061 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8062 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8063 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8065 /* Cope with very large offsets to the register save area. */
8066 if (reg_offset + sa_size > 0x8000)
8068 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8069 HOST_WIDE_INT bias;
8071 if (low + sa_size <= 0x8000)
8072 bias = reg_offset - low, reg_offset = low;
8073 else
8074 bias = reg_offset, reg_offset = 0;
8076 sa_reg = gen_rtx_REG (DImode, 22);
8077 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8079 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8082 /* Restore registers in order, excepting a true frame pointer. */
8084 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8085 if (! eh_ofs)
8086 set_mem_alias_set (mem, alpha_sr_alias_set);
8087 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8089 reg_offset += 8;
8090 imask &= ~(1UL << REG_RA);
8092 for (i = 0; i < 31; ++i)
8093 if (imask & (1UL << i))
8095 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8096 fp_offset = reg_offset;
8097 else
8099 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8100 set_mem_alias_set (mem, alpha_sr_alias_set);
8101 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8103 reg_offset += 8;
8106 for (i = 0; i < 31; ++i)
8107 if (fmask & (1UL << i))
8109 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8110 set_mem_alias_set (mem, alpha_sr_alias_set);
8111 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8112 reg_offset += 8;
8115 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8117 /* Restore callee-saved general-purpose registers. */
8119 reg_offset = -56;
8121 for (i = 9; i < 15; i++)
8122 if (imask & (1UL << i))
8124 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8125 reg_offset));
8126 set_mem_alias_set (mem, alpha_sr_alias_set);
8127 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8128 reg_offset -= 8;
8131 for (i = 2; i < 10; i++)
8132 if (fmask & (1UL << i))
8134 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8135 reg_offset));
8136 set_mem_alias_set (mem, alpha_sr_alias_set);
8137 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8138 reg_offset -= 8;
8141 /* Restore the return address from the DSIB. */
8143 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8144 set_mem_alias_set (mem, alpha_sr_alias_set);
8145 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8148 if (frame_size || eh_ofs)
8150 sp_adj1 = stack_pointer_rtx;
8152 if (eh_ofs)
8154 sp_adj1 = gen_rtx_REG (DImode, 23);
8155 emit_move_insn (sp_adj1,
8156 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8159 /* If the stack size is large, begin computation into a temporary
8160 register so as not to interfere with a potential fp restore,
8161 which must be consecutive with an SP restore. */
8162 if (frame_size < 32768
8163 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8164 sp_adj2 = GEN_INT (frame_size);
8165 else if (TARGET_ABI_UNICOSMK)
8167 sp_adj1 = gen_rtx_REG (DImode, 23);
8168 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8169 sp_adj2 = const0_rtx;
8171 else if (frame_size < 0x40007fffL)
8173 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8175 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8176 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8177 sp_adj1 = sa_reg;
8178 else
8180 sp_adj1 = gen_rtx_REG (DImode, 23);
8181 FRP (emit_move_insn (sp_adj1, sp_adj2));
8183 sp_adj2 = GEN_INT (low);
8185 else
8187 rtx tmp = gen_rtx_REG (DImode, 23);
8188 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8189 3, false));
8190 if (!sp_adj2)
8192 /* We can't drop new things to memory this late, afaik,
8193 so build it up by pieces. */
8194 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8195 -(frame_size < 0)));
8196 gcc_assert (sp_adj2);
8200 /* From now on, things must be in order. So emit blockages. */
8202 /* Restore the frame pointer. */
8203 if (TARGET_ABI_UNICOSMK)
8205 emit_insn (gen_blockage ());
8206 mem = gen_rtx_MEM (DImode,
8207 plus_constant (hard_frame_pointer_rtx, -16));
8208 set_mem_alias_set (mem, alpha_sr_alias_set);
8209 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8211 else if (fp_is_frame_pointer)
8213 emit_insn (gen_blockage ());
8214 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8215 set_mem_alias_set (mem, alpha_sr_alias_set);
8216 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8218 else if (TARGET_ABI_OPEN_VMS)
8220 emit_insn (gen_blockage ());
8221 FRP (emit_move_insn (hard_frame_pointer_rtx,
8222 gen_rtx_REG (DImode, vms_save_fp_regno)));
8225 /* Restore the stack pointer. */
8226 emit_insn (gen_blockage ());
8227 if (sp_adj2 == const0_rtx)
8228 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8229 else
8230 FRP (emit_move_insn (stack_pointer_rtx,
8231 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8233 else
8235 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8237 emit_insn (gen_blockage ());
8238 FRP (emit_move_insn (hard_frame_pointer_rtx,
8239 gen_rtx_REG (DImode, vms_save_fp_regno)));
8241 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8243 /* Decrement the frame pointer if the function does not have a
8244 frame. */
8246 emit_insn (gen_blockage ());
8247 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8248 hard_frame_pointer_rtx, constm1_rtx)));
8253 /* Output the rest of the textual info surrounding the epilogue. */
8255 void
8256 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8258 #if TARGET_ABI_OPEN_VMS
8259 alpha_write_linkage (file, fnname, decl);
8260 #endif
8262 /* End the function. */
8263 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8265 fputs ("\t.end ", file);
8266 assemble_name (file, fnname);
8267 putc ('\n', file);
8269 inside_function = FALSE;
8271 /* Output jump tables and the static subroutine information block. */
8272 if (TARGET_ABI_UNICOSMK)
8274 unicosmk_output_ssib (file, fnname);
8275 unicosmk_output_deferred_case_vectors (file);
8279 #if TARGET_ABI_OSF
8280 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8282 In order to avoid the hordes of differences between generated code
8283 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8284 lots of code loading up large constants, generate rtl and emit it
8285 instead of going straight to text.
8287 Not sure why this idea hasn't been explored before... */
8289 static void
8290 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8291 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8292 tree function)
8294 HOST_WIDE_INT hi, lo;
8295 rtx this, insn, funexp;
8297 reset_block_changes ();
8299 /* We always require a valid GP. */
8300 emit_insn (gen_prologue_ldgp ());
8301 emit_note (NOTE_INSN_PROLOGUE_END);
8303 /* Find the "this" pointer. If the function returns a structure,
8304 the structure return pointer is in $16. */
8305 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8306 this = gen_rtx_REG (Pmode, 17);
8307 else
8308 this = gen_rtx_REG (Pmode, 16);
8310 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8311 entire constant for the add. */
8312 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8313 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8314 if (hi + lo == delta)
8316 if (hi)
8317 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8318 if (lo)
8319 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8321 else
8323 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8324 delta, -(delta < 0));
8325 emit_insn (gen_adddi3 (this, this, tmp));
8328 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8329 if (vcall_offset)
8331 rtx tmp, tmp2;
8333 tmp = gen_rtx_REG (Pmode, 0);
8334 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8336 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8337 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8338 if (hi + lo == vcall_offset)
8340 if (hi)
8341 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8343 else
8345 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8346 vcall_offset, -(vcall_offset < 0));
8347 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8348 lo = 0;
8350 if (lo)
8351 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8352 else
8353 tmp2 = tmp;
8354 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8356 emit_insn (gen_adddi3 (this, this, tmp));
8359 /* Generate a tail call to the target function. */
8360 if (! TREE_USED (function))
8362 assemble_external (function);
8363 TREE_USED (function) = 1;
8365 funexp = XEXP (DECL_RTL (function), 0);
8366 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8367 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8368 SIBLING_CALL_P (insn) = 1;
8370 /* Run just enough of rest_of_compilation to get the insns emitted.
8371 There's not really enough bulk here to make other passes such as
8372 instruction scheduling worth while. Note that use_thunk calls
8373 assemble_start_function and assemble_end_function. */
8374 insn = get_insns ();
8375 insn_locators_initialize ();
8376 shorten_branches (insn);
8377 final_start_function (insn, file, 1);
8378 final (insn, file, 1);
8379 final_end_function ();
8381 #endif /* TARGET_ABI_OSF */
8383 /* Debugging support. */
8385 #include "gstab.h"
8387 /* Count the number of sdb related labels are generated (to find block
8388 start and end boundaries). */
8390 int sdb_label_count = 0;
8392 /* Name of the file containing the current function. */
8394 static const char *current_function_file = "";
8396 /* Offsets to alpha virtual arg/local debugging pointers. */
8398 long alpha_arg_offset;
8399 long alpha_auto_offset;
8401 /* Emit a new filename to a stream. */
8403 void
8404 alpha_output_filename (FILE *stream, const char *name)
8406 static int first_time = TRUE;
8408 if (first_time)
8410 first_time = FALSE;
8411 ++num_source_filenames;
8412 current_function_file = name;
8413 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8414 output_quoted_string (stream, name);
8415 fprintf (stream, "\n");
8416 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8417 fprintf (stream, "\t#@stabs\n");
8420 else if (write_symbols == DBX_DEBUG)
8421 /* dbxout.c will emit an appropriate .stabs directive. */
8422 return;
8424 else if (name != current_function_file
8425 && strcmp (name, current_function_file) != 0)
8427 if (inside_function && ! TARGET_GAS)
8428 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8429 else
8431 ++num_source_filenames;
8432 current_function_file = name;
8433 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8436 output_quoted_string (stream, name);
8437 fprintf (stream, "\n");
8441 /* Structure to show the current status of registers and memory. */
8443 struct shadow_summary
8445 struct {
8446 unsigned int i : 31; /* Mask of int regs */
8447 unsigned int fp : 31; /* Mask of fp regs */
8448 unsigned int mem : 1; /* mem == imem | fpmem */
8449 } used, defd;
8452 /* Summary the effects of expression X on the machine. Update SUM, a pointer
8453 to the summary structure. SET is nonzero if the insn is setting the
8454 object, otherwise zero. */
8456 static void
8457 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8459 const char *format_ptr;
8460 int i, j;
8462 if (x == 0)
8463 return;
8465 switch (GET_CODE (x))
8467 /* ??? Note that this case would be incorrect if the Alpha had a
8468 ZERO_EXTRACT in SET_DEST. */
8469 case SET:
8470 summarize_insn (SET_SRC (x), sum, 0);
8471 summarize_insn (SET_DEST (x), sum, 1);
8472 break;
8474 case CLOBBER:
8475 summarize_insn (XEXP (x, 0), sum, 1);
8476 break;
8478 case USE:
8479 summarize_insn (XEXP (x, 0), sum, 0);
8480 break;
8482 case ASM_OPERANDS:
8483 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8484 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8485 break;
8487 case PARALLEL:
8488 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8489 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8490 break;
8492 case SUBREG:
8493 summarize_insn (SUBREG_REG (x), sum, 0);
8494 break;
8496 case REG:
8498 int regno = REGNO (x);
8499 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8501 if (regno == 31 || regno == 63)
8502 break;
8504 if (set)
8506 if (regno < 32)
8507 sum->defd.i |= mask;
8508 else
8509 sum->defd.fp |= mask;
8511 else
8513 if (regno < 32)
8514 sum->used.i |= mask;
8515 else
8516 sum->used.fp |= mask;
8519 break;
8521 case MEM:
8522 if (set)
8523 sum->defd.mem = 1;
8524 else
8525 sum->used.mem = 1;
8527 /* Find the regs used in memory address computation: */
8528 summarize_insn (XEXP (x, 0), sum, 0);
8529 break;
8531 case CONST_INT: case CONST_DOUBLE:
8532 case SYMBOL_REF: case LABEL_REF: case CONST:
8533 case SCRATCH: case ASM_INPUT:
8534 break;
8536 /* Handle common unary and binary ops for efficiency. */
8537 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8538 case MOD: case UDIV: case UMOD: case AND: case IOR:
8539 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8540 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8541 case NE: case EQ: case GE: case GT: case LE:
8542 case LT: case GEU: case GTU: case LEU: case LTU:
8543 summarize_insn (XEXP (x, 0), sum, 0);
8544 summarize_insn (XEXP (x, 1), sum, 0);
8545 break;
8547 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8548 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8549 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8550 case SQRT: case FFS:
8551 summarize_insn (XEXP (x, 0), sum, 0);
8552 break;
8554 default:
8555 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8556 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8557 switch (format_ptr[i])
8559 case 'e':
8560 summarize_insn (XEXP (x, i), sum, 0);
8561 break;
8563 case 'E':
8564 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8565 summarize_insn (XVECEXP (x, i, j), sum, 0);
8566 break;
8568 case 'i':
8569 break;
8571 default:
8572 gcc_unreachable ();
8577 /* Ensure a sufficient number of `trapb' insns are in the code when
8578 the user requests code with a trap precision of functions or
8579 instructions.
8581 In naive mode, when the user requests a trap-precision of
8582 "instruction", a trapb is needed after every instruction that may
8583 generate a trap. This ensures that the code is resumption safe but
8584 it is also slow.
8586 When optimizations are turned on, we delay issuing a trapb as long
8587 as possible. In this context, a trap shadow is the sequence of
8588 instructions that starts with a (potentially) trap generating
8589 instruction and extends to the next trapb or call_pal instruction
8590 (but GCC never generates call_pal by itself). We can delay (and
8591 therefore sometimes omit) a trapb subject to the following
8592 conditions:
8594 (a) On entry to the trap shadow, if any Alpha register or memory
8595 location contains a value that is used as an operand value by some
8596 instruction in the trap shadow (live on entry), then no instruction
8597 in the trap shadow may modify the register or memory location.
8599 (b) Within the trap shadow, the computation of the base register
8600 for a memory load or store instruction may not involve using the
8601 result of an instruction that might generate an UNPREDICTABLE
8602 result.
8604 (c) Within the trap shadow, no register may be used more than once
8605 as a destination register. (This is to make life easier for the
8606 trap-handler.)
8608 (d) The trap shadow may not include any branch instructions. */
8610 static void
8611 alpha_handle_trap_shadows (void)
8613 struct shadow_summary shadow;
8614 int trap_pending, exception_nesting;
8615 rtx i, n;
8617 trap_pending = 0;
8618 exception_nesting = 0;
8619 shadow.used.i = 0;
8620 shadow.used.fp = 0;
8621 shadow.used.mem = 0;
8622 shadow.defd = shadow.used;
8624 for (i = get_insns (); i ; i = NEXT_INSN (i))
8626 if (GET_CODE (i) == NOTE)
8628 switch (NOTE_LINE_NUMBER (i))
8630 case NOTE_INSN_EH_REGION_BEG:
8631 exception_nesting++;
8632 if (trap_pending)
8633 goto close_shadow;
8634 break;
8636 case NOTE_INSN_EH_REGION_END:
8637 exception_nesting--;
8638 if (trap_pending)
8639 goto close_shadow;
8640 break;
8642 case NOTE_INSN_EPILOGUE_BEG:
8643 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8644 goto close_shadow;
8645 break;
8648 else if (trap_pending)
8650 if (alpha_tp == ALPHA_TP_FUNC)
8652 if (GET_CODE (i) == JUMP_INSN
8653 && GET_CODE (PATTERN (i)) == RETURN)
8654 goto close_shadow;
8656 else if (alpha_tp == ALPHA_TP_INSN)
8658 if (optimize > 0)
8660 struct shadow_summary sum;
8662 sum.used.i = 0;
8663 sum.used.fp = 0;
8664 sum.used.mem = 0;
8665 sum.defd = sum.used;
8667 switch (GET_CODE (i))
8669 case INSN:
8670 /* Annoyingly, get_attr_trap will die on these. */
8671 if (GET_CODE (PATTERN (i)) == USE
8672 || GET_CODE (PATTERN (i)) == CLOBBER)
8673 break;
8675 summarize_insn (PATTERN (i), &sum, 0);
8677 if ((sum.defd.i & shadow.defd.i)
8678 || (sum.defd.fp & shadow.defd.fp))
8680 /* (c) would be violated */
8681 goto close_shadow;
8684 /* Combine shadow with summary of current insn: */
8685 shadow.used.i |= sum.used.i;
8686 shadow.used.fp |= sum.used.fp;
8687 shadow.used.mem |= sum.used.mem;
8688 shadow.defd.i |= sum.defd.i;
8689 shadow.defd.fp |= sum.defd.fp;
8690 shadow.defd.mem |= sum.defd.mem;
8692 if ((sum.defd.i & shadow.used.i)
8693 || (sum.defd.fp & shadow.used.fp)
8694 || (sum.defd.mem & shadow.used.mem))
8696 /* (a) would be violated (also takes care of (b)) */
8697 gcc_assert (get_attr_trap (i) != TRAP_YES
8698 || (!(sum.defd.i & sum.used.i)
8699 && !(sum.defd.fp & sum.used.fp)));
8701 goto close_shadow;
8703 break;
8705 case JUMP_INSN:
8706 case CALL_INSN:
8707 case CODE_LABEL:
8708 goto close_shadow;
8710 default:
8711 gcc_unreachable ();
8714 else
8716 close_shadow:
8717 n = emit_insn_before (gen_trapb (), i);
8718 PUT_MODE (n, TImode);
8719 PUT_MODE (i, TImode);
8720 trap_pending = 0;
8721 shadow.used.i = 0;
8722 shadow.used.fp = 0;
8723 shadow.used.mem = 0;
8724 shadow.defd = shadow.used;
8729 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8730 && GET_CODE (i) == INSN
8731 && GET_CODE (PATTERN (i)) != USE
8732 && GET_CODE (PATTERN (i)) != CLOBBER
8733 && get_attr_trap (i) == TRAP_YES)
8735 if (optimize && !trap_pending)
8736 summarize_insn (PATTERN (i), &shadow, 0);
8737 trap_pending = 1;
8742 /* Alpha can only issue instruction groups simultaneously if they are
8743 suitably aligned. This is very processor-specific. */
8744 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8745 that are marked "fake". These instructions do not exist on that target,
8746 but it is possible to see these insns with deranged combinations of
8747 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8748 choose a result at random. */
8750 enum alphaev4_pipe {
8751 EV4_STOP = 0,
8752 EV4_IB0 = 1,
8753 EV4_IB1 = 2,
8754 EV4_IBX = 4
8757 enum alphaev5_pipe {
8758 EV5_STOP = 0,
8759 EV5_NONE = 1,
8760 EV5_E01 = 2,
8761 EV5_E0 = 4,
8762 EV5_E1 = 8,
8763 EV5_FAM = 16,
8764 EV5_FA = 32,
8765 EV5_FM = 64
8768 static enum alphaev4_pipe
8769 alphaev4_insn_pipe (rtx insn)
8771 if (recog_memoized (insn) < 0)
8772 return EV4_STOP;
8773 if (get_attr_length (insn) != 4)
8774 return EV4_STOP;
8776 switch (get_attr_type (insn))
8778 case TYPE_ILD:
8779 case TYPE_LDSYM:
8780 case TYPE_FLD:
8781 case TYPE_LD_L:
8782 return EV4_IBX;
8784 case TYPE_IADD:
8785 case TYPE_ILOG:
8786 case TYPE_ICMOV:
8787 case TYPE_ICMP:
8788 case TYPE_FST:
8789 case TYPE_SHIFT:
8790 case TYPE_IMUL:
8791 case TYPE_FBR:
8792 case TYPE_MVI: /* fake */
8793 return EV4_IB0;
8795 case TYPE_IST:
8796 case TYPE_MISC:
8797 case TYPE_IBR:
8798 case TYPE_JSR:
8799 case TYPE_CALLPAL:
8800 case TYPE_FCPYS:
8801 case TYPE_FCMOV:
8802 case TYPE_FADD:
8803 case TYPE_FDIV:
8804 case TYPE_FMUL:
8805 case TYPE_ST_C:
8806 case TYPE_MB:
8807 case TYPE_FSQRT: /* fake */
8808 case TYPE_FTOI: /* fake */
8809 case TYPE_ITOF: /* fake */
8810 return EV4_IB1;
8812 default:
8813 gcc_unreachable ();
8817 static enum alphaev5_pipe
8818 alphaev5_insn_pipe (rtx insn)
8820 if (recog_memoized (insn) < 0)
8821 return EV5_STOP;
8822 if (get_attr_length (insn) != 4)
8823 return EV5_STOP;
8825 switch (get_attr_type (insn))
8827 case TYPE_ILD:
8828 case TYPE_FLD:
8829 case TYPE_LDSYM:
8830 case TYPE_IADD:
8831 case TYPE_ILOG:
8832 case TYPE_ICMOV:
8833 case TYPE_ICMP:
8834 return EV5_E01;
8836 case TYPE_IST:
8837 case TYPE_FST:
8838 case TYPE_SHIFT:
8839 case TYPE_IMUL:
8840 case TYPE_MISC:
8841 case TYPE_MVI:
8842 case TYPE_LD_L:
8843 case TYPE_ST_C:
8844 case TYPE_MB:
8845 case TYPE_FTOI: /* fake */
8846 case TYPE_ITOF: /* fake */
8847 return EV5_E0;
8849 case TYPE_IBR:
8850 case TYPE_JSR:
8851 case TYPE_CALLPAL:
8852 return EV5_E1;
8854 case TYPE_FCPYS:
8855 return EV5_FAM;
8857 case TYPE_FBR:
8858 case TYPE_FCMOV:
8859 case TYPE_FADD:
8860 case TYPE_FDIV:
8861 case TYPE_FSQRT: /* fake */
8862 return EV5_FA;
8864 case TYPE_FMUL:
8865 return EV5_FM;
8867 default:
8868 gcc_unreachable ();
8872 /* IN_USE is a mask of the slots currently filled within the insn group.
8873 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8874 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8876 LEN is, of course, the length of the group in bytes. */
8878 static rtx
8879 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8881 int len, in_use;
8883 len = in_use = 0;
8885 if (! INSN_P (insn)
8886 || GET_CODE (PATTERN (insn)) == CLOBBER
8887 || GET_CODE (PATTERN (insn)) == USE)
8888 goto next_and_done;
8890 while (1)
8892 enum alphaev4_pipe pipe;
8894 pipe = alphaev4_insn_pipe (insn);
8895 switch (pipe)
8897 case EV4_STOP:
8898 /* Force complex instructions to start new groups. */
8899 if (in_use)
8900 goto done;
8902 /* If this is a completely unrecognized insn, it's an asm.
8903 We don't know how long it is, so record length as -1 to
8904 signal a needed realignment. */
8905 if (recog_memoized (insn) < 0)
8906 len = -1;
8907 else
8908 len = get_attr_length (insn);
8909 goto next_and_done;
8911 case EV4_IBX:
8912 if (in_use & EV4_IB0)
8914 if (in_use & EV4_IB1)
8915 goto done;
8916 in_use |= EV4_IB1;
8918 else
8919 in_use |= EV4_IB0 | EV4_IBX;
8920 break;
8922 case EV4_IB0:
8923 if (in_use & EV4_IB0)
8925 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8926 goto done;
8927 in_use |= EV4_IB1;
8929 in_use |= EV4_IB0;
8930 break;
8932 case EV4_IB1:
8933 if (in_use & EV4_IB1)
8934 goto done;
8935 in_use |= EV4_IB1;
8936 break;
8938 default:
8939 gcc_unreachable ();
8941 len += 4;
8943 /* Haifa doesn't do well scheduling branches. */
8944 if (GET_CODE (insn) == JUMP_INSN)
8945 goto next_and_done;
8947 next:
8948 insn = next_nonnote_insn (insn);
8950 if (!insn || ! INSN_P (insn))
8951 goto done;
8953 /* Let Haifa tell us where it thinks insn group boundaries are. */
8954 if (GET_MODE (insn) == TImode)
8955 goto done;
8957 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8958 goto next;
8961 next_and_done:
8962 insn = next_nonnote_insn (insn);
8964 done:
8965 *plen = len;
8966 *pin_use = in_use;
8967 return insn;
8970 /* IN_USE is a mask of the slots currently filled within the insn group.
8971 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8972 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8974 LEN is, of course, the length of the group in bytes. */
8976 static rtx
8977 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8979 int len, in_use;
8981 len = in_use = 0;
8983 if (! INSN_P (insn)
8984 || GET_CODE (PATTERN (insn)) == CLOBBER
8985 || GET_CODE (PATTERN (insn)) == USE)
8986 goto next_and_done;
8988 while (1)
8990 enum alphaev5_pipe pipe;
8992 pipe = alphaev5_insn_pipe (insn);
8993 switch (pipe)
8995 case EV5_STOP:
8996 /* Force complex instructions to start new groups. */
8997 if (in_use)
8998 goto done;
9000 /* If this is a completely unrecognized insn, it's an asm.
9001 We don't know how long it is, so record length as -1 to
9002 signal a needed realignment. */
9003 if (recog_memoized (insn) < 0)
9004 len = -1;
9005 else
9006 len = get_attr_length (insn);
9007 goto next_and_done;
9009 /* ??? Most of the places below, we would like to assert never
9010 happen, as it would indicate an error either in Haifa, or
9011 in the scheduling description. Unfortunately, Haifa never
9012 schedules the last instruction of the BB, so we don't have
9013 an accurate TI bit to go off. */
9014 case EV5_E01:
9015 if (in_use & EV5_E0)
9017 if (in_use & EV5_E1)
9018 goto done;
9019 in_use |= EV5_E1;
9021 else
9022 in_use |= EV5_E0 | EV5_E01;
9023 break;
9025 case EV5_E0:
9026 if (in_use & EV5_E0)
9028 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9029 goto done;
9030 in_use |= EV5_E1;
9032 in_use |= EV5_E0;
9033 break;
9035 case EV5_E1:
9036 if (in_use & EV5_E1)
9037 goto done;
9038 in_use |= EV5_E1;
9039 break;
9041 case EV5_FAM:
9042 if (in_use & EV5_FA)
9044 if (in_use & EV5_FM)
9045 goto done;
9046 in_use |= EV5_FM;
9048 else
9049 in_use |= EV5_FA | EV5_FAM;
9050 break;
9052 case EV5_FA:
9053 if (in_use & EV5_FA)
9054 goto done;
9055 in_use |= EV5_FA;
9056 break;
9058 case EV5_FM:
9059 if (in_use & EV5_FM)
9060 goto done;
9061 in_use |= EV5_FM;
9062 break;
9064 case EV5_NONE:
9065 break;
9067 default:
9068 gcc_unreachable ();
9070 len += 4;
9072 /* Haifa doesn't do well scheduling branches. */
9073 /* ??? If this is predicted not-taken, slotting continues, except
9074 that no more IBR, FBR, or JSR insns may be slotted. */
9075 if (GET_CODE (insn) == JUMP_INSN)
9076 goto next_and_done;
9078 next:
9079 insn = next_nonnote_insn (insn);
9081 if (!insn || ! INSN_P (insn))
9082 goto done;
9084 /* Let Haifa tell us where it thinks insn group boundaries are. */
9085 if (GET_MODE (insn) == TImode)
9086 goto done;
9088 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9089 goto next;
9092 next_and_done:
9093 insn = next_nonnote_insn (insn);
9095 done:
9096 *plen = len;
9097 *pin_use = in_use;
9098 return insn;
9101 static rtx
9102 alphaev4_next_nop (int *pin_use)
9104 int in_use = *pin_use;
9105 rtx nop;
9107 if (!(in_use & EV4_IB0))
9109 in_use |= EV4_IB0;
9110 nop = gen_nop ();
9112 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9114 in_use |= EV4_IB1;
9115 nop = gen_nop ();
9117 else if (TARGET_FP && !(in_use & EV4_IB1))
9119 in_use |= EV4_IB1;
9120 nop = gen_fnop ();
9122 else
9123 nop = gen_unop ();
9125 *pin_use = in_use;
9126 return nop;
9129 static rtx
9130 alphaev5_next_nop (int *pin_use)
9132 int in_use = *pin_use;
9133 rtx nop;
9135 if (!(in_use & EV5_E1))
9137 in_use |= EV5_E1;
9138 nop = gen_nop ();
9140 else if (TARGET_FP && !(in_use & EV5_FA))
9142 in_use |= EV5_FA;
9143 nop = gen_fnop ();
9145 else if (TARGET_FP && !(in_use & EV5_FM))
9147 in_use |= EV5_FM;
9148 nop = gen_fnop ();
9150 else
9151 nop = gen_unop ();
9153 *pin_use = in_use;
9154 return nop;
9157 /* The instruction group alignment main loop. */
9159 static void
9160 alpha_align_insns (unsigned int max_align,
9161 rtx (*next_group) (rtx, int *, int *),
9162 rtx (*next_nop) (int *))
9164 /* ALIGN is the known alignment for the insn group. */
9165 unsigned int align;
9166 /* OFS is the offset of the current insn in the insn group. */
9167 int ofs;
9168 int prev_in_use, in_use, len, ldgp;
9169 rtx i, next;
9171 /* Let shorten branches care for assigning alignments to code labels. */
9172 shorten_branches (get_insns ());
9174 if (align_functions < 4)
9175 align = 4;
9176 else if ((unsigned int) align_functions < max_align)
9177 align = align_functions;
9178 else
9179 align = max_align;
9181 ofs = prev_in_use = 0;
9182 i = get_insns ();
9183 if (GET_CODE (i) == NOTE)
9184 i = next_nonnote_insn (i);
9186 ldgp = alpha_function_needs_gp ? 8 : 0;
9188 while (i)
9190 next = (*next_group) (i, &in_use, &len);
9192 /* When we see a label, resync alignment etc. */
9193 if (GET_CODE (i) == CODE_LABEL)
9195 unsigned int new_align = 1 << label_to_alignment (i);
9197 if (new_align >= align)
9199 align = new_align < max_align ? new_align : max_align;
9200 ofs = 0;
9203 else if (ofs & (new_align-1))
9204 ofs = (ofs | (new_align-1)) + 1;
9205 gcc_assert (!len);
9208 /* Handle complex instructions special. */
9209 else if (in_use == 0)
9211 /* Asms will have length < 0. This is a signal that we have
9212 lost alignment knowledge. Assume, however, that the asm
9213 will not mis-align instructions. */
9214 if (len < 0)
9216 ofs = 0;
9217 align = 4;
9218 len = 0;
9222 /* If the known alignment is smaller than the recognized insn group,
9223 realign the output. */
9224 else if ((int) align < len)
9226 unsigned int new_log_align = len > 8 ? 4 : 3;
9227 rtx prev, where;
9229 where = prev = prev_nonnote_insn (i);
9230 if (!where || GET_CODE (where) != CODE_LABEL)
9231 where = i;
9233 /* Can't realign between a call and its gp reload. */
9234 if (! (TARGET_EXPLICIT_RELOCS
9235 && prev && GET_CODE (prev) == CALL_INSN))
9237 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9238 align = 1 << new_log_align;
9239 ofs = 0;
9243 /* We may not insert padding inside the initial ldgp sequence. */
9244 else if (ldgp > 0)
9245 ldgp -= len;
9247 /* If the group won't fit in the same INT16 as the previous,
9248 we need to add padding to keep the group together. Rather
9249 than simply leaving the insn filling to the assembler, we
9250 can make use of the knowledge of what sorts of instructions
9251 were issued in the previous group to make sure that all of
9252 the added nops are really free. */
9253 else if (ofs + len > (int) align)
9255 int nop_count = (align - ofs) / 4;
9256 rtx where;
9258 /* Insert nops before labels, branches, and calls to truly merge
9259 the execution of the nops with the previous instruction group. */
9260 where = prev_nonnote_insn (i);
9261 if (where)
9263 if (GET_CODE (where) == CODE_LABEL)
9265 rtx where2 = prev_nonnote_insn (where);
9266 if (where2 && GET_CODE (where2) == JUMP_INSN)
9267 where = where2;
9269 else if (GET_CODE (where) == INSN)
9270 where = i;
9272 else
9273 where = i;
9276 emit_insn_before ((*next_nop)(&prev_in_use), where);
9277 while (--nop_count);
9278 ofs = 0;
9281 ofs = (ofs + len) & (align - 1);
9282 prev_in_use = in_use;
9283 i = next;
9287 /* Machine dependent reorg pass. */
9289 static void
9290 alpha_reorg (void)
9292 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9293 alpha_handle_trap_shadows ();
9295 /* Due to the number of extra trapb insns, don't bother fixing up
9296 alignment when trap precision is instruction. Moreover, we can
9297 only do our job when sched2 is run. */
9298 if (optimize && !optimize_size
9299 && alpha_tp != ALPHA_TP_INSN
9300 && flag_schedule_insns_after_reload)
9302 if (alpha_tune == PROCESSOR_EV4)
9303 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9304 else if (alpha_tune == PROCESSOR_EV5)
9305 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9309 #if !TARGET_ABI_UNICOSMK
9311 #ifdef HAVE_STAMP_H
9312 #include <stamp.h>
9313 #endif
9315 static void
9316 alpha_file_start (void)
9318 #ifdef OBJECT_FORMAT_ELF
9319 /* If emitting dwarf2 debug information, we cannot generate a .file
9320 directive to start the file, as it will conflict with dwarf2out
9321 file numbers. So it's only useful when emitting mdebug output. */
9322 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9323 #endif
9325 default_file_start ();
9326 #ifdef MS_STAMP
9327 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9328 #endif
9330 fputs ("\t.set noreorder\n", asm_out_file);
9331 fputs ("\t.set volatile\n", asm_out_file);
9332 if (!TARGET_ABI_OPEN_VMS)
9333 fputs ("\t.set noat\n", asm_out_file);
9334 if (TARGET_EXPLICIT_RELOCS)
9335 fputs ("\t.set nomacro\n", asm_out_file);
9336 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9338 const char *arch;
9340 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9341 arch = "ev6";
9342 else if (TARGET_MAX)
9343 arch = "pca56";
9344 else if (TARGET_BWX)
9345 arch = "ev56";
9346 else if (alpha_cpu == PROCESSOR_EV5)
9347 arch = "ev5";
9348 else
9349 arch = "ev4";
9351 fprintf (asm_out_file, "\t.arch %s\n", arch);
9354 #endif
9356 #ifdef OBJECT_FORMAT_ELF
9358 /* Return a section for X. The only special thing we do here is to
9359 honor small data. */
9361 static section *
9362 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9363 unsigned HOST_WIDE_INT align)
9365 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9366 /* ??? Consider using mergeable sdata sections. */
9367 return sdata_section;
9368 else
9369 return default_elf_select_rtx_section (mode, x, align);
9372 #endif /* OBJECT_FORMAT_ELF */
9374 /* Structure to collect function names for final output in link section. */
9375 /* Note that items marked with GTY can't be ifdef'ed out. */
9377 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9378 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9380 struct alpha_links GTY(())
9382 int num;
9383 rtx linkage;
9384 enum links_kind lkind;
9385 enum reloc_kind rkind;
9388 struct alpha_funcs GTY(())
9390 int num;
9391 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9392 links;
9395 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9396 splay_tree alpha_links_tree;
9397 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9398 splay_tree alpha_funcs_tree;
9400 static GTY(()) int alpha_funcs_num;
9402 #if TARGET_ABI_OPEN_VMS
9404 /* Return the VMS argument type corresponding to MODE. */
9406 enum avms_arg_type
9407 alpha_arg_type (enum machine_mode mode)
9409 switch (mode)
9411 case SFmode:
9412 return TARGET_FLOAT_VAX ? FF : FS;
9413 case DFmode:
9414 return TARGET_FLOAT_VAX ? FD : FT;
9415 default:
9416 return I64;
9420 /* Return an rtx for an integer representing the VMS Argument Information
9421 register value. */
9424 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9426 unsigned HOST_WIDE_INT regval = cum.num_args;
9427 int i;
9429 for (i = 0; i < 6; i++)
9430 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9432 return GEN_INT (regval);
9435 /* Make (or fake) .linkage entry for function call.
9437 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9439 Return an SYMBOL_REF rtx for the linkage. */
9442 alpha_need_linkage (const char *name, int is_local)
9444 splay_tree_node node;
9445 struct alpha_links *al;
9447 if (name[0] == '*')
9448 name++;
9450 if (is_local)
9452 struct alpha_funcs *cfaf;
9454 if (!alpha_funcs_tree)
9455 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9456 splay_tree_compare_pointers);
9458 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9460 cfaf->links = 0;
9461 cfaf->num = ++alpha_funcs_num;
9463 splay_tree_insert (alpha_funcs_tree,
9464 (splay_tree_key) current_function_decl,
9465 (splay_tree_value) cfaf);
9468 if (alpha_links_tree)
9470 /* Is this name already defined? */
9472 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9473 if (node)
9475 al = (struct alpha_links *) node->value;
9476 if (is_local)
9478 /* Defined here but external assumed. */
9479 if (al->lkind == KIND_EXTERN)
9480 al->lkind = KIND_LOCAL;
9482 else
9484 /* Used here but unused assumed. */
9485 if (al->lkind == KIND_UNUSED)
9486 al->lkind = KIND_LOCAL;
9488 return al->linkage;
9491 else
9492 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9494 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9495 name = ggc_strdup (name);
9497 /* Assume external if no definition. */
9498 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9500 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9501 get_identifier (name);
9503 /* Construct a SYMBOL_REF for us to call. */
9505 size_t name_len = strlen (name);
9506 char *linksym = alloca (name_len + 6);
9507 linksym[0] = '$';
9508 memcpy (linksym + 1, name, name_len);
9509 memcpy (linksym + 1 + name_len, "..lk", 5);
9510 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9511 ggc_alloc_string (linksym, name_len + 5));
9514 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9515 (splay_tree_value) al);
9517 return al->linkage;
9521 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9523 splay_tree_node cfunnode;
9524 struct alpha_funcs *cfaf;
9525 struct alpha_links *al;
9526 const char *name = XSTR (linkage, 0);
9528 cfaf = (struct alpha_funcs *) 0;
9529 al = (struct alpha_links *) 0;
9531 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9532 cfaf = (struct alpha_funcs *) cfunnode->value;
9534 if (cfaf->links)
9536 splay_tree_node lnode;
9538 /* Is this name already defined? */
9540 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9541 if (lnode)
9542 al = (struct alpha_links *) lnode->value;
9544 else
9545 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9547 if (!al)
9549 size_t name_len;
9550 size_t buflen;
9551 char buf [512];
9552 char *linksym;
9553 splay_tree_node node = 0;
9554 struct alpha_links *anl;
9556 if (name[0] == '*')
9557 name++;
9559 name_len = strlen (name);
9561 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9562 al->num = cfaf->num;
9564 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9565 if (node)
9567 anl = (struct alpha_links *) node->value;
9568 al->lkind = anl->lkind;
9571 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9572 buflen = strlen (buf);
9573 linksym = alloca (buflen + 1);
9574 memcpy (linksym, buf, buflen + 1);
9576 al->linkage = gen_rtx_SYMBOL_REF
9577 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9579 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9580 (splay_tree_value) al);
9583 if (rflag)
9584 al->rkind = KIND_CODEADDR;
9585 else
9586 al->rkind = KIND_LINKAGE;
9588 if (lflag)
9589 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9590 else
9591 return al->linkage;
9594 static int
9595 alpha_write_one_linkage (splay_tree_node node, void *data)
9597 const char *const name = (const char *) node->key;
9598 struct alpha_links *link = (struct alpha_links *) node->value;
9599 FILE *stream = (FILE *) data;
9601 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9602 if (link->rkind == KIND_CODEADDR)
9604 if (link->lkind == KIND_LOCAL)
9606 /* Local and used */
9607 fprintf (stream, "\t.quad %s..en\n", name);
9609 else
9611 /* External and used, request code address. */
9612 fprintf (stream, "\t.code_address %s\n", name);
9615 else
9617 if (link->lkind == KIND_LOCAL)
9619 /* Local and used, build linkage pair. */
9620 fprintf (stream, "\t.quad %s..en\n", name);
9621 fprintf (stream, "\t.quad %s\n", name);
9623 else
9625 /* External and used, request linkage pair. */
9626 fprintf (stream, "\t.linkage %s\n", name);
9630 return 0;
9633 static void
9634 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9636 splay_tree_node node;
9637 struct alpha_funcs *func;
9639 fprintf (stream, "\t.link\n");
9640 fprintf (stream, "\t.align 3\n");
9641 in_section = NULL;
9643 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9644 func = (struct alpha_funcs *) node->value;
9646 fputs ("\t.name ", stream);
9647 assemble_name (stream, funname);
9648 fputs ("..na\n", stream);
9649 ASM_OUTPUT_LABEL (stream, funname);
9650 fprintf (stream, "\t.pdesc ");
9651 assemble_name (stream, funname);
9652 fprintf (stream, "..en,%s\n",
9653 alpha_procedure_type == PT_STACK ? "stack"
9654 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9656 if (func->links)
9658 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9659 /* splay_tree_delete (func->links); */
9663 /* Given a decl, a section name, and whether the decl initializer
9664 has relocs, choose attributes for the section. */
9666 #define SECTION_VMS_OVERLAY SECTION_FORGET
9667 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9668 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9670 static unsigned int
9671 vms_section_type_flags (tree decl, const char *name, int reloc)
9673 unsigned int flags = default_section_type_flags (decl, name, reloc);
9675 if (decl && DECL_ATTRIBUTES (decl)
9676 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9677 flags |= SECTION_VMS_OVERLAY;
9678 if (decl && DECL_ATTRIBUTES (decl)
9679 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9680 flags |= SECTION_VMS_GLOBAL;
9681 if (decl && DECL_ATTRIBUTES (decl)
9682 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9683 flags |= SECTION_VMS_INITIALIZE;
9685 return flags;
9688 /* Switch to an arbitrary section NAME with attributes as specified
9689 by FLAGS. ALIGN specifies any known alignment requirements for
9690 the section; 0 if the default should be used. */
9692 static void
9693 vms_asm_named_section (const char *name, unsigned int flags,
9694 tree decl ATTRIBUTE_UNUSED)
9696 fputc ('\n', asm_out_file);
9697 fprintf (asm_out_file, ".section\t%s", name);
9699 if (flags & SECTION_VMS_OVERLAY)
9700 fprintf (asm_out_file, ",OVR");
9701 if (flags & SECTION_VMS_GLOBAL)
9702 fprintf (asm_out_file, ",GBL");
9703 if (flags & SECTION_VMS_INITIALIZE)
9704 fprintf (asm_out_file, ",NOMOD");
9705 if (flags & SECTION_DEBUG)
9706 fprintf (asm_out_file, ",NOWRT");
9708 fputc ('\n', asm_out_file);
9711 /* Record an element in the table of global constructors. SYMBOL is
9712 a SYMBOL_REF of the function to be called; PRIORITY is a number
9713 between 0 and MAX_INIT_PRIORITY.
9715 Differs from default_ctors_section_asm_out_constructor in that the
9716 width of the .ctors entry is always 64 bits, rather than the 32 bits
9717 used by a normal pointer. */
9719 static void
9720 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9722 switch_to_section (ctors_section);
9723 assemble_align (BITS_PER_WORD);
9724 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9727 static void
9728 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9730 switch_to_section (dtors_section);
9731 assemble_align (BITS_PER_WORD);
9732 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9734 #else
9737 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9738 int is_local ATTRIBUTE_UNUSED)
9740 return NULL_RTX;
9744 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9745 tree cfundecl ATTRIBUTE_UNUSED,
9746 int lflag ATTRIBUTE_UNUSED,
9747 int rflag ATTRIBUTE_UNUSED)
9749 return NULL_RTX;
9752 #endif /* TARGET_ABI_OPEN_VMS */
9754 #if TARGET_ABI_UNICOSMK
9756 /* This evaluates to true if we do not know how to pass TYPE solely in
9757 registers. This is the case for all arguments that do not fit in two
9758 registers. */
9760 static bool
9761 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9763 if (type == NULL)
9764 return false;
9766 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9767 return true;
9768 if (TREE_ADDRESSABLE (type))
9769 return true;
9771 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9774 /* Define the offset between two registers, one to be eliminated, and the
9775 other its replacement, at the start of a routine. */
9778 unicosmk_initial_elimination_offset (int from, int to)
9780 int fixed_size;
9782 fixed_size = alpha_sa_size();
9783 if (fixed_size != 0)
9784 fixed_size += 48;
9786 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9787 return -fixed_size;
9788 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9789 return 0;
9790 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9791 return (ALPHA_ROUND (current_function_outgoing_args_size)
9792 + ALPHA_ROUND (get_frame_size()));
9793 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9794 return (ALPHA_ROUND (fixed_size)
9795 + ALPHA_ROUND (get_frame_size()
9796 + current_function_outgoing_args_size));
9797 else
9798 gcc_unreachable ();
9801 /* Output the module name for .ident and .end directives. We have to strip
9802 directories and add make sure that the module name starts with a letter
9803 or '$'. */
9805 static void
9806 unicosmk_output_module_name (FILE *file)
9808 const char *name = lbasename (main_input_filename);
9809 unsigned len = strlen (name);
9810 char *clean_name = alloca (len + 2);
9811 char *ptr = clean_name;
9813 /* CAM only accepts module names that start with a letter or '$'. We
9814 prefix the module name with a '$' if necessary. */
9816 if (!ISALPHA (*name))
9817 *ptr++ = '$';
9818 memcpy (ptr, name, len + 1);
9819 clean_symbol_name (clean_name);
9820 fputs (clean_name, file);
9823 /* Output the definition of a common variable. */
9825 void
9826 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9828 tree name_tree;
9829 printf ("T3E__: common %s\n", name);
9831 in_section = NULL;
9832 fputs("\t.endp\n\n\t.psect ", file);
9833 assemble_name(file, name);
9834 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9835 fprintf(file, "\t.byte\t0:%d\n", size);
9837 /* Mark the symbol as defined in this module. */
9838 name_tree = get_identifier (name);
9839 TREE_ASM_WRITTEN (name_tree) = 1;
9842 #define SECTION_PUBLIC SECTION_MACH_DEP
9843 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9844 static int current_section_align;
9846 /* A get_unnamed_section callback for switching to the text section. */
9848 static void
9849 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9851 static int count = 0;
9852 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9855 /* A get_unnamed_section callback for switching to the data section. */
9857 static void
9858 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9860 static int count = 1;
9861 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9864 /* Implement TARGET_ASM_INIT_SECTIONS.
9866 The Cray assembler is really weird with respect to sections. It has only
9867 named sections and you can't reopen a section once it has been closed.
9868 This means that we have to generate unique names whenever we want to
9869 reenter the text or the data section. */
9871 static void
9872 unicosmk_init_sections (void)
9874 text_section = get_unnamed_section (SECTION_CODE,
9875 unicosmk_output_text_section_asm_op,
9876 NULL);
9877 data_section = get_unnamed_section (SECTION_WRITE,
9878 unicosmk_output_data_section_asm_op,
9879 NULL);
9880 readonly_data_section = data_section;
9883 static unsigned int
9884 unicosmk_section_type_flags (tree decl, const char *name,
9885 int reloc ATTRIBUTE_UNUSED)
9887 unsigned int flags = default_section_type_flags (decl, name, reloc);
9889 if (!decl)
9890 return flags;
9892 if (TREE_CODE (decl) == FUNCTION_DECL)
9894 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9895 if (align_functions_log > current_section_align)
9896 current_section_align = align_functions_log;
9898 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9899 flags |= SECTION_MAIN;
9901 else
9902 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9904 if (TREE_PUBLIC (decl))
9905 flags |= SECTION_PUBLIC;
9907 return flags;
9910 /* Generate a section name for decl and associate it with the
9911 declaration. */
9913 static void
9914 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9916 const char *name;
9917 int len;
9919 gcc_assert (decl);
9921 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9922 name = default_strip_name_encoding (name);
9923 len = strlen (name);
9925 if (TREE_CODE (decl) == FUNCTION_DECL)
9927 char *string;
9929 /* It is essential that we prefix the section name here because
9930 otherwise the section names generated for constructors and
9931 destructors confuse collect2. */
9933 string = alloca (len + 6);
9934 sprintf (string, "code@%s", name);
9935 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9937 else if (TREE_PUBLIC (decl))
9938 DECL_SECTION_NAME (decl) = build_string (len, name);
9939 else
9941 char *string;
9943 string = alloca (len + 6);
9944 sprintf (string, "data@%s", name);
9945 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9949 /* Switch to an arbitrary section NAME with attributes as specified
9950 by FLAGS. ALIGN specifies any known alignment requirements for
9951 the section; 0 if the default should be used. */
9953 static void
9954 unicosmk_asm_named_section (const char *name, unsigned int flags,
9955 tree decl ATTRIBUTE_UNUSED)
9957 const char *kind;
9959 /* Close the previous section. */
9961 fputs ("\t.endp\n\n", asm_out_file);
9963 /* Find out what kind of section we are opening. */
9965 if (flags & SECTION_MAIN)
9966 fputs ("\t.start\tmain\n", asm_out_file);
9968 if (flags & SECTION_CODE)
9969 kind = "code";
9970 else if (flags & SECTION_PUBLIC)
9971 kind = "common";
9972 else
9973 kind = "data";
9975 if (current_section_align != 0)
9976 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9977 current_section_align, kind);
9978 else
9979 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9982 static void
9983 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9985 if (DECL_P (decl)
9986 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9987 unicosmk_unique_section (decl, 0);
9990 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9991 in code sections because .align fill unused space with zeroes. */
9993 void
9994 unicosmk_output_align (FILE *file, int align)
9996 if (inside_function)
9997 fprintf (file, "\tgcc@code@align\t%d\n", align);
9998 else
9999 fprintf (file, "\t.align\t%d\n", align);
10002 /* Add a case vector to the current function's list of deferred case
10003 vectors. Case vectors have to be put into a separate section because CAM
10004 does not allow data definitions in code sections. */
10006 void
10007 unicosmk_defer_case_vector (rtx lab, rtx vec)
10009 struct machine_function *machine = cfun->machine;
10011 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10012 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10013 machine->addr_list);
10016 /* Output a case vector. */
10018 static void
10019 unicosmk_output_addr_vec (FILE *file, rtx vec)
10021 rtx lab = XEXP (vec, 0);
10022 rtx body = XEXP (vec, 1);
10023 int vlen = XVECLEN (body, 0);
10024 int idx;
10026 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10028 for (idx = 0; idx < vlen; idx++)
10030 ASM_OUTPUT_ADDR_VEC_ELT
10031 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10035 /* Output current function's deferred case vectors. */
10037 static void
10038 unicosmk_output_deferred_case_vectors (FILE *file)
10040 struct machine_function *machine = cfun->machine;
10041 rtx t;
10043 if (machine->addr_list == NULL_RTX)
10044 return;
10046 switch_to_section (data_section);
10047 for (t = machine->addr_list; t; t = XEXP (t, 1))
10048 unicosmk_output_addr_vec (file, XEXP (t, 0));
10051 /* Generate the name of the SSIB section for the current function. */
10053 #define SSIB_PREFIX "__SSIB_"
10054 #define SSIB_PREFIX_LEN 7
10056 static const char *
10057 unicosmk_ssib_name (void)
10059 /* This is ok since CAM won't be able to deal with names longer than that
10060 anyway. */
10062 static char name[256];
10064 rtx x;
10065 const char *fnname;
10066 int len;
10068 x = DECL_RTL (cfun->decl);
10069 gcc_assert (GET_CODE (x) == MEM);
10070 x = XEXP (x, 0);
10071 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10072 fnname = XSTR (x, 0);
10074 len = strlen (fnname);
10075 if (len + SSIB_PREFIX_LEN > 255)
10076 len = 255 - SSIB_PREFIX_LEN;
10078 strcpy (name, SSIB_PREFIX);
10079 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10080 name[len + SSIB_PREFIX_LEN] = 0;
10082 return name;
10085 /* Set up the dynamic subprogram information block (DSIB) and update the
10086 frame pointer register ($15) for subroutines which have a frame. If the
10087 subroutine doesn't have a frame, simply increment $15. */
10089 static void
10090 unicosmk_gen_dsib (unsigned long *imaskP)
10092 if (alpha_procedure_type == PT_STACK)
10094 const char *ssib_name;
10095 rtx mem;
10097 /* Allocate 64 bytes for the DSIB. */
10099 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10100 GEN_INT (-64))));
10101 emit_insn (gen_blockage ());
10103 /* Save the return address. */
10105 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10106 set_mem_alias_set (mem, alpha_sr_alias_set);
10107 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10108 (*imaskP) &= ~(1UL << REG_RA);
10110 /* Save the old frame pointer. */
10112 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10113 set_mem_alias_set (mem, alpha_sr_alias_set);
10114 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10115 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10117 emit_insn (gen_blockage ());
10119 /* Store the SSIB pointer. */
10121 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10122 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10123 set_mem_alias_set (mem, alpha_sr_alias_set);
10125 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10126 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10127 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10129 /* Save the CIW index. */
10131 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10132 set_mem_alias_set (mem, alpha_sr_alias_set);
10133 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10135 emit_insn (gen_blockage ());
10137 /* Set the new frame pointer. */
10139 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10140 stack_pointer_rtx, GEN_INT (64))));
10143 else
10145 /* Increment the frame pointer register to indicate that we do not
10146 have a frame. */
10148 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10149 hard_frame_pointer_rtx, const1_rtx)));
10153 /* Output the static subroutine information block for the current
10154 function. */
10156 static void
10157 unicosmk_output_ssib (FILE *file, const char *fnname)
10159 int len;
10160 int i;
10161 rtx x;
10162 rtx ciw;
10163 struct machine_function *machine = cfun->machine;
10165 in_section = NULL;
10166 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10167 unicosmk_ssib_name ());
10169 /* Some required stuff and the function name length. */
10171 len = strlen (fnname);
10172 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10174 /* Saved registers
10175 ??? We don't do that yet. */
10177 fputs ("\t.quad\t0\n", file);
10179 /* Function address. */
10181 fputs ("\t.quad\t", file);
10182 assemble_name (file, fnname);
10183 putc ('\n', file);
10185 fputs ("\t.quad\t0\n", file);
10186 fputs ("\t.quad\t0\n", file);
10188 /* Function name.
10189 ??? We do it the same way Cray CC does it but this could be
10190 simplified. */
10192 for( i = 0; i < len; i++ )
10193 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10194 if( (len % 8) == 0 )
10195 fputs ("\t.quad\t0\n", file);
10196 else
10197 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10199 /* All call information words used in the function. */
10201 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10203 ciw = XEXP (x, 0);
10204 #if HOST_BITS_PER_WIDE_INT == 32
10205 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10206 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10207 #else
10208 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10209 #endif
10213 /* Add a call information word (CIW) to the list of the current function's
10214 CIWs and return its index.
10216 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10219 unicosmk_add_call_info_word (rtx x)
10221 rtx node;
10222 struct machine_function *machine = cfun->machine;
10224 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10225 if (machine->first_ciw == NULL_RTX)
10226 machine->first_ciw = node;
10227 else
10228 XEXP (machine->last_ciw, 1) = node;
10230 machine->last_ciw = node;
10231 ++machine->ciw_count;
10233 return GEN_INT (machine->ciw_count
10234 + strlen (current_function_name ())/8 + 5);
10237 /* The Cray assembler doesn't accept extern declarations for symbols which
10238 are defined in the same file. We have to keep track of all global
10239 symbols which are referenced and/or defined in a source file and output
10240 extern declarations for those which are referenced but not defined at
10241 the end of file. */
10243 /* List of identifiers for which an extern declaration might have to be
10244 emitted. */
10245 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10247 struct unicosmk_extern_list
10249 struct unicosmk_extern_list *next;
10250 const char *name;
10253 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10255 /* Output extern declarations which are required for every asm file. */
10257 static void
10258 unicosmk_output_default_externs (FILE *file)
10260 static const char *const externs[] =
10261 { "__T3E_MISMATCH" };
10263 int i;
10264 int n;
10266 n = ARRAY_SIZE (externs);
10268 for (i = 0; i < n; i++)
10269 fprintf (file, "\t.extern\t%s\n", externs[i]);
10272 /* Output extern declarations for global symbols which are have been
10273 referenced but not defined. */
10275 static void
10276 unicosmk_output_externs (FILE *file)
10278 struct unicosmk_extern_list *p;
10279 const char *real_name;
10280 int len;
10281 tree name_tree;
10283 len = strlen (user_label_prefix);
10284 for (p = unicosmk_extern_head; p != 0; p = p->next)
10286 /* We have to strip the encoding and possibly remove user_label_prefix
10287 from the identifier in order to handle -fleading-underscore and
10288 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10289 real_name = default_strip_name_encoding (p->name);
10290 if (len && p->name[0] == '*'
10291 && !memcmp (real_name, user_label_prefix, len))
10292 real_name += len;
10294 name_tree = get_identifier (real_name);
10295 if (! TREE_ASM_WRITTEN (name_tree))
10297 TREE_ASM_WRITTEN (name_tree) = 1;
10298 fputs ("\t.extern\t", file);
10299 assemble_name (file, p->name);
10300 putc ('\n', file);
10305 /* Record an extern. */
10307 void
10308 unicosmk_add_extern (const char *name)
10310 struct unicosmk_extern_list *p;
10312 p = (struct unicosmk_extern_list *)
10313 xmalloc (sizeof (struct unicosmk_extern_list));
10314 p->next = unicosmk_extern_head;
10315 p->name = name;
10316 unicosmk_extern_head = p;
10319 /* The Cray assembler generates incorrect code if identifiers which
10320 conflict with register names are used as instruction operands. We have
10321 to replace such identifiers with DEX expressions. */
10323 /* Structure to collect identifiers which have been replaced by DEX
10324 expressions. */
10325 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10327 struct unicosmk_dex {
10328 struct unicosmk_dex *next;
10329 const char *name;
10332 /* List of identifiers which have been replaced by DEX expressions. The DEX
10333 number is determined by the position in the list. */
10335 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10337 /* The number of elements in the DEX list. */
10339 static int unicosmk_dex_count = 0;
10341 /* Check if NAME must be replaced by a DEX expression. */
10343 static int
10344 unicosmk_special_name (const char *name)
10346 if (name[0] == '*')
10347 ++name;
10349 if (name[0] == '$')
10350 ++name;
10352 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10353 return 0;
10355 switch (name[1])
10357 case '1': case '2':
10358 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10360 case '3':
10361 return (name[2] == '\0'
10362 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10364 default:
10365 return (ISDIGIT (name[1]) && name[2] == '\0');
10369 /* Return the DEX number if X must be replaced by a DEX expression and 0
10370 otherwise. */
10372 static int
10373 unicosmk_need_dex (rtx x)
10375 struct unicosmk_dex *dex;
10376 const char *name;
10377 int i;
10379 if (GET_CODE (x) != SYMBOL_REF)
10380 return 0;
10382 name = XSTR (x,0);
10383 if (! unicosmk_special_name (name))
10384 return 0;
10386 i = unicosmk_dex_count;
10387 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10389 if (! strcmp (name, dex->name))
10390 return i;
10391 --i;
10394 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10395 dex->name = name;
10396 dex->next = unicosmk_dex_list;
10397 unicosmk_dex_list = dex;
10399 ++unicosmk_dex_count;
10400 return unicosmk_dex_count;
10403 /* Output the DEX definitions for this file. */
10405 static void
10406 unicosmk_output_dex (FILE *file)
10408 struct unicosmk_dex *dex;
10409 int i;
10411 if (unicosmk_dex_list == NULL)
10412 return;
10414 fprintf (file, "\t.dexstart\n");
10416 i = unicosmk_dex_count;
10417 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10419 fprintf (file, "\tDEX (%d) = ", i);
10420 assemble_name (file, dex->name);
10421 putc ('\n', file);
10422 --i;
10425 fprintf (file, "\t.dexend\n");
10428 /* Output text that to appear at the beginning of an assembler file. */
10430 static void
10431 unicosmk_file_start (void)
10433 int i;
10435 fputs ("\t.ident\t", asm_out_file);
10436 unicosmk_output_module_name (asm_out_file);
10437 fputs ("\n\n", asm_out_file);
10439 /* The Unicos/Mk assembler uses different register names. Instead of trying
10440 to support them, we simply use micro definitions. */
10442 /* CAM has different register names: rN for the integer register N and fN
10443 for the floating-point register N. Instead of trying to use these in
10444 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10445 register. */
10447 for (i = 0; i < 32; ++i)
10448 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10450 for (i = 0; i < 32; ++i)
10451 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10453 putc ('\n', asm_out_file);
10455 /* The .align directive fill unused space with zeroes which does not work
10456 in code sections. We define the macro 'gcc@code@align' which uses nops
10457 instead. Note that it assumes that code sections always have the
10458 biggest possible alignment since . refers to the current offset from
10459 the beginning of the section. */
10461 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10462 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10463 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10464 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10465 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10466 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10467 fputs ("\t.endr\n", asm_out_file);
10468 fputs ("\t.endif\n", asm_out_file);
10469 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10471 /* Output extern declarations which should always be visible. */
10472 unicosmk_output_default_externs (asm_out_file);
10474 /* Open a dummy section. We always need to be inside a section for the
10475 section-switching code to work correctly.
10476 ??? This should be a module id or something like that. I still have to
10477 figure out what the rules for those are. */
10478 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10481 /* Output text to appear at the end of an assembler file. This includes all
10482 pending extern declarations and DEX expressions. */
10484 static void
10485 unicosmk_file_end (void)
10487 fputs ("\t.endp\n\n", asm_out_file);
10489 /* Output all pending externs. */
10491 unicosmk_output_externs (asm_out_file);
10493 /* Output dex definitions used for functions whose names conflict with
10494 register names. */
10496 unicosmk_output_dex (asm_out_file);
10498 fputs ("\t.end\t", asm_out_file);
10499 unicosmk_output_module_name (asm_out_file);
10500 putc ('\n', asm_out_file);
10503 #else
10505 static void
10506 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10509 static void
10510 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10513 static void
10514 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10515 const char * fnname ATTRIBUTE_UNUSED)
10519 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10521 return NULL_RTX;
10524 static int
10525 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10527 return 0;
10530 #endif /* TARGET_ABI_UNICOSMK */
10532 static void
10533 alpha_init_libfuncs (void)
10535 if (TARGET_ABI_UNICOSMK)
10537 /* Prevent gcc from generating calls to __divsi3. */
10538 set_optab_libfunc (sdiv_optab, SImode, 0);
10539 set_optab_libfunc (udiv_optab, SImode, 0);
10541 /* Use the functions provided by the system library
10542 for DImode integer division. */
10543 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10544 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10546 else if (TARGET_ABI_OPEN_VMS)
10548 /* Use the VMS runtime library functions for division and
10549 remainder. */
10550 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10551 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10552 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10553 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10554 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10555 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10556 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10557 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10562 /* Initialize the GCC target structure. */
10563 #if TARGET_ABI_OPEN_VMS
10564 # undef TARGET_ATTRIBUTE_TABLE
10565 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10566 # undef TARGET_SECTION_TYPE_FLAGS
10567 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10568 #endif
10570 #undef TARGET_IN_SMALL_DATA_P
10571 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10573 #if TARGET_ABI_UNICOSMK
10574 # undef TARGET_INSERT_ATTRIBUTES
10575 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10576 # undef TARGET_SECTION_TYPE_FLAGS
10577 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10578 # undef TARGET_ASM_UNIQUE_SECTION
10579 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10580 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10581 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10582 # undef TARGET_ASM_GLOBALIZE_LABEL
10583 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10584 # undef TARGET_MUST_PASS_IN_STACK
10585 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10586 #endif
10588 #undef TARGET_ASM_ALIGNED_HI_OP
10589 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10590 #undef TARGET_ASM_ALIGNED_DI_OP
10591 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10593 /* Default unaligned ops are provided for ELF systems. To get unaligned
10594 data for non-ELF systems, we have to turn off auto alignment. */
10595 #ifndef OBJECT_FORMAT_ELF
10596 #undef TARGET_ASM_UNALIGNED_HI_OP
10597 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10598 #undef TARGET_ASM_UNALIGNED_SI_OP
10599 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10600 #undef TARGET_ASM_UNALIGNED_DI_OP
10601 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10602 #endif
10604 #ifdef OBJECT_FORMAT_ELF
10605 #undef TARGET_ASM_SELECT_RTX_SECTION
10606 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10607 #endif
10609 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10610 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10612 #undef TARGET_INIT_LIBFUNCS
10613 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10615 #if TARGET_ABI_UNICOSMK
10616 #undef TARGET_ASM_FILE_START
10617 #define TARGET_ASM_FILE_START unicosmk_file_start
10618 #undef TARGET_ASM_FILE_END
10619 #define TARGET_ASM_FILE_END unicosmk_file_end
10620 #else
10621 #undef TARGET_ASM_FILE_START
10622 #define TARGET_ASM_FILE_START alpha_file_start
10623 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10624 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10625 #endif
10627 #undef TARGET_SCHED_ADJUST_COST
10628 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10629 #undef TARGET_SCHED_ISSUE_RATE
10630 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10631 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10632 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10633 alpha_multipass_dfa_lookahead
10635 #undef TARGET_HAVE_TLS
10636 #define TARGET_HAVE_TLS HAVE_AS_TLS
10638 #undef TARGET_INIT_BUILTINS
10639 #define TARGET_INIT_BUILTINS alpha_init_builtins
10640 #undef TARGET_EXPAND_BUILTIN
10641 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10642 #undef TARGET_FOLD_BUILTIN
10643 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10645 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10646 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10647 #undef TARGET_CANNOT_COPY_INSN_P
10648 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10649 #undef TARGET_CANNOT_FORCE_CONST_MEM
10650 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10652 #if TARGET_ABI_OSF
10653 #undef TARGET_ASM_OUTPUT_MI_THUNK
10654 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10655 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10656 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10657 #undef TARGET_STDARG_OPTIMIZE_HOOK
10658 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10659 #endif
10661 #undef TARGET_RTX_COSTS
10662 #define TARGET_RTX_COSTS alpha_rtx_costs
10663 #undef TARGET_ADDRESS_COST
10664 #define TARGET_ADDRESS_COST hook_int_rtx_0
10666 #undef TARGET_MACHINE_DEPENDENT_REORG
10667 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10669 #undef TARGET_PROMOTE_FUNCTION_ARGS
10670 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10671 #undef TARGET_PROMOTE_FUNCTION_RETURN
10672 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10673 #undef TARGET_PROMOTE_PROTOTYPES
10674 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10675 #undef TARGET_RETURN_IN_MEMORY
10676 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10677 #undef TARGET_PASS_BY_REFERENCE
10678 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10679 #undef TARGET_SETUP_INCOMING_VARARGS
10680 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10681 #undef TARGET_STRICT_ARGUMENT_NAMING
10682 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10683 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10684 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10685 #undef TARGET_SPLIT_COMPLEX_ARG
10686 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10687 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10688 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10689 #undef TARGET_ARG_PARTIAL_BYTES
10690 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10692 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10693 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10694 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10695 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10697 #undef TARGET_BUILD_BUILTIN_VA_LIST
10698 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10700 /* The Alpha architecture does not require sequential consistency. See
10701 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10702 for an example of how it can be violated in practice. */
10703 #undef TARGET_RELAXED_ORDERING
10704 #define TARGET_RELAXED_ORDERING true
10706 #undef TARGET_DEFAULT_TARGET_FLAGS
10707 #define TARGET_DEFAULT_TARGET_FLAGS \
10708 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10709 #undef TARGET_HANDLE_OPTION
10710 #define TARGET_HANDLE_OPTION alpha_handle_option
10712 struct gcc_target targetm = TARGET_INITIALIZER;
10715 #include "gt-alpha.h"